/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package noderesources

import (
	"context"
	"fmt"
	"reflect"
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
	"k8s.io/kubernetes/pkg/features"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
var (
	extendedResourceA     = v1.ResourceName("example.com/aaa")
	extendedResourceB     = v1.ResourceName("example.com/bbb")
	kubernetesIOResourceA = v1.ResourceName("kubernetes.io/something")
	kubernetesIOResourceB = v1.ResourceName("subdomain.kubernetes.io/something")
	hugePageResourceA     = v1helper.HugePageResourceName(resource.MustParse("2Mi"))
)
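// makeResources builds a v1.NodeResources capacity list from the given CPU
// (in millicores), memory, pod count, extended resource A, ephemeral storage,
// and hugepage quantities.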
func makeResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.NodeResources {
	return v1.NodeResources{
		Capacity: v1.ResourceList{
			v1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
			v1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
			v1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
			extendedResourceA:           *resource.NewQuantity(extendedA, resource.DecimalSI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
			hugePageResourceA:           *resource.NewQuantity(hugePageA, resource.BinarySI),
		},
	}
}
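// makeAllocatableResources builds a v1.ResourceList for node Allocatable with
// the same quantities as makeResources.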
func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.ResourceList {
	return v1.ResourceList{
		v1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
		v1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
		v1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
		extendedResourceA:           *resource.NewQuantity(extendedA, resource.DecimalSI),
		v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
		hugePageResourceA:           *resource.NewQuantity(hugePageA, resource.BinarySI),
	}
}
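// newResourcePod returns a pod with one container per given resource usage,
// each container requesting exactly that usage.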
func newResourcePod(usage ...schedulernodeinfo.Resource) *v1.Pod {
	containers := []v1.Container{}
	for _, req := range usage {
		containers = append(containers, v1.Container{
			Resources: v1.ResourceRequirements{Requests: req.ResourceList()},
		})
	}
	return &v1.Pod{
		Spec: v1.PodSpec{
			Containers: containers,
		},
	}
}
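// newResourceInitPod adds init containers with the given resource usage to pod.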
func newResourceInitPod(pod *v1.Pod, usage ...schedulernodeinfo.Resource) *v1.Pod {
	pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers
	return pod
}
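// newResourceOverheadPod sets the pod's Overhead to the given resource list.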
func newResourceOverheadPod(pod *v1.Pod, overhead v1.ResourceList) *v1.Pod {
	pod.Spec.Overhead = overhead
	return pod
}
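// getErrReason returns the Unschedulable status reason used for an insufficient resource.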
func getErrReason(rn v1.ResourceName) string {
	return fmt.Sprintf("Insufficient %v", rn)
}
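// TestEnoughRequests exercises PreFilter, Filter, and Fits against a node with
// room for additional pods, covering init containers, extended resources,
// hugepages, ignored resources, and pod overhead.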
func TestEnoughRequests(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
	enoughPodsTests := []struct {
		pod                       *v1.Pod
		nodeInfo                  *schedulernodeinfo.NodeInfo
		name                      string
		ignoredResources          []byte
		wantInsufficientResources []InsufficientResource
		wantStatus                *framework.Status
	}{
		{
			pod: &v1.Pod{},
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
			name:                      "no resources requested always fits",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
			name:                      "too many resources fails",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU), getErrReason(v1.ResourceMemory)),
			wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 1, 10, 10}, {v1.ResourceMemory, getErrReason(v1.ResourceMemory), 1, 20, 20}},
		},
		{
			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
			name:                      "too many resources fails due to init container cpu",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
			wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}},
		},
		{
			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
			name:                      "too many resources fails due to highest init container cpu",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
			wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}},
		},
		{
			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
			name:                      "too many resources fails due to init container memory",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
			wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}},
		},
		{
			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
			name:                      "too many resources fails due to highest init container memory",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
			wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}},
		},
		{
			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
			name:                      "init container fits because it's the max, not sum, of containers and init containers",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
			name:                      "multiple init containers fit because it's the max, not sum, of containers and init containers",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
			name:                      "both resources fit",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 5})),
			name:                      "one resource memory fits",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
			wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 2, 9, 10}},
		},
		{
			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
			name:                      "one resource cpu fits",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
			wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 2, 19, 20}},
		},
		{
			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
			name:                      "equal edge case",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 4, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
			name:                      "equal edge case for init container",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod:                       newResourcePod(schedulernodeinfo.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
			nodeInfo:                  schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{})),
			name:                      "extended resource fits",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod:                       newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), schedulernodeinfo.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
			nodeInfo:                  schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{})),
			name:                      "extended resource fits for init container",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourcePod(
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
			name:                      "extended resource capacity enforced",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
			wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}},
		},
		{
			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
			name:                      "extended resource capacity enforced for init container",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
			wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}},
		},
		{
			pod: newResourcePod(
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
			name:                      "extended resource allocatable enforced",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
			wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}},
		},
		{
			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
			name:                      "extended resource allocatable enforced for init container",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
			wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}},
		},
		{
			pod: newResourcePod(
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
			name:                      "extended resource allocatable enforced for multiple containers",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
			wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}},
		},
		{
			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
			name:                      "extended resource allocatable admits multiple init containers",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}},
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
			name:                      "extended resource allocatable enforced for multiple init containers",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
			wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}},
		},
		{
			pod: newResourcePod(
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
			name:                      "extended resource allocatable enforced for unknown resource",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
			wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}},
		},
		{
			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
			name:                      "extended resource allocatable enforced for unknown resource for init container",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
			wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}},
		},
		{
			pod: newResourcePod(
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
			name:                      "kubernetes.io resource capacity enforced",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceA)),
			wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceA, getErrReason(kubernetesIOResourceA), 10, 0, 0}},
		},
		{
			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
			name:                      "kubernetes.io resource capacity enforced for init container",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceB)),
			wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceB, getErrReason(kubernetesIOResourceB), 10, 0, 0}},
		},
		{
			pod: newResourcePod(
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
			name:                      "hugepages resource capacity enforced",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
			wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}},
		},
		{
			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
			name:                      "hugepages resource capacity enforced for init container",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
			wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}},
		},
		{
			pod: newResourcePod(
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}},
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
			name:                      "hugepages resource allocatable enforced for multiple containers",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
			wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 6, 2, 5}},
		},
		{
			pod: newResourcePod(
				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
			nodeInfo:                  schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
			ignoredResources:          []byte(`{"IgnoredResources" : ["example.com/bbb"]}`),
			name:                      "skip checking ignored extended resource",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourceOverheadPod(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
				v1.ResourceList{v1.ResourceCPU: resource.MustParse("3m"), v1.ResourceMemory: resource.MustParse("13")},
			),
			nodeInfo:                  schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
			name:                      "resources + pod overhead fits",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourceOverheadPod(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
				v1.ResourceList{v1.ResourceCPU: resource.MustParse("1m"), v1.ResourceMemory: resource.MustParse("15")},
			),
			nodeInfo:                  schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
			name:                      "requests + overhead does not fit for memory",
			wantStatus:                framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
			wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 16, 5, 20}},
		},
	}

	for _, test := range enoughPodsTests {
		t.Run(test.name, func(t *testing.T) {
			node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
			test.nodeInfo.SetNode(&node)

			args := &runtime.Unknown{Raw: test.ignoredResources}
			p, _ := NewFit(args, nil)
			cycleState := framework.NewCycleState()
			preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), cycleState, test.pod)
			if !preFilterStatus.IsSuccess() {
				t.Errorf("prefilter failed with status: %v", preFilterStatus)
			}

			gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, test.pod, test.nodeInfo)
			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
			}

			gotInsufficientResources := Fits(test.pod, test.nodeInfo, p.(*Fit).ignoredResources)
			if !reflect.DeepEqual(gotInsufficientResources, test.wantInsufficientResources) {
				t.Errorf("insufficient resources do not match: %v, want: %v", gotInsufficientResources, test.wantInsufficientResources)
			}
		})
	}
}
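// TestPreFilterDisabled verifies that Filter returns an error status when the
// PreFilter state is missing from the cycle state.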
func TestPreFilterDisabled(t *testing.T) {
	pod := &v1.Pod{}
	nodeInfo := schedulernodeinfo.NewNodeInfo()
	node := v1.Node{}
	nodeInfo.SetNode(&node)
	p, _ := NewFit(nil, nil)
	cycleState := framework.NewCycleState()
	gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, pod, nodeInfo)
	wantStatus := framework.NewStatus(framework.Error, `error reading "PreFilterNodeResourcesFit" from cycleState: not found`)
	if !reflect.DeepEqual(gotStatus, wantStatus) {
		t.Errorf("status does not match: %v, want: %v", gotStatus, wantStatus)
	}
}
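// TestNotEnoughRequests verifies that Filter rejects pods on a node whose pod
// allocatable is already exhausted, regardless of the resources requested.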
func TestNotEnoughRequests(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
	notEnoughPodsTests := []struct {
		pod        *v1.Pod
		nodeInfo   *schedulernodeinfo.NodeInfo
		fits       bool
		name       string
		wantStatus *framework.Status
	}{
		{
			pod:        &v1.Pod{},
			nodeInfo:   schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
			name:       "even without specified resources predicate fails when there's no space for additional pod",
			wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
		},
		{
			pod:        newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
			nodeInfo:   schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
			name:       "even if both resources fit predicate fails when there's no space for additional pod",
			wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
		},
		{
			pod:        newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
			nodeInfo:   schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
			name:       "even for equal edge case predicate fails when there's no space for additional pod",
			wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
		},
		{
			pod:        newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
			nodeInfo:   schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
			name:       "even for equal edge case predicate fails when there's no space for additional pod due to init container",
			wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
		},
	}
	for _, test := range notEnoughPodsTests {
		t.Run(test.name, func(t *testing.T) {
			node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1, 0, 0, 0)}}
			test.nodeInfo.SetNode(&node)
			p, _ := NewFit(nil, nil)
			cycleState := framework.NewCycleState()
			preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), cycleState, test.pod)
			if !preFilterStatus.IsSuccess() {
				t.Errorf("prefilter failed with status: %v", preFilterStatus)
			}
			gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, test.pod, test.nodeInfo)
			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
			}
		})
	}
}
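// TestStorageRequests verifies Filter behavior for ephemeral-storage requests
// alongside CPU and memory.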
func TestStorageRequests(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
	storagePodsTests := []struct {
		pod        *v1.Pod
		nodeInfo   *schedulernodeinfo.NodeInfo
		name       string
		wantStatus *framework.Status
	}{
		{
			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 10})),
			name:       "due to container scratch disk",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
		},
		{
			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 10})),
			name: "pod fit",
		},
		{
			pod: newResourcePod(schedulernodeinfo.Resource{EphemeralStorage: 25}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})),
			name:       "storage ephemeral local storage request exceeds allocatable",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceEphemeralStorage)),
		},
		{
			pod: newResourcePod(schedulernodeinfo.Resource{EphemeralStorage: 10}),
			nodeInfo: schedulernodeinfo.NewNodeInfo(
				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})),
			name: "pod fits",
		},
	}
	for _, test := range storagePodsTests {
		t.Run(test.name, func(t *testing.T) {
			node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
			test.nodeInfo.SetNode(&node)
			p, _ := NewFit(nil, nil)
			cycleState := framework.NewCycleState()
			preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), cycleState, test.pod)
			if !preFilterStatus.IsSuccess() {
				t.Errorf("prefilter failed with status: %v", preFilterStatus)
			}
			gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, test.pod, test.nodeInfo)
			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
			}
		})
	}
}
|