- /*
- Copyright 2016 The Kubernetes Authors.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- */
- package kubelet
- import (
- "encoding/json"
- "fmt"
- "net"
- goruntime "runtime"
- "sort"
- "strconv"
- "sync/atomic"
- "testing"
- "time"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- cadvisorapi "github.com/google/cadvisor/info/v1"
- v1 "k8s.io/api/core/v1"
- apiequality "k8s.io/apimachinery/pkg/api/equality"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/api/resource"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/util/diff"
- "k8s.io/apimachinery/pkg/util/rand"
- "k8s.io/apimachinery/pkg/util/strategicpatch"
- "k8s.io/apimachinery/pkg/util/uuid"
- "k8s.io/apimachinery/pkg/util/wait"
- utilfeature "k8s.io/apiserver/pkg/util/feature"
- clientset "k8s.io/client-go/kubernetes"
- "k8s.io/client-go/kubernetes/fake"
- "k8s.io/client-go/rest"
- core "k8s.io/client-go/testing"
- "k8s.io/component-base/featuregate"
- featuregatetesting "k8s.io/component-base/featuregate/testing"
- "k8s.io/kubernetes/pkg/features"
- kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
- cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
- "k8s.io/kubernetes/pkg/kubelet/cm"
- kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
- "k8s.io/kubernetes/pkg/kubelet/nodestatus"
- "k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
- kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
- schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
- taintutil "k8s.io/kubernetes/pkg/util/taints"
- "k8s.io/kubernetes/pkg/version"
- "k8s.io/kubernetes/pkg/volume/util"
- )
- const (
- maxImageTagsForTest = 20
- )
- // generateTestingImageLists generates a random image list of the given length and the corresponding expectedImageList.
- func generateTestingImageLists(count int, maxImages int) ([]kubecontainer.Image, []v1.ContainerImage) {
- // imageList is a randomly generated image list
- var imageList []kubecontainer.Image
- for ; count > 0; count-- {
- imageItem := kubecontainer.Image{
- ID: string(uuid.NewUUID()),
- RepoTags: generateImageTags(),
- Size: rand.Int63nRange(minImgSize, maxImgSize+1),
- }
- imageList = append(imageList, imageItem)
- }
- expectedImageList := makeExpectedImageList(imageList, maxImages)
- return imageList, expectedImageList
- }
- func makeExpectedImageList(imageList []kubecontainer.Image, maxImages int) []v1.ContainerImage {
- // expectedImageList is derived from imageList, sorted by size and capped at maxImages
- // 1. sort the imageList by size
- sort.Sort(sliceutils.ByImageSize(imageList))
- // 2. convert sorted imageList to v1.ContainerImage list
- var expectedImageList []v1.ContainerImage
- for _, kubeImage := range imageList {
- apiImage := v1.ContainerImage{
- Names: kubeImage.RepoTags[0:nodestatus.MaxNamesPerImageInNodeStatus],
- SizeBytes: kubeImage.Size,
- }
- expectedImageList = append(expectedImageList, apiImage)
- }
- // 3. only return the top maxImages images from expectedImageList
- if maxImages == -1 { // -1 means no limit
- return expectedImageList
- }
- return expectedImageList[0:maxImages]
- }
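- // generateImageTags returns a randomly sized list of fake image tags, always
- // longer than MaxNamesPerImageInNodeStatus so that truncation can be observed.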
- func generateImageTags() []string {
- var tagList []string
- // Generate > MaxNamesPerImageInNodeStatus tags so that the test can verify
- // that the kubelet reports at most MaxNamesPerImageInNodeStatus tags.
- count := rand.IntnRange(nodestatus.MaxNamesPerImageInNodeStatus+1, maxImageTagsForTest+1)
- for ; count > 0; count-- {
- tagList = append(tagList, "k8s.gcr.io:v"+strconv.Itoa(count))
- }
- return tagList
- }
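- // applyNodeStatusPatch applies the given strategic merge patch to a copy of
- // originalNode and returns the patched node.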
- func applyNodeStatusPatch(originalNode *v1.Node, patch []byte) (*v1.Node, error) {
- original, err := json.Marshal(originalNode)
- if err != nil {
- return nil, fmt.Errorf("failed to marshal original node %#v: %v", originalNode, err)
- }
- updated, err := strategicpatch.StrategicMergePatch(original, patch, v1.Node{})
- if err != nil {
- return nil, fmt.Errorf("failed to apply strategic merge patch %q on node %#v: %v",
- patch, originalNode, err)
- }
- updatedNode := &v1.Node{}
- if err := json.Unmarshal(updated, updatedNode); err != nil {
- return nil, fmt.Errorf("failed to unmarshal updated node %q: %v", updated, err)
- }
- return updatedNode, nil
- }
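- // notImplemented is a fake.Clientset reaction that fails any action it receives.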
- func notImplemented(action core.Action) (bool, runtime.Object, error) {
- return true, nil, fmt.Errorf("no reaction implemented for %s", action)
- }
- func addNotImplementedReaction(kubeClient *fake.Clientset) {
- if kubeClient == nil {
- return
- }
- kubeClient.AddReactor("*", "*", notImplemented)
- }
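- // localCM wraps a ContainerManager so tests can override the reported
- // allocatable reservation and capacity.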
- type localCM struct {
- cm.ContainerManager
- allocatableReservation v1.ResourceList
- capacity v1.ResourceList
- }
- func (lcm *localCM) GetNodeAllocatableReservation() v1.ResourceList {
- return lcm.allocatableReservation
- }
- func (lcm *localCM) GetCapacity() v1.ResourceList {
- return lcm.capacity
- }
- // sortableNodeAddress is a type for sorting []v1.NodeAddress
- type sortableNodeAddress []v1.NodeAddress
- func (s sortableNodeAddress) Len() int { return len(s) }
- func (s sortableNodeAddress) Less(i, j int) bool {
- return (string(s[i].Type) + s[i].Address) < (string(s[j].Type) + s[j].Address)
- }
- func (s sortableNodeAddress) Swap(i, j int) { s[j], s[i] = s[i], s[j] }
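- // TestUpdateNewNodeStatus verifies that the first status update for a freshly
- // registered node is sent as a patch on the status subresource and carries the
- // expected conditions, capacity, allocatable resources, and (possibly
- // truncated) image list.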
- func TestUpdateNewNodeStatus(t *testing.T) {
- cases := []struct {
- desc string
- nodeStatusMaxImages int32
- }{
- {
- desc: "5 image limit",
- nodeStatusMaxImages: 5,
- },
- {
- desc: "no image limit",
- nodeStatusMaxImages: -1,
- },
- }
- for _, tc := range cases {
- t.Run(tc.desc, func(t *testing.T) {
- // generate one more image in inputImageList than the Kubelet is configured to report,
- // or 5 images if the limit is -1 (unlimited)
- numTestImages := int(tc.nodeStatusMaxImages) + 1
- if tc.nodeStatusMaxImages == -1 {
- numTestImages = 5
- }
- inputImageList, expectedImageList := generateTestingImageLists(numTestImages, int(tc.nodeStatusMaxImages))
- testKubelet := newTestKubeletWithImageList(
- t, inputImageList, false /* controllerAttachDetachEnabled */, true /* initFakeVolumePlugin */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- kubelet.nodeStatusMaxImages = tc.nodeStatusMaxImages
- kubelet.kubeClient = nil // ensure only the heartbeat client is used
- kubelet.containerManager = &localCM{
- ContainerManager: cm.NewStubContainerManager(),
- allocatableReservation: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
- },
- capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- },
- }
- // Since this test retroactively overrides the stub container manager,
- // we have to regenerate default status setters.
- kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
- kubeClient := testKubelet.fakeKubeClient
- existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
- kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
- machineInfo := &cadvisorapi.MachineInfo{
- MachineID: "123",
- SystemUUID: "abc",
- BootID: "1b3",
- NumCores: 2,
- MemoryCapacity: 10E9, // 10G
- }
- kubelet.machineInfo = machineInfo
- expectedNode := &v1.Node{
- ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
- Spec: v1.NodeSpec{},
- Status: v1.NodeStatus{
- Conditions: []v1.NodeCondition{
- {
- Type: v1.NodeMemoryPressure,
- Status: v1.ConditionFalse,
- Reason: "KubeletHasSufficientMemory",
- Message: "kubelet has sufficient memory available",
- LastHeartbeatTime: metav1.Time{},
- LastTransitionTime: metav1.Time{},
- },
- {
- Type: v1.NodeDiskPressure,
- Status: v1.ConditionFalse,
- Reason: "KubeletHasNoDiskPressure",
- Message: "kubelet has no disk pressure",
- LastHeartbeatTime: metav1.Time{},
- LastTransitionTime: metav1.Time{},
- },
- {
- Type: v1.NodePIDPressure,
- Status: v1.ConditionFalse,
- Reason: "KubeletHasSufficientPID",
- Message: "kubelet has sufficient PID available",
- LastHeartbeatTime: metav1.Time{},
- LastTransitionTime: metav1.Time{},
- },
- {
- Type: v1.NodeReady,
- Status: v1.ConditionTrue,
- Reason: "KubeletReady",
- Message: "kubelet is posting ready status",
- LastHeartbeatTime: metav1.Time{},
- LastTransitionTime: metav1.Time{},
- },
- },
- NodeInfo: v1.NodeSystemInfo{
- MachineID: "123",
- SystemUUID: "abc",
- BootID: "1b3",
- KernelVersion: cadvisortest.FakeKernelVersion,
- OSImage: cadvisortest.FakeContainerOsVersion,
- OperatingSystem: goruntime.GOOS,
- Architecture: goruntime.GOARCH,
- ContainerRuntimeVersion: "test://1.5.0",
- KubeletVersion: version.Get().String(),
- KubeProxyVersion: version.Get().String(),
- },
- Capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- },
- Allocatable: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
- v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
- },
- Addresses: []v1.NodeAddress{
- {Type: v1.NodeInternalIP, Address: "127.0.0.1"},
- {Type: v1.NodeHostName, Address: testKubeletHostname},
- },
- Images: expectedImageList,
- },
- }
- kubelet.updateRuntimeUp()
- assert.NoError(t, kubelet.updateNodeStatus())
- actions := kubeClient.Actions()
- require.Len(t, actions, 2)
- require.True(t, actions[1].Matches("patch", "nodes"))
- require.Equal(t, "status", actions[1].GetSubresource())
- updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
- assert.NoError(t, err)
- for i, cond := range updatedNode.Status.Conditions {
- assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
- assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
- updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
- updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
- }
- // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
- assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
- "NodeReady should be the last condition")
- assert.Len(t, updatedNode.Status.Images, len(expectedImageList))
- assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
- })
- }
- }
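- // TestUpdateExistingNodeStatus verifies that updating an already populated
- // node refreshes LastHeartbeatTime on every condition while leaving
- // LastTransitionTime untouched.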
- func TestUpdateExistingNodeStatus(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
- kubelet.kubeClient = nil // ensure only the heartbeat client is used
- kubelet.containerManager = &localCM{
- ContainerManager: cm.NewStubContainerManager(),
- allocatableReservation: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
- },
- capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- },
- }
- // Since this test retroactively overrides the stub container manager,
- // we have to regenerate default status setters.
- kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
- kubeClient := testKubelet.fakeKubeClient
- existingNode := v1.Node{
- ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
- Spec: v1.NodeSpec{},
- Status: v1.NodeStatus{
- Conditions: []v1.NodeCondition{
- {
- Type: v1.NodeMemoryPressure,
- Status: v1.ConditionFalse,
- Reason: "KubeletHasSufficientMemory",
- Message: "kubelet has sufficient memory available",
- LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
- LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
- },
- {
- Type: v1.NodeDiskPressure,
- Status: v1.ConditionFalse,
- Reason: "KubeletHasSufficientDisk",
- Message: "kubelet has sufficient disk space available",
- LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
- LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
- },
- {
- Type: v1.NodePIDPressure,
- Status: v1.ConditionFalse,
- Reason: "KubeletHasSufficientPID",
- Message: "kubelet has sufficient PID available",
- LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
- LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
- },
- {
- Type: v1.NodeReady,
- Status: v1.ConditionTrue,
- Reason: "KubeletReady",
- Message: "kubelet is posting ready status",
- LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
- LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
- },
- },
- Capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(3000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
- v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
- },
- Allocatable: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2800, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
- v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
- },
- },
- }
- kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
- machineInfo := &cadvisorapi.MachineInfo{
- MachineID: "123",
- SystemUUID: "abc",
- BootID: "1b3",
- NumCores: 2,
- MemoryCapacity: 20E9,
- }
- kubelet.machineInfo = machineInfo
- expectedNode := &v1.Node{
- ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
- Spec: v1.NodeSpec{},
- Status: v1.NodeStatus{
- Conditions: []v1.NodeCondition{
- {
- Type: v1.NodeMemoryPressure,
- Status: v1.ConditionFalse,
- Reason: "KubeletHasSufficientMemory",
- Message: "kubelet has sufficient memory available",
- LastHeartbeatTime: metav1.Time{},
- LastTransitionTime: metav1.Time{},
- },
- {
- Type: v1.NodeDiskPressure,
- Status: v1.ConditionFalse,
- Reason: "KubeletHasSufficientDisk",
- Message: "kubelet has sufficient disk space available",
- LastHeartbeatTime: metav1.Time{},
- LastTransitionTime: metav1.Time{},
- },
- {
- Type: v1.NodePIDPressure,
- Status: v1.ConditionFalse,
- Reason: "KubeletHasSufficientPID",
- Message: "kubelet has sufficient PID available",
- LastHeartbeatTime: metav1.Time{},
- LastTransitionTime: metav1.Time{},
- },
- {
- Type: v1.NodeReady,
- Status: v1.ConditionTrue,
- Reason: "KubeletReady",
- Message: "kubelet is posting ready status",
- LastHeartbeatTime: metav1.Time{}, // placeholder
- LastTransitionTime: metav1.Time{}, // placeholder
- },
- },
- NodeInfo: v1.NodeSystemInfo{
- MachineID: "123",
- SystemUUID: "abc",
- BootID: "1b3",
- KernelVersion: cadvisortest.FakeKernelVersion,
- OSImage: cadvisortest.FakeContainerOsVersion,
- OperatingSystem: goruntime.GOOS,
- Architecture: goruntime.GOARCH,
- ContainerRuntimeVersion: "test://1.5.0",
- KubeletVersion: version.Get().String(),
- KubeProxyVersion: version.Get().String(),
- },
- Capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
- v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- },
- Allocatable: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
- v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- },
- Addresses: []v1.NodeAddress{
- {Type: v1.NodeInternalIP, Address: "127.0.0.1"},
- {Type: v1.NodeHostName, Address: testKubeletHostname},
- },
- // images will be sorted from max to min in node status.
- Images: []v1.ContainerImage{
- {
- Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
- SizeBytes: 123,
- },
- {
- Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
- SizeBytes: 456,
- },
- },
- },
- }
- kubelet.updateRuntimeUp()
- assert.NoError(t, kubelet.updateNodeStatus())
- actions := kubeClient.Actions()
- assert.Len(t, actions, 2)
- assert.IsType(t, core.PatchActionImpl{}, actions[1])
- patchAction := actions[1].(core.PatchActionImpl)
- updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch())
- require.NoError(t, err)
- for i, cond := range updatedNode.Status.Conditions {
- old := metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time
- // Expect LastHeartbeatTime to be updated to now, while LastTransitionTime remains the same.
- assert.NotEqual(t, old, cond.LastHeartbeatTime.Rfc3339Copy().UTC(), "LastHeartbeatTime for condition %v", cond.Type)
- assert.EqualValues(t, old, cond.LastTransitionTime.Rfc3339Copy().UTC(), "LastTransitionTime for condition %v", cond.Type)
- updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
- updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
- }
- // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
- assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
- "NodeReady should be the last condition")
- assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
- }
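- // TestUpdateExistingNodeStatusTimeout points the heartbeat client at a
- // listener that never responds, and verifies that updateNodeStatus retries,
- // fails, and invokes onRepeatedHeartbeatFailure instead of hanging.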
- func TestUpdateExistingNodeStatusTimeout(t *testing.T) {
- attempts := int64(0)
- failureCallbacks := int64(0)
- // set up a listener that hangs connections
- ln, err := net.Listen("tcp", "127.0.0.1:0")
- assert.NoError(t, err)
- defer ln.Close()
- go func() {
- // accept connections and just let them hang
- for {
- _, err := ln.Accept()
- if err != nil {
- t.Log(err)
- return
- }
- t.Log("accepted connection")
- atomic.AddInt64(&attempts, 1)
- }
- }()
- config := &rest.Config{
- Host: "http://" + ln.Addr().String(),
- QPS: -1,
- Timeout: time.Second,
- }
- assert.NoError(t, err)
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- kubelet.kubeClient = nil // ensure only the heartbeat client is used
- kubelet.heartbeatClient, err = clientset.NewForConfig(config)
- kubelet.onRepeatedHeartbeatFailure = func() {
- atomic.AddInt64(&failureCallbacks, 1)
- }
- kubelet.containerManager = &localCM{
- ContainerManager: cm.NewStubContainerManager(),
- allocatableReservation: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
- },
- capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
- },
- }
- // should return an error, but not hang
- assert.Error(t, kubelet.updateNodeStatus())
- // should have attempted multiple times
- if actualAttempts := atomic.LoadInt64(&attempts); actualAttempts < nodeStatusUpdateRetry {
- t.Errorf("Expected at least %d attempts, got %d", nodeStatusUpdateRetry, actualAttempts)
- }
- // should have gotten multiple failure callbacks
- if actualFailureCallbacks := atomic.LoadInt64(&failureCallbacks); actualFailureCallbacks < (nodeStatusUpdateRetry - 1) {
- t.Errorf("Expected %d failure callbacks, got %d", (nodeStatusUpdateRetry - 1), actualFailureCallbacks)
- }
- }
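- // TestUpdateNodeStatusWithRuntimeStateError cycles the fake runtime through
- // stale, failed, nil, empty, and not-ready states and verifies that the
- // NodeReady condition flips between KubeletReady and KubeletNotReady accordingly.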
- func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
- kubelet.kubeClient = nil // ensure only the heartbeat client is used
- kubelet.containerManager = &localCM{
- ContainerManager: cm.NewStubContainerManager(),
- allocatableReservation: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(10E9, resource.BinarySI),
- },
- capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI),
- },
- }
- // Since this test retroactively overrides the stub container manager,
- // we have to regenerate default status setters.
- kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
- clock := testKubelet.fakeClock
- kubeClient := testKubelet.fakeKubeClient
- existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
- kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
- machineInfo := &cadvisorapi.MachineInfo{
- MachineID: "123",
- SystemUUID: "abc",
- BootID: "1b3",
- NumCores: 2,
- MemoryCapacity: 10E9,
- }
- kubelet.machineInfo = machineInfo
- expectedNode := &v1.Node{
- ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
- Spec: v1.NodeSpec{},
- Status: v1.NodeStatus{
- Conditions: []v1.NodeCondition{
- {
- Type: v1.NodeMemoryPressure,
- Status: v1.ConditionFalse,
- Reason: "KubeletHasSufficientMemory",
- Message: "kubelet has sufficient memory available",
- LastHeartbeatTime: metav1.Time{},
- LastTransitionTime: metav1.Time{},
- },
- {
- Type: v1.NodeDiskPressure,
- Status: v1.ConditionFalse,
- Reason: "KubeletHasNoDiskPressure",
- Message: "kubelet has no disk pressure",
- LastHeartbeatTime: metav1.Time{},
- LastTransitionTime: metav1.Time{},
- },
- {
- Type: v1.NodePIDPressure,
- Status: v1.ConditionFalse,
- Reason: "KubeletHasSufficientPID",
- Message: "kubelet has sufficient PID available",
- LastHeartbeatTime: metav1.Time{},
- LastTransitionTime: metav1.Time{},
- },
- {}, // placeholder
- },
- NodeInfo: v1.NodeSystemInfo{
- MachineID: "123",
- SystemUUID: "abc",
- BootID: "1b3",
- KernelVersion: cadvisortest.FakeKernelVersion,
- OSImage: cadvisortest.FakeContainerOsVersion,
- OperatingSystem: goruntime.GOOS,
- Architecture: goruntime.GOARCH,
- ContainerRuntimeVersion: "test://1.5.0",
- KubeletVersion: version.Get().String(),
- KubeProxyVersion: version.Get().String(),
- },
- Capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI),
- },
- Allocatable: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
- v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(10E9, resource.BinarySI),
- },
- Addresses: []v1.NodeAddress{
- {Type: v1.NodeInternalIP, Address: "127.0.0.1"},
- {Type: v1.NodeHostName, Address: testKubeletHostname},
- },
- Images: []v1.ContainerImage{
- {
- Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
- SizeBytes: 123,
- },
- {
- Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
- SizeBytes: 456,
- },
- },
- },
- }
- checkNodeStatus := func(status v1.ConditionStatus, reason string) {
- kubeClient.ClearActions()
- assert.NoError(t, kubelet.updateNodeStatus())
- actions := kubeClient.Actions()
- require.Len(t, actions, 2)
- require.True(t, actions[1].Matches("patch", "nodes"))
- require.Equal(t, "status", actions[1].GetSubresource())
- updatedNode, err := kubeClient.CoreV1().Nodes().Get(testKubeletHostname, metav1.GetOptions{})
- require.NoError(t, err, "can't apply node status patch")
- for i, cond := range updatedNode.Status.Conditions {
- assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
- assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
- updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
- updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
- }
- // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
- lastIndex := len(updatedNode.Status.Conditions) - 1
- assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[lastIndex].Type, "NodeReady should be the last condition")
- assert.NotEmpty(t, updatedNode.Status.Conditions[lastIndex].Message)
- updatedNode.Status.Conditions[lastIndex].Message = ""
- expectedNode.Status.Conditions[lastIndex] = v1.NodeCondition{
- Type: v1.NodeReady,
- Status: status,
- Reason: reason,
- LastHeartbeatTime: metav1.Time{},
- LastTransitionTime: metav1.Time{},
- }
- assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
- }
- // TODO(random-liu): Refactor the unit test to be table driven test.
- // Should report kubelet not ready if the runtime check is out of date
- clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
- kubelet.updateRuntimeUp()
- checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
- // Should report kubelet ready if the runtime check is updated
- clock.SetTime(time.Now())
- kubelet.updateRuntimeUp()
- checkNodeStatus(v1.ConditionTrue, "KubeletReady")
- // Should report kubelet not ready if the runtime check is out of date
- clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
- kubelet.updateRuntimeUp()
- checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
- // Should report kubelet not ready if the runtime check failed
- fakeRuntime := testKubelet.fakeRuntime
- // Inject error into fake runtime status check, node should be NotReady
- fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error")
- clock.SetTime(time.Now())
- kubelet.updateRuntimeUp()
- checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
- fakeRuntime.StatusErr = nil
- // Should report node not ready if runtime status is nil.
- fakeRuntime.RuntimeStatus = nil
- kubelet.updateRuntimeUp()
- checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
- // Should report node not ready if runtime status is empty.
- fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{}
- kubelet.updateRuntimeUp()
- checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
- // Should report node not ready if RuntimeReady is false.
- fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
- Conditions: []kubecontainer.RuntimeCondition{
- {Type: kubecontainer.RuntimeReady, Status: false},
- {Type: kubecontainer.NetworkReady, Status: true},
- },
- }
- kubelet.updateRuntimeUp()
- checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
- // Should report node ready if RuntimeReady is true.
- fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
- Conditions: []kubecontainer.RuntimeCondition{
- {Type: kubecontainer.RuntimeReady, Status: true},
- {Type: kubecontainer.NetworkReady, Status: true},
- },
- }
- kubelet.updateRuntimeUp()
- checkNodeStatus(v1.ConditionTrue, "KubeletReady")
- // Should report node not ready if NetworkReady is false.
- fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
- Conditions: []kubecontainer.RuntimeCondition{
- {Type: kubecontainer.RuntimeReady, Status: true},
- {Type: kubecontainer.NetworkReady, Status: false},
- },
- }
- kubelet.updateRuntimeUp()
- checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
- }
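- // TestUpdateNodeStatusError verifies that updateNodeStatus returns an error
- // and retries nodeStatusUpdateRetry times when no matching node object exists.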
- func TestUpdateNodeStatusError(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- kubelet.kubeClient = nil // ensure only the heartbeat client is used
- // No matching node for the kubelet
- testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain
- assert.Error(t, kubelet.updateNodeStatus())
- assert.Len(t, testKubelet.fakeKubeClient.Actions(), nodeStatusUpdateRetry)
- }
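- // TestUpdateNodeStatusWithLease verifies the heartbeat behavior with the
- // NodeLease feature enabled: the status is patched when it changes or when
- // nodeStatusReportFrequency has elapsed, and only fetched otherwise.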
- func TestUpdateNodeStatusWithLease(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeLease, true)()
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- clock := testKubelet.fakeClock
- kubelet := testKubelet.kubelet
- kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
- kubelet.kubeClient = nil // ensure only the heartbeat client is used
- kubelet.containerManager = &localCM{
- ContainerManager: cm.NewStubContainerManager(),
- allocatableReservation: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
- },
- capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- },
- }
- // Since this test retroactively overrides the stub container manager,
- // we have to regenerate default status setters.
- kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
- kubelet.nodeStatusReportFrequency = time.Minute
- kubeClient := testKubelet.fakeKubeClient
- existingNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
- kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*existingNode}}).ReactionChain
- machineInfo := &cadvisorapi.MachineInfo{
- MachineID: "123",
- SystemUUID: "abc",
- BootID: "1b3",
- NumCores: 2,
- MemoryCapacity: 20E9,
- }
- kubelet.machineInfo = machineInfo
- now := metav1.NewTime(clock.Now()).Rfc3339Copy()
- expectedNode := &v1.Node{
- ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
- Spec: v1.NodeSpec{},
- Status: v1.NodeStatus{
- Conditions: []v1.NodeCondition{
- {
- Type: v1.NodeMemoryPressure,
- Status: v1.ConditionFalse,
- Reason: "KubeletHasSufficientMemory",
- Message: "kubelet has sufficient memory available",
- LastHeartbeatTime: now,
- LastTransitionTime: now,
- },
- {
- Type: v1.NodeDiskPressure,
- Status: v1.ConditionFalse,
- Reason: "KubeletHasNoDiskPressure",
- Message: "kubelet has no disk pressure",
- LastHeartbeatTime: now,
- LastTransitionTime: now,
- },
- {
- Type: v1.NodePIDPressure,
- Status: v1.ConditionFalse,
- Reason: "KubeletHasSufficientPID",
- Message: "kubelet has sufficient PID available",
- LastHeartbeatTime: now,
- LastTransitionTime: now,
- },
- {
- Type: v1.NodeReady,
- Status: v1.ConditionTrue,
- Reason: "KubeletReady",
- Message: "kubelet is posting ready status",
- LastHeartbeatTime: now,
- LastTransitionTime: now,
- },
- },
- NodeInfo: v1.NodeSystemInfo{
- MachineID: "123",
- SystemUUID: "abc",
- BootID: "1b3",
- KernelVersion: cadvisortest.FakeKernelVersion,
- OSImage: cadvisortest.FakeContainerOsVersion,
- OperatingSystem: goruntime.GOOS,
- Architecture: goruntime.GOARCH,
- ContainerRuntimeVersion: "test://1.5.0",
- KubeletVersion: version.Get().String(),
- KubeProxyVersion: version.Get().String(),
- },
- Capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
- v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- },
- Allocatable: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
- v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- },
- Addresses: []v1.NodeAddress{
- {Type: v1.NodeInternalIP, Address: "127.0.0.1"},
- {Type: v1.NodeHostName, Address: testKubeletHostname},
- },
- // images will be sorted from max to min in node status.
- Images: []v1.ContainerImage{
- {
- Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
- SizeBytes: 123,
- },
- {
- Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
- SizeBytes: 456,
- },
- },
- },
- }
- // Update node status when node status is created.
- // Report node status.
- kubelet.updateRuntimeUp()
- assert.NoError(t, kubelet.updateNodeStatus())
- actions := kubeClient.Actions()
- assert.Len(t, actions, 2)
- assert.IsType(t, core.GetActionImpl{}, actions[0])
- assert.IsType(t, core.PatchActionImpl{}, actions[1])
- patchAction := actions[1].(core.PatchActionImpl)
- updatedNode, err := applyNodeStatusPatch(existingNode, patchAction.GetPatch())
- require.NoError(t, err)
- for _, cond := range updatedNode.Status.Conditions {
- cond.LastHeartbeatTime = cond.LastHeartbeatTime.Rfc3339Copy()
- cond.LastTransitionTime = cond.LastTransitionTime.Rfc3339Copy()
- }
- assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
- // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
- assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
- "NodeReady should be the last condition")
- // Update node status again when nothing is changed (except heartbeat time).
- // Report node status if it has exceeded the duration of nodeStatusReportFrequency.
- clock.Step(time.Minute)
- assert.NoError(t, kubelet.updateNodeStatus())
- // 2 more actions (there were 2 actions before).
- actions = kubeClient.Actions()
- assert.Len(t, actions, 4)
- assert.IsType(t, core.GetActionImpl{}, actions[2])
- assert.IsType(t, core.PatchActionImpl{}, actions[3])
- patchAction = actions[3].(core.PatchActionImpl)
- updatedNode, err = applyNodeStatusPatch(updatedNode, patchAction.GetPatch())
- require.NoError(t, err)
- for _, cond := range updatedNode.Status.Conditions {
- cond.LastHeartbeatTime = cond.LastHeartbeatTime.Rfc3339Copy()
- cond.LastTransitionTime = cond.LastTransitionTime.Rfc3339Copy()
- }
- // Expect LastHeartbeatTime updated, other things unchanged.
- for i, cond := range expectedNode.Status.Conditions {
- expectedNode.Status.Conditions[i].LastHeartbeatTime = metav1.NewTime(cond.LastHeartbeatTime.Time.Add(time.Minute)).Rfc3339Copy()
- }
- assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
- // Update node status again when nothing is changed (except heartbeat time).
- // Do not report node status if it is within the duration of nodeStatusReportFrequency.
- clock.Step(10 * time.Second)
- assert.NoError(t, kubelet.updateNodeStatus())
- // Only 1 more action (there were 4 actions before).
- actions = kubeClient.Actions()
- assert.Len(t, actions, 5)
- assert.IsType(t, core.GetActionImpl{}, actions[4])
- // Update node status again when something is changed.
- // Report node status even if it is still within the duration of nodeStatusReportFrequency.
- clock.Step(10 * time.Second)
- var newMemoryCapacity int64 = 40E9
- kubelet.machineInfo.MemoryCapacity = uint64(newMemoryCapacity)
- assert.NoError(t, kubelet.updateNodeStatus())
- // 2 more actions (there were 5 actions before).
- actions = kubeClient.Actions()
- assert.Len(t, actions, 7)
- assert.IsType(t, core.GetActionImpl{}, actions[5])
- assert.IsType(t, core.PatchActionImpl{}, actions[6])
- patchAction = actions[6].(core.PatchActionImpl)
- updatedNode, err = applyNodeStatusPatch(updatedNode, patchAction.GetPatch())
- require.NoError(t, err)
- memCapacity := updatedNode.Status.Capacity[v1.ResourceMemory]
- updatedMemoryCapacity, _ := memCapacity.AsInt64()
- assert.Equal(t, newMemoryCapacity, updatedMemoryCapacity, "Memory capacity")
- now = metav1.NewTime(clock.Now()).Rfc3339Copy()
- for _, cond := range updatedNode.Status.Conditions {
- // Expect LastHeartbeatTime updated, while LastTransitionTime unchanged.
- assert.Equal(t, now, cond.LastHeartbeatTime.Rfc3339Copy(),
- "LastHeartbeatTime for condition %v", cond.Type)
- assert.Equal(t, now, metav1.NewTime(cond.LastTransitionTime.Time.Add(time.Minute+20*time.Second)).Rfc3339Copy(),
- "LastTransitionTime for condition %v", cond.Type)
- }
- // Update node status when changing pod CIDR.
- // Report node status even if it is still within the duration of nodeStatusReportFrequency.
- clock.Step(10 * time.Second)
- assert.Equal(t, "", kubelet.runtimeState.podCIDR(), "Pod CIDR should be empty")
- podCIDR := "10.0.0.0/24"
- updatedNode.Spec.PodCIDR = podCIDR
- kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*updatedNode}}).ReactionChain
- assert.NoError(t, kubelet.updateNodeStatus())
- assert.Equal(t, podCIDR, kubelet.runtimeState.podCIDR(), "Pod CIDR should be updated now")
- // 2 more actions (there were 7 actions before).
- actions = kubeClient.Actions()
- assert.Len(t, actions, 9)
- assert.IsType(t, core.GetActionImpl{}, actions[7])
- assert.IsType(t, core.PatchActionImpl{}, actions[8])
- patchAction = actions[8].(core.PatchActionImpl)
- // Update node status when keeping the pod CIDR.
- // Do not report node status if it is within the duration of nodeStatusReportFrequency.
- clock.Step(10 * time.Second)
- assert.Equal(t, podCIDR, kubelet.runtimeState.podCIDR(), "Pod CIDR should already be updated")
- assert.NoError(t, kubelet.updateNodeStatus())
- // Only 1 more action (there were 9 actions before).
- actions = kubeClient.Actions()
- assert.Len(t, actions, 10)
- assert.IsType(t, core.GetActionImpl{}, actions[9])
- }
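- // TestUpdateNodeStatusAndVolumesInUseWithoutNodeLease verifies that, with
- // NodeLease disabled, the VolumesInUse setter reconciles the node's
- // VolumesInUse field with the volumes tracked by the volume manager.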
- func TestUpdateNodeStatusAndVolumesInUseWithoutNodeLease(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeLease, false)()
- cases := []struct {
- desc string
- existingVolumes []v1.UniqueVolumeName // volumes to initially populate volumeManager
- existingNode *v1.Node // existing node object
- expectedNode *v1.Node // new node object after patch
- expectedReportedInUse []v1.UniqueVolumeName // expected volumes reported in use in volumeManager
- }{
- {
- desc: "no volumes and no update",
- existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
- expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
- },
- {
- desc: "volumes inuse on node and volumeManager",
- existingVolumes: []v1.UniqueVolumeName{"vol1"},
- existingNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
- Status: v1.NodeStatus{
- VolumesInUse: []v1.UniqueVolumeName{"vol1"},
- },
- },
- expectedNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
- Status: v1.NodeStatus{
- VolumesInUse: []v1.UniqueVolumeName{"vol1"},
- },
- },
- expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
- },
- {
- desc: "volumes inuse on node but not in volumeManager",
- existingNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
- Status: v1.NodeStatus{
- VolumesInUse: []v1.UniqueVolumeName{"vol1"},
- },
- },
- expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
- },
- {
- desc: "volumes inuse in volumeManager but not on node",
- existingVolumes: []v1.UniqueVolumeName{"vol1"},
- existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
- expectedNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
- Status: v1.NodeStatus{
- VolumesInUse: []v1.UniqueVolumeName{"vol1"},
- },
- },
- expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
- },
- }
- for _, tc := range cases {
- t.Run(tc.desc, func(t *testing.T) {
- // Setup
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- kubelet.kubeClient = nil // ensure only the heartbeat client is used
- kubelet.containerManager = &localCM{ContainerManager: cm.NewStubContainerManager()}
- kubelet.lastStatusReportTime = kubelet.clock.Now()
- kubelet.nodeStatusReportFrequency = time.Hour
- kubelet.machineInfo = &cadvisorapi.MachineInfo{}
- // override test volumeManager
- fakeVolumeManager := kubeletvolume.NewFakeVolumeManager(tc.existingVolumes)
- kubelet.volumeManager = fakeVolumeManager
- // Only test VolumesInUse setter
- kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
- nodestatus.VolumesInUse(kubelet.volumeManager.ReconcilerStatesHasBeenSynced,
- kubelet.volumeManager.GetVolumesInUse),
- }
- kubeClient := testKubelet.fakeKubeClient
- kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*tc.existingNode}}).ReactionChain
- // Execute
- assert.NoError(t, kubelet.updateNodeStatus())
- // Validate
- actions := kubeClient.Actions()
- if tc.expectedNode != nil {
- assert.Len(t, actions, 2)
- assert.IsType(t, core.GetActionImpl{}, actions[0])
- assert.IsType(t, core.PatchActionImpl{}, actions[1])
- patchAction := actions[1].(core.PatchActionImpl)
- updatedNode, err := applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
- require.NoError(t, err)
- assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedNode, updatedNode), "%s", diff.ObjectDiff(tc.expectedNode, updatedNode))
- } else {
- assert.Len(t, actions, 1)
- assert.IsType(t, core.GetActionImpl{}, actions[0])
- }
- reportedInUse := fakeVolumeManager.GetVolumesReportedInUse()
- assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedReportedInUse, reportedInUse), "%s", diff.ObjectDiff(tc.expectedReportedInUse, reportedInUse))
- })
- }
- }
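- // TestUpdateNodeStatusAndVolumesInUseWithNodeLease is the NodeLease-enabled
- // counterpart of the test above: a patch is only sent when VolumesInUse
- // actually changes.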
- func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
- defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeLease, true)()
- cases := []struct {
- desc string
- existingVolumes []v1.UniqueVolumeName // volumes to initially populate volumeManager
- existingNode *v1.Node // existing node object
- expectedNode *v1.Node // new node object after patch
- expectedReportedInUse []v1.UniqueVolumeName // expected volumes reported in use in volumeManager
- }{
- {
- desc: "no volumes and no update",
- existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
- },
- {
- desc: "volumes inuse on node and volumeManager",
- existingVolumes: []v1.UniqueVolumeName{"vol1"},
- existingNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
- Status: v1.NodeStatus{
- VolumesInUse: []v1.UniqueVolumeName{"vol1"},
- },
- },
- expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
- },
- {
- desc: "volumes inuse on node but not in volumeManager",
- existingNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
- Status: v1.NodeStatus{
- VolumesInUse: []v1.UniqueVolumeName{"vol1"},
- },
- },
- expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
- },
- {
- desc: "volumes inuse in volumeManager but not on node",
- existingVolumes: []v1.UniqueVolumeName{"vol1"},
- existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
- expectedNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
- Status: v1.NodeStatus{
- VolumesInUse: []v1.UniqueVolumeName{"vol1"},
- },
- },
- expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
- },
- }
- for _, tc := range cases {
- t.Run(tc.desc, func(t *testing.T) {
- // Setup
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- kubelet.kubeClient = nil // ensure only the heartbeat client is used
- kubelet.containerManager = &localCM{ContainerManager: cm.NewStubContainerManager()}
- kubelet.lastStatusReportTime = kubelet.clock.Now()
- kubelet.nodeStatusReportFrequency = time.Hour
- kubelet.machineInfo = &cadvisorapi.MachineInfo{}
- // override test volumeManager
- fakeVolumeManager := kubeletvolume.NewFakeVolumeManager(tc.existingVolumes)
- kubelet.volumeManager = fakeVolumeManager
- // Only test VolumesInUse setter
- kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
- nodestatus.VolumesInUse(kubelet.volumeManager.ReconcilerStatesHasBeenSynced,
- kubelet.volumeManager.GetVolumesInUse),
- }
- kubeClient := testKubelet.fakeKubeClient
- kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*tc.existingNode}}).ReactionChain
- // Execute
- assert.NoError(t, kubelet.updateNodeStatus())
- // Validate
- actions := kubeClient.Actions()
- if tc.expectedNode != nil {
- assert.Len(t, actions, 2)
- assert.IsType(t, core.GetActionImpl{}, actions[0])
- assert.IsType(t, core.PatchActionImpl{}, actions[1])
- patchAction := actions[1].(core.PatchActionImpl)
- updatedNode, err := applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
- require.NoError(t, err)
- assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedNode, updatedNode), "%s", diff.ObjectDiff(tc.expectedNode, updatedNode))
- } else {
- assert.Len(t, actions, 1)
- assert.IsType(t, core.GetActionImpl{}, actions[0])
- }
- reportedInUse := fakeVolumeManager.GetVolumesReportedInUse()
- assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedReportedInUse, reportedInUse), "%s", diff.ObjectDiff(tc.expectedReportedInUse, reportedInUse))
- })
- }
- }
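- // TestRegisterWithApiServer verifies that registration completes (rather
- // than retrying forever) when the node already exists on the API server.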
- func TestRegisterWithApiServer(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- kubeClient := testKubelet.fakeKubeClient
- kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
- // Return an error on create.
- return true, &v1.Node{}, &apierrors.StatusError{
- ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
- }
- })
- kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
- // Return an existing (matching) node on get.
- return true, &v1.Node{
- ObjectMeta: metav1.ObjectMeta{
- Name: testKubeletHostname,
- Labels: map[string]string{
- v1.LabelHostname: testKubeletHostname,
- v1.LabelOSStable: goruntime.GOOS,
- v1.LabelArchStable: goruntime.GOARCH,
- kubeletapis.LabelOS: goruntime.GOOS,
- kubeletapis.LabelArch: goruntime.GOARCH,
- },
- },
- }, nil
- })
- addNotImplementedReaction(kubeClient)
- machineInfo := &cadvisorapi.MachineInfo{
- MachineID: "123",
- SystemUUID: "abc",
- BootID: "1b3",
- NumCores: 2,
- MemoryCapacity: 1024,
- }
- kubelet.machineInfo = machineInfo
- done := make(chan struct{})
- go func() {
- kubelet.registerWithAPIServer()
- done <- struct{}{}
- }()
- select {
- case <-time.After(wait.ForeverTestTimeout):
- assert.Fail(t, "timed out waiting for registration")
- case <-done:
- return
- }
- }
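- // TestTryRegisterWithApiServer exercises a single registration attempt
- // against various create/get/patch failures, including reconciliation of the
- // controller-managed attach-detach (CMAD) annotation on an existing node.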
- func TestTryRegisterWithApiServer(t *testing.T) {
- alreadyExists := &apierrors.StatusError{
- ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
- }
- conflict := &apierrors.StatusError{
- ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict},
- }
- newNode := func(cmad bool) *v1.Node {
- node := &v1.Node{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- v1.LabelHostname: testKubeletHostname,
- v1.LabelOSStable: goruntime.GOOS,
- v1.LabelArchStable: goruntime.GOARCH,
- kubeletapis.LabelOS: goruntime.GOOS,
- kubeletapis.LabelArch: goruntime.GOARCH,
- },
- },
- }
- if cmad {
- node.Annotations = make(map[string]string)
- node.Annotations[util.ControllerManagedAttachAnnotation] = "true"
- }
- return node
- }
- cases := []struct {
- name string
- newNode *v1.Node
- existingNode *v1.Node
- createError error
- getError error
- patchError error
- deleteError error
- expectedResult bool
- expectedActions int
- testSavedNode bool
- savedNodeIndex int
- savedNodeCMAD bool
- }{
- {
- name: "success case - new node",
- newNode: &v1.Node{},
- expectedResult: true,
- expectedActions: 1,
- },
- {
- name: "success case - existing node - no change in CMAD",
- newNode: newNode(true),
- createError: alreadyExists,
- existingNode: newNode(true),
- expectedResult: true,
- expectedActions: 2,
- },
- {
- name: "success case - existing node - CMAD disabled",
- newNode: newNode(false),
- createError: alreadyExists,
- existingNode: newNode(true),
- expectedResult: true,
- expectedActions: 3,
- testSavedNode: true,
- savedNodeIndex: 2,
- savedNodeCMAD: false,
- },
- {
- name: "success case - existing node - CMAD enabled",
- newNode: newNode(true),
- createError: alreadyExists,
- existingNode: newNode(false),
- expectedResult: true,
- expectedActions: 3,
- testSavedNode: true,
- savedNodeIndex: 2,
- savedNodeCMAD: true,
- },
- {
- name: "create failed",
- newNode: newNode(false),
- createError: conflict,
- expectedResult: false,
- expectedActions: 1,
- },
- {
- name: "get existing node failed",
- newNode: newNode(false),
- createError: alreadyExists,
- getError: conflict,
- expectedResult: false,
- expectedActions: 2,
- },
- {
- name: "update existing node failed",
- newNode: newNode(false),
- createError: alreadyExists,
- existingNode: newNode(true),
- patchError: conflict,
- expectedResult: false,
- expectedActions: 3,
- },
- }
- for _, tc := range cases {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled is a don't-care for this test */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- kubeClient := testKubelet.fakeKubeClient
- kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
- return true, nil, tc.createError
- })
- kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
- // Return an existing (matching) node on get.
- return true, tc.existingNode, tc.getError
- })
- kubeClient.AddReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
- if action.GetSubresource() == "status" {
- return true, nil, tc.patchError
- }
- return notImplemented(action)
- })
- kubeClient.AddReactor("delete", "nodes", func(action core.Action) (bool, runtime.Object, error) {
- return true, nil, tc.deleteError
- })
- addNotImplatedReaction(kubeClient)
- result := kubelet.tryRegisterWithAPIServer(tc.newNode)
- require.Equal(t, tc.expectedResult, result, "test [%s]", tc.name)
- actions := kubeClient.Actions()
- assert.Len(t, actions, tc.expectedActions, "test [%s]", tc.name)
- if tc.testSavedNode {
- var savedNode *v1.Node
- t.Logf("actions: %v: %+v", len(actions), actions)
- action := actions[tc.savedNodeIndex]
- if action.GetVerb() == "create" {
- createAction := action.(core.CreateAction)
- obj := createAction.GetObject()
- require.IsType(t, &v1.Node{}, obj)
- savedNode = obj.(*v1.Node)
- } else if action.GetVerb() == "patch" {
- patchAction := action.(core.PatchActionImpl)
- var err error
- savedNode, err = applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
- require.NoError(t, err)
- }
- actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[util.ControllerManagedAttachAnnotation])
- assert.Equal(t, tc.savedNodeCMAD, actualCMAD, "test [%s]", tc.name)
- }
- }
- }
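- // TestUpdateNewNodeStatusTooLargeReservation verifies that a reservation
- // larger than capacity (40 CPUs reserved on a 2-CPU machine) clamps the
- // reported allocatable value to zero instead of going negative.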
- func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
- const nodeStatusMaxImages = 5
- // Generate one more image in inputImageList than the Kubelet is configured to report.
- inputImageList, _ := generateTestingImageLists(nodeStatusMaxImages+1, nodeStatusMaxImages)
- testKubelet := newTestKubeletWithImageList(
- t, inputImageList, false /* controllerAttachDetachEnabled */, true /* initFakeVolumePlugin */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- kubelet.nodeStatusMaxImages = nodeStatusMaxImages
- kubelet.kubeClient = nil // ensure only the heartbeat client is used
- kubelet.containerManager = &localCM{
- ContainerManager: cm.NewStubContainerManager(),
- allocatableReservation: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(40000, resource.DecimalSI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(1000, resource.BinarySI),
- },
- capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
- },
- }
- // Since this test retroactively overrides the stub container manager,
- // we have to regenerate default status setters.
- kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
- kubeClient := testKubelet.fakeKubeClient
- existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
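- // Seed the fake client with the existing node so the status update can
- // get it and then patch its status.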
- kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
- machineInfo := &cadvisorapi.MachineInfo{
- MachineID: "123",
- SystemUUID: "abc",
- BootID: "1b3",
- NumCores: 2,
- MemoryCapacity: 10E9, // 10G
- }
- kubelet.machineInfo = machineInfo
- expectedNode := &v1.Node{
- ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
- Spec: v1.NodeSpec{},
- Status: v1.NodeStatus{
- Capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
- },
- Allocatable: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(0, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
- },
- },
- }
- kubelet.updateRuntimeUp()
- assert.NoError(t, kubelet.updateNodeStatus())
- actions := kubeClient.Actions()
- require.Len(t, actions, 2)
- require.True(t, actions[1].Matches("patch", "nodes"))
- require.Equal(t, "status", actions[1].GetSubresource())
- updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
- assert.NoError(t, err)
- assert.True(t, apiequality.Semantic.DeepEqual(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable), "%s", diff.ObjectDiff(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable))
- }
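- // TestUpdateDefaultLabels verifies that updateDefaultLabels overwrites the
- // kubelet-owned default labels on the existing node, preserves unrelated
- // labels, and reports whether an update is required.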
- func TestUpdateDefaultLabels(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- testKubelet.kubelet.kubeClient = nil // ensure only the heartbeat client is used
- cases := []struct {
- name string
- initialNode *v1.Node
- existingNode *v1.Node
- needsUpdate bool
- finalLabels map[string]string
- }{
- {
- name: "make sure default labels exist",
- initialNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- v1.LabelHostname: "new-hostname",
- v1.LabelZoneFailureDomain: "new-zone-failure-domain",
- v1.LabelZoneRegion: "new-zone-region",
- v1.LabelInstanceType: "new-instance-type",
- kubeletapis.LabelOS: "new-os",
- kubeletapis.LabelArch: "new-arch",
- },
- },
- },
- existingNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{},
- },
- },
- needsUpdate: true,
- finalLabels: map[string]string{
- v1.LabelHostname: "new-hostname",
- v1.LabelZoneFailureDomain: "new-zone-failure-domain",
- v1.LabelZoneRegion: "new-zone-region",
- v1.LabelInstanceType: "new-instance-type",
- kubeletapis.LabelOS: "new-os",
- kubeletapis.LabelArch: "new-arch",
- },
- },
- {
- name: "make sure default labels are up to date",
- initialNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- v1.LabelHostname: "new-hostname",
- v1.LabelZoneFailureDomain: "new-zone-failure-domain",
- v1.LabelZoneRegion: "new-zone-region",
- v1.LabelInstanceType: "new-instance-type",
- kubeletapis.LabelOS: "new-os",
- kubeletapis.LabelArch: "new-arch",
- },
- },
- },
- existingNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- v1.LabelHostname: "old-hostname",
- v1.LabelZoneFailureDomain: "old-zone-failure-domain",
- v1.LabelZoneRegion: "old-zone-region",
- v1.LabelInstanceType: "old-instance-type",
- kubeletapis.LabelOS: "old-os",
- kubeletapis.LabelArch: "old-arch",
- },
- },
- },
- needsUpdate: true,
- finalLabels: map[string]string{
- v1.LabelHostname: "new-hostname",
- v1.LabelZoneFailureDomain: "new-zone-failure-domain",
- v1.LabelZoneRegion: "new-zone-region",
- v1.LabelInstanceType: "new-instance-type",
- kubeletapis.LabelOS: "new-os",
- kubeletapis.LabelArch: "new-arch",
- },
- },
- {
- name: "make sure existing labels do not get deleted",
- initialNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- v1.LabelHostname: "new-hostname",
- v1.LabelZoneFailureDomain: "new-zone-failure-domain",
- v1.LabelZoneRegion: "new-zone-region",
- v1.LabelInstanceType: "new-instance-type",
- kubeletapis.LabelOS: "new-os",
- kubeletapis.LabelArch: "new-arch",
- },
- },
- },
- existingNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- v1.LabelHostname: "new-hostname",
- v1.LabelZoneFailureDomain: "new-zone-failure-domain",
- v1.LabelZoneRegion: "new-zone-region",
- v1.LabelInstanceType: "new-instance-type",
- kubeletapis.LabelOS: "new-os",
- kubeletapis.LabelArch: "new-arch",
- "please-persist": "foo",
- },
- },
- },
- needsUpdate: false,
- finalLabels: map[string]string{
- v1.LabelHostname: "new-hostname",
- v1.LabelZoneFailureDomain: "new-zone-failure-domain",
- v1.LabelZoneRegion: "new-zone-region",
- v1.LabelInstanceType: "new-instance-type",
- kubeletapis.LabelOS: "new-os",
- kubeletapis.LabelArch: "new-arch",
- "please-persist": "foo",
- },
- },
- {
- name: "make sure existing labels do not get deleted when initial node has no opinion",
- initialNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{},
- },
- },
- existingNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- v1.LabelHostname: "new-hostname",
- v1.LabelZoneFailureDomain: "new-zone-failure-domain",
- v1.LabelZoneRegion: "new-zone-region",
- v1.LabelInstanceType: "new-instance-type",
- kubeletapis.LabelOS: "new-os",
- kubeletapis.LabelArch: "new-arch",
- "please-persist": "foo",
- },
- },
- },
- needsUpdate: false,
- finalLabels: map[string]string{
- v1.LabelHostname: "new-hostname",
- v1.LabelZoneFailureDomain: "new-zone-failure-domain",
- v1.LabelZoneRegion: "new-zone-region",
- v1.LabelInstanceType: "new-instance-type",
- kubeletapis.LabelOS: "new-os",
- kubeletapis.LabelArch: "new-arch",
- "please-persist": "foo",
- },
- },
- {
- name: "no update needed",
- initialNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- v1.LabelHostname: "new-hostname",
- v1.LabelZoneFailureDomain: "new-zone-failure-domain",
- v1.LabelZoneRegion: "new-zone-region",
- v1.LabelInstanceType: "new-instance-type",
- kubeletapis.LabelOS: "new-os",
- kubeletapis.LabelArch: "new-arch",
- },
- },
- },
- existingNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- v1.LabelHostname: "new-hostname",
- v1.LabelZoneFailureDomain: "new-zone-failure-domain",
- v1.LabelZoneRegion: "new-zone-region",
- v1.LabelInstanceType: "new-instance-type",
- kubeletapis.LabelOS: "new-os",
- kubeletapis.LabelArch: "new-arch",
- },
- },
- },
- needsUpdate: false,
- finalLabels: map[string]string{
- v1.LabelHostname: "new-hostname",
- v1.LabelZoneFailureDomain: "new-zone-failure-domain",
- v1.LabelZoneRegion: "new-zone-region",
- v1.LabelInstanceType: "new-instance-type",
- kubeletapis.LabelOS: "new-os",
- kubeletapis.LabelArch: "new-arch",
- },
- },
- {
- name: "not panic when existing node has nil labels",
- initialNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- v1.LabelHostname: "new-hostname",
- v1.LabelZoneFailureDomain: "new-zone-failure-domain",
- v1.LabelZoneRegion: "new-zone-region",
- v1.LabelInstanceType: "new-instance-type",
- kubeletapis.LabelOS: "new-os",
- kubeletapis.LabelArch: "new-arch",
- },
- },
- },
- existingNode: &v1.Node{
- ObjectMeta: metav1.ObjectMeta{},
- },
- needsUpdate: true,
- finalLabels: map[string]string{
- v1.LabelHostname: "new-hostname",
- v1.LabelZoneFailureDomain: "new-zone-failure-domain",
- v1.LabelZoneRegion: "new-zone-region",
- v1.LabelInstanceType: "new-instance-type",
- kubeletapis.LabelOS: "new-os",
- kubeletapis.LabelArch: "new-arch",
- },
- },
- }
- defer testKubelet.Cleanup()
- for _, tc := range cases {
- kubelet := testKubelet.kubelet
- needsUpdate := kubelet.updateDefaultLabels(tc.initialNode, tc.existingNode)
- assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
- assert.Equal(t, tc.finalLabels, tc.existingNode.Labels, tc.name)
- }
- }
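- // TestReconcileExtendedResource verifies that extended resource capacity
- // and allocatable values are zeroed when the container manager reports
- // that extended resources should be reset (the stub is constructed with
- // shouldResetExtendedResourceCapacity=true), and are otherwise untouched.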
- func TestReconcileExtendedResource(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- testKubelet.kubelet.kubeClient = nil // ensure only the heartbeat client is used
- testKubelet.kubelet.containerManager = cm.NewStubContainerManagerWithExtendedResource(true /* shouldResetExtendedResourceCapacity */)
- testKubeletNoReset := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubeletNoReset.Cleanup()
- extendedResourceName1 := v1.ResourceName("test.com/resource1")
- extendedResourceName2 := v1.ResourceName("test.com/resource2")
- cases := []struct {
- name string
- testKubelet *TestKubelet
- existingNode *v1.Node
- expectedNode *v1.Node
- needsUpdate bool
- }{
- {
- name: "no update needed without extended resource",
- testKubelet: testKubelet,
- existingNode: &v1.Node{
- Status: v1.NodeStatus{
- Capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- },
- Allocatable: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- },
- },
- },
- expectedNode: &v1.Node{
- Status: v1.NodeStatus{
- Capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- },
- Allocatable: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- },
- },
- },
- needsUpdate: false,
- },
- {
- name: "extended resource capacity is not zeroed due to presence of checkpoint file",
- testKubelet: testKubelet,
- existingNode: &v1.Node{
- Status: v1.NodeStatus{
- Capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- },
- Allocatable: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- },
- },
- },
- expectedNode: &v1.Node{
- Status: v1.NodeStatus{
- Capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- },
- Allocatable: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- },
- },
- },
- needsUpdate: false,
- },
- {
- name: "extended resource capacity is zeroed",
- testKubelet: testKubeletNoReset,
- existingNode: &v1.Node{
- Status: v1.NodeStatus{
- Capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- extendedResourceName1: *resource.NewQuantity(int64(2), resource.DecimalSI),
- extendedResourceName2: *resource.NewQuantity(int64(10), resource.DecimalSI),
- },
- Allocatable: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- extendedResourceName1: *resource.NewQuantity(int64(2), resource.DecimalSI),
- extendedResourceName2: *resource.NewQuantity(int64(10), resource.DecimalSI),
- },
- },
- },
- expectedNode: &v1.Node{
- Status: v1.NodeStatus{
- Capacity: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- extendedResourceName1: *resource.NewQuantity(int64(0), resource.DecimalSI),
- extendedResourceName2: *resource.NewQuantity(int64(0), resource.DecimalSI),
- },
- Allocatable: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
- v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
- extendedResourceName1: *resource.NewQuantity(int64(0), resource.DecimalSI),
- extendedResourceName2: *resource.NewQuantity(int64(0), resource.DecimalSI),
- },
- },
- },
- needsUpdate: true,
- },
- }
- defer testKubelet.Cleanup()
- for _, tc := range cases {
- kubelet := testKubelet.kubelet
- initialNode := &v1.Node{}
- needsUpdate := kubelet.reconcileExtendedResource(initialNode, tc.existingNode)
- assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
- assert.Equal(t, tc.expectedNode, tc.existingNode, tc.name)
- }
- }
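- // TestValidateNodeIPParam feeds validateNodeIP addresses that must be
- // rejected (loopback, link-local, multicast, unspecified, and addresses
- // not assigned to this host) plus, where available, the host's own
- // unicast addresses, which must be accepted.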
- func TestValidateNodeIPParam(t *testing.T) {
- type test struct {
- nodeIP string
- success bool
- testName string
- }
- tests := []test{
- {
- nodeIP: "",
- success: false,
- testName: "IP not set",
- },
- {
- nodeIP: "127.0.0.1",
- success: false,
- testName: "IPv4 loopback address",
- },
- {
- nodeIP: "::1",
- success: false,
- testName: "IPv6 loopback address",
- },
- {
- nodeIP: "224.0.0.1",
- success: false,
- testName: "multicast IPv4 address",
- },
- {
- nodeIP: "ff00::1",
- success: false,
- testName: "multicast IPv6 address",
- },
- {
- nodeIP: "169.254.0.1",
- success: false,
- testName: "IPv4 link-local unicast address",
- },
- {
- nodeIP: "fe80::0202:b3ff:fe1e:8329",
- success: false,
- testName: "IPv6 link-local unicast address",
- },
- {
- nodeIP: "0.0.0.0",
- success: false,
- testName: "Unspecified IPv4 address",
- },
- {
- nodeIP: "::",
- success: false,
- testName: "Unspecified IPv6 address",
- },
- {
- nodeIP: "1.2.3.4",
- success: false,
- testName: "IPv4 address that doesn't belong to host",
- },
- }
- // Gather the host's own interface addresses to generate success cases.
- addrs, err := net.InterfaceAddrs()
- require.NoError(t, err, "Unable to obtain a list of the node's unicast interface addresses.")
- for _, addr := range addrs {
- var ip net.IP
- switch v := addr.(type) {
- case *net.IPNet:
- ip = v.IP
- case *net.IPAddr:
- ip = v.IP
- }
- if ip.IsLoopback() || ip.IsLinkLocalUnicast() {
- // Skip addresses that validateNodeIP rejects so that only usable
- // host addresses are added as success cases.
- continue
- }
- successTest := test{
- nodeIP: ip.String(),
- success: true,
- testName: fmt.Sprintf("Success test case for address %s", ip.String()),
- }
- tests = append(tests, successTest)
- }
- for _, test := range tests {
- err := validateNodeIP(net.ParseIP(test.nodeIP))
- if test.success {
- assert.NoError(t, err, "test %s", test.testName)
- } else {
- assert.Error(t, err, fmt.Sprintf("test %s", test.testName))
- }
- }
- }
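- // TestRegisterWithApiServerWithTaint registers an unschedulable kubelet
- // and checks that the unschedulable NoSchedule taint
- // (schedulerapi.TaintNodeUnschedulable) is present exactly when the
- // TaintNodesByCondition feature gate is enabled.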
- func TestRegisterWithApiServerWithTaint(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- kubeClient := testKubelet.fakeKubeClient
- machineInfo := &cadvisorapi.MachineInfo{
- MachineID: "123",
- SystemUUID: "abc",
- BootID: "1b3",
- NumCores: 2,
- MemoryCapacity: 1024,
- }
- kubelet.machineInfo = machineInfo
- var gotNode runtime.Object
- kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
- createAction := action.(core.CreateAction)
- gotNode = createAction.GetObject()
- return true, gotNode, nil
- })
- addNotImplatedReaction(kubeClient)
- // Mark the node as unschedulable.
- kubelet.registerSchedulable = false
- forEachFeatureGate(t, []featuregate.Feature{features.TaintNodesByCondition}, func(t *testing.T) {
- // Reset kubelet status for each test.
- kubelet.registrationCompleted = false
- // Register the node with the apiserver.
- kubelet.registerWithAPIServer()
- // Check the unschedulable taint.
- got := gotNode.(*v1.Node)
- unschedulableTaint := &v1.Taint{
- Key: schedulerapi.TaintNodeUnschedulable,
- Effect: v1.TaintEffectNoSchedule,
- }
- require.Equal(t,
- utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition),
- taintutil.TaintExists(got.Spec.Taints, unschedulableTaint),
- "test unschedulable taint for TaintNodesByCondition")
- })
- }
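- // TestNodeStatusHasChanged verifies that nodeStatusHasChanged ignores
- // heartbeat times and condition ordering, but reports a change for any
- // difference in condition status, transition time, condition count, or
- // node phase, without mutating its inputs.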
- func TestNodeStatusHasChanged(t *testing.T) {
- fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
- fakeFuture := metav1.Time{Time: fakeNow.Time.Add(time.Minute)}
- readyCondition := v1.NodeCondition{
- Type: v1.NodeReady,
- Status: v1.ConditionTrue,
- LastHeartbeatTime: fakeNow,
- LastTransitionTime: fakeNow,
- }
- readyConditionAtDiffHeartbeatTime := v1.NodeCondition{
- Type: v1.NodeReady,
- Status: v1.ConditionTrue,
- LastHeartbeatTime: fakeFuture,
- LastTransitionTime: fakeNow,
- }
- readyConditionAtDiffTransitionTime := v1.NodeCondition{
- Type: v1.NodeReady,
- Status: v1.ConditionTrue,
- LastHeartbeatTime: fakeFuture,
- LastTransitionTime: fakeFuture,
- }
- notReadyCondition := v1.NodeCondition{
- Type: v1.NodeReady,
- Status: v1.ConditionFalse,
- LastHeartbeatTime: fakeNow,
- LastTransitionTime: fakeNow,
- }
- memoryPressureCondition := v1.NodeCondition{
- Type: v1.NodeMemoryPressure,
- Status: v1.ConditionFalse,
- LastHeartbeatTime: fakeNow,
- LastTransitionTime: fakeNow,
- }
- testcases := []struct {
- name string
- originalStatus *v1.NodeStatus
- status *v1.NodeStatus
- expectChange bool
- }{
- {
- name: "Node status does not change with nil status.",
- originalStatus: nil,
- status: nil,
- expectChange: false,
- },
- {
- name: "Node status does not change with default status.",
- originalStatus: &v1.NodeStatus{},
- status: &v1.NodeStatus{},
- expectChange: false,
- },
- {
- name: "Node status changes with nil and default status.",
- originalStatus: nil,
- status: &v1.NodeStatus{},
- expectChange: true,
- },
- {
- name: "Node status changes with nil and status.",
- originalStatus: nil,
- status: &v1.NodeStatus{
- Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
- },
- expectChange: true,
- },
- {
- name: "Node status does not change with empty conditions.",
- originalStatus: &v1.NodeStatus{Conditions: []v1.NodeCondition{}},
- status: &v1.NodeStatus{Conditions: []v1.NodeCondition{}},
- expectChange: false,
- },
- {
- name: "Node status does not change",
- originalStatus: &v1.NodeStatus{
- Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
- },
- status: &v1.NodeStatus{
- Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
- },
- expectChange: false,
- },
- {
- name: "Node status does not change even if heartbeat time changes.",
- originalStatus: &v1.NodeStatus{
- Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
- },
- status: &v1.NodeStatus{
- Conditions: []v1.NodeCondition{readyConditionAtDiffHeartbeatTime, memoryPressureCondition},
- },
- expectChange: false,
- },
- {
- name: "Node status does not change even if the orders of conditions are different.",
- originalStatus: &v1.NodeStatus{
- Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
- },
- status: &v1.NodeStatus{
- Conditions: []v1.NodeCondition{memoryPressureCondition, readyConditionAtDiffHeartbeatTime},
- },
- expectChange: false,
- },
- {
- name: "Node status changes if condition status differs.",
- originalStatus: &v1.NodeStatus{
- Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
- },
- status: &v1.NodeStatus{
- Conditions: []v1.NodeCondition{notReadyCondition, memoryPressureCondition},
- },
- expectChange: true,
- },
- {
- name: "Node status changes if transition time changes.",
- originalStatus: &v1.NodeStatus{
- Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
- },
- status: &v1.NodeStatus{
- Conditions: []v1.NodeCondition{readyConditionAtDiffTransitionTime, memoryPressureCondition},
- },
- expectChange: true,
- },
- {
- name: "Node status changes with different number of conditions.",
- originalStatus: &v1.NodeStatus{
- Conditions: []v1.NodeCondition{readyCondition},
- },
- status: &v1.NodeStatus{
- Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
- },
- expectChange: true,
- },
- {
- name: "Node status changes with different phase.",
- originalStatus: &v1.NodeStatus{
- Phase: v1.NodePending,
- Conditions: []v1.NodeCondition{readyCondition},
- },
- status: &v1.NodeStatus{
- Phase: v1.NodeRunning,
- Conditions: []v1.NodeCondition{readyCondition},
- },
- expectChange: true,
- },
- }
- for _, tc := range testcases {
- t.Run(tc.name, func(t *testing.T) {
- originalStatusCopy := tc.originalStatus.DeepCopy()
- statusCopy := tc.status.DeepCopy()
- changed := nodeStatusHasChanged(tc.originalStatus, tc.status)
- assert.Equal(t, tc.expectChange, changed, "Expect node status change to be %t, but got %t.", tc.expectChange, changed)
- assert.True(t, apiequality.Semantic.DeepEqual(originalStatusCopy, tc.originalStatus), "%s", diff.ObjectDiff(originalStatusCopy, tc.originalStatus))
- assert.True(t, apiequality.Semantic.DeepEqual(statusCopy, tc.status), "%s", diff.ObjectDiff(statusCopy, tc.status))
- })
- }
- }
|