- /*
- Copyright 2014 The Kubernetes Authors.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- */
- package kubelet
- import (
- "fmt"
- "io/ioutil"
- "os"
- "sort"
- "testing"
- "time"
- cadvisorapi "github.com/google/cadvisor/info/v1"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "k8s.io/utils/mount"
- v1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/resource"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/util/clock"
- utilruntime "k8s.io/apimachinery/pkg/util/runtime"
- "k8s.io/apimachinery/pkg/util/sets"
- "k8s.io/apimachinery/pkg/util/wait"
- "k8s.io/client-go/kubernetes/fake"
- "k8s.io/client-go/tools/record"
- "k8s.io/client-go/util/flowcontrol"
- cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
- "k8s.io/kubernetes/pkg/kubelet/cm"
- "k8s.io/kubernetes/pkg/kubelet/config"
- "k8s.io/kubernetes/pkg/kubelet/configmap"
- kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
- containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
- "k8s.io/kubernetes/pkg/kubelet/eviction"
- "k8s.io/kubernetes/pkg/kubelet/images"
- "k8s.io/kubernetes/pkg/kubelet/lifecycle"
- "k8s.io/kubernetes/pkg/kubelet/logs"
- "k8s.io/kubernetes/pkg/kubelet/network/dns"
- "k8s.io/kubernetes/pkg/kubelet/pleg"
- "k8s.io/kubernetes/pkg/kubelet/pluginmanager"
- kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
- podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
- proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
- probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing"
- "k8s.io/kubernetes/pkg/kubelet/secret"
- serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
- "k8s.io/kubernetes/pkg/kubelet/stats"
- "k8s.io/kubernetes/pkg/kubelet/status"
- statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
- "k8s.io/kubernetes/pkg/kubelet/token"
- kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
- "k8s.io/kubernetes/pkg/kubelet/util/queue"
- kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
- schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
- "k8s.io/kubernetes/pkg/volume"
- "k8s.io/kubernetes/pkg/volume/awsebs"
- "k8s.io/kubernetes/pkg/volume/azure_dd"
- "k8s.io/kubernetes/pkg/volume/gcepd"
- _ "k8s.io/kubernetes/pkg/volume/hostpath"
- volumetest "k8s.io/kubernetes/pkg/volume/testing"
- "k8s.io/kubernetes/pkg/volume/util"
- "k8s.io/kubernetes/pkg/volume/util/hostutil"
- "k8s.io/kubernetes/pkg/volume/util/subpath"
- )
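- // Re-panic on crashes handled by utilruntime.HandleCrash so that unexpected panics in test goroutines fail loudly rather than being swallowed.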
- func init() {
- utilruntime.ReallyCrash = true
- }
- const (
- testKubeletHostname = "127.0.0.1"
- testKubeletHostIP = "127.0.0.1"
- // TODO(harry) any global place for these two?
- // Reasonable size range for container images; roughly 90% of images on Docker Hub fall within it.
- minImgSize int64 = 23 * 1024 * 1024
- maxImgSize int64 = 1000 * 1024 * 1024
- )
- // fakeImageGCManager is a fake image GC manager for testing. It returns the image
- // list from the fake runtime directly instead of caching it.
- type fakeImageGCManager struct {
- fakeImageService kubecontainer.ImageService
- images.ImageGCManager
- }
- func (f *fakeImageGCManager) GetImageList() ([]kubecontainer.Image, error) {
- return f.fakeImageService.ListImages()
- }
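- // TestKubelet bundles a Kubelet wired up with fake dependencies together with the fakes that tests most often need to inspect or manipulate directly.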
- type TestKubelet struct {
- kubelet *Kubelet
- fakeRuntime *containertest.FakeRuntime
- fakeKubeClient *fake.Clientset
- fakeMirrorClient *podtest.FakeMirrorClient
- fakeClock *clock.FakeClock
- mounter mount.Interface
- volumePlugin *volumetest.FakeVolumePlugin
- }
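- // Cleanup removes the kubelet's temporary root directory; tests defer it immediately after newTestKubelet.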
- func (tk *TestKubelet) Cleanup() {
- if tk.kubelet != nil {
- os.RemoveAll(tk.kubelet.rootDirectory)
- }
- }
- // newTestKubelet returns a test kubelet seeded with two fake images.
- func newTestKubelet(t *testing.T, controllerAttachDetachEnabled bool) *TestKubelet {
- imageList := []kubecontainer.Image{
- {
- ID: "abc",
- RepoTags: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
- Size: 123,
- },
- {
- ID: "efg",
- RepoTags: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
- Size: 456,
- },
- }
- return newTestKubeletWithImageList(t, imageList, controllerAttachDetachEnabled, true /*initFakeVolumePlugin*/)
- }
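- // newTestKubeletWithImageList builds a Kubelet whose runtime, API client, cadvisor,
- // managers, and volume plugins are all fakes, seeded with the given image list.
- // A typical caller looks like the tests below (sketch):
- //
- //	testKubelet := newTestKubeletWithImageList(t, imageList, false /* controllerAttachDetachEnabled */, true /* initFakeVolumePlugin */)
- //	defer testKubelet.Cleanup()
- //	kubelet := testKubelet.kubelet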
- func newTestKubeletWithImageList(
- t *testing.T,
- imageList []kubecontainer.Image,
- controllerAttachDetachEnabled bool,
- initFakeVolumePlugin bool) *TestKubelet {
- fakeRuntime := &containertest.FakeRuntime{}
- fakeRuntime.RuntimeType = "test"
- fakeRuntime.VersionInfo = "1.5.0"
- fakeRuntime.ImageList = imageList
- // Set ready conditions by default.
- fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
- Conditions: []kubecontainer.RuntimeCondition{
- {Type: "RuntimeReady", Status: true},
- {Type: "NetworkReady", Status: true},
- },
- }
- fakeRecorder := &record.FakeRecorder{}
- fakeKubeClient := &fake.Clientset{}
- kubelet := &Kubelet{}
- kubelet.recorder = fakeRecorder
- kubelet.kubeClient = fakeKubeClient
- kubelet.heartbeatClient = fakeKubeClient
- kubelet.os = &containertest.FakeOS{}
- kubelet.mounter = mount.NewFakeMounter(nil)
- kubelet.hostutil = hostutil.NewFakeHostUtil(nil)
- kubelet.subpather = &subpath.FakeSubpath{}
- kubelet.hostname = testKubeletHostname
- kubelet.nodeName = types.NodeName(testKubeletHostname)
- kubelet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
- kubelet.runtimeState.setNetworkState(nil)
- if tempDir, err := ioutil.TempDir("", "kubelet_test."); err != nil {
- t.Fatalf("can't make a temp rootdir: %v", err)
- } else {
- kubelet.rootDirectory = tempDir
- }
- if err := os.MkdirAll(kubelet.rootDirectory, 0750); err != nil {
- t.Fatalf("can't mkdir(%q): %v", kubelet.rootDirectory, err)
- }
- kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return true })
- kubelet.masterServiceNamespace = metav1.NamespaceDefault
- kubelet.serviceLister = testServiceLister{}
- kubelet.nodeLister = testNodeLister{
- nodes: []*v1.Node{
- {
- ObjectMeta: metav1.ObjectMeta{
- Name: string(kubelet.nodeName),
- },
- Status: v1.NodeStatus{
- Conditions: []v1.NodeCondition{
- {
- Type: v1.NodeReady,
- Status: v1.ConditionTrue,
- Reason: "Ready",
- Message: "Node ready",
- },
- },
- Addresses: []v1.NodeAddress{
- {
- Type: v1.NodeInternalIP,
- Address: testKubeletHostIP,
- },
- },
- },
- },
- },
- }
- kubelet.recorder = fakeRecorder
- if err := kubelet.setupDataDirs(); err != nil {
- t.Fatalf("can't initialize kubelet data dirs: %v", err)
- }
- kubelet.daemonEndpoints = &v1.NodeDaemonEndpoints{}
- kubelet.cadvisor = &cadvisortest.Fake{}
- machineInfo, _ := kubelet.cadvisor.MachineInfo()
- kubelet.machineInfo = machineInfo
- fakeMirrorClient := podtest.NewFakeMirrorClient()
- secretManager := secret.NewSimpleSecretManager(kubelet.kubeClient)
- kubelet.secretManager = secretManager
- configMapManager := configmap.NewSimpleConfigMapManager(kubelet.kubeClient)
- kubelet.configMapManager = configMapManager
- kubelet.podManager = kubepod.NewBasicPodManager(fakeMirrorClient, kubelet.secretManager, kubelet.configMapManager, podtest.NewMockCheckpointManager())
- kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager, &statustest.FakePodDeletionSafetyProvider{})
- kubelet.containerRuntime = fakeRuntime
- kubelet.runtimeCache = containertest.NewFakeRuntimeCache(kubelet.containerRuntime)
- kubelet.reasonCache = NewReasonCache()
- kubelet.podCache = containertest.NewFakeCache(kubelet.containerRuntime)
- kubelet.podWorkers = &fakePodWorkers{
- syncPodFn: kubelet.syncPod,
- cache: kubelet.podCache,
- t: t,
- }
- kubelet.probeManager = probetest.FakeManager{}
- kubelet.livenessManager = proberesults.NewManager()
- kubelet.startupManager = proberesults.NewManager()
- kubelet.containerManager = cm.NewStubContainerManager()
- fakeNodeRef := &v1.ObjectReference{
- Kind: "Node",
- Name: testKubeletHostname,
- UID: types.UID(testKubeletHostname),
- Namespace: "",
- }
- volumeStatsAggPeriod := time.Second * 10
- kubelet.resourceAnalyzer = serverstats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod)
- kubelet.StatsProvider = stats.NewCadvisorStatsProvider(
- kubelet.cadvisor,
- kubelet.resourceAnalyzer,
- kubelet.podManager,
- kubelet.runtimeCache,
- fakeRuntime,
- kubelet.statusManager)
- fakeImageGCPolicy := images.ImageGCPolicy{
- HighThresholdPercent: 90,
- LowThresholdPercent: 80,
- }
- imageGCManager, err := images.NewImageGCManager(fakeRuntime, kubelet.StatsProvider, fakeRecorder, fakeNodeRef, fakeImageGCPolicy, "")
- assert.NoError(t, err)
- kubelet.imageManager = &fakeImageGCManager{
- fakeImageService: fakeRuntime,
- ImageGCManager: imageGCManager,
- }
- kubelet.containerLogManager = logs.NewStubContainerLogManager()
- containerGCPolicy := kubecontainer.ContainerGCPolicy{
- MinAge: time.Duration(0),
- MaxPerPodContainer: 1,
- MaxContainers: -1,
- }
- containerGC, err := kubecontainer.NewContainerGC(fakeRuntime, containerGCPolicy, kubelet.sourcesReady)
- assert.NoError(t, err)
- kubelet.containerGC = containerGC
- fakeClock := clock.NewFakeClock(time.Now())
- kubelet.backOff = flowcontrol.NewBackOff(time.Second, time.Minute)
- kubelet.backOff.Clock = fakeClock
- kubelet.podKillingCh = make(chan *kubecontainer.PodPair, 20)
- kubelet.resyncInterval = 10 * time.Second
- kubelet.workQueue = queue.NewBasicWorkQueue(fakeClock)
- // Relist period does not affect the tests.
- kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour, nil, clock.RealClock{})
- kubelet.clock = fakeClock
- nodeRef := &v1.ObjectReference{
- Kind: "Node",
- Name: string(kubelet.nodeName),
- UID: types.UID(kubelet.nodeName),
- Namespace: "",
- }
- // setup eviction manager
- evictionManager, evictionAdmitHandler := eviction.NewManager(kubelet.resourceAnalyzer, eviction.Config{}, killPodNow(kubelet.podWorkers, fakeRecorder), kubelet.podManager.GetMirrorPodByPod, kubelet.imageManager, kubelet.containerGC, fakeRecorder, nodeRef, kubelet.clock)
- kubelet.evictionManager = evictionManager
- kubelet.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler)
- // Add this as cleanup predicate pod admitter
- kubelet.admitHandlers.AddPodAdmitHandler(lifecycle.NewPredicateAdmitHandler(kubelet.getNodeAnyWay, lifecycle.NewAdmissionFailureHandlerStub(), kubelet.containerManager.UpdatePluginResources))
- allPlugins := []volume.VolumePlugin{}
- plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
- if initFakeVolumePlugin {
- allPlugins = append(allPlugins, plug)
- } else {
- allPlugins = append(allPlugins, awsebs.ProbeVolumePlugins()...)
- allPlugins = append(allPlugins, gcepd.ProbeVolumePlugins()...)
- allPlugins = append(allPlugins, azure_dd.ProbeVolumePlugins()...)
- }
- var prober volume.DynamicPluginProber // TODO (#51147) inject mock
- kubelet.volumePluginMgr, err =
- NewInitializedVolumePluginMgr(kubelet, kubelet.secretManager, kubelet.configMapManager, token.NewManager(kubelet.kubeClient), allPlugins, prober)
- require.NoError(t, err, "Failed to initialize VolumePluginMgr")
- kubelet.volumeManager = kubeletvolume.NewVolumeManager(
- controllerAttachDetachEnabled,
- kubelet.nodeName,
- kubelet.podManager,
- kubelet.statusManager,
- fakeKubeClient,
- kubelet.volumePluginMgr,
- fakeRuntime,
- kubelet.mounter,
- kubelet.hostutil,
- kubelet.getPodsDir(),
- kubelet.recorder,
- false, /* experimentalCheckNodeCapabilitiesBeforeMount*/
- false, /* keepTerminatedPodVolumes */
- volumetest.NewBlockVolumePathHandler())
- kubelet.pluginManager = pluginmanager.NewPluginManager(
- kubelet.getPluginsRegistrationDir(), /* sockDir */
- kubelet.recorder,
- )
- kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
- // enable active deadline handler
- activeDeadlineHandler, err := newActiveDeadlineHandler(kubelet.statusManager, kubelet.recorder, kubelet.clock)
- require.NoError(t, err, "Can't initialize active deadline handler")
- kubelet.AddPodSyncLoopHandler(activeDeadlineHandler)
- kubelet.AddPodSyncHandler(activeDeadlineHandler)
- return &TestKubelet{kubelet, fakeRuntime, fakeKubeClient, fakeMirrorClient, fakeClock, nil, plug}
- }
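- // newTestPods returns count minimal pods with host networking enabled, unique UIDs, and names pod0 through pod<count-1>.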
- func newTestPods(count int) []*v1.Pod {
- pods := make([]*v1.Pod, count)
- for i := 0; i < count; i++ {
- pods[i] = &v1.Pod{
- Spec: v1.PodSpec{
- HostNetwork: true,
- },
- ObjectMeta: metav1.ObjectMeta{
- UID: types.UID(fmt.Sprintf("%d", 10000+i)),
- Name: fmt.Sprintf("pod%d", i),
- },
- }
- }
- return pods
- }
- func TestSyncLoopAbort(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- kubelet.runtimeState.setRuntimeSync(time.Now())
- // The syncLoop waits on time.After(resyncInterval); set it very large so that we don't race on
- // the channel close.
- kubelet.resyncInterval = time.Second * 30
- ch := make(chan kubetypes.PodUpdate)
- close(ch)
- // sanity check (also prevents this test from hanging in the next step)
- ok := kubelet.syncLoopIteration(ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1))
- require.False(t, ok, "Expected syncLoopIteration to return !ok since update chan was closed")
- // this should terminate immediately; if it hangs then the syncLoopIteration isn't aborting properly
- kubelet.syncLoop(ch, kubelet)
- }
- func TestSyncPodsStartPod(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- fakeRuntime := testKubelet.fakeRuntime
- pods := []*v1.Pod{
- podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
- Containers: []v1.Container{
- {Name: "bar"},
- },
- }),
- }
- kubelet.podManager.SetPods(pods)
- kubelet.HandlePodSyncs(pods)
- fakeRuntime.AssertStartedPods([]string{string(pods[0].UID)})
- }
- func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
- ready := false
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- fakeRuntime := testKubelet.fakeRuntime
- kubelet := testKubelet.kubelet
- kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return ready })
- fakeRuntime.PodList = []*containertest.FakePod{
- {Pod: &kubecontainer.Pod{
- ID: "12345678",
- Name: "foo",
- Namespace: "new",
- Containers: []*kubecontainer.Container{
- {Name: "bar"},
- },
- }},
- }
- kubelet.HandlePodCleanups()
- // Sources are not ready yet. Don't remove any pods.
- fakeRuntime.AssertKilledPods([]string{})
- ready = true
- kubelet.HandlePodCleanups()
- // Sources are ready. Remove unwanted pods.
- fakeRuntime.AssertKilledPods([]string{"12345678"})
- }
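- // testNodeLister is a stub node lister backed by a static slice of nodes.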
- type testNodeLister struct {
- nodes []*v1.Node
- }
- func (nl testNodeLister) Get(name string) (*v1.Node, error) {
- for _, node := range nl.nodes {
- if node.Name == name {
- return node, nil
- }
- }
- return nil, fmt.Errorf("Node with name: %s does not exist", name)
- }
- func (nl testNodeLister) List(_ labels.Selector) (ret []*v1.Node, err error) {
- return nl.nodes, nil
- }
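- // checkPodStatus asserts that the status manager has a cached status for the pod and that its phase matches the expected phase.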
- func checkPodStatus(t *testing.T, kl *Kubelet, pod *v1.Pod, phase v1.PodPhase) {
- status, found := kl.statusManager.GetPodStatus(pod.UID)
- require.True(t, found, "Status of pod %q is not found in the status map", pod.UID)
- require.Equal(t, phase, status.Phase)
- }
- // Tests that we handle port conflicts correctly by setting the failed status in status map.
- func TestHandlePortConflicts(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kl := testKubelet.kubelet
- kl.nodeLister = testNodeLister{nodes: []*v1.Node{
- {
- ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
- Status: v1.NodeStatus{
- Allocatable: v1.ResourceList{
- v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
- },
- },
- },
- }}
- recorder := record.NewFakeRecorder(20)
- nodeRef := &v1.ObjectReference{
- Kind: "Node",
- Name: string("testNode"),
- UID: types.UID("testNode"),
- Namespace: "",
- }
- testClusterDNSDomain := "TEST"
- kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
- spec := v1.PodSpec{NodeName: string(kl.nodeName), Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}
- pods := []*v1.Pod{
- podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
- podWithUIDNameNsSpec("987654321", "oldpod", "foo", spec),
- }
- // Make sure the Pods are in the reverse order of creation time.
- pods[1].CreationTimestamp = metav1.NewTime(time.Now())
- pods[0].CreationTimestamp = metav1.NewTime(time.Now().Add(1 * time.Second))
- // The newer pod should be rejected.
- notfittingPod := pods[0]
- fittingPod := pods[1]
- kl.HandlePodAdditions(pods)
- // Check pod status stored in the status map.
- checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
- checkPodStatus(t, kl, fittingPod, v1.PodPending)
- }
- // Tests that we handle host name conflicts correctly by setting the failed status in status map.
- func TestHandleHostNameConflicts(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kl := testKubelet.kubelet
- kl.nodeLister = testNodeLister{nodes: []*v1.Node{
- {
- ObjectMeta: metav1.ObjectMeta{Name: "127.0.0.1"},
- Status: v1.NodeStatus{
- Allocatable: v1.ResourceList{
- v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
- },
- },
- },
- }}
- recorder := record.NewFakeRecorder(20)
- nodeRef := &v1.ObjectReference{
- Kind: "Node",
- Name: string("testNode"),
- UID: types.UID("testNode"),
- Namespace: "",
- }
- testClusterDNSDomain := "TEST"
- kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
- // default NodeName in test is 127.0.0.1
- pods := []*v1.Pod{
- podWithUIDNameNsSpec("123456789", "notfittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.2"}),
- podWithUIDNameNsSpec("987654321", "fittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.1"}),
- }
- notfittingPod := pods[0]
- fittingPod := pods[1]
- kl.HandlePodAdditions(pods)
- // Check pod status stored in the status map.
- checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
- checkPodStatus(t, kl, fittingPod, v1.PodPending)
- }
- // Tests that we correctly handle a non-matching node selector by setting the failed status in the status map.
- func TestHandleNodeSelector(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kl := testKubelet.kubelet
- nodes := []*v1.Node{
- {
- ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}},
- Status: v1.NodeStatus{
- Allocatable: v1.ResourceList{
- v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
- },
- },
- },
- }
- kl.nodeLister = testNodeLister{nodes: nodes}
- recorder := record.NewFakeRecorder(20)
- nodeRef := &v1.ObjectReference{
- Kind: "Node",
- Name: string("testNode"),
- UID: types.UID("testNode"),
- Namespace: "",
- }
- testClusterDNSDomain := "TEST"
- kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
- pods := []*v1.Pod{
- podWithUIDNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
- podWithUIDNameNsSpec("987654321", "podB", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
- }
- // The first pod should be rejected.
- notfittingPod := pods[0]
- fittingPod := pods[1]
- kl.HandlePodAdditions(pods)
- // Check pod status stored in the status map.
- checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
- checkPodStatus(t, kl, fittingPod, v1.PodPending)
- }
- // Tests that we handle exceeded resources correctly by setting the failed status in status map.
- func TestHandleMemExceeded(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kl := testKubelet.kubelet
- nodes := []*v1.Node{
- {ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
- Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI),
- v1.ResourcePods: *resource.NewQuantity(40, resource.DecimalSI),
- }}},
- }
- kl.nodeLister = testNodeLister{nodes: nodes}
- recorder := record.NewFakeRecorder(20)
- nodeRef := &v1.ObjectReference{
- Kind: "Node",
- Name: string("testNode"),
- UID: types.UID("testNode"),
- Namespace: "",
- }
- testClusterDNSDomain := "TEST"
- kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
- spec := v1.PodSpec{NodeName: string(kl.nodeName),
- Containers: []v1.Container{{Resources: v1.ResourceRequirements{
- Requests: v1.ResourceList{
- v1.ResourceMemory: resource.MustParse("90"),
- },
- }}},
- }
- pods := []*v1.Pod{
- podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
- podWithUIDNameNsSpec("987654321", "oldpod", "foo", spec),
- }
- // Make sure the Pods are in the reverse order of creation time.
- pods[1].CreationTimestamp = metav1.NewTime(time.Now())
- pods[0].CreationTimestamp = metav1.NewTime(time.Now().Add(1 * time.Second))
- // The newer pod should be rejected.
- notfittingPod := pods[0]
- fittingPod := pods[1]
- kl.HandlePodAdditions(pods)
- // Check pod status stored in the status map.
- checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
- checkPodStatus(t, kl, fittingPod, v1.PodPending)
- }
- // Tests that we handle the result of the UpdatePluginResources interface correctly
- // by setting the corresponding status in the status map.
- func TestHandlePluginResources(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kl := testKubelet.kubelet
- adjustedResource := v1.ResourceName("domain1.com/adjustedResource")
- emptyResource := v1.ResourceName("domain2.com/emptyResource")
- missingResource := v1.ResourceName("domain2.com/missingResource")
- failedResource := v1.ResourceName("domain2.com/failedResource")
- resourceQuantity0 := *resource.NewQuantity(int64(0), resource.DecimalSI)
- resourceQuantity1 := *resource.NewQuantity(int64(1), resource.DecimalSI)
- resourceQuantity2 := *resource.NewQuantity(int64(2), resource.DecimalSI)
- resourceQuantityInvalid := *resource.NewQuantity(int64(-1), resource.DecimalSI)
- allowedPodQuantity := *resource.NewQuantity(int64(10), resource.DecimalSI)
- nodes := []*v1.Node{
- {ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
- Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: v1.ResourceList{
- adjustedResource: resourceQuantity1,
- emptyResource: resourceQuantity0,
- v1.ResourcePods: allowedPodQuantity,
- }}},
- }
- kl.nodeLister = testNodeLister{nodes: nodes}
- updatePluginResourcesFunc := func(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
- // Maps from resourceName to the value we use to set node.allocatableResource[resourceName].
- // A resource with an invalid value (< 0) causes the function to return an error,
- // to emulate a resource allocation failure.
- // Resources not contained in this map will have their node.allocatableResource
- // quantity unchanged.
- updateResourceMap := map[v1.ResourceName]resource.Quantity{
- adjustedResource: resourceQuantity2,
- emptyResource: resourceQuantity0,
- failedResource: resourceQuantityInvalid,
- }
- pod := attrs.Pod
- allocatableResource := node.AllocatableResource()
- newAllocatableResource := allocatableResource.Clone()
- for _, container := range pod.Spec.Containers {
- for resource := range container.Resources.Requests {
- newQuantity, exist := updateResourceMap[resource]
- if !exist {
- continue
- }
- if newQuantity.Value() < 0 {
- return fmt.Errorf("Allocation failed")
- }
- newAllocatableResource.ScalarResources[resource] = newQuantity.Value()
- }
- }
- node.SetAllocatableResource(newAllocatableResource)
- return nil
- }
- // Add updatePluginResourcesFunc to the admission handlers to test its behavior.
- kl.admitHandlers = lifecycle.PodAdmitHandlers{}
- kl.admitHandlers.AddPodAdmitHandler(lifecycle.NewPredicateAdmitHandler(kl.getNodeAnyWay, lifecycle.NewAdmissionFailureHandlerStub(), updatePluginResourcesFunc))
- recorder := record.NewFakeRecorder(20)
- nodeRef := &v1.ObjectReference{
- Kind: "Node",
- Name: string("testNode"),
- UID: types.UID("testNode"),
- Namespace: "",
- }
- testClusterDNSDomain := "TEST"
- kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
- // pod requiring adjustedResource can be successfully allocated because updatePluginResourcesFunc
- // adjusts node.allocatableResource for this resource to a sufficient value.
- fittingPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
- Containers: []v1.Container{{Resources: v1.ResourceRequirements{
- Limits: v1.ResourceList{
- adjustedResource: resourceQuantity2,
- },
- Requests: v1.ResourceList{
- adjustedResource: resourceQuantity2,
- },
- }}},
- }
- // pod requiring emptyResource (an extended resource with 0 allocatable) will
- // not pass PredicateAdmit.
- emptyPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
- Containers: []v1.Container{{Resources: v1.ResourceRequirements{
- Limits: v1.ResourceList{
- emptyResource: resourceQuantity2,
- },
- Requests: v1.ResourceList{
- emptyResource: resourceQuantity2,
- },
- }}},
- }
- // pod requiring missingResource will pass PredicateAdmit.
- //
- // Extended resources missing in node status are ignored in PredicateAdmit.
- // This is required to support extended resources that are not managed by
- // device plugin, such as cluster-level resources.
- missingPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
- Containers: []v1.Container{{Resources: v1.ResourceRequirements{
- Limits: v1.ResourceList{
- missingResource: resourceQuantity2,
- },
- Requests: v1.ResourceList{
- missingResource: resourceQuantity2,
- },
- }}},
- }
- // pod requiring failedResource will be rejected because allocation of that resource fails.
- failedPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
- Containers: []v1.Container{{Resources: v1.ResourceRequirements{
- Limits: v1.ResourceList{
- failedResource: resourceQuantity1,
- },
- Requests: v1.ResourceList{
- failedResource: resourceQuantity1,
- },
- }}},
- }
- fittingPod := podWithUIDNameNsSpec("1", "fittingpod", "foo", fittingPodSpec)
- emptyPod := podWithUIDNameNsSpec("2", "emptypod", "foo", emptyPodSpec)
- missingPod := podWithUIDNameNsSpec("3", "missingpod", "foo", missingPodSpec)
- failedPod := podWithUIDNameNsSpec("4", "failedpod", "foo", failedPodSpec)
- kl.HandlePodAdditions([]*v1.Pod{fittingPod, emptyPod, missingPod, failedPod})
- // Check pod status stored in the status map.
- checkPodStatus(t, kl, fittingPod, v1.PodPending)
- checkPodStatus(t, kl, emptyPod, v1.PodFailed)
- checkPodStatus(t, kl, missingPod, v1.PodPending)
- checkPodStatus(t, kl, failedPod, v1.PodFailed)
- }
- // TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without external signal.
- func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kl := testKubelet.kubelet
- pods := []*v1.Pod{
- {ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: "1234"}, Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}},
- {ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: "4567"}, Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}},
- }
- podToTest := pods[1]
- // Run once to populate the status map.
- kl.HandlePodAdditions(pods)
- if _, found := kl.statusManager.GetPodStatus(podToTest.UID); !found {
- t.Fatalf("expected to have status cached for pod2")
- }
- // Sync with empty pods so that the entry in status map will be removed.
- kl.podManager.SetPods([]*v1.Pod{})
- kl.HandlePodCleanups()
- if _, found := kl.statusManager.GetPodStatus(podToTest.UID); found {
- t.Fatalf("expected to not have status cached for pod2")
- }
- }
- func TestValidateContainerLogStatus(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- containerName := "x"
- testCases := []struct {
- statuses []v1.ContainerStatus
- success bool // whether getting logs for the container should succeed.
- pSuccess bool // whether getting logs for the previous container should succeed.
- }{
- {
- statuses: []v1.ContainerStatus{
- {
- Name: containerName,
- State: v1.ContainerState{
- Running: &v1.ContainerStateRunning{},
- },
- LastTerminationState: v1.ContainerState{
- Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
- },
- },
- },
- success: true,
- pSuccess: true,
- },
- {
- statuses: []v1.ContainerStatus{
- {
- Name: containerName,
- State: v1.ContainerState{
- Running: &v1.ContainerStateRunning{},
- },
- },
- },
- success: true,
- pSuccess: false,
- },
- {
- statuses: []v1.ContainerStatus{
- {
- Name: containerName,
- State: v1.ContainerState{
- Terminated: &v1.ContainerStateTerminated{},
- },
- },
- },
- success: false,
- pSuccess: false,
- },
- {
- statuses: []v1.ContainerStatus{
- {
- Name: containerName,
- State: v1.ContainerState{
- Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
- },
- },
- },
- success: true,
- pSuccess: false,
- },
- {
- statuses: []v1.ContainerStatus{
- {
- Name: containerName,
- State: v1.ContainerState{
- Terminated: &v1.ContainerStateTerminated{},
- },
- LastTerminationState: v1.ContainerState{
- Terminated: &v1.ContainerStateTerminated{},
- },
- },
- },
- success: false,
- pSuccess: false,
- },
- {
- statuses: []v1.ContainerStatus{
- {
- Name: containerName,
- State: v1.ContainerState{
- Terminated: &v1.ContainerStateTerminated{},
- },
- LastTerminationState: v1.ContainerState{
- Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
- },
- },
- },
- success: true,
- pSuccess: true,
- },
- {
- statuses: []v1.ContainerStatus{
- {
- Name: containerName,
- State: v1.ContainerState{
- Waiting: &v1.ContainerStateWaiting{},
- },
- },
- },
- success: false,
- pSuccess: false,
- },
- {
- statuses: []v1.ContainerStatus{
- {
- Name: containerName,
- State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "ErrImagePull"}},
- },
- },
- success: false,
- pSuccess: false,
- },
- {
- statuses: []v1.ContainerStatus{
- {
- Name: containerName,
- State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "ErrImagePullBackOff"}},
- },
- },
- success: false,
- pSuccess: false,
- },
- }
- for i, tc := range testCases {
- // Access the log of the most recent container
- previous := false
- podStatus := &v1.PodStatus{ContainerStatuses: tc.statuses}
- _, err := kubelet.validateContainerLogStatus("podName", podStatus, containerName, previous)
- if !tc.success {
- assert.Error(t, err, fmt.Sprintf("[case %d] error", i))
- } else {
- assert.NoError(t, err, "[case %d] error", i)
- }
- // Access the log of the previous, terminated container
- previous = true
- _, err = kubelet.validateContainerLogStatus("podName", podStatus, containerName, previous)
- if !tc.pSuccess {
- assert.Error(t, err, fmt.Sprintf("[case %d] error", i))
- } else {
- assert.NoError(t, err, "[case %d] error", i)
- }
- // Access the log of a container that's not in the pod
- _, err = kubelet.validateContainerLogStatus("podName", podStatus, "blah", false)
- assert.Error(t, err, fmt.Sprintf("[case %d] invalid container name should cause an error", i))
- }
- }
- func TestCreateMirrorPod(t *testing.T) {
- for _, updateType := range []kubetypes.SyncPodType{kubetypes.SyncPodCreate, kubetypes.SyncPodUpdate} {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kl := testKubelet.kubelet
- manager := testKubelet.fakeMirrorClient
- pod := podWithUIDNameNs("12345678", "bar", "foo")
- pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
- pods := []*v1.Pod{pod}
- kl.podManager.SetPods(pods)
- err := kl.syncPod(syncPodOptions{
- pod: pod,
- podStatus: &kubecontainer.PodStatus{},
- updateType: updateType,
- })
- assert.NoError(t, err)
- podFullName := kubecontainer.GetPodFullName(pod)
- assert.True(t, manager.HasPod(podFullName), "Expected mirror pod %q to be created", podFullName)
- assert.Equal(t, 1, manager.NumOfPods(), "Expected only 1 mirror pod %q, got %+v", podFullName, manager.GetPods())
- }
- }
- func TestDeleteOutdatedMirrorPod(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kl := testKubelet.kubelet
- manager := testKubelet.fakeMirrorClient
- pod := podWithUIDNameNsSpec("12345678", "foo", "ns", v1.PodSpec{
- Containers: []v1.Container{
- {Name: "1234", Image: "foo"},
- },
- })
- pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
- // Mirror pod has an outdated spec.
- mirrorPod := podWithUIDNameNsSpec("11111111", "foo", "ns", v1.PodSpec{
- Containers: []v1.Container{
- {Name: "1234", Image: "bar"},
- },
- })
- mirrorPod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "api"
- mirrorPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = "mirror"
- pods := []*v1.Pod{pod, mirrorPod}
- kl.podManager.SetPods(pods)
- err := kl.syncPod(syncPodOptions{
- pod: pod,
- mirrorPod: mirrorPod,
- podStatus: &kubecontainer.PodStatus{},
- updateType: kubetypes.SyncPodUpdate,
- })
- assert.NoError(t, err)
- name := kubecontainer.GetPodFullName(pod)
- creates, deletes := manager.GetCounts(name)
- if creates != 1 || deletes != 1 {
- t.Errorf("expected 1 creation and 1 deletion of %q, got %d, %d", name, creates, deletes)
- }
- }
- func TestDeleteOrphanedMirrorPods(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kl := testKubelet.kubelet
- manager := testKubelet.fakeMirrorClient
- orphanPods := []*v1.Pod{
- {
- ObjectMeta: metav1.ObjectMeta{
- UID: "12345678",
- Name: "pod1",
- Namespace: "ns",
- Annotations: map[string]string{
- kubetypes.ConfigSourceAnnotationKey: "api",
- kubetypes.ConfigMirrorAnnotationKey: "mirror",
- },
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- UID: "12345679",
- Name: "pod2",
- Namespace: "ns",
- Annotations: map[string]string{
- kubetypes.ConfigSourceAnnotationKey: "api",
- kubetypes.ConfigMirrorAnnotationKey: "mirror",
- },
- },
- },
- }
- kl.podManager.SetPods(orphanPods)
- // Sync with an empty pod list to delete all mirror pods.
- kl.HandlePodCleanups()
- assert.Len(t, manager.GetPods(), 0, "Expected 0 mirror pods")
- for _, pod := range orphanPods {
- name := kubecontainer.GetPodFullName(pod)
- creates, deletes := manager.GetCounts(name)
- if creates != 0 || deletes != 1 {
- t.Errorf("expected 0 creation and one deletion of %q, got %d, %d", name, creates, deletes)
- }
- }
- }
- func TestGetContainerInfoForMirrorPods(t *testing.T) {
- // pods contain one static and one mirror pod with the same name but
- // different UIDs.
- pods := []*v1.Pod{
- {
- ObjectMeta: metav1.ObjectMeta{
- UID: "1234",
- Name: "qux",
- Namespace: "ns",
- Annotations: map[string]string{
- kubetypes.ConfigSourceAnnotationKey: "file",
- },
- },
- Spec: v1.PodSpec{
- Containers: []v1.Container{
- {Name: "foo"},
- },
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- UID: "5678",
- Name: "qux",
- Namespace: "ns",
- Annotations: map[string]string{
- kubetypes.ConfigSourceAnnotationKey: "api",
- kubetypes.ConfigMirrorAnnotationKey: "mirror",
- },
- },
- Spec: v1.PodSpec{
- Containers: []v1.Container{
- {Name: "foo"},
- },
- },
- },
- }
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- fakeRuntime := testKubelet.fakeRuntime
- cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
- kubelet := testKubelet.kubelet
- fakeRuntime.PodList = []*containertest.FakePod{
- {Pod: &kubecontainer.Pod{
- ID: "1234",
- Name: "qux",
- Namespace: "ns",
- Containers: []*kubecontainer.Container{
- {
- Name: "foo",
- ID: kubecontainer.ContainerID{Type: "test", ID: "ab2cdf"},
- },
- },
- }},
- }
- kubelet.podManager.SetPods(pods)
- // Use the mirror pod UID to retrieve the stats.
- stats, err := kubelet.GetContainerInfo("qux_ns", "5678", "foo", cadvisorReq)
- assert.NoError(t, err)
- require.NotNil(t, stats)
- }
- func TestNetworkErrorsWithoutHostNetwork(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- kubelet.runtimeState.setNetworkState(fmt.Errorf("simulated network error"))
- pod := podWithUIDNameNsSpec("12345678", "hostnetwork", "new", v1.PodSpec{
- HostNetwork: false,
- Containers: []v1.Container{
- {Name: "foo"},
- },
- })
- kubelet.podManager.SetPods([]*v1.Pod{pod})
- err := kubelet.syncPod(syncPodOptions{
- pod: pod,
- podStatus: &kubecontainer.PodStatus{},
- updateType: kubetypes.SyncPodUpdate,
- })
- assert.Error(t, err, "expected pod with hostNetwork=false to fail when network in error")
- pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource
- pod.Spec.HostNetwork = true
- err = kubelet.syncPod(syncPodOptions{
- pod: pod,
- podStatus: &kubecontainer.PodStatus{},
- updateType: kubetypes.SyncPodUpdate,
- })
- assert.NoError(t, err, "expected pod with hostNetwork=true to succeed when network in error")
- }
- func TestFilterOutTerminatedPods(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- pods := newTestPods(5)
- now := metav1.NewTime(time.Now())
- pods[0].Status.Phase = v1.PodFailed
- pods[1].Status.Phase = v1.PodSucceeded
- // The pod is terminating and should not be filtered out.
- pods[2].Status.Phase = v1.PodRunning
- pods[2].DeletionTimestamp = &now
- pods[2].Status.ContainerStatuses = []v1.ContainerStatus{
- {State: v1.ContainerState{
- Running: &v1.ContainerStateRunning{
- StartedAt: now,
- },
- }},
- }
- pods[3].Status.Phase = v1.PodPending
- pods[4].Status.Phase = v1.PodRunning
- expected := []*v1.Pod{pods[2], pods[3], pods[4]}
- kubelet.podManager.SetPods(pods)
- actual := kubelet.filterOutTerminatedPods(pods)
- assert.Equal(t, expected, actual)
- }
- func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- fakeRuntime := testKubelet.fakeRuntime
- kubelet := testKubelet.kubelet
- now := metav1.Now()
- startTime := metav1.NewTime(now.Time.Add(-1 * time.Minute))
- exceededActiveDeadlineSeconds := int64(30)
- pods := []*v1.Pod{
- {
- ObjectMeta: metav1.ObjectMeta{
- UID: "12345678",
- Name: "bar",
- Namespace: "new",
- },
- Spec: v1.PodSpec{
- Containers: []v1.Container{
- {Name: "foo"},
- },
- ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
- },
- Status: v1.PodStatus{
- StartTime: &startTime,
- },
- },
- }
- fakeRuntime.PodList = []*containertest.FakePod{
- {Pod: &kubecontainer.Pod{
- ID: "12345678",
- Name: "bar",
- Namespace: "new",
- Containers: []*kubecontainer.Container{
- {Name: "foo"},
- },
- }},
- }
- // Let the pod workers set the status to failed after this sync.
- kubelet.HandlePodUpdates(pods)
- status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
- assert.True(t, found, "expected to found status for pod %q", pods[0].UID)
- assert.Equal(t, v1.PodFailed, status.Phase)
- // Check that the pod status contains ContainerStatuses, etc.
- assert.NotNil(t, status.ContainerStatuses)
- }
- func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- fakeRuntime := testKubelet.fakeRuntime
- kubelet := testKubelet.kubelet
- now := metav1.Now()
- startTime := metav1.NewTime(now.Time.Add(-1 * time.Minute))
- exceededActiveDeadlineSeconds := int64(300)
- pods := []*v1.Pod{
- {
- ObjectMeta: metav1.ObjectMeta{
- UID: "12345678",
- Name: "bar",
- Namespace: "new",
- },
- Spec: v1.PodSpec{
- Containers: []v1.Container{
- {Name: "foo"},
- },
- ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
- },
- Status: v1.PodStatus{
- StartTime: &startTime,
- },
- },
- }
- fakeRuntime.PodList = []*containertest.FakePod{
- {Pod: &kubecontainer.Pod{
- ID: "12345678",
- Name: "bar",
- Namespace: "new",
- Containers: []*kubecontainer.Container{
- {Name: "foo"},
- },
- }},
- }
- kubelet.podManager.SetPods(pods)
- kubelet.HandlePodUpdates(pods)
- status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
- assert.True(t, found, "expected to found status for pod %q", pods[0].UID)
- assert.NotEqual(t, v1.PodFailed, status.Phase)
- }
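- // podWithUIDNameNs returns a minimal pod with the given UID, name, and namespace and a non-nil (empty) annotations map so callers can set annotations directly.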
- func podWithUIDNameNs(uid types.UID, name, namespace string) *v1.Pod {
- return &v1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- UID: uid,
- Name: name,
- Namespace: namespace,
- Annotations: map[string]string{},
- },
- }
- }
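- // podWithUIDNameNsSpec is like podWithUIDNameNs but also sets the given pod spec.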
- func podWithUIDNameNsSpec(uid types.UID, name, namespace string, spec v1.PodSpec) *v1.Pod {
- pod := podWithUIDNameNs(uid, name, namespace)
- pod.Spec = spec
- return pod
- }
- func TestDeletePodDirsForDeletedPods(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kl := testKubelet.kubelet
- pods := []*v1.Pod{
- podWithUIDNameNs("12345678", "pod1", "ns"),
- podWithUIDNameNs("12345679", "pod2", "ns"),
- }
- kl.podManager.SetPods(pods)
- // Sync to create pod directories.
- kl.HandlePodSyncs(kl.podManager.GetPods())
- for i := range pods {
- assert.True(t, dirExists(kl.getPodDir(pods[i].UID)), "Expected directory to exist for pod %d", i)
- }
- // Pod 1 has been deleted and no longer exists.
- kl.podManager.SetPods([]*v1.Pod{pods[0]})
- kl.HandlePodCleanups()
- assert.True(t, dirExists(kl.getPodDir(pods[0].UID)), "Expected directory to exist for pod 0")
- assert.False(t, dirExists(kl.getPodDir(pods[1].UID)), "Expected directory to be deleted for pod 1")
- }
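- // syncAndVerifyPodDir syncs and cleans up the given pods, then asserts whether the pod directory of each pod in podsToCheck exists.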
- func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*v1.Pod, podsToCheck []*v1.Pod, shouldExist bool) {
- kl := testKubelet.kubelet
- kl.podManager.SetPods(pods)
- kl.HandlePodSyncs(pods)
- kl.HandlePodCleanups()
- for i, pod := range podsToCheck {
- exist := dirExists(kl.getPodDir(pod.UID))
- assert.Equal(t, shouldExist, exist, "directory of pod %d", i)
- }
- }
- func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kl := testKubelet.kubelet
- pods := []*v1.Pod{
- podWithUIDNameNs("12345678", "pod1", "ns"),
- podWithUIDNameNs("12345679", "pod2", "ns"),
- podWithUIDNameNs("12345680", "pod3", "ns"),
- }
- syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
- // Pod 1 failed, and pod 2 succeeded. None of the pod directories should be
- // deleted.
- kl.statusManager.SetPodStatus(pods[1], v1.PodStatus{Phase: v1.PodFailed})
- kl.statusManager.SetPodStatus(pods[2], v1.PodStatus{Phase: v1.PodSucceeded})
- syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
- }
- func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- runningPod := &kubecontainer.Pod{
- ID: "12345678",
- Name: "pod1",
- Namespace: "ns",
- }
- apiPod := podWithUIDNameNs(runningPod.ID, runningPod.Name, runningPod.Namespace)
- // Sync once to create pod directory; confirm that the pod directory has
- // already been created.
- pods := []*v1.Pod{apiPod}
- syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, true)
- // Pretend the pod is deleted from apiserver, but is still active on the node.
- // The pod directory should not be removed.
- pods = []*v1.Pod{}
- testKubelet.fakeRuntime.PodList = []*containertest.FakePod{{Pod: runningPod, NetnsPath: ""}}
- syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, true)
- // The pod is deleted and also not active on the node. The pod directory
- // should be removed.
- pods = []*v1.Pod{}
- testKubelet.fakeRuntime.PodList = []*containertest.FakePod{}
- syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, false)
- }
- func TestGetPodsToSync(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- clock := testKubelet.fakeClock
- pods := newTestPods(5)
- exceededActiveDeadlineSeconds := int64(30)
- notYetActiveDeadlineSeconds := int64(120)
- startTime := metav1.NewTime(clock.Now())
- pods[0].Status.StartTime = &startTime
- pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
- pods[1].Status.StartTime = &startTime
- pods[1].Spec.ActiveDeadlineSeconds = &notYetActiveDeadlineSeconds
- pods[2].Status.StartTime = &startTime
- pods[2].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
- kubelet.podManager.SetPods(pods)
- kubelet.workQueue.Enqueue(pods[2].UID, 0)
- kubelet.workQueue.Enqueue(pods[3].UID, 30*time.Second)
- kubelet.workQueue.Enqueue(pods[4].UID, 2*time.Minute)
- clock.Step(1 * time.Minute)
- expected := []*v1.Pod{pods[2], pods[3], pods[0]}
- podsToSync := kubelet.getPodsToSync()
- sort.Sort(podsByUID(expected))
- sort.Sort(podsByUID(podsToSync))
- assert.Equal(t, expected, podsToSync)
- }
- func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- numContainers := 10
- expectedOrder := []string{}
- cStatuses := []*kubecontainer.ContainerStatus{}
- specContainerList := []v1.Container{}
- for i := 0; i < numContainers; i++ {
- id := fmt.Sprintf("%v", i)
- containerName := fmt.Sprintf("%vcontainer", id)
- expectedOrder = append(expectedOrder, containerName)
- cStatus := &kubecontainer.ContainerStatus{
- ID: kubecontainer.BuildContainerID("test", id),
- Name: containerName,
- }
- // Rearrange container statuses
- if i%2 == 0 {
- cStatuses = append(cStatuses, cStatus)
- } else {
- cStatuses = append([]*kubecontainer.ContainerStatus{cStatus}, cStatuses...)
- }
- specContainerList = append(specContainerList, v1.Container{Name: containerName})
- }
- pod := podWithUIDNameNs("uid1", "foo", "test")
- pod.Spec = v1.PodSpec{
- Containers: specContainerList,
- }
- status := &kubecontainer.PodStatus{
- ID: pod.UID,
- Name: pod.Name,
- Namespace: pod.Namespace,
- ContainerStatuses: cStatuses,
- }
- for i := 0; i < 5; i++ {
- apiStatus := kubelet.generateAPIPodStatus(pod, status)
- for i, c := range apiStatus.ContainerStatuses {
- if expectedOrder[i] != c.Name {
- t.Fatalf("Container status not sorted, expected %v at index %d, but found %v", expectedOrder[i], i, c.Name)
- }
- }
- }
- }
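- // verifyContainerStatuses asserts that each reported container status has the expected state and last termination state, keyed by container name.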
- func verifyContainerStatuses(t *testing.T, statuses []v1.ContainerStatus, state, lastTerminationState map[string]v1.ContainerState, message string) {
- for _, s := range statuses {
- assert.Equal(t, s.State, state[s.Name], "%s: state", message)
- assert.Equal(t, s.LastTerminationState, lastTerminationState[s.Name], "%s: last terminated state", message)
- }
- }
- // Tests generateAPIPodStatus with different reason cache entries and old API pod statuses.
- func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
- // The following waiting reason and message are generated in convertStatusToAPIStatus()
- startWaitingReason := "ContainerCreating"
- initWaitingReason := "PodInitializing"
- testTimestamp := time.Unix(123456789, 987654321)
- testErrorReason := fmt.Errorf("test-error")
- emptyContainerID := (&kubecontainer.ContainerID{}).String()
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- pod := podWithUIDNameNs("12345678", "foo", "new")
- pod.Spec = v1.PodSpec{RestartPolicy: v1.RestartPolicyOnFailure}
- podStatus := &kubecontainer.PodStatus{
- ID: pod.UID,
- Name: pod.Name,
- Namespace: pod.Namespace,
- }
- tests := []struct {
- containers []v1.Container
- statuses []*kubecontainer.ContainerStatus
- reasons map[string]error
- oldStatuses []v1.ContainerStatus
- expectedState map[string]v1.ContainerState
- // Only set expectedInitState when it is different from expectedState
- expectedInitState map[string]v1.ContainerState
- expectedLastTerminationState map[string]v1.ContainerState
- }{
- // For a container with no historical record, State should be Waiting and LastTerminationState should be
- // retrieved from the old status from the apiserver.
- {
- containers: []v1.Container{{Name: "without-old-record"}, {Name: "with-old-record"}},
- statuses: []*kubecontainer.ContainerStatus{},
- reasons: map[string]error{},
- oldStatuses: []v1.ContainerStatus{{
- Name: "with-old-record",
- LastTerminationState: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{}},
- }},
- expectedState: map[string]v1.ContainerState{
- "without-old-record": {Waiting: &v1.ContainerStateWaiting{
- Reason: startWaitingReason,
- }},
- "with-old-record": {Waiting: &v1.ContainerStateWaiting{
- Reason: startWaitingReason,
- }},
- },
- expectedInitState: map[string]v1.ContainerState{
- "without-old-record": {Waiting: &v1.ContainerStateWaiting{
- Reason: initWaitingReason,
- }},
- "with-old-record": {Waiting: &v1.ContainerStateWaiting{
- Reason: initWaitingReason,
- }},
- },
- expectedLastTerminationState: map[string]v1.ContainerState{
- "with-old-record": {Terminated: &v1.ContainerStateTerminated{}},
- },
- },
- // For running container, State should be Running, LastTerminationState should be retrieved from latest terminated status.
- {
- containers: []v1.Container{{Name: "running"}},
- statuses: []*kubecontainer.ContainerStatus{
- {
- Name: "running",
- State: kubecontainer.ContainerStateRunning,
- StartedAt: testTimestamp,
- },
- {
- Name: "running",
- State: kubecontainer.ContainerStateExited,
- ExitCode: 1,
- },
- },
- reasons: map[string]error{},
- oldStatuses: []v1.ContainerStatus{},
- expectedState: map[string]v1.ContainerState{
- "running": {Running: &v1.ContainerStateRunning{
- StartedAt: metav1.NewTime(testTimestamp),
- }},
- },
- expectedLastTerminationState: map[string]v1.ContainerState{
- "running": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 1,
- ContainerID: emptyContainerID,
- }},
- },
- },
- // For a terminated container:
- // * If there is no recent start error record, State should be Terminated and LastTerminationState should be
- // retrieved from the second latest terminated status;
- // * If there is a recent start error record, State should be Waiting and LastTerminationState should be
- // retrieved from the latest terminated status;
- // * If ExitCode is 0 and the restart policy is RestartPolicyOnFailure, the container shouldn't be restarted.
- // Regardless of whether there is a recent start error, State should be Terminated and LastTerminationState
- // should be retrieved from the second latest terminated status.
- {
- containers: []v1.Container{{Name: "without-reason"}, {Name: "with-reason"}},
- statuses: []*kubecontainer.ContainerStatus{
- {
- Name: "without-reason",
- State: kubecontainer.ContainerStateExited,
- ExitCode: 1,
- },
- {
- Name: "with-reason",
- State: kubecontainer.ContainerStateExited,
- ExitCode: 2,
- },
- {
- Name: "without-reason",
- State: kubecontainer.ContainerStateExited,
- ExitCode: 3,
- },
- {
- Name: "with-reason",
- State: kubecontainer.ContainerStateExited,
- ExitCode: 4,
- },
- {
- Name: "succeed",
- State: kubecontainer.ContainerStateExited,
- ExitCode: 0,
- },
- {
- Name: "succeed",
- State: kubecontainer.ContainerStateExited,
- ExitCode: 5,
- },
- },
- reasons: map[string]error{"with-reason": testErrorReason, "succeed": testErrorReason},
- oldStatuses: []v1.ContainerStatus{},
- expectedState: map[string]v1.ContainerState{
- "without-reason": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 1,
- ContainerID: emptyContainerID,
- }},
- "with-reason": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
- "succeed": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 0,
- ContainerID: emptyContainerID,
- }},
- },
- expectedLastTerminationState: map[string]v1.ContainerState{
- "without-reason": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 3,
- ContainerID: emptyContainerID,
- }},
- "with-reason": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 2,
- ContainerID: emptyContainerID,
- }},
- "succeed": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 5,
- ContainerID: emptyContainerID,
- }},
- },
- },
- }
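- // Run every case for regular containers first. The reason cache is rebuilt per case so that recorded start errors cannot leak from one case into the next.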
- for i, test := range tests {
- kubelet.reasonCache = NewReasonCache()
- for n, e := range test.reasons {
- kubelet.reasonCache.add(pod.UID, n, e, "")
- }
- pod.Spec.Containers = test.containers
- pod.Status.ContainerStatuses = test.oldStatuses
- podStatus.ContainerStatuses = test.statuses
- apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
- verifyContainerStatuses(t, apiStatus.ContainerStatuses, test.expectedState, test.expectedLastTerminationState, fmt.Sprintf("case %d", i))
- }
- // Run the same cases again for init containers; the results should match unless expectedInitState overrides the expectation.
- for i, test := range tests {
- kubelet.reasonCache = NewReasonCache()
- for n, e := range test.reasons {
- kubelet.reasonCache.add(pod.UID, n, e, "")
- }
- pod.Spec.InitContainers = test.containers
- pod.Status.InitContainerStatuses = test.oldStatuses
- podStatus.ContainerStatuses = test.statuses
- apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
- expectedState := test.expectedState
- if test.expectedInitState != nil {
- expectedState = test.expectedInitState
- }
- verifyContainerStatuses(t, apiStatus.InitContainerStatuses, expectedState, test.expectedLastTerminationState, fmt.Sprintf("case %d", i))
- }
- }
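- // The sketch below is a hypothetical reading aid only, not kubelet code: it models when the
- // cases above expect a recorded start error to surface as a Waiting state instead of the
- // observed Terminated state for a regular container. (Init containers are the exception
- // covered in the next test: an exit code of 0 is always final for them, regardless of
- // restart policy.)
- func shouldReportStartError(policy v1.RestartPolicy, lastExitCode int32, hasRecentStartError bool) bool {
- if !hasRecentStartError {
- return false
- }
- switch policy {
- case v1.RestartPolicyNever:
- // The container is never restarted, so the terminated state stands.
- return false
- case v1.RestartPolicyOnFailure:
- // Exit code 0 is final under OnFailure; only failures are retried.
- return lastExitCode != 0
- default: // v1.RestartPolicyAlways
- return true
- }
- }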
- // Test generateAPIPodStatus with different restart policies.
- func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) {
- testErrorReason := fmt.Errorf("test-error")
- emptyContainerID := (&kubecontainer.ContainerID{}).String()
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- pod := podWithUIDNameNs("12345678", "foo", "new")
- containers := []v1.Container{{Name: "succeed"}, {Name: "failed"}}
- podStatus := &kubecontainer.PodStatus{
- ID: pod.UID,
- Name: pod.Name,
- Namespace: pod.Namespace,
- ContainerStatuses: []*kubecontainer.ContainerStatus{
- {
- Name: "succeed",
- State: kubecontainer.ContainerStateExited,
- ExitCode: 0,
- },
- {
- Name: "failed",
- State: kubecontainer.ContainerStateExited,
- ExitCode: 1,
- },
- {
- Name: "succeed",
- State: kubecontainer.ContainerStateExited,
- ExitCode: 2,
- },
- {
- Name: "failed",
- State: kubecontainer.ContainerStateExited,
- ExitCode: 3,
- },
- },
- }
- kubelet.reasonCache.add(pod.UID, "succeed", testErrorReason, "")
- kubelet.reasonCache.add(pod.UID, "failed", testErrorReason, "")
- for c, test := range []struct {
- restartPolicy v1.RestartPolicy
- expectedState map[string]v1.ContainerState
- expectedLastTerminationState map[string]v1.ContainerState
- // Only set expectedInitState when it is different from expectedState
- expectedInitState map[string]v1.ContainerState
- // Only set expectedInitLastTerminationState when it is different from expectedLastTerminationState
- expectedInitLastTerminationState map[string]v1.ContainerState
- }{
- {
- restartPolicy: v1.RestartPolicyNever,
- expectedState: map[string]v1.ContainerState{
- "succeed": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 0,
- ContainerID: emptyContainerID,
- }},
- "failed": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 1,
- ContainerID: emptyContainerID,
- }},
- },
- expectedLastTerminationState: map[string]v1.ContainerState{
- "succeed": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 2,
- ContainerID: emptyContainerID,
- }},
- "failed": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 3,
- ContainerID: emptyContainerID,
- }},
- },
- },
- {
- restartPolicy: v1.RestartPolicyOnFailure,
- expectedState: map[string]v1.ContainerState{
- "succeed": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 0,
- ContainerID: emptyContainerID,
- }},
- "failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
- },
- expectedLastTerminationState: map[string]v1.ContainerState{
- "succeed": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 2,
- ContainerID: emptyContainerID,
- }},
- "failed": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 1,
- ContainerID: emptyContainerID,
- }},
- },
- },
- {
- restartPolicy: v1.RestartPolicyAlways,
- expectedState: map[string]v1.ContainerState{
- "succeed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
- "failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
- },
- expectedLastTerminationState: map[string]v1.ContainerState{
- "succeed": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 0,
- ContainerID: emptyContainerID,
- }},
- "failed": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 1,
- ContainerID: emptyContainerID,
- }},
- },
- // If an init container terminates with exit code 0, it won't be restarted even when the
- // restart policy is RestartPolicyAlways.
- expectedInitState: map[string]v1.ContainerState{
- "succeed": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 0,
- ContainerID: emptyContainerID,
- }},
- "failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
- },
- expectedInitLastTerminationState: map[string]v1.ContainerState{
- "succeed": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 2,
- ContainerID: emptyContainerID,
- }},
- "failed": {Terminated: &v1.ContainerStateTerminated{
- ExitCode: 1,
- ContainerID: emptyContainerID,
- }},
- },
- },
- } {
- pod.Spec.RestartPolicy = test.restartPolicy
- // Test normal containers
- pod.Spec.Containers = containers
- apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
- expectedState, expectedLastTerminationState := test.expectedState, test.expectedLastTerminationState
- verifyContainerStatuses(t, apiStatus.ContainerStatuses, expectedState, expectedLastTerminationState, fmt.Sprintf("case %d", c))
- pod.Spec.Containers = nil
- // Test init containers
- pod.Spec.InitContainers = containers
- apiStatus = kubelet.generateAPIPodStatus(pod, podStatus)
- if test.expectedInitState != nil {
- expectedState = test.expectedInitState
- }
- if test.expectedInitLastTerminationState != nil {
- expectedLastTerminationState = test.expectedInitLastTerminationState
- }
- verifyContainerStatuses(t, apiStatus.InitContainerStatuses, expectedState, expectedLastTerminationState, fmt.Sprintf("case %d", c))
- pod.Spec.InitContainers = nil
- }
- }
- // testPodAdmitHandler is a lifecycle.PodAdmitHandler for testing.
- type testPodAdmitHandler struct {
- // list of pods to reject.
- podsToReject []*v1.Pod
- }
- // Admit rejects any pod whose UID matches one of the pods in podsToReject; all other pods are admitted.
- func (a *testPodAdmitHandler) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
- for _, podToReject := range a.podsToReject {
- if podToReject.UID == attrs.Pod.UID {
- return lifecycle.PodAdmitResult{Admit: false, Reason: "Rejected", Message: "Pod is rejected"}
- }
- }
- return lifecycle.PodAdmitResult{Admit: true}
- }
- // Test verifies that the kubelet invokes an admission handler during HandlePodAdditions.
- func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kl := testKubelet.kubelet
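- // Provide a node with pod capacity so the kubelet's own admission checks have a node to evaluate against; only the test handler registered below should reject a pod.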
- kl.nodeLister = testNodeLister{nodes: []*v1.Node{
- {
- ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
- Status: v1.NodeStatus{
- Allocatable: v1.ResourceList{
- v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
- },
- },
- },
- }}
- pods := []*v1.Pod{
- {
- ObjectMeta: metav1.ObjectMeta{
- UID: "123456789",
- Name: "podA",
- Namespace: "foo",
- },
- },
- {
- ObjectMeta: metav1.ObjectMeta{
- UID: "987654321",
- Name: "podB",
- Namespace: "foo",
- },
- },
- }
- podToReject := pods[0]
- podToAdmit := pods[1]
- podsToReject := []*v1.Pod{podToReject}
- kl.admitHandlers.AddPodAdmitHandler(&testPodAdmitHandler{podsToReject: podsToReject})
- kl.HandlePodAdditions(pods)
- // Check pod status stored in the status map.
- checkPodStatus(t, kl, podToReject, v1.PodFailed)
- checkPodStatus(t, kl, podToAdmit, v1.PodPending)
- }
- // testPodSyncLoopHandler is a lifecycle.PodSyncLoopHandler that is used for testing.
- type testPodSyncLoopHandler struct {
- // list of pods to sync
- podsToSync []*v1.Pod
- }
- // ShouldSync reports whether the pod should be synced by the kubelet.
- func (a *testPodSyncLoopHandler) ShouldSync(pod *v1.Pod) bool {
- for _, podToSync := range a.podsToSync {
- if podToSync.UID == pod.UID {
- return true
- }
- }
- return false
- }
- // TestGetPodsToSyncInvokesPodSyncLoopHandlers ensures that getPodsToSync invokes the registered PodSyncLoopHandlers.
- func TestGetPodsToSyncInvokesPodSyncLoopHandlers(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- pods := newTestPods(5)
- expected := []*v1.Pod{pods[0]}
- kubelet.AddPodSyncLoopHandler(&testPodSyncLoopHandler{expected})
- kubelet.podManager.SetPods(pods)
- podsToSync := kubelet.getPodsToSync()
- sort.Sort(podsByUID(expected))
- sort.Sort(podsByUID(podsToSync))
- assert.Equal(t, expected, podsToSync)
- }
- // testPodSyncHandler is a lifecycle.PodSyncHandler that is used for testing.
- type testPodSyncHandler struct {
- // list of pods to evict.
- podsToEvict []*v1.Pod
- // the reason for the eviction
- reason string
- // the message for the eviction
- message string
- }
- // ShouldEvict reports whether the pod should be evicted by the kubelet.
- func (a *testPodSyncHandler) ShouldEvict(pod *v1.Pod) lifecycle.ShouldEvictResponse {
- for _, podToEvict := range a.podsToEvict {
- if podToEvict.UID == pod.UID {
- return lifecycle.ShouldEvictResponse{Evict: true, Reason: a.reason, Message: a.message}
- }
- }
- return lifecycle.ShouldEvictResponse{Evict: false}
- }
- // TestGenerateAPIPodStatusInvokesPodSyncHandlers verifies that generateAPIPodStatus invokes the registered PodSyncHandlers and reports the resulting eviction status.
- func TestGenerateAPIPodStatusInvokesPodSyncHandlers(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kubelet := testKubelet.kubelet
- pod := newTestPods(1)[0]
- podsToEvict := []*v1.Pod{pod}
- kubelet.AddPodSyncHandler(&testPodSyncHandler{podsToEvict, "Evicted", "because"})
- status := &kubecontainer.PodStatus{
- ID: pod.UID,
- Name: pod.Name,
- Namespace: pod.Namespace,
- }
- apiStatus := kubelet.generateAPIPodStatus(pod, status)
- require.Equal(t, v1.PodFailed, apiStatus.Phase)
- require.Equal(t, "Evicted", apiStatus.Reason)
- require.Equal(t, "because", apiStatus.Message)
- }
- func TestSyncPodKillPod(t *testing.T) {
- testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
- defer testKubelet.Cleanup()
- kl := testKubelet.kubelet
- pod := &v1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- UID: "12345678",
- Name: "bar",
- Namespace: "foo",
- },
- }
- pods := []*v1.Pod{pod}
- kl.podManager.SetPods(pods)
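- // A zero-second grace period override makes the kill take effect immediately.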
- gracePeriodOverride := int64(0)
- err := kl.syncPod(syncPodOptions{
- pod: pod,
- podStatus: &kubecontainer.PodStatus{},
- updateType: kubetypes.SyncPodKill,
- killPodOptions: &KillPodOptions{
- PodStatusFunc: func(p *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus {
- return v1.PodStatus{
- Phase: v1.PodFailed,
- Reason: "reason",
- Message: "message",
- }
- },
- PodTerminationGracePeriodSecondsOverride: &gracePeriodOverride,
- },
- })
- require.NoError(t, err)
- // Check pod status stored in the status map.
- checkPodStatus(t, kl, pod, v1.PodFailed)
- }
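- // waitForVolumeUnmount polls the volume manager until no volumes remain mounted for the given pod, returning an error if any are still mounted once the backoff is exhausted.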
- func waitForVolumeUnmount(
- volumeManager kubeletvolume.VolumeManager,
- pod *v1.Pod) error {
- var podVolumes kubecontainer.VolumeMap
- err := retryWithExponentialBackOff(
- 50*time.Millisecond,
- func() (bool, error) {
- // Verify that no volumes remain mounted for the pod
- podVolumes = volumeManager.GetMountedVolumesForPod(
- util.GetUniquePodName(pod))
- if len(podVolumes) != 0 {
- return false, nil
- }
- return true, nil
- },
- )
- if err != nil {
- return fmt.Errorf(
- "expected volumes to be unmounted, but some volumes are still mounted: %#v", podVolumes)
- }
- return nil
- }
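- // waitForVolumeDetach polls the volume manager until the named volume is no longer reported as attached.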
- func waitForVolumeDetach(
- volumeName v1.UniqueVolumeName,
- volumeManager kubeletvolume.VolumeManager) error {
- err := retryWithExponentialBackOff(
- 50*time.Millisecond,
- func() (bool, error) {
- // Verify the volume is no longer attached
- volumeAttached := volumeManager.VolumeIsAttached(volumeName)
- return !volumeAttached, nil
- },
- )
- if err != nil {
- return fmt.Errorf(
- "expected volume %q to be detached, but it is still attached", volumeName)
- }
- return nil
- }
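- // retryWithExponentialBackOff polls fn with exponentially growing delays: starting from initialDuration and tripling for up to six attempts, which with the 50ms callers above amounts to roughly six seconds of waiting before giving up.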
- func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
- backoff := wait.Backoff{
- Duration: initialDuration,
- Factor: 3,
- Jitter: 0,
- Steps: 6,
- }
- return wait.ExponentialBackoff(backoff, fn)
- }
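- // simulateVolumeInUseUpdate periodically marks the volume as reported in use until stopCh is closed, standing in for the node status updater that normally reports volumes in use.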
- func simulateVolumeInUseUpdate(
- volumeName v1.UniqueVolumeName,
- stopCh <-chan struct{},
- volumeManager kubeletvolume.VolumeManager) {
- ticker := time.NewTicker(100 * time.Millisecond)
- defer ticker.Stop()
- for {
- select {
- case <-ticker.C:
- volumeManager.MarkVolumesAsReportedInUse(
- []v1.UniqueVolumeName{volumeName})
- case <-stopCh:
- return
- }
- }
- }
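- // runVolumeManager starts the kubelet's volume manager in a background goroutine and returns the channel used to stop it.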
- func runVolumeManager(kubelet *Kubelet) chan struct{} {
- stopCh := make(chan struct{})
- go kubelet.volumeManager.Run(kubelet.sourcesReady, stopCh)
- return stopCh
- }
- // podsByUID implements sort.Interface so pods can be sorted by UID.
- type podsByUID []*v1.Pod
- func (p podsByUID) Len() int { return len(p) }
- func (p podsByUID) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
- func (p podsByUID) Less(i, j int) bool { return p[i].UID < p[j].UID }
|