kubelet_test.go

/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet

import (
	"fmt"
	"io/ioutil"
	"os"
	"sort"
	"testing"
	"time"

	cadvisorapi "github.com/google/cadvisor/info/v1"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"k8s.io/utils/mount"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/clock"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/flowcontrol"
	cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
	"k8s.io/kubernetes/pkg/kubelet/cm"
	"k8s.io/kubernetes/pkg/kubelet/config"
	"k8s.io/kubernetes/pkg/kubelet/configmap"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
	"k8s.io/kubernetes/pkg/kubelet/eviction"
	"k8s.io/kubernetes/pkg/kubelet/images"
	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
	"k8s.io/kubernetes/pkg/kubelet/logs"
	"k8s.io/kubernetes/pkg/kubelet/network/dns"
	"k8s.io/kubernetes/pkg/kubelet/pleg"
	"k8s.io/kubernetes/pkg/kubelet/pluginmanager"
	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
	podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
	probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing"
	"k8s.io/kubernetes/pkg/kubelet/secret"
	serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
	"k8s.io/kubernetes/pkg/kubelet/stats"
	"k8s.io/kubernetes/pkg/kubelet/status"
	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
	"k8s.io/kubernetes/pkg/kubelet/token"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
	"k8s.io/kubernetes/pkg/kubelet/util/queue"
	kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/awsebs"
	"k8s.io/kubernetes/pkg/volume/azure_dd"
	"k8s.io/kubernetes/pkg/volume/gcepd"
	_ "k8s.io/kubernetes/pkg/volume/hostpath"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
	"k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/hostutil"
	"k8s.io/kubernetes/pkg/volume/util/subpath"
)

func init() {
	utilruntime.ReallyCrash = true
}

const (
	testKubeletHostname = "127.0.0.1"
	testKubeletHostIP   = "127.0.0.1"

	// TODO(harry) any global place for these two?
	// Reasonable size range of all container images. The 90th percentile of images on Docker Hub falls into this range.
	minImgSize int64 = 23 * 1024 * 1024
	maxImgSize int64 = 1000 * 1024 * 1024
)

// fakeImageGCManager is a fake image GC manager for testing. It returns the
// image list from the fake runtime directly instead of caching it.
type fakeImageGCManager struct {
	fakeImageService kubecontainer.ImageService
	images.ImageGCManager
}

func (f *fakeImageGCManager) GetImageList() ([]kubecontainer.Image, error) {
	return f.fakeImageService.ListImages()
}

type TestKubelet struct {
	kubelet          *Kubelet
	fakeRuntime      *containertest.FakeRuntime
	fakeKubeClient   *fake.Clientset
	fakeMirrorClient *podtest.FakeMirrorClient
	fakeClock        *clock.FakeClock
	mounter          mount.Interface
	volumePlugin     *volumetest.FakeVolumePlugin
}

func (tk *TestKubelet) Cleanup() {
	if tk.kubelet != nil {
		os.RemoveAll(tk.kubelet.rootDirectory)
	}
}

// newTestKubelet returns a test kubelet with two images.
func newTestKubelet(t *testing.T, controllerAttachDetachEnabled bool) *TestKubelet {
	imageList := []kubecontainer.Image{
		{
			ID:       "abc",
			RepoTags: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
			Size:     123,
		},
		{
			ID:       "efg",
			RepoTags: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
			Size:     456,
		},
	}
	return newTestKubeletWithImageList(t, imageList, controllerAttachDetachEnabled, true /*initFakeVolumePlugin*/)
}

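// newTestKubeletWithImageList builds a Kubelet wired entirely to fakes (runtime,
// clients, cadvisor, volume plugins) so individual sync paths can be exercised
// without touching a real node.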
func newTestKubeletWithImageList(
	t *testing.T,
	imageList []kubecontainer.Image,
	controllerAttachDetachEnabled bool,
	initFakeVolumePlugin bool) *TestKubelet {
	fakeRuntime := &containertest.FakeRuntime{}
	fakeRuntime.RuntimeType = "test"
	fakeRuntime.VersionInfo = "1.5.0"
	fakeRuntime.ImageList = imageList
	// Set ready conditions by default.
	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
		Conditions: []kubecontainer.RuntimeCondition{
			{Type: "RuntimeReady", Status: true},
			{Type: "NetworkReady", Status: true},
		},
	}

	fakeRecorder := &record.FakeRecorder{}
	fakeKubeClient := &fake.Clientset{}
	kubelet := &Kubelet{}
	kubelet.recorder = fakeRecorder
	kubelet.kubeClient = fakeKubeClient
	kubelet.heartbeatClient = fakeKubeClient
	kubelet.os = &containertest.FakeOS{}
	kubelet.mounter = mount.NewFakeMounter(nil)
	kubelet.hostutil = hostutil.NewFakeHostUtil(nil)
	kubelet.subpather = &subpath.FakeSubpath{}
	kubelet.hostname = testKubeletHostname
	kubelet.nodeName = types.NodeName(testKubeletHostname)
	kubelet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
	kubelet.runtimeState.setNetworkState(nil)
	if tempDir, err := ioutil.TempDir("", "kubelet_test."); err != nil {
		t.Fatalf("can't make a temp rootdir: %v", err)
	} else {
		kubelet.rootDirectory = tempDir
	}
	if err := os.MkdirAll(kubelet.rootDirectory, 0750); err != nil {
		t.Fatalf("can't mkdir(%q): %v", kubelet.rootDirectory, err)
	}
	kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return true })
	kubelet.masterServiceNamespace = metav1.NamespaceDefault
	kubelet.serviceLister = testServiceLister{}
	kubelet.nodeLister = testNodeLister{
		nodes: []*v1.Node{
			{
				ObjectMeta: metav1.ObjectMeta{
					Name: string(kubelet.nodeName),
				},
				Status: v1.NodeStatus{
					Conditions: []v1.NodeCondition{
						{
							Type:    v1.NodeReady,
							Status:  v1.ConditionTrue,
							Reason:  "Ready",
							Message: "Node ready",
						},
					},
					Addresses: []v1.NodeAddress{
						{
							Type:    v1.NodeInternalIP,
							Address: testKubeletHostIP,
						},
					},
				},
			},
		},
	}
	kubelet.recorder = fakeRecorder
	if err := kubelet.setupDataDirs(); err != nil {
		t.Fatalf("can't initialize kubelet data dirs: %v", err)
	}
	kubelet.daemonEndpoints = &v1.NodeDaemonEndpoints{}

	kubelet.cadvisor = &cadvisortest.Fake{}
	machineInfo, _ := kubelet.cadvisor.MachineInfo()
	kubelet.machineInfo = machineInfo

	fakeMirrorClient := podtest.NewFakeMirrorClient()
	secretManager := secret.NewSimpleSecretManager(kubelet.kubeClient)
	kubelet.secretManager = secretManager
	configMapManager := configmap.NewSimpleConfigMapManager(kubelet.kubeClient)
	kubelet.configMapManager = configMapManager
	kubelet.podManager = kubepod.NewBasicPodManager(fakeMirrorClient, kubelet.secretManager, kubelet.configMapManager, podtest.NewMockCheckpointManager())
	kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager, &statustest.FakePodDeletionSafetyProvider{})

	kubelet.containerRuntime = fakeRuntime
	kubelet.runtimeCache = containertest.NewFakeRuntimeCache(kubelet.containerRuntime)
	kubelet.reasonCache = NewReasonCache()
	kubelet.podCache = containertest.NewFakeCache(kubelet.containerRuntime)
	kubelet.podWorkers = &fakePodWorkers{
		syncPodFn: kubelet.syncPod,
		cache:     kubelet.podCache,
		t:         t,
	}

	kubelet.probeManager = probetest.FakeManager{}
	kubelet.livenessManager = proberesults.NewManager()
	kubelet.startupManager = proberesults.NewManager()

	kubelet.containerManager = cm.NewStubContainerManager()
	fakeNodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      testKubeletHostname,
		UID:       types.UID(testKubeletHostname),
		Namespace: "",
	}

	volumeStatsAggPeriod := time.Second * 10
	kubelet.resourceAnalyzer = serverstats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod)

	kubelet.StatsProvider = stats.NewCadvisorStatsProvider(
		kubelet.cadvisor,
		kubelet.resourceAnalyzer,
		kubelet.podManager,
		kubelet.runtimeCache,
		fakeRuntime,
		kubelet.statusManager)
	fakeImageGCPolicy := images.ImageGCPolicy{
		HighThresholdPercent: 90,
		LowThresholdPercent:  80,
	}
	imageGCManager, err := images.NewImageGCManager(fakeRuntime, kubelet.StatsProvider, fakeRecorder, fakeNodeRef, fakeImageGCPolicy, "")
	assert.NoError(t, err)
	kubelet.imageManager = &fakeImageGCManager{
		fakeImageService: fakeRuntime,
		ImageGCManager:   imageGCManager,
	}
	kubelet.containerLogManager = logs.NewStubContainerLogManager()
	containerGCPolicy := kubecontainer.ContainerGCPolicy{
		MinAge:             time.Duration(0),
		MaxPerPodContainer: 1,
		MaxContainers:      -1,
	}
	containerGC, err := kubecontainer.NewContainerGC(fakeRuntime, containerGCPolicy, kubelet.sourcesReady)
	assert.NoError(t, err)
	kubelet.containerGC = containerGC

	fakeClock := clock.NewFakeClock(time.Now())
	kubelet.backOff = flowcontrol.NewBackOff(time.Second, time.Minute)
	kubelet.backOff.Clock = fakeClock
	kubelet.podKillingCh = make(chan *kubecontainer.PodPair, 20)
	kubelet.resyncInterval = 10 * time.Second
	kubelet.workQueue = queue.NewBasicWorkQueue(fakeClock)
	// Relist period does not affect the tests.
	kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour, nil, clock.RealClock{})
	kubelet.clock = fakeClock

	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      string(kubelet.nodeName),
		UID:       types.UID(kubelet.nodeName),
		Namespace: "",
	}
	// Set up the eviction manager.
	evictionManager, evictionAdmitHandler := eviction.NewManager(kubelet.resourceAnalyzer, eviction.Config{}, killPodNow(kubelet.podWorkers, fakeRecorder), kubelet.podManager.GetMirrorPodByPod, kubelet.imageManager, kubelet.containerGC, fakeRecorder, nodeRef, kubelet.clock)

	kubelet.evictionManager = evictionManager
	kubelet.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler)

	// Add this as a cleanup predicate pod admit handler.
	kubelet.admitHandlers.AddPodAdmitHandler(lifecycle.NewPredicateAdmitHandler(kubelet.getNodeAnyWay, lifecycle.NewAdmissionFailureHandlerStub(), kubelet.containerManager.UpdatePluginResources))

	allPlugins := []volume.VolumePlugin{}
	plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
	if initFakeVolumePlugin {
		allPlugins = append(allPlugins, plug)
	} else {
		allPlugins = append(allPlugins, awsebs.ProbeVolumePlugins()...)
		allPlugins = append(allPlugins, gcepd.ProbeVolumePlugins()...)
		allPlugins = append(allPlugins, azure_dd.ProbeVolumePlugins()...)
	}

	var prober volume.DynamicPluginProber // TODO (#51147) inject mock
	kubelet.volumePluginMgr, err =
		NewInitializedVolumePluginMgr(kubelet, kubelet.secretManager, kubelet.configMapManager, token.NewManager(kubelet.kubeClient), allPlugins, prober)
	require.NoError(t, err, "Failed to initialize VolumePluginMgr")

	kubelet.volumeManager = kubeletvolume.NewVolumeManager(
		controllerAttachDetachEnabled,
		kubelet.nodeName,
		kubelet.podManager,
		kubelet.statusManager,
		fakeKubeClient,
		kubelet.volumePluginMgr,
		fakeRuntime,
		kubelet.mounter,
		kubelet.hostutil,
		kubelet.getPodsDir(),
		kubelet.recorder,
		false, /* experimentalCheckNodeCapabilitiesBeforeMount */
		false, /* keepTerminatedPodVolumes */
		volumetest.NewBlockVolumePathHandler())

	kubelet.pluginManager = pluginmanager.NewPluginManager(
		kubelet.getPluginsRegistrationDir(), /* sockDir */
		kubelet.recorder,
	)
	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()

	// Enable the active deadline handler.
	activeDeadlineHandler, err := newActiveDeadlineHandler(kubelet.statusManager, kubelet.recorder, kubelet.clock)
	require.NoError(t, err, "Can't initialize active deadline handler")

	kubelet.AddPodSyncLoopHandler(activeDeadlineHandler)
	kubelet.AddPodSyncHandler(activeDeadlineHandler)

	return &TestKubelet{kubelet, fakeRuntime, fakeKubeClient, fakeMirrorClient, fakeClock, nil, plug}
}

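// newTestPods returns count host-network pods with unique UIDs and names.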
func newTestPods(count int) []*v1.Pod {
	pods := make([]*v1.Pod, count)
	for i := 0; i < count; i++ {
		pods[i] = &v1.Pod{
			Spec: v1.PodSpec{
				HostNetwork: true,
			},
			ObjectMeta: metav1.ObjectMeta{
				UID:  types.UID(10000 + i),
				Name: fmt.Sprintf("pod%d", i),
			},
		}
	}
	return pods
}

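// TestSyncLoopAbort verifies that syncLoop exits promptly once the pod update
// channel is closed instead of hanging.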
func TestSyncLoopAbort(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.runtimeState.setRuntimeSync(time.Now())
	// The syncLoop waits on time.After(resyncInterval); set it really big so
	// that we don't race with the channel close.
	kubelet.resyncInterval = time.Second * 30

	ch := make(chan kubetypes.PodUpdate)
	close(ch)

	// Sanity check (also prevents this test from hanging in the next step).
	ok := kubelet.syncLoopIteration(ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1))
	require.False(t, ok, "Expected syncLoopIteration to return !ok since update chan was closed")

	// This should terminate immediately; if it hangs then syncLoopIteration isn't aborting properly.
	kubelet.syncLoop(ch, kubelet)
}

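// TestSyncPodsStartPod verifies that a pod added to the pod manager is started
// by the fake runtime after a sync.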
func TestSyncPodsStartPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	fakeRuntime := testKubelet.fakeRuntime
	pods := []*v1.Pod{
		podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
			Containers: []v1.Container{
				{Name: "bar"},
			},
		}),
	}
	kubelet.podManager.SetPods(pods)
	kubelet.HandlePodSyncs(pods)
	fakeRuntime.AssertStartedPods([]string{string(pods[0].UID)})
}

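// TestSyncPodsDeletesWhenSourcesAreReady verifies that unwanted pods are only
// killed once all pod sources report ready.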
func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
	ready := false

	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	fakeRuntime := testKubelet.fakeRuntime
	kubelet := testKubelet.kubelet
	kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return ready })

	fakeRuntime.PodList = []*containertest.FakePod{
		{Pod: &kubecontainer.Pod{
			ID:        "12345678",
			Name:      "foo",
			Namespace: "new",
			Containers: []*kubecontainer.Container{
				{Name: "bar"},
			},
		}},
	}
	kubelet.HandlePodCleanups()
	// Sources are not ready yet. Don't remove any pods.
	fakeRuntime.AssertKilledPods([]string{})

	ready = true
	kubelet.HandlePodCleanups()

	// Sources are ready. Remove unwanted pods.
	fakeRuntime.AssertKilledPods([]string{"12345678"})
}

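// testNodeLister is a stub node lister backed by a fixed slice of nodes.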
type testNodeLister struct {
	nodes []*v1.Node
}

func (nl testNodeLister) Get(name string) (*v1.Node, error) {
	for _, node := range nl.nodes {
		if node.Name == name {
			return node, nil
		}
	}
	return nil, fmt.Errorf("Node with name: %s does not exist", name)
}

func (nl testNodeLister) List(_ labels.Selector) (ret []*v1.Node, err error) {
	return nl.nodes, nil
}

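// checkPodStatus asserts that the status manager holds a status for the pod
// and that its phase matches the expected one.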
func checkPodStatus(t *testing.T, kl *Kubelet, pod *v1.Pod, phase v1.PodPhase) {
	status, found := kl.statusManager.GetPodStatus(pod.UID)
	require.True(t, found, "Status of pod %q is not found in the status map", pod.UID)
	require.Equal(t, phase, status.Phase)
}

// Tests that we handle port conflicts correctly by setting the failed status in the status map.
func TestHandlePortConflicts(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet

	kl.nodeLister = testNodeLister{nodes: []*v1.Node{
		{
			ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
			Status: v1.NodeStatus{
				Allocatable: v1.ResourceList{
					v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}

	recorder := record.NewFakeRecorder(20)
	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      string("testNode"),
		UID:       types.UID("testNode"),
		Namespace: "",
	}
	testClusterDNSDomain := "TEST"
	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")

	spec := v1.PodSpec{NodeName: string(kl.nodeName), Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}
	pods := []*v1.Pod{
		podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
		podWithUIDNameNsSpec("987654321", "oldpod", "foo", spec),
	}
	// Make sure the pods are in the reverse order of creation time.
	pods[1].CreationTimestamp = metav1.NewTime(time.Now())
	pods[0].CreationTimestamp = metav1.NewTime(time.Now().Add(1 * time.Second))
	// The newer pod should be rejected.
	notfittingPod := pods[0]
	fittingPod := pods[1]

	kl.HandlePodAdditions(pods)

	// Check pod status stored in the status map.
	checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
	checkPodStatus(t, kl, fittingPod, v1.PodPending)
}

// Tests that we handle host name conflicts correctly by setting the failed status in the status map.
func TestHandleHostNameConflicts(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet

	kl.nodeLister = testNodeLister{nodes: []*v1.Node{
		{
			ObjectMeta: metav1.ObjectMeta{Name: "127.0.0.1"},
			Status: v1.NodeStatus{
				Allocatable: v1.ResourceList{
					v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}

	recorder := record.NewFakeRecorder(20)
	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      string("testNode"),
		UID:       types.UID("testNode"),
		Namespace: "",
	}
	testClusterDNSDomain := "TEST"
	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")

	// The default NodeName in the test is 127.0.0.1.
	pods := []*v1.Pod{
		podWithUIDNameNsSpec("123456789", "notfittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.2"}),
		podWithUIDNameNsSpec("987654321", "fittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.1"}),
	}
	notfittingPod := pods[0]
	fittingPod := pods[1]

	kl.HandlePodAdditions(pods)

	// Check pod status stored in the status map.
	checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
	checkPodStatus(t, kl, fittingPod, v1.PodPending)
}

// Tests that we handle a non-matching node selector correctly by setting the failed status in the status map.
func TestHandleNodeSelector(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet
	nodes := []*v1.Node{
		{
			ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}},
			Status: v1.NodeStatus{
				Allocatable: v1.ResourceList{
					v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}
	kl.nodeLister = testNodeLister{nodes: nodes}

	recorder := record.NewFakeRecorder(20)
	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      string("testNode"),
		UID:       types.UID("testNode"),
		Namespace: "",
	}
	testClusterDNSDomain := "TEST"
	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")

	pods := []*v1.Pod{
		podWithUIDNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
		podWithUIDNameNsSpec("987654321", "podB", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
	}
	// The first pod should be rejected.
	notfittingPod := pods[0]
	fittingPod := pods[1]

	kl.HandlePodAdditions(pods)

	// Check pod status stored in the status map.
	checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
	checkPodStatus(t, kl, fittingPod, v1.PodPending)
}

// Tests that we handle exceeded resources correctly by setting the failed status in the status map.
func TestHandleMemExceeded(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet
	nodes := []*v1.Node{
		{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
			Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(10, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI),
				v1.ResourcePods:   *resource.NewQuantity(40, resource.DecimalSI),
			}}},
	}
	kl.nodeLister = testNodeLister{nodes: nodes}

	recorder := record.NewFakeRecorder(20)
	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      string("testNode"),
		UID:       types.UID("testNode"),
		Namespace: "",
	}
	testClusterDNSDomain := "TEST"
	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")

	spec := v1.PodSpec{NodeName: string(kl.nodeName),
		Containers: []v1.Container{{Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceMemory: resource.MustParse("90"),
			},
		}}},
	}
	pods := []*v1.Pod{
		podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
		podWithUIDNameNsSpec("987654321", "oldpod", "foo", spec),
	}
	// Make sure the pods are in the reverse order of creation time.
	pods[1].CreationTimestamp = metav1.NewTime(time.Now())
	pods[0].CreationTimestamp = metav1.NewTime(time.Now().Add(1 * time.Second))
	// The newer pod should be rejected.
	notfittingPod := pods[0]
	fittingPod := pods[1]

	kl.HandlePodAdditions(pods)

	// Check pod status stored in the status map.
	checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
	checkPodStatus(t, kl, fittingPod, v1.PodPending)
}

// Tests that we handle the result of the UpdatePluginResources interface correctly
// by setting the corresponding status in the status map.
func TestHandlePluginResources(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet

	adjustedResource := v1.ResourceName("domain1.com/adjustedResource")
	emptyResource := v1.ResourceName("domain2.com/emptyResource")
	missingResource := v1.ResourceName("domain2.com/missingResource")
	failedResource := v1.ResourceName("domain2.com/failedResource")
	resourceQuantity0 := *resource.NewQuantity(int64(0), resource.DecimalSI)
	resourceQuantity1 := *resource.NewQuantity(int64(1), resource.DecimalSI)
	resourceQuantity2 := *resource.NewQuantity(int64(2), resource.DecimalSI)
	resourceQuantityInvalid := *resource.NewQuantity(int64(-1), resource.DecimalSI)
	allowedPodQuantity := *resource.NewQuantity(int64(10), resource.DecimalSI)
	nodes := []*v1.Node{
		{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
			Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: v1.ResourceList{
				adjustedResource: resourceQuantity1,
				emptyResource:    resourceQuantity0,
				v1.ResourcePods:  allowedPodQuantity,
			}}},
	}
	kl.nodeLister = testNodeLister{nodes: nodes}

	updatePluginResourcesFunc := func(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
		// Maps from resourceName to the value we use to set node.allocatableResource[resourceName].
		// A resource with an invalid value (< 0) causes the function to return an error
		// to emulate a resource allocation failure.
		// Resources not contained in this map will have their node.allocatableResource
		// quantity unchanged.
		updateResourceMap := map[v1.ResourceName]resource.Quantity{
			adjustedResource: resourceQuantity2,
			emptyResource:    resourceQuantity0,
			failedResource:   resourceQuantityInvalid,
		}
		pod := attrs.Pod
		allocatableResource := node.AllocatableResource()
		newAllocatableResource := allocatableResource.Clone()
		for _, container := range pod.Spec.Containers {
			for resource := range container.Resources.Requests {
				newQuantity, exist := updateResourceMap[resource]
				if !exist {
					continue
				}
				if newQuantity.Value() < 0 {
					return fmt.Errorf("Allocation failed")
				}
				newAllocatableResource.ScalarResources[resource] = newQuantity.Value()
			}
		}
		node.SetAllocatableResource(newAllocatableResource)
		return nil
	}

	// Add updatePluginResourcesFunc to the admission handlers to test its behavior.
	kl.admitHandlers = lifecycle.PodAdmitHandlers{}
	kl.admitHandlers.AddPodAdmitHandler(lifecycle.NewPredicateAdmitHandler(kl.getNodeAnyWay, lifecycle.NewAdmissionFailureHandlerStub(), updatePluginResourcesFunc))

	recorder := record.NewFakeRecorder(20)
	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      string("testNode"),
		UID:       types.UID("testNode"),
		Namespace: "",
	}
	testClusterDNSDomain := "TEST"
	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")

	// A pod requiring adjustedResource can be successfully allocated because updatePluginResourcesFunc
	// adjusts node.allocatableResource for this resource to a sufficient value.
	fittingPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
		Containers: []v1.Container{{Resources: v1.ResourceRequirements{
			Limits: v1.ResourceList{
				adjustedResource: resourceQuantity2,
			},
			Requests: v1.ResourceList{
				adjustedResource: resourceQuantity2,
			},
		}}},
	}
	// A pod requiring emptyResource (an extended resource with 0 allocatable) will
	// not pass PredicateAdmit.
	emptyPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
		Containers: []v1.Container{{Resources: v1.ResourceRequirements{
			Limits: v1.ResourceList{
				emptyResource: resourceQuantity2,
			},
			Requests: v1.ResourceList{
				emptyResource: resourceQuantity2,
			},
		}}},
	}
	// A pod requiring missingResource will pass PredicateAdmit.
	//
	// Extended resources missing in node status are ignored in PredicateAdmit.
	// This is required to support extended resources that are not managed by
	// device plugin, such as cluster-level resources.
	missingPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
		Containers: []v1.Container{{Resources: v1.ResourceRequirements{
			Limits: v1.ResourceList{
				missingResource: resourceQuantity2,
			},
			Requests: v1.ResourceList{
				missingResource: resourceQuantity2,
			},
		}}},
	}
	// A pod requiring failedResource will fail because the resource fails to be allocated.
	failedPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
		Containers: []v1.Container{{Resources: v1.ResourceRequirements{
			Limits: v1.ResourceList{
				failedResource: resourceQuantity1,
			},
			Requests: v1.ResourceList{
				failedResource: resourceQuantity1,
			},
		}}},
	}

	fittingPod := podWithUIDNameNsSpec("1", "fittingpod", "foo", fittingPodSpec)
	emptyPod := podWithUIDNameNsSpec("2", "emptypod", "foo", emptyPodSpec)
	missingPod := podWithUIDNameNsSpec("3", "missingpod", "foo", missingPodSpec)
	failedPod := podWithUIDNameNsSpec("4", "failedpod", "foo", failedPodSpec)

	kl.HandlePodAdditions([]*v1.Pod{fittingPod, emptyPod, missingPod, failedPod})

	// Check pod status stored in the status map.
	checkPodStatus(t, kl, fittingPod, v1.PodPending)
	checkPodStatus(t, kl, emptyPod, v1.PodFailed)
	checkPodStatus(t, kl, missingPod, v1.PodPending)
	checkPodStatus(t, kl, failedPod, v1.PodFailed)
}

// TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without external signal.
func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()

	kl := testKubelet.kubelet
	pods := []*v1.Pod{
		{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: "1234"}, Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}},
		{ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: "4567"}, Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}},
	}
	podToTest := pods[1]
	// Run once to populate the status map.
	kl.HandlePodAdditions(pods)
	if _, found := kl.statusManager.GetPodStatus(podToTest.UID); !found {
		t.Fatalf("expected to have status cached for pod2")
	}
	// Sync with empty pods so that the entry in the status map will be removed.
	kl.podManager.SetPods([]*v1.Pod{})
	kl.HandlePodCleanups()
	if _, found := kl.statusManager.GetPodStatus(podToTest.UID); found {
		t.Fatalf("expected to not have status cached for pod2")
	}
}

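// TestValidateContainerLogStatus verifies which container states allow log
// access for the current and the previous container instance.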
func TestValidateContainerLogStatus(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	containerName := "x"
	testCases := []struct {
		statuses []v1.ContainerStatus
		success  bool // whether getting logs for the container should succeed.
		pSuccess bool // whether getting logs for the previous container should succeed.
	}{
		{
			statuses: []v1.ContainerStatus{
				{
					Name: containerName,
					State: v1.ContainerState{
						Running: &v1.ContainerStateRunning{},
					},
					LastTerminationState: v1.ContainerState{
						Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
					},
				},
			},
			success:  true,
			pSuccess: true,
		},
		{
			statuses: []v1.ContainerStatus{
				{
					Name: containerName,
					State: v1.ContainerState{
						Running: &v1.ContainerStateRunning{},
					},
				},
			},
			success:  true,
			pSuccess: false,
		},
		{
			statuses: []v1.ContainerStatus{
				{
					Name: containerName,
					State: v1.ContainerState{
						Terminated: &v1.ContainerStateTerminated{},
					},
				},
			},
			success:  false,
			pSuccess: false,
		},
		{
			statuses: []v1.ContainerStatus{
				{
					Name: containerName,
					State: v1.ContainerState{
						Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
					},
				},
			},
			success:  true,
			pSuccess: false,
		},
		{
			statuses: []v1.ContainerStatus{
				{
					Name: containerName,
					State: v1.ContainerState{
						Terminated: &v1.ContainerStateTerminated{},
					},
					LastTerminationState: v1.ContainerState{
						Terminated: &v1.ContainerStateTerminated{},
					},
				},
			},
			success:  false,
			pSuccess: false,
		},
		{
			statuses: []v1.ContainerStatus{
				{
					Name: containerName,
					State: v1.ContainerState{
						Terminated: &v1.ContainerStateTerminated{},
					},
					LastTerminationState: v1.ContainerState{
						Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
					},
				},
			},
			success:  true,
			pSuccess: true,
		},
		{
			statuses: []v1.ContainerStatus{
				{
					Name: containerName,
					State: v1.ContainerState{
						Waiting: &v1.ContainerStateWaiting{},
					},
				},
			},
			success:  false,
			pSuccess: false,
		},
		{
			statuses: []v1.ContainerStatus{
				{
					Name:  containerName,
					State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "ErrImagePull"}},
				},
			},
			success:  false,
			pSuccess: false,
		},
		{
			statuses: []v1.ContainerStatus{
				{
					Name:  containerName,
					State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "ErrImagePullBackOff"}},
				},
			},
			success:  false,
			pSuccess: false,
		},
	}

	for i, tc := range testCases {
		// Access the log of the most recent container.
		previous := false
		podStatus := &v1.PodStatus{ContainerStatuses: tc.statuses}
		_, err := kubelet.validateContainerLogStatus("podName", podStatus, containerName, previous)
		if !tc.success {
			assert.Error(t, err, fmt.Sprintf("[case %d] error", i))
		} else {
			assert.NoError(t, err, "[case %d] error", i)
		}
		// Access the log of the previous, terminated container.
		previous = true
		_, err = kubelet.validateContainerLogStatus("podName", podStatus, containerName, previous)
		if !tc.pSuccess {
			assert.Error(t, err, fmt.Sprintf("[case %d] error", i))
		} else {
			assert.NoError(t, err, "[case %d] error", i)
		}
		// Access the log of a container that's not in the pod.
		_, err = kubelet.validateContainerLogStatus("podName", podStatus, "blah", false)
		assert.Error(t, err, fmt.Sprintf("[case %d] invalid container name should cause an error", i))
	}
}

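// TestCreateMirrorPod verifies that syncing a static pod creates exactly one
// mirror pod, for both the create and update sync types.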
func TestCreateMirrorPod(t *testing.T) {
	for _, updateType := range []kubetypes.SyncPodType{kubetypes.SyncPodCreate, kubetypes.SyncPodUpdate} {
		testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
		defer testKubelet.Cleanup()

		kl := testKubelet.kubelet
		manager := testKubelet.fakeMirrorClient
		pod := podWithUIDNameNs("12345678", "bar", "foo")
		pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
		pods := []*v1.Pod{pod}
		kl.podManager.SetPods(pods)
		err := kl.syncPod(syncPodOptions{
			pod:        pod,
			podStatus:  &kubecontainer.PodStatus{},
			updateType: updateType,
		})
		assert.NoError(t, err)
		podFullName := kubecontainer.GetPodFullName(pod)
		assert.True(t, manager.HasPod(podFullName), "Expected mirror pod %q to be created", podFullName)
		assert.Equal(t, 1, manager.NumOfPods(), "Expected only 1 mirror pod %q, got %+v", podFullName, manager.GetPods())
	}
}

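// TestDeleteOutdatedMirrorPod verifies that a mirror pod whose spec no longer
// matches the static pod is deleted and recreated on sync.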
func TestDeleteOutdatedMirrorPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet
	manager := testKubelet.fakeMirrorClient
	pod := podWithUIDNameNsSpec("12345678", "foo", "ns", v1.PodSpec{
		Containers: []v1.Container{
			{Name: "1234", Image: "foo"},
		},
	})
	pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"

	// Mirror pod has an outdated spec.
	mirrorPod := podWithUIDNameNsSpec("11111111", "foo", "ns", v1.PodSpec{
		Containers: []v1.Container{
			{Name: "1234", Image: "bar"},
		},
	})
	mirrorPod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "api"
	mirrorPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = "mirror"

	pods := []*v1.Pod{pod, mirrorPod}
	kl.podManager.SetPods(pods)
	err := kl.syncPod(syncPodOptions{
		pod:        pod,
		mirrorPod:  mirrorPod,
		podStatus:  &kubecontainer.PodStatus{},
		updateType: kubetypes.SyncPodUpdate,
	})
	assert.NoError(t, err)
	name := kubecontainer.GetPodFullName(pod)
	creates, deletes := manager.GetCounts(name)
	if creates != 1 || deletes != 1 {
		t.Errorf("expected 1 creation and 1 deletion of %q, got %d, %d", name, creates, deletes)
	}
}

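// TestDeleteOrphanedMirrorPods verifies that mirror pods without a matching
// static pod are deleted during pod cleanup.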
func TestDeleteOrphanedMirrorPods(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet
	manager := testKubelet.fakeMirrorClient
	orphanPods := []*v1.Pod{
		{
			ObjectMeta: metav1.ObjectMeta{
				UID:       "12345678",
				Name:      "pod1",
				Namespace: "ns",
				Annotations: map[string]string{
					kubetypes.ConfigSourceAnnotationKey: "api",
					kubetypes.ConfigMirrorAnnotationKey: "mirror",
				},
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{
				UID:       "12345679",
				Name:      "pod2",
				Namespace: "ns",
				Annotations: map[string]string{
					kubetypes.ConfigSourceAnnotationKey: "api",
					kubetypes.ConfigMirrorAnnotationKey: "mirror",
				},
			},
		},
	}

	kl.podManager.SetPods(orphanPods)
	// Sync with an empty pod list to delete all mirror pods.
	kl.HandlePodCleanups()
	assert.Len(t, manager.GetPods(), 0, "Expected 0 mirror pods")
	for _, pod := range orphanPods {
		name := kubecontainer.GetPodFullName(pod)
		creates, deletes := manager.GetCounts(name)
		if creates != 0 || deletes != 1 {
			t.Errorf("expected 0 creations and 1 deletion of %q, got %d, %d", name, creates, deletes)
		}
	}
}

func TestGetContainerInfoForMirrorPods(t *testing.T) {
	// pods contain one static and one mirror pod with the same name but
	// different UIDs.
	pods := []*v1.Pod{
		{
			ObjectMeta: metav1.ObjectMeta{
				UID:       "1234",
				Name:      "qux",
				Namespace: "ns",
				Annotations: map[string]string{
					kubetypes.ConfigSourceAnnotationKey: "file",
				},
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{Name: "foo"},
				},
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{
				UID:       "5678",
				Name:      "qux",
				Namespace: "ns",
				Annotations: map[string]string{
					kubetypes.ConfigSourceAnnotationKey: "api",
					kubetypes.ConfigMirrorAnnotationKey: "mirror",
				},
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{Name: "foo"},
				},
			},
		},
	}

	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	fakeRuntime := testKubelet.fakeRuntime
	cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
	kubelet := testKubelet.kubelet

	fakeRuntime.PodList = []*containertest.FakePod{
		{Pod: &kubecontainer.Pod{
			ID:        "1234",
			Name:      "qux",
			Namespace: "ns",
			Containers: []*kubecontainer.Container{
				{
					Name: "foo",
					ID:   kubecontainer.ContainerID{Type: "test", ID: "ab2cdf"},
				},
			},
		}},
	}

	kubelet.podManager.SetPods(pods)
	// Use the mirror pod UID to retrieve the stats.
	stats, err := kubelet.GetContainerInfo("qux_ns", "5678", "foo", cadvisorReq)
	assert.NoError(t, err)
	require.NotNil(t, stats)
}

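// TestNetworkErrorsWithoutHostNetwork verifies that a network error fails the
// sync of a non-host-network pod but not of a host-network static pod.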
func TestNetworkErrorsWithoutHostNetwork(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet

	kubelet.runtimeState.setNetworkState(fmt.Errorf("simulated network error"))

	pod := podWithUIDNameNsSpec("12345678", "hostnetwork", "new", v1.PodSpec{
		HostNetwork: false,
		Containers: []v1.Container{
			{Name: "foo"},
		},
	})

	kubelet.podManager.SetPods([]*v1.Pod{pod})
	err := kubelet.syncPod(syncPodOptions{
		pod:        pod,
		podStatus:  &kubecontainer.PodStatus{},
		updateType: kubetypes.SyncPodUpdate,
	})
	assert.Error(t, err, "expected pod with hostNetwork=false to fail when network in error")

	pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource
	pod.Spec.HostNetwork = true
	err = kubelet.syncPod(syncPodOptions{
		pod:        pod,
		podStatus:  &kubecontainer.PodStatus{},
		updateType: kubetypes.SyncPodUpdate,
	})
	assert.NoError(t, err, "expected pod with hostNetwork=true to succeed when network in error")
}

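// TestFilterOutTerminatedPods verifies that failed and succeeded pods are
// filtered out while running, pending, and terminating pods are kept.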
func TestFilterOutTerminatedPods(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	pods := newTestPods(5)
	now := metav1.NewTime(time.Now())
	pods[0].Status.Phase = v1.PodFailed
	pods[1].Status.Phase = v1.PodSucceeded
	// The pod is terminating and should not be filtered out.
	pods[2].Status.Phase = v1.PodRunning
	pods[2].DeletionTimestamp = &now
	pods[2].Status.ContainerStatuses = []v1.ContainerStatus{
		{State: v1.ContainerState{
			Running: &v1.ContainerStateRunning{
				StartedAt: now,
			},
		}},
	}
	pods[3].Status.Phase = v1.PodPending
	pods[4].Status.Phase = v1.PodRunning

	expected := []*v1.Pod{pods[2], pods[3], pods[4]}
	kubelet.podManager.SetPods(pods)
	actual := kubelet.filterOutTerminatedPods(pods)
	assert.Equal(t, expected, actual)
}

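// TestSyncPodsSetStatusToFailedForPodsThatRunTooLong verifies that a pod whose
// ActiveDeadlineSeconds has been exceeded is marked Failed.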
func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	fakeRuntime := testKubelet.fakeRuntime
	kubelet := testKubelet.kubelet

	now := metav1.Now()
	startTime := metav1.NewTime(now.Time.Add(-1 * time.Minute))
	exceededActiveDeadlineSeconds := int64(30)

	pods := []*v1.Pod{
		{
			ObjectMeta: metav1.ObjectMeta{
				UID:       "12345678",
				Name:      "bar",
				Namespace: "new",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{Name: "foo"},
				},
				ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
			},
			Status: v1.PodStatus{
				StartTime: &startTime,
			},
		},
	}

	fakeRuntime.PodList = []*containertest.FakePod{
		{Pod: &kubecontainer.Pod{
			ID:        "12345678",
			Name:      "bar",
			Namespace: "new",
			Containers: []*kubecontainer.Container{
				{Name: "foo"},
			},
		}},
	}

	// Let the pod worker set the status to failed after this sync.
	kubelet.HandlePodUpdates(pods)
	status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
	assert.True(t, found, "expected to find status for pod %q", pods[0].UID)
	assert.Equal(t, v1.PodFailed, status.Phase)
	// Check that the pod status contains ContainerStatuses, etc.
	assert.NotNil(t, status.ContainerStatuses)
}

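// TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed verifies that a pod
// still within its ActiveDeadlineSeconds is not marked Failed.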
func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	fakeRuntime := testKubelet.fakeRuntime
	kubelet := testKubelet.kubelet

	now := metav1.Now()
	startTime := metav1.NewTime(now.Time.Add(-1 * time.Minute))
	exceededActiveDeadlineSeconds := int64(300)

	pods := []*v1.Pod{
		{
			ObjectMeta: metav1.ObjectMeta{
				UID:       "12345678",
				Name:      "bar",
				Namespace: "new",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{Name: "foo"},
				},
				ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
			},
			Status: v1.PodStatus{
				StartTime: &startTime,
			},
		},
	}

	fakeRuntime.PodList = []*containertest.FakePod{
		{Pod: &kubecontainer.Pod{
			ID:        "12345678",
			Name:      "bar",
			Namespace: "new",
			Containers: []*kubecontainer.Container{
				{Name: "foo"},
			},
		}},
	}

	kubelet.podManager.SetPods(pods)
	kubelet.HandlePodUpdates(pods)
	status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
	assert.True(t, found, "expected to find status for pod %q", pods[0].UID)
	assert.NotEqual(t, v1.PodFailed, status.Phase)
}

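// podWithUIDNameNs returns a minimal pod with the given UID, name, and
// namespace and an empty annotations map.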
func podWithUIDNameNs(uid types.UID, name, namespace string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:         uid,
			Name:        name,
			Namespace:   namespace,
			Annotations: map[string]string{},
		},
	}
}

func podWithUIDNameNsSpec(uid types.UID, name, namespace string, spec v1.PodSpec) *v1.Pod {
	pod := podWithUIDNameNs(uid, name, namespace)
	pod.Spec = spec
	return pod
}

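// TestDeletePodDirsForDeletedPods verifies that the pod directory of a pod
// removed from the pod manager is deleted during cleanup.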
func TestDeletePodDirsForDeletedPods(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet
	pods := []*v1.Pod{
		podWithUIDNameNs("12345678", "pod1", "ns"),
		podWithUIDNameNs("12345679", "pod2", "ns"),
	}

	kl.podManager.SetPods(pods)
	// Sync to create pod directories.
	kl.HandlePodSyncs(kl.podManager.GetPods())
	for i := range pods {
		assert.True(t, dirExists(kl.getPodDir(pods[i].UID)), "Expected directory to exist for pod %d", i)
	}

	// Pod 1 has been deleted and no longer exists.
	kl.podManager.SetPods([]*v1.Pod{pods[0]})
	kl.HandlePodCleanups()
	assert.True(t, dirExists(kl.getPodDir(pods[0].UID)), "Expected directory to exist for pod 0")
	assert.False(t, dirExists(kl.getPodDir(pods[1].UID)), "Expected directory to be deleted for pod 1")
}

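// syncAndVerifyPodDir syncs the given pods, runs pod cleanup, and asserts
// whether the pod directories of podsToCheck exist.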
func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*v1.Pod, podsToCheck []*v1.Pod, shouldExist bool) {
	kl := testKubelet.kubelet

	kl.podManager.SetPods(pods)
	kl.HandlePodSyncs(pods)
	kl.HandlePodCleanups()
	for i, pod := range podsToCheck {
		exist := dirExists(kl.getPodDir(pod.UID))
		assert.Equal(t, shouldExist, exist, "directory of pod %d", i)
	}
}

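// TestDoesNotDeletePodDirsForTerminatedPods verifies that the directories of
// failed and succeeded pods are kept while the pods remain in the pod manager.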
func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet
	pods := []*v1.Pod{
		podWithUIDNameNs("12345678", "pod1", "ns"),
		podWithUIDNameNs("12345679", "pod2", "ns"),
		podWithUIDNameNs("12345680", "pod3", "ns"),
	}

	syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
	// Pod 1 failed, and pod 2 succeeded. None of the pod directories should be
	// deleted.
	kl.statusManager.SetPodStatus(pods[1], v1.PodStatus{Phase: v1.PodFailed})
	kl.statusManager.SetPodStatus(pods[2], v1.PodStatus{Phase: v1.PodSucceeded})
	syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
}

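// TestDoesNotDeletePodDirsIfContainerIsRunning verifies that the pod directory
// is kept while the pod is still active on the node and removed once it is not.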
func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	runningPod := &kubecontainer.Pod{
		ID:        "12345678",
		Name:      "pod1",
		Namespace: "ns",
	}
	apiPod := podWithUIDNameNs(runningPod.ID, runningPod.Name, runningPod.Namespace)

	// Sync once to create pod directory; confirm that the pod directory has
	// already been created.
	pods := []*v1.Pod{apiPod}
	syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, true)

	// Pretend the pod is deleted from apiserver, but is still active on the node.
	// The pod directory should not be removed.
	pods = []*v1.Pod{}
	testKubelet.fakeRuntime.PodList = []*containertest.FakePod{{Pod: runningPod, NetnsPath: ""}}
	syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, true)

	// The pod is deleted and also not active on the node. The pod directory
	// should be removed.
	pods = []*v1.Pod{}
	testKubelet.fakeRuntime.PodList = []*containertest.FakePod{}
	syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, false)
}
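
// TestGetPodsToSync verifies that getPodsToSync returns pods whose work is due
// in the work queue as well as pods that have exceeded their active deadline.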
func TestGetPodsToSync(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	clock := testKubelet.fakeClock
	pods := newTestPods(5)

	exceededActiveDeadlineSeconds := int64(30)
	notYetActiveDeadlineSeconds := int64(120)
	startTime := metav1.NewTime(clock.Now())
	pods[0].Status.StartTime = &startTime
	pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
	pods[1].Status.StartTime = &startTime
	pods[1].Spec.ActiveDeadlineSeconds = &notYetActiveDeadlineSeconds
	pods[2].Status.StartTime = &startTime
	pods[2].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds

	kubelet.podManager.SetPods(pods)
	kubelet.workQueue.Enqueue(pods[2].UID, 0)
	kubelet.workQueue.Enqueue(pods[3].UID, 30*time.Second)
	kubelet.workQueue.Enqueue(pods[4].UID, 2*time.Minute)

	clock.Step(1 * time.Minute)

	expected := []*v1.Pod{pods[2], pods[3], pods[0]}
	podsToSync := kubelet.getPodsToSync()
	sort.Sort(podsByUID(expected))
	sort.Sort(podsByUID(podsToSync))
	assert.Equal(t, expected, podsToSync)
}
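
// TestGenerateAPIPodStatusWithSortedContainers verifies that the container
// statuses in the generated API pod status follow the order of the containers
// in the pod spec, regardless of the order reported by the runtime.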
func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	numContainers := 10
	expectedOrder := []string{}
	cStatuses := []*kubecontainer.ContainerStatus{}
	specContainerList := []v1.Container{}
	for i := 0; i < numContainers; i++ {
		id := fmt.Sprintf("%v", i)
		containerName := fmt.Sprintf("%vcontainer", id)
		expectedOrder = append(expectedOrder, containerName)
		cStatus := &kubecontainer.ContainerStatus{
			ID:   kubecontainer.BuildContainerID("test", id),
			Name: containerName,
		}
		// Rearrange container statuses
		if i%2 == 0 {
			cStatuses = append(cStatuses, cStatus)
		} else {
			cStatuses = append([]*kubecontainer.ContainerStatus{cStatus}, cStatuses...)
		}
		specContainerList = append(specContainerList, v1.Container{Name: containerName})
	}
	pod := podWithUIDNameNs("uid1", "foo", "test")
	pod.Spec = v1.PodSpec{
		Containers: specContainerList,
	}

	status := &kubecontainer.PodStatus{
		ID:                pod.UID,
		Name:              pod.Name,
		Namespace:         pod.Namespace,
		ContainerStatuses: cStatuses,
	}
	for i := 0; i < 5; i++ {
		apiStatus := kubelet.generateAPIPodStatus(pod, status)
		for i, c := range apiStatus.ContainerStatuses {
			if expectedOrder[i] != c.Name {
				t.Fatalf("Container status not sorted, expected %v at index %d, but found %v", expectedOrder[i], i, c.Name)
			}
		}
	}
}
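
// verifyContainerStatuses asserts that each container status has the expected
// state and last termination state for its container name.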
func verifyContainerStatuses(t *testing.T, statuses []v1.ContainerStatus, state, lastTerminationState map[string]v1.ContainerState, message string) {
	for _, s := range statuses {
		assert.Equal(t, s.State, state[s.Name], "%s: state", message)
		assert.Equal(t, s.LastTerminationState, lastTerminationState[s.Name], "%s: last terminated state", message)
	}
}

// Test generateAPIPodStatus with different reason cache and old api pod status.
func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
	// The following waiting reason and message are generated in convertStatusToAPIStatus()
	startWaitingReason := "ContainerCreating"
	initWaitingReason := "PodInitializing"
	testTimestamp := time.Unix(123456789, 987654321)
	testErrorReason := fmt.Errorf("test-error")
	emptyContainerID := (&kubecontainer.ContainerID{}).String()
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	pod := podWithUIDNameNs("12345678", "foo", "new")
	pod.Spec = v1.PodSpec{RestartPolicy: v1.RestartPolicyOnFailure}

	podStatus := &kubecontainer.PodStatus{
		ID:        pod.UID,
		Name:      pod.Name,
		Namespace: pod.Namespace,
	}
	tests := []struct {
		containers    []v1.Container
		statuses      []*kubecontainer.ContainerStatus
		reasons       map[string]error
		oldStatuses   []v1.ContainerStatus
		expectedState map[string]v1.ContainerState
		// Only set expectedInitState when it is different from expectedState
		expectedInitState            map[string]v1.ContainerState
		expectedLastTerminationState map[string]v1.ContainerState
	}{
		// For a container with no historical record, State should be Waiting and LastTerminationState
		// should be retrieved from the old status reported by the apiserver.
		{
			containers: []v1.Container{{Name: "without-old-record"}, {Name: "with-old-record"}},
			statuses:   []*kubecontainer.ContainerStatus{},
			reasons:    map[string]error{},
			oldStatuses: []v1.ContainerStatus{{
				Name:                 "with-old-record",
				LastTerminationState: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{}},
			}},
			expectedState: map[string]v1.ContainerState{
				"without-old-record": {Waiting: &v1.ContainerStateWaiting{
					Reason: startWaitingReason,
				}},
				"with-old-record": {Waiting: &v1.ContainerStateWaiting{
					Reason: startWaitingReason,
				}},
			},
			expectedInitState: map[string]v1.ContainerState{
				"without-old-record": {Waiting: &v1.ContainerStateWaiting{
					Reason: initWaitingReason,
				}},
				"with-old-record": {Waiting: &v1.ContainerStateWaiting{
					Reason: initWaitingReason,
				}},
			},
			expectedLastTerminationState: map[string]v1.ContainerState{
				"with-old-record": {Terminated: &v1.ContainerStateTerminated{}},
			},
		},
		// For running container, State should be Running, LastTerminationState should be retrieved from latest terminated status.
		{
			containers: []v1.Container{{Name: "running"}},
			statuses: []*kubecontainer.ContainerStatus{
				{
					Name:      "running",
					State:     kubecontainer.ContainerStateRunning,
					StartedAt: testTimestamp,
				},
				{
					Name:     "running",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 1,
				},
			},
			reasons:     map[string]error{},
			oldStatuses: []v1.ContainerStatus{},
			expectedState: map[string]v1.ContainerState{
				"running": {Running: &v1.ContainerStateRunning{
					StartedAt: metav1.NewTime(testTimestamp),
				}},
			},
			expectedLastTerminationState: map[string]v1.ContainerState{
				"running": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
			},
		},
		// For a terminated container:
		// * If there is no recent start error record, State should be Terminated and LastTerminationState
		//   should be retrieved from the second-latest terminated status;
		// * If there is a recent start error record, State should be Waiting and LastTerminationState
		//   should be retrieved from the latest terminated status;
		// * If ExitCode == 0 and the restart policy is RestartPolicyOnFailure, the container shouldn't be
		//   restarted. Regardless of whether there is a recent start error, State should be Terminated and
		//   LastTerminationState should be retrieved from the second-latest terminated status.
		{
			containers: []v1.Container{{Name: "without-reason"}, {Name: "with-reason"}},
			statuses: []*kubecontainer.ContainerStatus{
				{
					Name:     "without-reason",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 1,
				},
				{
					Name:     "with-reason",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 2,
				},
				{
					Name:     "without-reason",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 3,
				},
				{
					Name:     "with-reason",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 4,
				},
				{
					Name:     "succeed",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 0,
				},
				{
					Name:     "succeed",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 5,
				},
			},
			reasons:     map[string]error{"with-reason": testErrorReason, "succeed": testErrorReason},
			oldStatuses: []v1.ContainerStatus{},
			expectedState: map[string]v1.ContainerState{
				"without-reason": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
				"with-reason": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    0,
					ContainerID: emptyContainerID,
				}},
			},
			expectedLastTerminationState: map[string]v1.ContainerState{
				"without-reason": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    3,
					ContainerID: emptyContainerID,
				}},
				"with-reason": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    2,
					ContainerID: emptyContainerID,
				}},
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    5,
					ContainerID: emptyContainerID,
				}},
			},
		},
	}

	for i, test := range tests {
		kubelet.reasonCache = NewReasonCache()
		for n, e := range test.reasons {
			kubelet.reasonCache.add(pod.UID, n, e, "")
		}
		pod.Spec.Containers = test.containers
		pod.Status.ContainerStatuses = test.oldStatuses
		podStatus.ContainerStatuses = test.statuses
		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
		verifyContainerStatuses(t, apiStatus.ContainerStatuses, test.expectedState, test.expectedLastTerminationState, fmt.Sprintf("case %d", i))
	}

	// Everything should be the same for init containers
	for i, test := range tests {
		kubelet.reasonCache = NewReasonCache()
		for n, e := range test.reasons {
			kubelet.reasonCache.add(pod.UID, n, e, "")
		}
		pod.Spec.InitContainers = test.containers
		pod.Status.InitContainerStatuses = test.oldStatuses
		podStatus.ContainerStatuses = test.statuses
		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
		expectedState := test.expectedState
		if test.expectedInitState != nil {
			expectedState = test.expectedInitState
		}
		verifyContainerStatuses(t, apiStatus.InitContainerStatuses, expectedState, test.expectedLastTerminationState, fmt.Sprintf("case %d", i))
	}
}

// Test generateAPIPodStatus with different restart policies.
func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) {
	testErrorReason := fmt.Errorf("test-error")
	emptyContainerID := (&kubecontainer.ContainerID{}).String()
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	pod := podWithUIDNameNs("12345678", "foo", "new")
	containers := []v1.Container{{Name: "succeed"}, {Name: "failed"}}
	podStatus := &kubecontainer.PodStatus{
		ID:        pod.UID,
		Name:      pod.Name,
		Namespace: pod.Namespace,
		ContainerStatuses: []*kubecontainer.ContainerStatus{
			{
				Name:     "succeed",
				State:    kubecontainer.ContainerStateExited,
				ExitCode: 0,
			},
			{
				Name:     "failed",
				State:    kubecontainer.ContainerStateExited,
				ExitCode: 1,
			},
			{
				Name:     "succeed",
				State:    kubecontainer.ContainerStateExited,
				ExitCode: 2,
			},
			{
				Name:     "failed",
				State:    kubecontainer.ContainerStateExited,
				ExitCode: 3,
			},
		},
	}
	kubelet.reasonCache.add(pod.UID, "succeed", testErrorReason, "")
	kubelet.reasonCache.add(pod.UID, "failed", testErrorReason, "")
	for c, test := range []struct {
		restartPolicy                v1.RestartPolicy
		expectedState                map[string]v1.ContainerState
		expectedLastTerminationState map[string]v1.ContainerState
		// Only set expectedInitState when it is different from expectedState
		expectedInitState map[string]v1.ContainerState
		// Only set expectedInitLastTerminationState when it is different from expectedLastTerminationState
		expectedInitLastTerminationState map[string]v1.ContainerState
	}{
		{
			restartPolicy: v1.RestartPolicyNever,
			expectedState: map[string]v1.ContainerState{
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    0,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
			},
			expectedLastTerminationState: map[string]v1.ContainerState{
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    2,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    3,
					ContainerID: emptyContainerID,
				}},
			},
		},
		{
			restartPolicy: v1.RestartPolicyOnFailure,
			expectedState: map[string]v1.ContainerState{
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    0,
					ContainerID: emptyContainerID,
				}},
				"failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
			},
			expectedLastTerminationState: map[string]v1.ContainerState{
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    2,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
			},
		},
		{
			restartPolicy: v1.RestartPolicyAlways,
			expectedState: map[string]v1.ContainerState{
				"succeed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
				"failed":  {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
			},
			expectedLastTerminationState: map[string]v1.ContainerState{
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    0,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
			},
			// If the init container is terminated with exit code 0, it won't be restarted even when the
			// restart policy is RestartPolicyAlways.
			expectedInitState: map[string]v1.ContainerState{
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    0,
					ContainerID: emptyContainerID,
				}},
				"failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
			},
			expectedInitLastTerminationState: map[string]v1.ContainerState{
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    2,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
			},
		},
	} {
		pod.Spec.RestartPolicy = test.restartPolicy
		// Test normal containers
		pod.Spec.Containers = containers
		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
		expectedState, expectedLastTerminationState := test.expectedState, test.expectedLastTerminationState
		verifyContainerStatuses(t, apiStatus.ContainerStatuses, expectedState, expectedLastTerminationState, fmt.Sprintf("case %d", c))
		pod.Spec.Containers = nil

		// Test init containers
		pod.Spec.InitContainers = containers
		apiStatus = kubelet.generateAPIPodStatus(pod, podStatus)
		if test.expectedInitState != nil {
			expectedState = test.expectedInitState
		}
		if test.expectedInitLastTerminationState != nil {
			expectedLastTerminationState = test.expectedInitLastTerminationState
		}
		verifyContainerStatuses(t, apiStatus.InitContainerStatuses, expectedState, expectedLastTerminationState, fmt.Sprintf("case %d", c))
		pod.Spec.InitContainers = nil
	}
}

// testPodAdmitHandler is a lifecycle.PodAdmitHandler for testing.
type testPodAdmitHandler struct {
	// list of pods to reject.
	podsToReject []*v1.Pod
}

// Admit rejects all pods in the podsToReject list with a matching UID.
func (a *testPodAdmitHandler) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
	for _, podToReject := range a.podsToReject {
		if podToReject.UID == attrs.Pod.UID {
			return lifecycle.PodAdmitResult{Admit: false, Reason: "Rejected", Message: "Pod is rejected"}
		}
	}
	return lifecycle.PodAdmitResult{Admit: true}
}

// Test verifies that the kubelet invokes an admission handler during HandlePodAdditions.
func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet
	kl.nodeLister = testNodeLister{nodes: []*v1.Node{
		{
			ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
			Status: v1.NodeStatus{
				Allocatable: v1.ResourceList{
					v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}

	pods := []*v1.Pod{
		{
			ObjectMeta: metav1.ObjectMeta{
				UID:       "123456789",
				Name:      "podA",
				Namespace: "foo",
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{
				UID:       "987654321",
				Name:      "podB",
				Namespace: "foo",
			},
		},
	}
	podToReject := pods[0]
	podToAdmit := pods[1]
	podsToReject := []*v1.Pod{podToReject}

	kl.admitHandlers.AddPodAdmitHandler(&testPodAdmitHandler{podsToReject: podsToReject})

	kl.HandlePodAdditions(pods)

	// Check pod status stored in the status map.
	checkPodStatus(t, kl, podToReject, v1.PodFailed)
	checkPodStatus(t, kl, podToAdmit, v1.PodPending)
}

// testPodSyncLoopHandler is a lifecycle.PodSyncLoopHandler that is used for testing.
type testPodSyncLoopHandler struct {
	// list of pods to sync
	podsToSync []*v1.Pod
}

// ShouldSync evaluates if the pod should be synced from the kubelet.
func (a *testPodSyncLoopHandler) ShouldSync(pod *v1.Pod) bool {
	for _, podToSync := range a.podsToSync {
		if podToSync.UID == pod.UID {
			return true
		}
	}
	return false
}

// TestGetPodsToSyncInvokesPodSyncLoopHandlers ensures that the get pods to sync routine invokes the handler.
func TestGetPodsToSyncInvokesPodSyncLoopHandlers(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	pods := newTestPods(5)
	expected := []*v1.Pod{pods[0]}
	kubelet.AddPodSyncLoopHandler(&testPodSyncLoopHandler{expected})
	kubelet.podManager.SetPods(pods)

	podsToSync := kubelet.getPodsToSync()
	sort.Sort(podsByUID(expected))
	sort.Sort(podsByUID(podsToSync))
	assert.Equal(t, expected, podsToSync)
}

// testPodSyncHandler is a lifecycle.PodSyncHandler that is used for testing.
type testPodSyncHandler struct {
	// list of pods to evict.
	podsToEvict []*v1.Pod
	// the reason for the eviction
	reason string
	// the message for the eviction
	message string
}

// ShouldEvict evaluates if the pod should be evicted from the kubelet.
func (a *testPodSyncHandler) ShouldEvict(pod *v1.Pod) lifecycle.ShouldEvictResponse {
	for _, podToEvict := range a.podsToEvict {
		if podToEvict.UID == pod.UID {
			return lifecycle.ShouldEvictResponse{Evict: true, Reason: a.reason, Message: a.message}
		}
	}
	return lifecycle.ShouldEvictResponse{Evict: false}
}

// TestGenerateAPIPodStatusInvokesPodSyncHandlers verifies that generateAPIPodStatus
// invokes the pod sync handlers and reports the eviction status they return.
func TestGenerateAPIPodStatusInvokesPodSyncHandlers(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	pod := newTestPods(1)[0]
	podsToEvict := []*v1.Pod{pod}
	kubelet.AddPodSyncHandler(&testPodSyncHandler{podsToEvict, "Evicted", "because"})
	status := &kubecontainer.PodStatus{
		ID:        pod.UID,
		Name:      pod.Name,
		Namespace: pod.Namespace,
	}
	apiStatus := kubelet.generateAPIPodStatus(pod, status)
	require.Equal(t, v1.PodFailed, apiStatus.Phase)
	require.Equal(t, "Evicted", apiStatus.Reason)
	require.Equal(t, "because", apiStatus.Message)
}
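
// TestSyncPodKillPod verifies that syncing a pod with the SyncPodKill update
// type records the PodStatus returned by the kill options' PodStatusFunc in
// the status manager.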
func TestSyncPodKillPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "bar",
			Namespace: "foo",
		},
	}
	pods := []*v1.Pod{pod}
	kl.podManager.SetPods(pods)
	gracePeriodOverride := int64(0)
	err := kl.syncPod(syncPodOptions{
		pod:        pod,
		podStatus:  &kubecontainer.PodStatus{},
		updateType: kubetypes.SyncPodKill,
		killPodOptions: &KillPodOptions{
			PodStatusFunc: func(p *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus {
				return v1.PodStatus{
					Phase:   v1.PodFailed,
					Reason:  "reason",
					Message: "message",
				}
			},
			PodTerminationGracePeriodSecondsOverride: &gracePeriodOverride,
		},
	})
	require.NoError(t, err)

	// Check pod status stored in the status map.
	checkPodStatus(t, kl, pod, v1.PodFailed)
}
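
// waitForVolumeUnmount polls the volume manager until no volumes are reported
// as mounted for the given pod, returning an error on timeout.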
func waitForVolumeUnmount(
	volumeManager kubeletvolume.VolumeManager,
	pod *v1.Pod) error {
	var podVolumes kubecontainer.VolumeMap
	err := retryWithExponentialBackOff(
		time.Duration(50*time.Millisecond),
		func() (bool, error) {
			// Verify volumes unmounted
			podVolumes = volumeManager.GetMountedVolumesForPod(
				util.GetUniquePodName(pod))

			if len(podVolumes) != 0 {
				return false, nil
			}

			return true, nil
		},
	)

	if err != nil {
		return fmt.Errorf(
			"Expected volumes to be unmounted. But some volumes are still mounted: %#v", podVolumes)
	}

	return nil
}
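
// waitForVolumeDetach polls the volume manager until the given volume is no
// longer reported as attached, returning an error on timeout.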
func waitForVolumeDetach(
	volumeName v1.UniqueVolumeName,
	volumeManager kubeletvolume.VolumeManager) error {
	attachedVolumes := []v1.UniqueVolumeName{}
	err := retryWithExponentialBackOff(
		time.Duration(50*time.Millisecond),
		func() (bool, error) {
			// Verify volumes detached
			volumeAttached := volumeManager.VolumeIsAttached(volumeName)
			return !volumeAttached, nil
		},
	)

	if err != nil {
		return fmt.Errorf(
			"Expected volumes to be detached. But some volumes are still attached: %#v", attachedVolumes)
	}

	return nil
}
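
// retryWithExponentialBackOff repeatedly invokes fn, starting at
// initialDuration and backing off with factor 3 for up to 6 steps, until fn
// succeeds or the backoff is exhausted.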
func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
	backoff := wait.Backoff{
		Duration: initialDuration,
		Factor:   3,
		Jitter:   0,
		Steps:    6,
	}
	return wait.ExponentialBackoff(backoff, fn)
}
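
// simulateVolumeInUseUpdate periodically marks the given volume as reported
// in use until stopCh is closed.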
func simulateVolumeInUseUpdate(
	volumeName v1.UniqueVolumeName,
	stopCh <-chan struct{},
	volumeManager kubeletvolume.VolumeManager) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			volumeManager.MarkVolumesAsReportedInUse(
				[]v1.UniqueVolumeName{volumeName})
		case <-stopCh:
			return
		}
	}
}
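
// runVolumeManager starts the kubelet's volume manager in a goroutine and
// returns the stop channel used to terminate it.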
func runVolumeManager(kubelet *Kubelet) chan struct{} {
	stopCh := make(chan struct{})
	go kubelet.volumeManager.Run(kubelet.sourcesReady, stopCh)
	return stopCh
}

// Sort pods by UID.
type podsByUID []*v1.Pod

func (p podsByUID) Len() int           { return len(p) }
func (p podsByUID) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p podsByUID) Less(i, j int) bool { return p[i].UID < p[j].UID }