kubelet_test.go 66 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
66116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048
  1. /*
  2. Copyright 2014 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package kubelet
  14. import (
  15. "fmt"
  16. "io/ioutil"
  17. "os"
  18. "sort"
  19. "testing"
  20. "time"
  21. cadvisorapi "github.com/google/cadvisor/info/v1"
  22. "github.com/stretchr/testify/assert"
  23. "github.com/stretchr/testify/require"
  24. v1 "k8s.io/api/core/v1"
  25. "k8s.io/apimachinery/pkg/api/resource"
  26. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  27. "k8s.io/apimachinery/pkg/types"
  28. "k8s.io/apimachinery/pkg/util/clock"
  29. utilruntime "k8s.io/apimachinery/pkg/util/runtime"
  30. "k8s.io/apimachinery/pkg/util/sets"
  31. "k8s.io/apimachinery/pkg/util/wait"
  32. utilfeature "k8s.io/apiserver/pkg/util/feature"
  33. "k8s.io/client-go/kubernetes/fake"
  34. "k8s.io/client-go/tools/record"
  35. "k8s.io/client-go/util/flowcontrol"
  36. "k8s.io/component-base/featuregate"
  37. featuregatetesting "k8s.io/component-base/featuregate/testing"
  38. cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
  39. "k8s.io/kubernetes/pkg/kubelet/cm"
  40. "k8s.io/kubernetes/pkg/kubelet/config"
  41. "k8s.io/kubernetes/pkg/kubelet/configmap"
  42. kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
  43. containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
  44. "k8s.io/kubernetes/pkg/kubelet/eviction"
  45. "k8s.io/kubernetes/pkg/kubelet/images"
  46. "k8s.io/kubernetes/pkg/kubelet/lifecycle"
  47. "k8s.io/kubernetes/pkg/kubelet/logs"
  48. "k8s.io/kubernetes/pkg/kubelet/network/dns"
  49. "k8s.io/kubernetes/pkg/kubelet/pleg"
  50. "k8s.io/kubernetes/pkg/kubelet/pluginmanager"
  51. kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
  52. podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
  53. proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
  54. probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing"
  55. "k8s.io/kubernetes/pkg/kubelet/secret"
  56. serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
  57. "k8s.io/kubernetes/pkg/kubelet/stats"
  58. "k8s.io/kubernetes/pkg/kubelet/status"
  59. statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
  60. "k8s.io/kubernetes/pkg/kubelet/token"
  61. kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
  62. "k8s.io/kubernetes/pkg/kubelet/util/queue"
  63. kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
  64. schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
  65. "k8s.io/kubernetes/pkg/util/mount"
  66. "k8s.io/kubernetes/pkg/volume"
  67. "k8s.io/kubernetes/pkg/volume/awsebs"
  68. "k8s.io/kubernetes/pkg/volume/azure_dd"
  69. "k8s.io/kubernetes/pkg/volume/gcepd"
  70. _ "k8s.io/kubernetes/pkg/volume/hostpath"
  71. volumetest "k8s.io/kubernetes/pkg/volume/testing"
  72. "k8s.io/kubernetes/pkg/volume/util"
  73. "k8s.io/kubernetes/pkg/volume/util/subpath"
  74. )
  75. func init() {
  76. utilruntime.ReallyCrash = true
  77. }
  78. const (
  79. testKubeletHostname = "127.0.0.1"
  80. testKubeletHostIP = "127.0.0.1"
  81. // TODO(harry) any global place for these two?
  82. // Reasonable size range of all container images. 90%ile of images on dockerhub drops into this range.
  83. minImgSize int64 = 23 * 1024 * 1024
  84. maxImgSize int64 = 1000 * 1024 * 1024
  85. )
  86. // fakeImageGCManager is a fake image gc manager for testing. It will return image
  87. // list from fake runtime directly instead of caching it.
  88. type fakeImageGCManager struct {
  89. fakeImageService kubecontainer.ImageService
  90. images.ImageGCManager
  91. }
  92. func (f *fakeImageGCManager) GetImageList() ([]kubecontainer.Image, error) {
  93. return f.fakeImageService.ListImages()
  94. }
  95. type TestKubelet struct {
  96. kubelet *Kubelet
  97. fakeRuntime *containertest.FakeRuntime
  98. fakeKubeClient *fake.Clientset
  99. fakeMirrorClient *podtest.FakeMirrorClient
  100. fakeClock *clock.FakeClock
  101. mounter mount.Interface
  102. volumePlugin *volumetest.FakeVolumePlugin
  103. }
  104. func (tk *TestKubelet) Cleanup() {
  105. if tk.kubelet != nil {
  106. os.RemoveAll(tk.kubelet.rootDirectory)
  107. }
  108. }
  109. // newTestKubelet returns test kubelet with two images.
  110. func newTestKubelet(t *testing.T, controllerAttachDetachEnabled bool) *TestKubelet {
  111. imageList := []kubecontainer.Image{
  112. {
  113. ID: "abc",
  114. RepoTags: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
  115. Size: 123,
  116. },
  117. {
  118. ID: "efg",
  119. RepoTags: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
  120. Size: 456,
  121. },
  122. }
  123. return newTestKubeletWithImageList(t, imageList, controllerAttachDetachEnabled, true /*initFakeVolumePlugin*/)
  124. }
  125. func newTestKubeletWithImageList(
  126. t *testing.T,
  127. imageList []kubecontainer.Image,
  128. controllerAttachDetachEnabled bool,
  129. initFakeVolumePlugin bool) *TestKubelet {
  130. fakeRuntime := &containertest.FakeRuntime{}
  131. fakeRuntime.RuntimeType = "test"
  132. fakeRuntime.VersionInfo = "1.5.0"
  133. fakeRuntime.ImageList = imageList
  134. // Set ready conditions by default.
  135. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
  136. Conditions: []kubecontainer.RuntimeCondition{
  137. {Type: "RuntimeReady", Status: true},
  138. {Type: "NetworkReady", Status: true},
  139. },
  140. }
  141. fakeRecorder := &record.FakeRecorder{}
  142. fakeKubeClient := &fake.Clientset{}
  143. kubelet := &Kubelet{}
  144. kubelet.recorder = fakeRecorder
  145. kubelet.kubeClient = fakeKubeClient
  146. kubelet.heartbeatClient = fakeKubeClient
  147. kubelet.os = &containertest.FakeOS{}
  148. kubelet.mounter = &mount.FakeMounter{}
  149. kubelet.subpather = &subpath.FakeSubpath{}
  150. kubelet.hostname = testKubeletHostname
  151. kubelet.nodeName = types.NodeName(testKubeletHostname)
  152. kubelet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
  153. kubelet.runtimeState.setNetworkState(nil)
  154. if tempDir, err := ioutil.TempDir("", "kubelet_test."); err != nil {
  155. t.Fatalf("can't make a temp rootdir: %v", err)
  156. } else {
  157. kubelet.rootDirectory = tempDir
  158. }
  159. if err := os.MkdirAll(kubelet.rootDirectory, 0750); err != nil {
  160. t.Fatalf("can't mkdir(%q): %v", kubelet.rootDirectory, err)
  161. }
  162. kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return true })
  163. kubelet.masterServiceNamespace = metav1.NamespaceDefault
  164. kubelet.serviceLister = testServiceLister{}
  165. kubelet.nodeInfo = testNodeInfo{
  166. nodes: []*v1.Node{
  167. {
  168. ObjectMeta: metav1.ObjectMeta{
  169. Name: string(kubelet.nodeName),
  170. },
  171. Status: v1.NodeStatus{
  172. Conditions: []v1.NodeCondition{
  173. {
  174. Type: v1.NodeReady,
  175. Status: v1.ConditionTrue,
  176. Reason: "Ready",
  177. Message: "Node ready",
  178. },
  179. },
  180. Addresses: []v1.NodeAddress{
  181. {
  182. Type: v1.NodeInternalIP,
  183. Address: testKubeletHostIP,
  184. },
  185. },
  186. },
  187. },
  188. },
  189. }
  190. kubelet.recorder = fakeRecorder
  191. if err := kubelet.setupDataDirs(); err != nil {
  192. t.Fatalf("can't initialize kubelet data dirs: %v", err)
  193. }
  194. kubelet.daemonEndpoints = &v1.NodeDaemonEndpoints{}
  195. kubelet.cadvisor = &cadvisortest.Fake{}
  196. machineInfo, _ := kubelet.cadvisor.MachineInfo()
  197. kubelet.machineInfo = machineInfo
  198. fakeMirrorClient := podtest.NewFakeMirrorClient()
  199. secretManager := secret.NewSimpleSecretManager(kubelet.kubeClient)
  200. kubelet.secretManager = secretManager
  201. configMapManager := configmap.NewSimpleConfigMapManager(kubelet.kubeClient)
  202. kubelet.configMapManager = configMapManager
  203. kubelet.podManager = kubepod.NewBasicPodManager(fakeMirrorClient, kubelet.secretManager, kubelet.configMapManager, podtest.NewMockCheckpointManager())
  204. kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager, &statustest.FakePodDeletionSafetyProvider{})
  205. kubelet.containerRuntime = fakeRuntime
  206. kubelet.runtimeCache = containertest.NewFakeRuntimeCache(kubelet.containerRuntime)
  207. kubelet.reasonCache = NewReasonCache()
  208. kubelet.podCache = containertest.NewFakeCache(kubelet.containerRuntime)
  209. kubelet.podWorkers = &fakePodWorkers{
  210. syncPodFn: kubelet.syncPod,
  211. cache: kubelet.podCache,
  212. t: t,
  213. }
  214. kubelet.probeManager = probetest.FakeManager{}
  215. kubelet.livenessManager = proberesults.NewManager()
  216. kubelet.containerManager = cm.NewStubContainerManager()
  217. fakeNodeRef := &v1.ObjectReference{
  218. Kind: "Node",
  219. Name: testKubeletHostname,
  220. UID: types.UID(testKubeletHostname),
  221. Namespace: "",
  222. }
  223. volumeStatsAggPeriod := time.Second * 10
  224. kubelet.resourceAnalyzer = serverstats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod)
  225. kubelet.StatsProvider = stats.NewCadvisorStatsProvider(
  226. kubelet.cadvisor,
  227. kubelet.resourceAnalyzer,
  228. kubelet.podManager,
  229. kubelet.runtimeCache,
  230. fakeRuntime,
  231. kubelet.statusManager)
  232. fakeImageGCPolicy := images.ImageGCPolicy{
  233. HighThresholdPercent: 90,
  234. LowThresholdPercent: 80,
  235. }
  236. imageGCManager, err := images.NewImageGCManager(fakeRuntime, kubelet.StatsProvider, fakeRecorder, fakeNodeRef, fakeImageGCPolicy, "")
  237. assert.NoError(t, err)
  238. kubelet.imageManager = &fakeImageGCManager{
  239. fakeImageService: fakeRuntime,
  240. ImageGCManager: imageGCManager,
  241. }
  242. kubelet.containerLogManager = logs.NewStubContainerLogManager()
  243. containerGCPolicy := kubecontainer.ContainerGCPolicy{
  244. MinAge: time.Duration(0),
  245. MaxPerPodContainer: 1,
  246. MaxContainers: -1,
  247. }
  248. containerGC, err := kubecontainer.NewContainerGC(fakeRuntime, containerGCPolicy, kubelet.sourcesReady)
  249. assert.NoError(t, err)
  250. kubelet.containerGC = containerGC
  251. fakeClock := clock.NewFakeClock(time.Now())
  252. kubelet.backOff = flowcontrol.NewBackOff(time.Second, time.Minute)
  253. kubelet.backOff.Clock = fakeClock
  254. kubelet.podKillingCh = make(chan *kubecontainer.PodPair, 20)
  255. kubelet.resyncInterval = 10 * time.Second
  256. kubelet.workQueue = queue.NewBasicWorkQueue(fakeClock)
  257. // Relist period does not affect the tests.
  258. kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour, nil, clock.RealClock{})
  259. kubelet.clock = fakeClock
  260. nodeRef := &v1.ObjectReference{
  261. Kind: "Node",
  262. Name: string(kubelet.nodeName),
  263. UID: types.UID(kubelet.nodeName),
  264. Namespace: "",
  265. }
  266. // setup eviction manager
  267. evictionManager, evictionAdmitHandler := eviction.NewManager(kubelet.resourceAnalyzer, eviction.Config{}, killPodNow(kubelet.podWorkers, fakeRecorder), kubelet.podManager.GetMirrorPodByPod, kubelet.imageManager, kubelet.containerGC, fakeRecorder, nodeRef, kubelet.clock)
  268. kubelet.evictionManager = evictionManager
  269. kubelet.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler)
  270. // Add this as cleanup predicate pod admitter
  271. kubelet.admitHandlers.AddPodAdmitHandler(lifecycle.NewPredicateAdmitHandler(kubelet.getNodeAnyWay, lifecycle.NewAdmissionFailureHandlerStub(), kubelet.containerManager.UpdatePluginResources))
  272. allPlugins := []volume.VolumePlugin{}
  273. plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
  274. if initFakeVolumePlugin {
  275. allPlugins = append(allPlugins, plug)
  276. } else {
  277. allPlugins = append(allPlugins, awsebs.ProbeVolumePlugins()...)
  278. allPlugins = append(allPlugins, gcepd.ProbeVolumePlugins()...)
  279. allPlugins = append(allPlugins, azure_dd.ProbeVolumePlugins()...)
  280. }
  281. var prober volume.DynamicPluginProber // TODO (#51147) inject mock
  282. kubelet.volumePluginMgr, err =
  283. NewInitializedVolumePluginMgr(kubelet, kubelet.secretManager, kubelet.configMapManager, token.NewManager(kubelet.kubeClient), allPlugins, prober)
  284. require.NoError(t, err, "Failed to initialize VolumePluginMgr")
  285. kubelet.mounter = &mount.FakeMounter{}
  286. kubelet.volumeManager = kubeletvolume.NewVolumeManager(
  287. controllerAttachDetachEnabled,
  288. kubelet.nodeName,
  289. kubelet.podManager,
  290. kubelet.statusManager,
  291. fakeKubeClient,
  292. kubelet.volumePluginMgr,
  293. fakeRuntime,
  294. kubelet.mounter,
  295. kubelet.getPodsDir(),
  296. kubelet.recorder,
  297. false, /* experimentalCheckNodeCapabilitiesBeforeMount*/
  298. false /* keepTerminatedPodVolumes */)
  299. kubelet.pluginManager = pluginmanager.NewPluginManager(
  300. kubelet.getPluginsRegistrationDir(), /* sockDir */
  301. kubelet.getPluginsDir(), /* deprecatedSockDir */
  302. kubelet.recorder,
  303. )
  304. kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
  305. // enable active deadline handler
  306. activeDeadlineHandler, err := newActiveDeadlineHandler(kubelet.statusManager, kubelet.recorder, kubelet.clock)
  307. require.NoError(t, err, "Can't initialize active deadline handler")
  308. kubelet.AddPodSyncLoopHandler(activeDeadlineHandler)
  309. kubelet.AddPodSyncHandler(activeDeadlineHandler)
  310. return &TestKubelet{kubelet, fakeRuntime, fakeKubeClient, fakeMirrorClient, fakeClock, nil, plug}
  311. }
  312. func newTestPods(count int) []*v1.Pod {
  313. pods := make([]*v1.Pod, count)
  314. for i := 0; i < count; i++ {
  315. pods[i] = &v1.Pod{
  316. Spec: v1.PodSpec{
  317. HostNetwork: true,
  318. },
  319. ObjectMeta: metav1.ObjectMeta{
  320. UID: types.UID(10000 + i),
  321. Name: fmt.Sprintf("pod%d", i),
  322. },
  323. }
  324. }
  325. return pods
  326. }
  327. func TestSyncLoopAbort(t *testing.T) {
  328. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  329. defer testKubelet.Cleanup()
  330. kubelet := testKubelet.kubelet
  331. kubelet.runtimeState.setRuntimeSync(time.Now())
  332. // The syncLoop waits on time.After(resyncInterval), set it really big so that we don't race for
  333. // the channel close
  334. kubelet.resyncInterval = time.Second * 30
  335. ch := make(chan kubetypes.PodUpdate)
  336. close(ch)
  337. // sanity check (also prevent this test from hanging in the next step)
  338. ok := kubelet.syncLoopIteration(ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1))
  339. require.False(t, ok, "Expected syncLoopIteration to return !ok since update chan was closed")
  340. // this should terminate immediately; if it hangs then the syncLoopIteration isn't aborting properly
  341. kubelet.syncLoop(ch, kubelet)
  342. }
  343. func TestSyncPodsStartPod(t *testing.T) {
  344. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  345. defer testKubelet.Cleanup()
  346. kubelet := testKubelet.kubelet
  347. fakeRuntime := testKubelet.fakeRuntime
  348. pods := []*v1.Pod{
  349. podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
  350. Containers: []v1.Container{
  351. {Name: "bar"},
  352. },
  353. }),
  354. }
  355. kubelet.podManager.SetPods(pods)
  356. kubelet.HandlePodSyncs(pods)
  357. fakeRuntime.AssertStartedPods([]string{string(pods[0].UID)})
  358. }
  359. func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
  360. ready := false
  361. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  362. defer testKubelet.Cleanup()
  363. fakeRuntime := testKubelet.fakeRuntime
  364. kubelet := testKubelet.kubelet
  365. kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return ready })
  366. fakeRuntime.PodList = []*containertest.FakePod{
  367. {Pod: &kubecontainer.Pod{
  368. ID: "12345678",
  369. Name: "foo",
  370. Namespace: "new",
  371. Containers: []*kubecontainer.Container{
  372. {Name: "bar"},
  373. },
  374. }},
  375. }
  376. kubelet.HandlePodCleanups()
  377. // Sources are not ready yet. Don't remove any pods.
  378. fakeRuntime.AssertKilledPods([]string{})
  379. ready = true
  380. kubelet.HandlePodCleanups()
  381. // Sources are ready. Remove unwanted pods.
  382. fakeRuntime.AssertKilledPods([]string{"12345678"})
  383. }
  384. type testNodeInfo struct {
  385. nodes []*v1.Node
  386. }
  387. func (ls testNodeInfo) GetNodeInfo(id string) (*v1.Node, error) {
  388. for _, node := range ls.nodes {
  389. if node.Name == id {
  390. return node, nil
  391. }
  392. }
  393. return nil, fmt.Errorf("Node with name: %s does not exist", id)
  394. }
  395. func checkPodStatus(t *testing.T, kl *Kubelet, pod *v1.Pod, phase v1.PodPhase) {
  396. status, found := kl.statusManager.GetPodStatus(pod.UID)
  397. require.True(t, found, "Status of pod %q is not found in the status map", pod.UID)
  398. require.Equal(t, phase, status.Phase)
  399. }
  400. // Tests that we handle port conflicts correctly by setting the failed status in status map.
  401. func TestHandlePortConflicts(t *testing.T) {
  402. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  403. defer testKubelet.Cleanup()
  404. kl := testKubelet.kubelet
  405. kl.nodeInfo = testNodeInfo{nodes: []*v1.Node{
  406. {
  407. ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
  408. Status: v1.NodeStatus{
  409. Allocatable: v1.ResourceList{
  410. v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
  411. },
  412. },
  413. },
  414. }}
  415. recorder := record.NewFakeRecorder(20)
  416. nodeRef := &v1.ObjectReference{
  417. Kind: "Node",
  418. Name: string("testNode"),
  419. UID: types.UID("testNode"),
  420. Namespace: "",
  421. }
  422. testClusterDNSDomain := "TEST"
  423. kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
  424. spec := v1.PodSpec{NodeName: string(kl.nodeName), Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}
  425. pods := []*v1.Pod{
  426. podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
  427. podWithUIDNameNsSpec("987654321", "oldpod", "foo", spec),
  428. }
  429. // Make sure the Pods are in the reverse order of creation time.
  430. pods[1].CreationTimestamp = metav1.NewTime(time.Now())
  431. pods[0].CreationTimestamp = metav1.NewTime(time.Now().Add(1 * time.Second))
  432. // The newer pod should be rejected.
  433. notfittingPod := pods[0]
  434. fittingPod := pods[1]
  435. kl.HandlePodAdditions(pods)
  436. // Check pod status stored in the status map.
  437. checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
  438. checkPodStatus(t, kl, fittingPod, v1.PodPending)
  439. }
  440. // Tests that we handle host name conflicts correctly by setting the failed status in status map.
  441. func TestHandleHostNameConflicts(t *testing.T) {
  442. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  443. defer testKubelet.Cleanup()
  444. kl := testKubelet.kubelet
  445. kl.nodeInfo = testNodeInfo{nodes: []*v1.Node{
  446. {
  447. ObjectMeta: metav1.ObjectMeta{Name: "127.0.0.1"},
  448. Status: v1.NodeStatus{
  449. Allocatable: v1.ResourceList{
  450. v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
  451. },
  452. },
  453. },
  454. }}
  455. recorder := record.NewFakeRecorder(20)
  456. nodeRef := &v1.ObjectReference{
  457. Kind: "Node",
  458. Name: string("testNode"),
  459. UID: types.UID("testNode"),
  460. Namespace: "",
  461. }
  462. testClusterDNSDomain := "TEST"
  463. kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
  464. // default NodeName in test is 127.0.0.1
  465. pods := []*v1.Pod{
  466. podWithUIDNameNsSpec("123456789", "notfittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.2"}),
  467. podWithUIDNameNsSpec("987654321", "fittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.1"}),
  468. }
  469. notfittingPod := pods[0]
  470. fittingPod := pods[1]
  471. kl.HandlePodAdditions(pods)
  472. // Check pod status stored in the status map.
  473. checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
  474. checkPodStatus(t, kl, fittingPod, v1.PodPending)
  475. }
  476. // Tests that we handle not matching labels selector correctly by setting the failed status in status map.
  477. func TestHandleNodeSelector(t *testing.T) {
  478. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  479. defer testKubelet.Cleanup()
  480. kl := testKubelet.kubelet
  481. nodes := []*v1.Node{
  482. {
  483. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}},
  484. Status: v1.NodeStatus{
  485. Allocatable: v1.ResourceList{
  486. v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
  487. },
  488. },
  489. },
  490. }
  491. kl.nodeInfo = testNodeInfo{nodes: nodes}
  492. recorder := record.NewFakeRecorder(20)
  493. nodeRef := &v1.ObjectReference{
  494. Kind: "Node",
  495. Name: string("testNode"),
  496. UID: types.UID("testNode"),
  497. Namespace: "",
  498. }
  499. testClusterDNSDomain := "TEST"
  500. kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
  501. pods := []*v1.Pod{
  502. podWithUIDNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
  503. podWithUIDNameNsSpec("987654321", "podB", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
  504. }
  505. // The first pod should be rejected.
  506. notfittingPod := pods[0]
  507. fittingPod := pods[1]
  508. kl.HandlePodAdditions(pods)
  509. // Check pod status stored in the status map.
  510. checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
  511. checkPodStatus(t, kl, fittingPod, v1.PodPending)
  512. }
  513. // Tests that we handle exceeded resources correctly by setting the failed status in status map.
  514. func TestHandleMemExceeded(t *testing.T) {
  515. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  516. defer testKubelet.Cleanup()
  517. kl := testKubelet.kubelet
  518. nodes := []*v1.Node{
  519. {ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  520. Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: v1.ResourceList{
  521. v1.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI),
  522. v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI),
  523. v1.ResourcePods: *resource.NewQuantity(40, resource.DecimalSI),
  524. }}},
  525. }
  526. kl.nodeInfo = testNodeInfo{nodes: nodes}
  527. recorder := record.NewFakeRecorder(20)
  528. nodeRef := &v1.ObjectReference{
  529. Kind: "Node",
  530. Name: string("testNode"),
  531. UID: types.UID("testNode"),
  532. Namespace: "",
  533. }
  534. testClusterDNSDomain := "TEST"
  535. kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
  536. spec := v1.PodSpec{NodeName: string(kl.nodeName),
  537. Containers: []v1.Container{{Resources: v1.ResourceRequirements{
  538. Requests: v1.ResourceList{
  539. v1.ResourceMemory: resource.MustParse("90"),
  540. },
  541. }}},
  542. }
  543. pods := []*v1.Pod{
  544. podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
  545. podWithUIDNameNsSpec("987654321", "oldpod", "foo", spec),
  546. }
  547. // Make sure the Pods are in the reverse order of creation time.
  548. pods[1].CreationTimestamp = metav1.NewTime(time.Now())
  549. pods[0].CreationTimestamp = metav1.NewTime(time.Now().Add(1 * time.Second))
  550. // The newer pod should be rejected.
  551. notfittingPod := pods[0]
  552. fittingPod := pods[1]
  553. kl.HandlePodAdditions(pods)
  554. // Check pod status stored in the status map.
  555. checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
  556. checkPodStatus(t, kl, fittingPod, v1.PodPending)
  557. }
  558. // Tests that we handle result of interface UpdatePluginResources correctly
  559. // by setting corresponding status in status map.
  560. func TestHandlePluginResources(t *testing.T) {
  561. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  562. defer testKubelet.Cleanup()
  563. kl := testKubelet.kubelet
  564. adjustedResource := v1.ResourceName("domain1.com/adjustedResource")
  565. emptyResource := v1.ResourceName("domain2.com/emptyResource")
  566. missingResource := v1.ResourceName("domain2.com/missingResource")
  567. failedResource := v1.ResourceName("domain2.com/failedResource")
  568. resourceQuantity0 := *resource.NewQuantity(int64(0), resource.DecimalSI)
  569. resourceQuantity1 := *resource.NewQuantity(int64(1), resource.DecimalSI)
  570. resourceQuantity2 := *resource.NewQuantity(int64(2), resource.DecimalSI)
  571. resourceQuantityInvalid := *resource.NewQuantity(int64(-1), resource.DecimalSI)
  572. allowedPodQuantity := *resource.NewQuantity(int64(10), resource.DecimalSI)
  573. nodes := []*v1.Node{
  574. {ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  575. Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: v1.ResourceList{
  576. adjustedResource: resourceQuantity1,
  577. emptyResource: resourceQuantity0,
  578. v1.ResourcePods: allowedPodQuantity,
  579. }}},
  580. }
  581. kl.nodeInfo = testNodeInfo{nodes: nodes}
  582. updatePluginResourcesFunc := func(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
  583. // Maps from resourceName to the value we use to set node.allocatableResource[resourceName].
  584. // A resource with invalid value (< 0) causes the function to return an error
  585. // to emulate resource Allocation failure.
  586. // Resources not contained in this map will have their node.allocatableResource
  587. // quantity unchanged.
  588. updateResourceMap := map[v1.ResourceName]resource.Quantity{
  589. adjustedResource: resourceQuantity2,
  590. emptyResource: resourceQuantity0,
  591. failedResource: resourceQuantityInvalid,
  592. }
  593. pod := attrs.Pod
  594. allocatableResource := node.AllocatableResource()
  595. newAllocatableResource := allocatableResource.Clone()
  596. for _, container := range pod.Spec.Containers {
  597. for resource := range container.Resources.Requests {
  598. newQuantity, exist := updateResourceMap[resource]
  599. if !exist {
  600. continue
  601. }
  602. if newQuantity.Value() < 0 {
  603. return fmt.Errorf("Allocation failed")
  604. }
  605. newAllocatableResource.ScalarResources[resource] = newQuantity.Value()
  606. }
  607. }
  608. node.SetAllocatableResource(newAllocatableResource)
  609. return nil
  610. }
  611. // add updatePluginResourcesFunc to admission handler, to test it's behavior.
  612. kl.admitHandlers = lifecycle.PodAdmitHandlers{}
  613. kl.admitHandlers.AddPodAdmitHandler(lifecycle.NewPredicateAdmitHandler(kl.getNodeAnyWay, lifecycle.NewAdmissionFailureHandlerStub(), updatePluginResourcesFunc))
  614. recorder := record.NewFakeRecorder(20)
  615. nodeRef := &v1.ObjectReference{
  616. Kind: "Node",
  617. Name: string("testNode"),
  618. UID: types.UID("testNode"),
  619. Namespace: "",
  620. }
  621. testClusterDNSDomain := "TEST"
  622. kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
  623. // pod requiring adjustedResource can be successfully allocated because updatePluginResourcesFunc
  624. // adjusts node.allocatableResource for this resource to a sufficient value.
  625. fittingPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
  626. Containers: []v1.Container{{Resources: v1.ResourceRequirements{
  627. Limits: v1.ResourceList{
  628. adjustedResource: resourceQuantity2,
  629. },
  630. Requests: v1.ResourceList{
  631. adjustedResource: resourceQuantity2,
  632. },
  633. }}},
  634. }
  635. // pod requiring emptyResource (extended resources with 0 allocatable) will
  636. // not pass PredicateAdmit.
  637. emptyPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
  638. Containers: []v1.Container{{Resources: v1.ResourceRequirements{
  639. Limits: v1.ResourceList{
  640. emptyResource: resourceQuantity2,
  641. },
  642. Requests: v1.ResourceList{
  643. emptyResource: resourceQuantity2,
  644. },
  645. }}},
  646. }
  647. // pod requiring missingResource will pass PredicateAdmit.
  648. //
  649. // Extended resources missing in node status are ignored in PredicateAdmit.
  650. // This is required to support extended resources that are not managed by
  651. // device plugin, such as cluster-level resources.
  652. missingPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
  653. Containers: []v1.Container{{Resources: v1.ResourceRequirements{
  654. Limits: v1.ResourceList{
  655. missingResource: resourceQuantity2,
  656. },
  657. Requests: v1.ResourceList{
  658. missingResource: resourceQuantity2,
  659. },
  660. }}},
  661. }
  662. // pod requiring failedResource will fail with the resource failed to be allocated.
  663. failedPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
  664. Containers: []v1.Container{{Resources: v1.ResourceRequirements{
  665. Limits: v1.ResourceList{
  666. failedResource: resourceQuantity1,
  667. },
  668. Requests: v1.ResourceList{
  669. failedResource: resourceQuantity1,
  670. },
  671. }}},
  672. }
  673. fittingPod := podWithUIDNameNsSpec("1", "fittingpod", "foo", fittingPodSpec)
  674. emptyPod := podWithUIDNameNsSpec("2", "emptypod", "foo", emptyPodSpec)
  675. missingPod := podWithUIDNameNsSpec("3", "missingpod", "foo", missingPodSpec)
  676. failedPod := podWithUIDNameNsSpec("4", "failedpod", "foo", failedPodSpec)
  677. kl.HandlePodAdditions([]*v1.Pod{fittingPod, emptyPod, missingPod, failedPod})
  678. // Check pod status stored in the status map.
  679. checkPodStatus(t, kl, fittingPod, v1.PodPending)
  680. checkPodStatus(t, kl, emptyPod, v1.PodFailed)
  681. checkPodStatus(t, kl, missingPod, v1.PodPending)
  682. checkPodStatus(t, kl, failedPod, v1.PodFailed)
  683. }
  684. // TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without external signal.
  685. func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
  686. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  687. defer testKubelet.Cleanup()
  688. kl := testKubelet.kubelet
  689. pods := []*v1.Pod{
  690. {ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: "1234"}, Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}},
  691. {ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: "4567"}, Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}},
  692. }
  693. podToTest := pods[1]
  694. // Run once to populate the status map.
  695. kl.HandlePodAdditions(pods)
  696. if _, found := kl.statusManager.GetPodStatus(podToTest.UID); !found {
  697. t.Fatalf("expected to have status cached for pod2")
  698. }
  699. // Sync with empty pods so that the entry in status map will be removed.
  700. kl.podManager.SetPods([]*v1.Pod{})
  701. kl.HandlePodCleanups()
  702. if _, found := kl.statusManager.GetPodStatus(podToTest.UID); found {
  703. t.Fatalf("expected to not have status cached for pod2")
  704. }
  705. }
  706. func TestValidateContainerLogStatus(t *testing.T) {
  707. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  708. defer testKubelet.Cleanup()
  709. kubelet := testKubelet.kubelet
  710. containerName := "x"
  711. testCases := []struct {
  712. statuses []v1.ContainerStatus
  713. success bool // whether getting logs for the container should succeed.
  714. pSuccess bool // whether getting logs for the previous container should succeed.
  715. }{
  716. {
  717. statuses: []v1.ContainerStatus{
  718. {
  719. Name: containerName,
  720. State: v1.ContainerState{
  721. Running: &v1.ContainerStateRunning{},
  722. },
  723. LastTerminationState: v1.ContainerState{
  724. Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
  725. },
  726. },
  727. },
  728. success: true,
  729. pSuccess: true,
  730. },
  731. {
  732. statuses: []v1.ContainerStatus{
  733. {
  734. Name: containerName,
  735. State: v1.ContainerState{
  736. Running: &v1.ContainerStateRunning{},
  737. },
  738. },
  739. },
  740. success: true,
  741. pSuccess: false,
  742. },
  743. {
  744. statuses: []v1.ContainerStatus{
  745. {
  746. Name: containerName,
  747. State: v1.ContainerState{
  748. Terminated: &v1.ContainerStateTerminated{},
  749. },
  750. },
  751. },
  752. success: false,
  753. pSuccess: false,
  754. },
  755. {
  756. statuses: []v1.ContainerStatus{
  757. {
  758. Name: containerName,
  759. State: v1.ContainerState{
  760. Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
  761. },
  762. },
  763. },
  764. success: true,
  765. pSuccess: false,
  766. },
  767. {
  768. statuses: []v1.ContainerStatus{
  769. {
  770. Name: containerName,
  771. State: v1.ContainerState{
  772. Terminated: &v1.ContainerStateTerminated{},
  773. },
  774. LastTerminationState: v1.ContainerState{
  775. Terminated: &v1.ContainerStateTerminated{},
  776. },
  777. },
  778. },
  779. success: false,
  780. pSuccess: false,
  781. },
  782. {
  783. statuses: []v1.ContainerStatus{
  784. {
  785. Name: containerName,
  786. State: v1.ContainerState{
  787. Terminated: &v1.ContainerStateTerminated{},
  788. },
  789. LastTerminationState: v1.ContainerState{
  790. Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
  791. },
  792. },
  793. },
  794. success: true,
  795. pSuccess: true,
  796. },
  797. {
  798. statuses: []v1.ContainerStatus{
  799. {
  800. Name: containerName,
  801. State: v1.ContainerState{
  802. Waiting: &v1.ContainerStateWaiting{},
  803. },
  804. },
  805. },
  806. success: false,
  807. pSuccess: false,
  808. },
  809. {
  810. statuses: []v1.ContainerStatus{
  811. {
  812. Name: containerName,
  813. State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "ErrImagePull"}},
  814. },
  815. },
  816. success: false,
  817. pSuccess: false,
  818. },
  819. {
  820. statuses: []v1.ContainerStatus{
  821. {
  822. Name: containerName,
  823. State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "ErrImagePullBackOff"}},
  824. },
  825. },
  826. success: false,
  827. pSuccess: false,
  828. },
  829. }
  830. for i, tc := range testCases {
  831. // Access the log of the most recent container
  832. previous := false
  833. podStatus := &v1.PodStatus{ContainerStatuses: tc.statuses}
  834. _, err := kubelet.validateContainerLogStatus("podName", podStatus, containerName, previous)
  835. if !tc.success {
  836. assert.Error(t, err, fmt.Sprintf("[case %d] error", i))
  837. } else {
  838. assert.NoError(t, err, "[case %d] error", i)
  839. }
  840. // Access the log of the previous, terminated container
  841. previous = true
  842. _, err = kubelet.validateContainerLogStatus("podName", podStatus, containerName, previous)
  843. if !tc.pSuccess {
  844. assert.Error(t, err, fmt.Sprintf("[case %d] error", i))
  845. } else {
  846. assert.NoError(t, err, "[case %d] error", i)
  847. }
  848. // Access the log of a container that's not in the pod
  849. _, err = kubelet.validateContainerLogStatus("podName", podStatus, "blah", false)
  850. assert.Error(t, err, fmt.Sprintf("[case %d] invalid container name should cause an error", i))
  851. }
  852. }
  853. func TestCreateMirrorPod(t *testing.T) {
  854. for _, updateType := range []kubetypes.SyncPodType{kubetypes.SyncPodCreate, kubetypes.SyncPodUpdate} {
  855. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  856. defer testKubelet.Cleanup()
  857. kl := testKubelet.kubelet
  858. manager := testKubelet.fakeMirrorClient
  859. pod := podWithUIDNameNs("12345678", "bar", "foo")
  860. pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
  861. pods := []*v1.Pod{pod}
  862. kl.podManager.SetPods(pods)
  863. err := kl.syncPod(syncPodOptions{
  864. pod: pod,
  865. podStatus: &kubecontainer.PodStatus{},
  866. updateType: updateType,
  867. })
  868. assert.NoError(t, err)
  869. podFullName := kubecontainer.GetPodFullName(pod)
  870. assert.True(t, manager.HasPod(podFullName), "Expected mirror pod %q to be created", podFullName)
  871. assert.Equal(t, 1, manager.NumOfPods(), "Expected only 1 mirror pod %q, got %+v", podFullName, manager.GetPods())
  872. }
  873. }
  874. func TestDeleteOutdatedMirrorPod(t *testing.T) {
  875. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  876. defer testKubelet.Cleanup()
  877. kl := testKubelet.kubelet
  878. manager := testKubelet.fakeMirrorClient
  879. pod := podWithUIDNameNsSpec("12345678", "foo", "ns", v1.PodSpec{
  880. Containers: []v1.Container{
  881. {Name: "1234", Image: "foo"},
  882. },
  883. })
  884. pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
  885. // Mirror pod has an outdated spec.
  886. mirrorPod := podWithUIDNameNsSpec("11111111", "foo", "ns", v1.PodSpec{
  887. Containers: []v1.Container{
  888. {Name: "1234", Image: "bar"},
  889. },
  890. })
  891. mirrorPod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "api"
  892. mirrorPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = "mirror"
  893. pods := []*v1.Pod{pod, mirrorPod}
  894. kl.podManager.SetPods(pods)
  895. err := kl.syncPod(syncPodOptions{
  896. pod: pod,
  897. mirrorPod: mirrorPod,
  898. podStatus: &kubecontainer.PodStatus{},
  899. updateType: kubetypes.SyncPodUpdate,
  900. })
  901. assert.NoError(t, err)
  902. name := kubecontainer.GetPodFullName(pod)
  903. creates, deletes := manager.GetCounts(name)
  904. if creates != 1 || deletes != 1 {
  905. t.Errorf("expected 1 creation and 1 deletion of %q, got %d, %d", name, creates, deletes)
  906. }
  907. }
  908. func TestDeleteOrphanedMirrorPods(t *testing.T) {
  909. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  910. defer testKubelet.Cleanup()
  911. kl := testKubelet.kubelet
  912. manager := testKubelet.fakeMirrorClient
  913. orphanPods := []*v1.Pod{
  914. {
  915. ObjectMeta: metav1.ObjectMeta{
  916. UID: "12345678",
  917. Name: "pod1",
  918. Namespace: "ns",
  919. Annotations: map[string]string{
  920. kubetypes.ConfigSourceAnnotationKey: "api",
  921. kubetypes.ConfigMirrorAnnotationKey: "mirror",
  922. },
  923. },
  924. },
  925. {
  926. ObjectMeta: metav1.ObjectMeta{
  927. UID: "12345679",
  928. Name: "pod2",
  929. Namespace: "ns",
  930. Annotations: map[string]string{
  931. kubetypes.ConfigSourceAnnotationKey: "api",
  932. kubetypes.ConfigMirrorAnnotationKey: "mirror",
  933. },
  934. },
  935. },
  936. }
  937. kl.podManager.SetPods(orphanPods)
  938. // Sync with an empty pod list to delete all mirror pods.
  939. kl.HandlePodCleanups()
  940. assert.Len(t, manager.GetPods(), 0, "Expected 0 mirror pods")
  941. for _, pod := range orphanPods {
  942. name := kubecontainer.GetPodFullName(pod)
  943. creates, deletes := manager.GetCounts(name)
  944. if creates != 0 || deletes != 1 {
  945. t.Errorf("expected 0 creation and one deletion of %q, got %d, %d", name, creates, deletes)
  946. }
  947. }
  948. }
  949. func TestGetContainerInfoForMirrorPods(t *testing.T) {
  950. // pods contain one static and one mirror pod with the same name but
  951. // different UIDs.
  952. pods := []*v1.Pod{
  953. {
  954. ObjectMeta: metav1.ObjectMeta{
  955. UID: "1234",
  956. Name: "qux",
  957. Namespace: "ns",
  958. Annotations: map[string]string{
  959. kubetypes.ConfigSourceAnnotationKey: "file",
  960. },
  961. },
  962. Spec: v1.PodSpec{
  963. Containers: []v1.Container{
  964. {Name: "foo"},
  965. },
  966. },
  967. },
  968. {
  969. ObjectMeta: metav1.ObjectMeta{
  970. UID: "5678",
  971. Name: "qux",
  972. Namespace: "ns",
  973. Annotations: map[string]string{
  974. kubetypes.ConfigSourceAnnotationKey: "api",
  975. kubetypes.ConfigMirrorAnnotationKey: "mirror",
  976. },
  977. },
  978. Spec: v1.PodSpec{
  979. Containers: []v1.Container{
  980. {Name: "foo"},
  981. },
  982. },
  983. },
  984. }
  985. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  986. defer testKubelet.Cleanup()
  987. fakeRuntime := testKubelet.fakeRuntime
  988. cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
  989. kubelet := testKubelet.kubelet
  990. fakeRuntime.PodList = []*containertest.FakePod{
  991. {Pod: &kubecontainer.Pod{
  992. ID: "1234",
  993. Name: "qux",
  994. Namespace: "ns",
  995. Containers: []*kubecontainer.Container{
  996. {
  997. Name: "foo",
  998. ID: kubecontainer.ContainerID{Type: "test", ID: "ab2cdf"},
  999. },
  1000. },
  1001. }},
  1002. }
  1003. kubelet.podManager.SetPods(pods)
  1004. // Use the mirror pod UID to retrieve the stats.
  1005. stats, err := kubelet.GetContainerInfo("qux_ns", "5678", "foo", cadvisorReq)
  1006. assert.NoError(t, err)
  1007. require.NotNil(t, stats)
  1008. }
  1009. func TestNetworkErrorsWithoutHostNetwork(t *testing.T) {
  1010. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1011. defer testKubelet.Cleanup()
  1012. kubelet := testKubelet.kubelet
  1013. kubelet.runtimeState.setNetworkState(fmt.Errorf("simulated network error"))
  1014. pod := podWithUIDNameNsSpec("12345678", "hostnetwork", "new", v1.PodSpec{
  1015. HostNetwork: false,
  1016. Containers: []v1.Container{
  1017. {Name: "foo"},
  1018. },
  1019. })
  1020. kubelet.podManager.SetPods([]*v1.Pod{pod})
  1021. err := kubelet.syncPod(syncPodOptions{
  1022. pod: pod,
  1023. podStatus: &kubecontainer.PodStatus{},
  1024. updateType: kubetypes.SyncPodUpdate,
  1025. })
  1026. assert.Error(t, err, "expected pod with hostNetwork=false to fail when network in error")
  1027. pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource
  1028. pod.Spec.HostNetwork = true
  1029. err = kubelet.syncPod(syncPodOptions{
  1030. pod: pod,
  1031. podStatus: &kubecontainer.PodStatus{},
  1032. updateType: kubetypes.SyncPodUpdate,
  1033. })
  1034. assert.NoError(t, err, "expected pod with hostNetwork=true to succeed when network in error")
  1035. }
  1036. func TestFilterOutTerminatedPods(t *testing.T) {
  1037. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1038. defer testKubelet.Cleanup()
  1039. kubelet := testKubelet.kubelet
  1040. pods := newTestPods(5)
  1041. now := metav1.NewTime(time.Now())
  1042. pods[0].Status.Phase = v1.PodFailed
  1043. pods[1].Status.Phase = v1.PodSucceeded
  1044. // The pod is terminating, should not filter out.
  1045. pods[2].Status.Phase = v1.PodRunning
  1046. pods[2].DeletionTimestamp = &now
  1047. pods[2].Status.ContainerStatuses = []v1.ContainerStatus{
  1048. {State: v1.ContainerState{
  1049. Running: &v1.ContainerStateRunning{
  1050. StartedAt: now,
  1051. },
  1052. }},
  1053. }
  1054. pods[3].Status.Phase = v1.PodPending
  1055. pods[4].Status.Phase = v1.PodRunning
  1056. expected := []*v1.Pod{pods[2], pods[3], pods[4]}
  1057. kubelet.podManager.SetPods(pods)
  1058. actual := kubelet.filterOutTerminatedPods(pods)
  1059. assert.Equal(t, expected, actual)
  1060. }
  1061. func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
  1062. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1063. defer testKubelet.Cleanup()
  1064. fakeRuntime := testKubelet.fakeRuntime
  1065. kubelet := testKubelet.kubelet
  1066. now := metav1.Now()
  1067. startTime := metav1.NewTime(now.Time.Add(-1 * time.Minute))
  1068. exceededActiveDeadlineSeconds := int64(30)
  1069. pods := []*v1.Pod{
  1070. {
  1071. ObjectMeta: metav1.ObjectMeta{
  1072. UID: "12345678",
  1073. Name: "bar",
  1074. Namespace: "new",
  1075. },
  1076. Spec: v1.PodSpec{
  1077. Containers: []v1.Container{
  1078. {Name: "foo"},
  1079. },
  1080. ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
  1081. },
  1082. Status: v1.PodStatus{
  1083. StartTime: &startTime,
  1084. },
  1085. },
  1086. }
  1087. fakeRuntime.PodList = []*containertest.FakePod{
  1088. {Pod: &kubecontainer.Pod{
  1089. ID: "12345678",
  1090. Name: "bar",
  1091. Namespace: "new",
  1092. Containers: []*kubecontainer.Container{
  1093. {Name: "foo"},
  1094. },
  1095. }},
  1096. }
  1097. // Let the pod worker sets the status to fail after this sync.
  1098. kubelet.HandlePodUpdates(pods)
  1099. status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
  1100. assert.True(t, found, "expected to found status for pod %q", pods[0].UID)
  1101. assert.Equal(t, v1.PodFailed, status.Phase)
  1102. // check pod status contains ContainerStatuses, etc.
  1103. assert.NotNil(t, status.ContainerStatuses)
  1104. }
  1105. func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
  1106. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1107. defer testKubelet.Cleanup()
  1108. fakeRuntime := testKubelet.fakeRuntime
  1109. kubelet := testKubelet.kubelet
  1110. now := metav1.Now()
  1111. startTime := metav1.NewTime(now.Time.Add(-1 * time.Minute))
  1112. exceededActiveDeadlineSeconds := int64(300)
  1113. pods := []*v1.Pod{
  1114. {
  1115. ObjectMeta: metav1.ObjectMeta{
  1116. UID: "12345678",
  1117. Name: "bar",
  1118. Namespace: "new",
  1119. },
  1120. Spec: v1.PodSpec{
  1121. Containers: []v1.Container{
  1122. {Name: "foo"},
  1123. },
  1124. ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
  1125. },
  1126. Status: v1.PodStatus{
  1127. StartTime: &startTime,
  1128. },
  1129. },
  1130. }
  1131. fakeRuntime.PodList = []*containertest.FakePod{
  1132. {Pod: &kubecontainer.Pod{
  1133. ID: "12345678",
  1134. Name: "bar",
  1135. Namespace: "new",
  1136. Containers: []*kubecontainer.Container{
  1137. {Name: "foo"},
  1138. },
  1139. }},
  1140. }
  1141. kubelet.podManager.SetPods(pods)
  1142. kubelet.HandlePodUpdates(pods)
  1143. status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
  1144. assert.True(t, found, "expected to found status for pod %q", pods[0].UID)
  1145. assert.NotEqual(t, v1.PodFailed, status.Phase)
  1146. }
  1147. func podWithUIDNameNs(uid types.UID, name, namespace string) *v1.Pod {
  1148. return &v1.Pod{
  1149. ObjectMeta: metav1.ObjectMeta{
  1150. UID: uid,
  1151. Name: name,
  1152. Namespace: namespace,
  1153. Annotations: map[string]string{},
  1154. },
  1155. }
  1156. }
  1157. func podWithUIDNameNsSpec(uid types.UID, name, namespace string, spec v1.PodSpec) *v1.Pod {
  1158. pod := podWithUIDNameNs(uid, name, namespace)
  1159. pod.Spec = spec
  1160. return pod
  1161. }
  1162. func TestDeletePodDirsForDeletedPods(t *testing.T) {
  1163. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1164. defer testKubelet.Cleanup()
  1165. kl := testKubelet.kubelet
  1166. pods := []*v1.Pod{
  1167. podWithUIDNameNs("12345678", "pod1", "ns"),
  1168. podWithUIDNameNs("12345679", "pod2", "ns"),
  1169. }
  1170. kl.podManager.SetPods(pods)
  1171. // Sync to create pod directories.
  1172. kl.HandlePodSyncs(kl.podManager.GetPods())
  1173. for i := range pods {
  1174. assert.True(t, dirExists(kl.getPodDir(pods[i].UID)), "Expected directory to exist for pod %d", i)
  1175. }
  1176. // Pod 1 has been deleted and no longer exists.
  1177. kl.podManager.SetPods([]*v1.Pod{pods[0]})
  1178. kl.HandlePodCleanups()
  1179. assert.True(t, dirExists(kl.getPodDir(pods[0].UID)), "Expected directory to exist for pod 0")
  1180. assert.False(t, dirExists(kl.getPodDir(pods[1].UID)), "Expected directory to be deleted for pod 1")
  1181. }
  1182. func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*v1.Pod, podsToCheck []*v1.Pod, shouldExist bool) {
  1183. kl := testKubelet.kubelet
  1184. kl.podManager.SetPods(pods)
  1185. kl.HandlePodSyncs(pods)
  1186. kl.HandlePodCleanups()
  1187. for i, pod := range podsToCheck {
  1188. exist := dirExists(kl.getPodDir(pod.UID))
  1189. assert.Equal(t, shouldExist, exist, "directory of pod %d", i)
  1190. }
  1191. }
  1192. func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
  1193. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1194. defer testKubelet.Cleanup()
  1195. kl := testKubelet.kubelet
  1196. pods := []*v1.Pod{
  1197. podWithUIDNameNs("12345678", "pod1", "ns"),
  1198. podWithUIDNameNs("12345679", "pod2", "ns"),
  1199. podWithUIDNameNs("12345680", "pod3", "ns"),
  1200. }
  1201. syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
  1202. // Pod 1 failed, and pod 2 succeeded. None of the pod directories should be
  1203. // deleted.
  1204. kl.statusManager.SetPodStatus(pods[1], v1.PodStatus{Phase: v1.PodFailed})
  1205. kl.statusManager.SetPodStatus(pods[2], v1.PodStatus{Phase: v1.PodSucceeded})
  1206. syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
  1207. }
func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	runningPod := &kubecontainer.Pod{
		ID: "12345678",
		Name: "pod1",
		Namespace: "ns",
	}
	apiPod := podWithUIDNameNs(runningPod.ID, runningPod.Name, runningPod.Namespace)

	// Sync once to create pod directory; confirm that the pod directory has
	// already been created.
	pods := []*v1.Pod{apiPod}
	syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, true)

	// Pretend the pod is deleted from apiserver, but is still active on the node.
	// The pod directory should not be removed.
	pods = []*v1.Pod{}
	testKubelet.fakeRuntime.PodList = []*containertest.FakePod{{Pod: runningPod, NetnsPath: ""}}
	syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, true)

	// The pod is deleted and also not active on the node. The pod directory
	// should be removed.
	pods = []*v1.Pod{}
	testKubelet.fakeRuntime.PodList = []*containertest.FakePod{}
	syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, false)
}
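
// TestGetPodsToSync verifies that getPodsToSync returns the pods whose work
// queue entries are due together with the pods that have exceeded their
// active deadline.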
func TestGetPodsToSync(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	clock := testKubelet.fakeClock
	pods := newTestPods(5)

	exceededActiveDeadlineSeconds := int64(30)
	notYetActiveDeadlineSeconds := int64(120)
	startTime := metav1.NewTime(clock.Now())
	pods[0].Status.StartTime = &startTime
	pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
	pods[1].Status.StartTime = &startTime
	pods[1].Spec.ActiveDeadlineSeconds = &notYetActiveDeadlineSeconds
	pods[2].Status.StartTime = &startTime
	pods[2].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds

	kubelet.podManager.SetPods(pods)
	kubelet.workQueue.Enqueue(pods[2].UID, 0)
	kubelet.workQueue.Enqueue(pods[3].UID, 30*time.Second)
	kubelet.workQueue.Enqueue(pods[4].UID, 2*time.Minute)

	clock.Step(1 * time.Minute)

	// After one minute, pods[2] and pods[3] are due in the work queue, and
	// pods[0] and pods[2] have exceeded their 30s active deadline; pods[4] is
	// not yet due and pods[1] is still within its deadline.
	expected := []*v1.Pod{pods[2], pods[3], pods[0]}
	podsToSync := kubelet.getPodsToSync()
	sort.Sort(podsByUID(expected))
	sort.Sort(podsByUID(podsToSync))
	assert.Equal(t, expected, podsToSync)
}
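
// TestGenerateAPIPodStatusWithSortedContainers verifies that the container
// statuses in the generated API pod status come back in the expected sorted
// order regardless of the order reported by the runtime.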
func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	numContainers := 10
	expectedOrder := []string{}
	cStatuses := []*kubecontainer.ContainerStatus{}
	specContainerList := []v1.Container{}
	for i := 0; i < numContainers; i++ {
		id := fmt.Sprintf("%v", i)
		containerName := fmt.Sprintf("%vcontainer", id)
		expectedOrder = append(expectedOrder, containerName)
		cStatus := &kubecontainer.ContainerStatus{
			ID: kubecontainer.BuildContainerID("test", id),
			Name: containerName,
		}
		// Rearrange the container statuses so they are not in the expected order.
		if i%2 == 0 {
			cStatuses = append(cStatuses, cStatus)
		} else {
			cStatuses = append([]*kubecontainer.ContainerStatus{cStatus}, cStatuses...)
		}
		specContainerList = append(specContainerList, v1.Container{Name: containerName})
	}
	pod := podWithUIDNameNs("uid1", "foo", "test")
	pod.Spec = v1.PodSpec{
		Containers: specContainerList,
	}
	status := &kubecontainer.PodStatus{
		ID: pod.UID,
		Name: pod.Name,
		Namespace: pod.Namespace,
		ContainerStatuses: cStatuses,
	}
	for i := 0; i < 5; i++ {
		apiStatus := kubelet.generateAPIPodStatus(pod, status)
		for i, c := range apiStatus.ContainerStatuses {
			if expectedOrder[i] != c.Name {
				t.Fatalf("Container status not sorted, expected %v at index %d, but found %v", expectedOrder[i], i, c.Name)
			}
		}
	}
}
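
// verifyContainerStatuses asserts that each container status has the expected
// State and LastTerminationState for its container name.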
func verifyContainerStatuses(t *testing.T, statuses []v1.ContainerStatus, state, lastTerminationState map[string]v1.ContainerState, message string) {
	for _, s := range statuses {
		assert.Equal(t, s.State, state[s.Name], "%s: state", message)
		assert.Equal(t, s.LastTerminationState, lastTerminationState[s.Name], "%s: last terminated state", message)
	}
}

// Test generateAPIPodStatus with different reason cache and old api pod status.
func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
	// The following waiting reason and message are generated in convertStatusToAPIStatus()
	startWaitingReason := "ContainerCreating"
	initWaitingReason := "PodInitializing"
	testTimestamp := time.Unix(123456789, 987654321)
	testErrorReason := fmt.Errorf("test-error")
	emptyContainerID := (&kubecontainer.ContainerID{}).String()
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	pod := podWithUIDNameNs("12345678", "foo", "new")
	pod.Spec = v1.PodSpec{RestartPolicy: v1.RestartPolicyOnFailure}
	podStatus := &kubecontainer.PodStatus{
		ID: pod.UID,
		Name: pod.Name,
		Namespace: pod.Namespace,
	}
	tests := []struct {
		containers []v1.Container
		statuses []*kubecontainer.ContainerStatus
		reasons map[string]error
		oldStatuses []v1.ContainerStatus
		expectedState map[string]v1.ContainerState
		// Only set expectedInitState when it is different from expectedState
		expectedInitState map[string]v1.ContainerState
		expectedLastTerminationState map[string]v1.ContainerState
	}{
		// For a container with no historical record, State should be Waiting and
		// LastTerminationState should be retrieved from the old status from the apiserver.
		{
			containers: []v1.Container{{Name: "without-old-record"}, {Name: "with-old-record"}},
			statuses: []*kubecontainer.ContainerStatus{},
			reasons: map[string]error{},
			oldStatuses: []v1.ContainerStatus{{
				Name: "with-old-record",
				LastTerminationState: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{}},
			}},
			expectedState: map[string]v1.ContainerState{
				"without-old-record": {Waiting: &v1.ContainerStateWaiting{
					Reason: startWaitingReason,
				}},
				"with-old-record": {Waiting: &v1.ContainerStateWaiting{
					Reason: startWaitingReason,
				}},
			},
			expectedInitState: map[string]v1.ContainerState{
				"without-old-record": {Waiting: &v1.ContainerStateWaiting{
					Reason: initWaitingReason,
				}},
				"with-old-record": {Waiting: &v1.ContainerStateWaiting{
					Reason: initWaitingReason,
				}},
			},
			expectedLastTerminationState: map[string]v1.ContainerState{
				"with-old-record": {Terminated: &v1.ContainerStateTerminated{}},
			},
		},
		// For a running container, State should be Running and LastTerminationState
		// should be retrieved from the latest terminated status.
		{
			containers: []v1.Container{{Name: "running"}},
			statuses: []*kubecontainer.ContainerStatus{
				{
					Name: "running",
					State: kubecontainer.ContainerStateRunning,
					StartedAt: testTimestamp,
				},
				{
					Name: "running",
					State: kubecontainer.ContainerStateExited,
					ExitCode: 1,
				},
			},
			reasons: map[string]error{},
			oldStatuses: []v1.ContainerStatus{},
			expectedState: map[string]v1.ContainerState{
				"running": {Running: &v1.ContainerStateRunning{
					StartedAt: metav1.NewTime(testTimestamp),
				}},
			},
			expectedLastTerminationState: map[string]v1.ContainerState{
				"running": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 1,
					ContainerID: emptyContainerID,
				}},
			},
		},
		// For a terminated container:
		// * If there is no recent start error record, State should be Terminated and
		//   LastTerminationState should be retrieved from the second latest terminated status;
		// * If there is a recent start error record, State should be Waiting and
		//   LastTerminationState should be retrieved from the latest terminated status;
		// * If ExitCode = 0 and the restart policy is RestartPolicyOnFailure, the container
		//   shouldn't be restarted. Whether or not there is a recent start error record,
		//   State should be Terminated and LastTerminationState should be retrieved from
		//   the second latest terminated status.
		{
			containers: []v1.Container{{Name: "without-reason"}, {Name: "with-reason"}},
			statuses: []*kubecontainer.ContainerStatus{
				{
					Name: "without-reason",
					State: kubecontainer.ContainerStateExited,
					ExitCode: 1,
				},
				{
					Name: "with-reason",
					State: kubecontainer.ContainerStateExited,
					ExitCode: 2,
				},
				{
					Name: "without-reason",
					State: kubecontainer.ContainerStateExited,
					ExitCode: 3,
				},
				{
					Name: "with-reason",
					State: kubecontainer.ContainerStateExited,
					ExitCode: 4,
				},
				{
					Name: "succeed",
					State: kubecontainer.ContainerStateExited,
					ExitCode: 0,
				},
				{
					Name: "succeed",
					State: kubecontainer.ContainerStateExited,
					ExitCode: 5,
				},
			},
			reasons: map[string]error{"with-reason": testErrorReason, "succeed": testErrorReason},
			oldStatuses: []v1.ContainerStatus{},
			expectedState: map[string]v1.ContainerState{
				"without-reason": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 1,
					ContainerID: emptyContainerID,
				}},
				"with-reason": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 0,
					ContainerID: emptyContainerID,
				}},
			},
			expectedLastTerminationState: map[string]v1.ContainerState{
				"without-reason": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 3,
					ContainerID: emptyContainerID,
				}},
				"with-reason": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 2,
					ContainerID: emptyContainerID,
				}},
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 5,
					ContainerID: emptyContainerID,
				}},
			},
		},
	}
	for i, test := range tests {
		kubelet.reasonCache = NewReasonCache()
		for n, e := range test.reasons {
			kubelet.reasonCache.add(pod.UID, n, e, "")
		}
		pod.Spec.Containers = test.containers
		pod.Status.ContainerStatuses = test.oldStatuses
		podStatus.ContainerStatuses = test.statuses
		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
		verifyContainerStatuses(t, apiStatus.ContainerStatuses, test.expectedState, test.expectedLastTerminationState, fmt.Sprintf("case %d", i))
	}

	// Everything should be the same for init containers
	for i, test := range tests {
		kubelet.reasonCache = NewReasonCache()
		for n, e := range test.reasons {
			kubelet.reasonCache.add(pod.UID, n, e, "")
		}
		pod.Spec.InitContainers = test.containers
		pod.Status.InitContainerStatuses = test.oldStatuses
		podStatus.ContainerStatuses = test.statuses
		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
		expectedState := test.expectedState
		if test.expectedInitState != nil {
			expectedState = test.expectedInitState
		}
		verifyContainerStatuses(t, apiStatus.InitContainerStatuses, expectedState, test.expectedLastTerminationState, fmt.Sprintf("case %d", i))
	}
}

// Test generateAPIPodStatus with different restart policies.
func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) {
	testErrorReason := fmt.Errorf("test-error")
	emptyContainerID := (&kubecontainer.ContainerID{}).String()
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	pod := podWithUIDNameNs("12345678", "foo", "new")
	containers := []v1.Container{{Name: "succeed"}, {Name: "failed"}}
	podStatus := &kubecontainer.PodStatus{
		ID: pod.UID,
		Name: pod.Name,
		Namespace: pod.Namespace,
		ContainerStatuses: []*kubecontainer.ContainerStatus{
			{
				Name: "succeed",
				State: kubecontainer.ContainerStateExited,
				ExitCode: 0,
			},
			{
				Name: "failed",
				State: kubecontainer.ContainerStateExited,
				ExitCode: 1,
			},
			{
				Name: "succeed",
				State: kubecontainer.ContainerStateExited,
				ExitCode: 2,
			},
			{
				Name: "failed",
				State: kubecontainer.ContainerStateExited,
				ExitCode: 3,
			},
		},
	}
	kubelet.reasonCache.add(pod.UID, "succeed", testErrorReason, "")
	kubelet.reasonCache.add(pod.UID, "failed", testErrorReason, "")
	for c, test := range []struct {
		restartPolicy v1.RestartPolicy
		expectedState map[string]v1.ContainerState
		expectedLastTerminationState map[string]v1.ContainerState
		// Only set expectedInitState when it is different from expectedState
		expectedInitState map[string]v1.ContainerState
		// Only set expectedInitLastTerminationState when it is different from expectedLastTerminationState
		expectedInitLastTerminationState map[string]v1.ContainerState
	}{
		{
			restartPolicy: v1.RestartPolicyNever,
			expectedState: map[string]v1.ContainerState{
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 0,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 1,
					ContainerID: emptyContainerID,
				}},
			},
			expectedLastTerminationState: map[string]v1.ContainerState{
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 2,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 3,
					ContainerID: emptyContainerID,
				}},
			},
		},
		{
			restartPolicy: v1.RestartPolicyOnFailure,
			expectedState: map[string]v1.ContainerState{
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 0,
					ContainerID: emptyContainerID,
				}},
				"failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
			},
			expectedLastTerminationState: map[string]v1.ContainerState{
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 2,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 1,
					ContainerID: emptyContainerID,
				}},
			},
		},
		{
			restartPolicy: v1.RestartPolicyAlways,
			expectedState: map[string]v1.ContainerState{
				"succeed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
				"failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
			},
			expectedLastTerminationState: map[string]v1.ContainerState{
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 0,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 1,
					ContainerID: emptyContainerID,
				}},
			},
			// If an init container is terminated with exit code 0, it won't be restarted even when the
			// restart policy is RestartPolicyAlways.
			expectedInitState: map[string]v1.ContainerState{
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 0,
					ContainerID: emptyContainerID,
				}},
				"failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
			},
			expectedInitLastTerminationState: map[string]v1.ContainerState{
				"succeed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 2,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &v1.ContainerStateTerminated{
					ExitCode: 1,
					ContainerID: emptyContainerID,
				}},
			},
		},
	} {
		pod.Spec.RestartPolicy = test.restartPolicy

		// Test normal containers
		pod.Spec.Containers = containers
		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
		expectedState, expectedLastTerminationState := test.expectedState, test.expectedLastTerminationState
		verifyContainerStatuses(t, apiStatus.ContainerStatuses, expectedState, expectedLastTerminationState, fmt.Sprintf("case %d", c))
		pod.Spec.Containers = nil

		// Test init containers
		pod.Spec.InitContainers = containers
		apiStatus = kubelet.generateAPIPodStatus(pod, podStatus)
		if test.expectedInitState != nil {
			expectedState = test.expectedInitState
		}
		if test.expectedInitLastTerminationState != nil {
			expectedLastTerminationState = test.expectedInitLastTerminationState
		}
		verifyContainerStatuses(t, apiStatus.InitContainerStatuses, expectedState, expectedLastTerminationState, fmt.Sprintf("case %d", c))
		pod.Spec.InitContainers = nil
	}
}

// testPodAdmitHandler is a lifecycle.PodAdmitHandler for testing.
type testPodAdmitHandler struct {
	// list of pods to reject.
	podsToReject []*v1.Pod
}

// Admit rejects all pods in the podsToReject list with a matching UID.
func (a *testPodAdmitHandler) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
	for _, podToReject := range a.podsToReject {
		if podToReject.UID == attrs.Pod.UID {
			return lifecycle.PodAdmitResult{Admit: false, Reason: "Rejected", Message: "Pod is rejected"}
		}
	}
	return lifecycle.PodAdmitResult{Admit: true}
}

// TestHandlePodAdditionsInvokesPodAdmitHandlers verifies that the kubelet invokes
// an admission handler during HandlePodAdditions.
func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet
	kl.nodeInfo = testNodeInfo{nodes: []*v1.Node{
		{
			ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
			Status: v1.NodeStatus{
				Allocatable: v1.ResourceList{
					v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}

	pods := []*v1.Pod{
		{
			ObjectMeta: metav1.ObjectMeta{
				UID: "123456789",
				Name: "podA",
				Namespace: "foo",
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{
				UID: "987654321",
				Name: "podB",
				Namespace: "foo",
			},
		},
	}
	podToReject := pods[0]
	podToAdmit := pods[1]
	podsToReject := []*v1.Pod{podToReject}

	kl.admitHandlers.AddPodAdmitHandler(&testPodAdmitHandler{podsToReject: podsToReject})

	kl.HandlePodAdditions(pods)

	// Check pod status stored in the status map.
	checkPodStatus(t, kl, podToReject, v1.PodFailed)
	checkPodStatus(t, kl, podToAdmit, v1.PodPending)
}

// testPodSyncLoopHandler is a lifecycle.PodSyncLoopHandler that is used for testing.
type testPodSyncLoopHandler struct {
	// list of pods to sync
	podsToSync []*v1.Pod
}

// ShouldSync evaluates if the pod should be synced from the kubelet.
func (a *testPodSyncLoopHandler) ShouldSync(pod *v1.Pod) bool {
	for _, podToSync := range a.podsToSync {
		if podToSync.UID == pod.UID {
			return true
		}
	}
	return false
}

// TestGetPodsToSyncInvokesPodSyncLoopHandlers ensures that the get pods to sync routine invokes the handler.
func TestGetPodsToSyncInvokesPodSyncLoopHandlers(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	pods := newTestPods(5)
	expected := []*v1.Pod{pods[0]}
	kubelet.AddPodSyncLoopHandler(&testPodSyncLoopHandler{expected})
	kubelet.podManager.SetPods(pods)

	podsToSync := kubelet.getPodsToSync()
	sort.Sort(podsByUID(expected))
	sort.Sort(podsByUID(podsToSync))
	assert.Equal(t, expected, podsToSync)
}

// testPodSyncHandler is a lifecycle.PodSyncHandler that is used for testing.
type testPodSyncHandler struct {
	// list of pods to evict.
	podsToEvict []*v1.Pod
	// the reason for the eviction
	reason string
	// the message for the eviction
	message string
}

// ShouldEvict evaluates if the pod should be evicted from the kubelet.
func (a *testPodSyncHandler) ShouldEvict(pod *v1.Pod) lifecycle.ShouldEvictResponse {
	for _, podToEvict := range a.podsToEvict {
		if podToEvict.UID == pod.UID {
			return lifecycle.ShouldEvictResponse{Evict: true, Reason: a.reason, Message: a.message}
		}
	}
	return lifecycle.ShouldEvictResponse{Evict: false}
}

// TestGenerateAPIPodStatusInvokesPodSyncHandlers verifies that generateAPIPodStatus
// invokes the pod sync handlers and reports the resulting eviction status.
func TestGenerateAPIPodStatusInvokesPodSyncHandlers(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	pod := newTestPods(1)[0]
	podsToEvict := []*v1.Pod{pod}
	kubelet.AddPodSyncHandler(&testPodSyncHandler{podsToEvict, "Evicted", "because"})
	status := &kubecontainer.PodStatus{
		ID: pod.UID,
		Name: pod.Name,
		Namespace: pod.Namespace,
	}
	apiStatus := kubelet.generateAPIPodStatus(pod, status)
	require.Equal(t, v1.PodFailed, apiStatus.Phase)
	require.Equal(t, "Evicted", apiStatus.Reason)
	require.Equal(t, "because", apiStatus.Message)
}
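
// TestSyncPodKillPod verifies that syncing a pod with the SyncPodKill update type
// kills the pod and records the status returned by the provided PodStatusFunc.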
func TestSyncPodKillPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: "12345678",
			Name: "bar",
			Namespace: "foo",
		},
	}
	pods := []*v1.Pod{pod}
	kl.podManager.SetPods(pods)
	gracePeriodOverride := int64(0)
	err := kl.syncPod(syncPodOptions{
		pod: pod,
		podStatus: &kubecontainer.PodStatus{},
		updateType: kubetypes.SyncPodKill,
		killPodOptions: &KillPodOptions{
			PodStatusFunc: func(p *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus {
				return v1.PodStatus{
					Phase: v1.PodFailed,
					Reason: "reason",
					Message: "message",
				}
			},
			PodTerminationGracePeriodSecondsOverride: &gracePeriodOverride,
		},
	})
	require.NoError(t, err)

	// Check pod status stored in the status map.
	checkPodStatus(t, kl, pod, v1.PodFailed)
}
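
// waitForVolumeUnmount polls the volume manager with exponential backoff until no
// volumes are reported as mounted for the given pod, and returns an error if any
// volumes remain mounted after the retries are exhausted.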
func waitForVolumeUnmount(
	volumeManager kubeletvolume.VolumeManager,
	pod *v1.Pod) error {
	var podVolumes kubecontainer.VolumeMap
	err := retryWithExponentialBackOff(
		time.Duration(50*time.Millisecond),
		func() (bool, error) {
			// Verify that the volumes have been unmounted.
			podVolumes = volumeManager.GetMountedVolumesForPod(
				util.GetUniquePodName(pod))
			if len(podVolumes) != 0 {
				return false, nil
			}
			return true, nil
		},
	)
	if err != nil {
		return fmt.Errorf(
			"Expected volumes to be unmounted. But some volumes are still mounted: %#v", podVolumes)
	}
	return nil
}
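
// waitForVolumeDetach polls the volume manager with exponential backoff until the
// given volume is no longer reported as attached, and returns an error otherwise.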
func waitForVolumeDetach(
	volumeName v1.UniqueVolumeName,
	volumeManager kubeletvolume.VolumeManager) error {
	err := retryWithExponentialBackOff(
		time.Duration(50*time.Millisecond),
		func() (bool, error) {
			// Verify that the volume has been detached.
			volumeAttached := volumeManager.VolumeIsAttached(volumeName)
			return !volumeAttached, nil
		},
	)
	if err != nil {
		return fmt.Errorf(
			"expected volume %q to be detached, but it is still attached", volumeName)
	}
	return nil
}
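
// retryWithExponentialBackOff retries fn with exponential backoff (factor 3, up to
// 6 steps) starting from initialDuration, until fn returns true or an error, or the
// retries are exhausted. For example (conditionMet is a hypothetical helper):
//
//	err := retryWithExponentialBackOff(50*time.Millisecond, func() (bool, error) {
//		return conditionMet(), nil
//	})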
func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
	backoff := wait.Backoff{
		Duration: initialDuration,
		Factor: 3,
		Jitter: 0,
		Steps: 6,
	}
	return wait.ExponentialBackoff(backoff, fn)
}
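
// simulateVolumeInUseUpdate periodically marks the given volume as reported in use
// until stopCh is closed, standing in for the kubelet's periodic node status update
// that reports volumes in use to the volume manager.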
func simulateVolumeInUseUpdate(
	volumeName v1.UniqueVolumeName,
	stopCh <-chan struct{},
	volumeManager kubeletvolume.VolumeManager) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			volumeManager.MarkVolumesAsReportedInUse(
				[]v1.UniqueVolumeName{volumeName})
		case <-stopCh:
			return
		}
	}
}
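
// runVolumeManager starts the kubelet's volume manager in a goroutine and returns
// the stop channel used to terminate it.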
func runVolumeManager(kubelet *Kubelet) chan struct{} {
	stopCh := make(chan struct{})
	go kubelet.volumeManager.Run(kubelet.sourcesReady, stopCh)
	return stopCh
}
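
// forEachFeatureGate runs the given test function as a subtest once with each of the
// listed feature gates enabled and once disabled, restoring the original gate value
// after each run. For example (SomeFeature is a hypothetical gate):
//
//	forEachFeatureGate(t, []featuregate.Feature{features.SomeFeature}, func(t *testing.T) {
//		// test body that depends on the gate
//	})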
func forEachFeatureGate(t *testing.T, fs []featuregate.Feature, tf func(t *testing.T)) {
	for _, fg := range fs {
		for _, f := range []bool{true, false} {
			func() {
				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, fg, f)()
				t.Run(fmt.Sprintf("%v(%t)", fg, f), tf)
			}()
		}
	}
}

// podsByUID implements sort.Interface, ordering pods by UID.
type podsByUID []*v1.Pod

func (p podsByUID) Len() int { return len(p) }
func (p podsByUID) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p podsByUID) Less(i, j int) bool { return p[i].UID < p[j].UID }