kubelet_node_status_test.go

  1. /*
  2. Copyright 2016 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package kubelet
  14. import (
  15. "encoding/json"
  16. "fmt"
  17. "net"
  18. goruntime "runtime"
  19. "sort"
  20. "strconv"
  21. "sync/atomic"
  22. "testing"
  23. "time"
  24. "github.com/stretchr/testify/assert"
  25. "github.com/stretchr/testify/require"
  26. cadvisorapi "github.com/google/cadvisor/info/v1"
  27. v1 "k8s.io/api/core/v1"
  28. apiequality "k8s.io/apimachinery/pkg/api/equality"
  29. apierrors "k8s.io/apimachinery/pkg/api/errors"
  30. "k8s.io/apimachinery/pkg/api/resource"
  31. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  32. "k8s.io/apimachinery/pkg/runtime"
  33. "k8s.io/apimachinery/pkg/util/diff"
  34. "k8s.io/apimachinery/pkg/util/rand"
  35. "k8s.io/apimachinery/pkg/util/strategicpatch"
  36. "k8s.io/apimachinery/pkg/util/uuid"
  37. "k8s.io/apimachinery/pkg/util/wait"
  38. utilfeature "k8s.io/apiserver/pkg/util/feature"
  39. clientset "k8s.io/client-go/kubernetes"
  40. "k8s.io/client-go/kubernetes/fake"
  41. "k8s.io/client-go/rest"
  42. core "k8s.io/client-go/testing"
  43. "k8s.io/component-base/featuregate"
  44. featuregatetesting "k8s.io/component-base/featuregate/testing"
  45. "k8s.io/kubernetes/pkg/features"
  46. kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
  47. cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
  48. "k8s.io/kubernetes/pkg/kubelet/cm"
  49. kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
  50. "k8s.io/kubernetes/pkg/kubelet/nodestatus"
  51. "k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
  52. kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
  53. schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
  54. taintutil "k8s.io/kubernetes/pkg/util/taints"
  55. "k8s.io/kubernetes/pkg/version"
  56. "k8s.io/kubernetes/pkg/volume/util"
  57. )
  58. const (
  59. maxImageTagsForTest = 20
  60. )
  62. // generateTestingImageLists generates a randomly populated image list and the corresponding expectedImageList.
  62. func generateTestingImageLists(count int, maxImages int) ([]kubecontainer.Image, []v1.ContainerImage) {
  64. // imageList is a randomly generated image list
  64. var imageList []kubecontainer.Image
  65. for ; count > 0; count-- {
  66. imageItem := kubecontainer.Image{
  67. ID: string(uuid.NewUUID()),
  68. RepoTags: generateImageTags(),
  69. Size: rand.Int63nRange(minImgSize, maxImgSize+1),
  70. }
  71. imageList = append(imageList, imageItem)
  72. }
  73. expectedImageList := makeExpectedImageList(imageList, maxImages)
  74. return imageList, expectedImageList
  75. }
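// makeExpectedImageList builds the []v1.ContainerImage the kubelet is expected to report for imageList:
// sorted by size, with at most MaxNamesPerImageInNodeStatus names per image, and limited to maxImages entries.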
  76. func makeExpectedImageList(imageList []kubecontainer.Image, maxImages int) []v1.ContainerImage {
  78. // expectedImageList is generated from imageList according to size and maxImages
  78. // 1. sort the imageList by size
  79. sort.Sort(sliceutils.ByImageSize(imageList))
  80. // 2. convert sorted imageList to v1.ContainerImage list
  81. var expectedImageList []v1.ContainerImage
  82. for _, kubeImage := range imageList {
  83. apiImage := v1.ContainerImage{
  84. Names: kubeImage.RepoTags[0:nodestatus.MaxNamesPerImageInNodeStatus],
  85. SizeBytes: kubeImage.Size,
  86. }
  87. expectedImageList = append(expectedImageList, apiImage)
  88. }
  90. // 3. return only the top maxImages images from expectedImageList
  90. if maxImages == -1 { // -1 means no limit
  91. return expectedImageList
  92. }
  93. return expectedImageList[0:maxImages]
  94. }
  95. func generateImageTags() []string {
  96. var tagList []string
  97. // Generate > MaxNamesPerImageInNodeStatus tags so that the test can verify
  98. // that the kubelet reports up to MaxNamesPerImageInNodeStatus tags.
  99. count := rand.IntnRange(nodestatus.MaxNamesPerImageInNodeStatus+1, maxImageTagsForTest+1)
  100. for ; count > 0; count-- {
  101. tagList = append(tagList, "k8s.gcr.io:v"+strconv.Itoa(count))
  102. }
  103. return tagList
  104. }
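// applyNodeStatusPatch applies a strategic merge patch, as produced by a node status update,
// to originalNode and returns the resulting node object.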
  105. func applyNodeStatusPatch(originalNode *v1.Node, patch []byte) (*v1.Node, error) {
  106. original, err := json.Marshal(originalNode)
  107. if err != nil {
  108. return nil, fmt.Errorf("failed to marshal original node %#v: %v", originalNode, err)
  109. }
  110. updated, err := strategicpatch.StrategicMergePatch(original, patch, v1.Node{})
  111. if err != nil {
  112. return nil, fmt.Errorf("failed to apply strategic merge patch %q on node %#v: %v",
  113. patch, originalNode, err)
  114. }
  115. updatedNode := &v1.Node{}
  116. if err := json.Unmarshal(updated, updatedNode); err != nil {
  117. return nil, fmt.Errorf("failed to unmarshal updated node %q: %v", updated, err)
  118. }
  119. return updatedNode, nil
  120. }
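// notImplemented is a catch-all reactor that fails any action the test did not explicitly handle.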
  121. func notImplemented(action core.Action) (bool, runtime.Object, error) {
  122. return true, nil, fmt.Errorf("no reaction implemented for %s", action)
  123. }
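// addNotImplatedReaction installs notImplemented as a wildcard reactor on the fake clientset.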
  124. func addNotImplatedReaction(kubeClient *fake.Clientset) {
  125. if kubeClient == nil {
  126. return
  127. }
  128. kubeClient.AddReactor("*", "*", notImplemented)
  129. }
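// localCM embeds a ContainerManager and overrides the node allocatable reservation
// and capacity that it reports to the node status setters.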
  130. type localCM struct {
  131. cm.ContainerManager
  132. allocatableReservation v1.ResourceList
  133. capacity v1.ResourceList
  134. }
  135. func (lcm *localCM) GetNodeAllocatableReservation() v1.ResourceList {
  136. return lcm.allocatableReservation
  137. }
  138. func (lcm *localCM) GetCapacity() v1.ResourceList {
  139. return lcm.capacity
  140. }
  141. // sortableNodeAddress is a type for sorting []v1.NodeAddress
  142. type sortableNodeAddress []v1.NodeAddress
  143. func (s sortableNodeAddress) Len() int { return len(s) }
  144. func (s sortableNodeAddress) Less(i, j int) bool {
  145. return (string(s[i].Type) + s[i].Address) < (string(s[j].Type) + s[j].Address)
  146. }
  147. func (s sortableNodeAddress) Swap(i, j int) { s[j], s[i] = s[i], s[j] }
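// TestUpdateNewNodeStatus verifies that the first status update for a new node patches the expected
// conditions, node info, capacity, allocatable resources, addresses, and image list
// (optionally truncated by nodeStatusMaxImages).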
  148. func TestUpdateNewNodeStatus(t *testing.T) {
  149. cases := []struct {
  150. desc string
  151. nodeStatusMaxImages int32
  152. }{
  153. {
  154. desc: "5 image limit",
  155. nodeStatusMaxImages: 5,
  156. },
  157. {
  158. desc: "no image limit",
  159. nodeStatusMaxImages: -1,
  160. },
  161. }
  162. for _, tc := range cases {
  163. t.Run(tc.desc, func(t *testing.T) {
  164. // generate one more in inputImageList than we configure the Kubelet to report,
  165. // or 5 images if unlimited
  166. numTestImages := int(tc.nodeStatusMaxImages) + 1
  167. if tc.nodeStatusMaxImages == -1 {
  168. numTestImages = 5
  169. }
  170. inputImageList, expectedImageList := generateTestingImageLists(numTestImages, int(tc.nodeStatusMaxImages))
  171. testKubelet := newTestKubeletWithImageList(
  172. t, inputImageList, false /* controllerAttachDetachEnabled */, true /*initFakeVolumePlugin*/)
  173. defer testKubelet.Cleanup()
  174. kubelet := testKubelet.kubelet
  175. kubelet.nodeStatusMaxImages = tc.nodeStatusMaxImages
  176. kubelet.kubeClient = nil // ensure only the heartbeat client is used
  177. kubelet.containerManager = &localCM{
  178. ContainerManager: cm.NewStubContainerManager(),
  179. allocatableReservation: v1.ResourceList{
  180. v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
  181. v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
  182. v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
  183. },
  184. capacity: v1.ResourceList{
  185. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  186. v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
  187. v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  188. },
  189. }
  190. // Since this test retroactively overrides the stub container manager,
  191. // we have to regenerate default status setters.
  192. kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
  193. kubeClient := testKubelet.fakeKubeClient
  194. existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
  195. kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
  196. machineInfo := &cadvisorapi.MachineInfo{
  197. MachineID: "123",
  198. SystemUUID: "abc",
  199. BootID: "1b3",
  200. NumCores: 2,
  201. MemoryCapacity: 10E9, // 10G
  202. }
  203. kubelet.machineInfo = machineInfo
  204. expectedNode := &v1.Node{
  205. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  206. Spec: v1.NodeSpec{},
  207. Status: v1.NodeStatus{
  208. Conditions: []v1.NodeCondition{
  209. {
  210. Type: v1.NodeMemoryPressure,
  211. Status: v1.ConditionFalse,
  212. Reason: "KubeletHasSufficientMemory",
  213. Message: fmt.Sprintf("kubelet has sufficient memory available"),
  214. LastHeartbeatTime: metav1.Time{},
  215. LastTransitionTime: metav1.Time{},
  216. },
  217. {
  218. Type: v1.NodeDiskPressure,
  219. Status: v1.ConditionFalse,
  220. Reason: "KubeletHasNoDiskPressure",
  221. Message: fmt.Sprintf("kubelet has no disk pressure"),
  222. LastHeartbeatTime: metav1.Time{},
  223. LastTransitionTime: metav1.Time{},
  224. },
  225. {
  226. Type: v1.NodePIDPressure,
  227. Status: v1.ConditionFalse,
  228. Reason: "KubeletHasSufficientPID",
  229. Message: fmt.Sprintf("kubelet has sufficient PID available"),
  230. LastHeartbeatTime: metav1.Time{},
  231. LastTransitionTime: metav1.Time{},
  232. },
  233. {
  234. Type: v1.NodeReady,
  235. Status: v1.ConditionTrue,
  236. Reason: "KubeletReady",
  237. Message: fmt.Sprintf("kubelet is posting ready status"),
  238. LastHeartbeatTime: metav1.Time{},
  239. LastTransitionTime: metav1.Time{},
  240. },
  241. },
  242. NodeInfo: v1.NodeSystemInfo{
  243. MachineID: "123",
  244. SystemUUID: "abc",
  245. BootID: "1b3",
  246. KernelVersion: cadvisortest.FakeKernelVersion,
  247. OSImage: cadvisortest.FakeContainerOsVersion,
  248. OperatingSystem: goruntime.GOOS,
  249. Architecture: goruntime.GOARCH,
  250. ContainerRuntimeVersion: "test://1.5.0",
  251. KubeletVersion: version.Get().String(),
  252. KubeProxyVersion: version.Get().String(),
  253. },
  254. Capacity: v1.ResourceList{
  255. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  256. v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
  257. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  258. v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  259. },
  260. Allocatable: v1.ResourceList{
  261. v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
  262. v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
  263. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  264. v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
  265. },
  266. Addresses: []v1.NodeAddress{
  267. {Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  268. {Type: v1.NodeHostName, Address: testKubeletHostname},
  269. },
  270. Images: expectedImageList,
  271. },
  272. }
  273. kubelet.updateRuntimeUp()
  274. assert.NoError(t, kubelet.updateNodeStatus())
  275. actions := kubeClient.Actions()
  276. require.Len(t, actions, 2)
  277. require.True(t, actions[1].Matches("patch", "nodes"))
  278. require.Equal(t, actions[1].GetSubresource(), "status")
  279. updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
  280. assert.NoError(t, err)
  281. for i, cond := range updatedNode.Status.Conditions {
  282. assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
  283. assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
  284. updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
  285. updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
  286. }
  287. // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
  288. assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
  289. "NodeReady should be the last condition")
  290. assert.Len(t, updatedNode.Status.Images, len(expectedImageList))
  291. assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
  292. })
  293. }
  294. }
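// TestUpdateExistingNodeStatus verifies that a status update for a node with existing conditions
// refreshes LastHeartbeatTime while preserving LastTransitionTime, and patches the remaining status fields.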
  295. func TestUpdateExistingNodeStatus(t *testing.T) {
  296. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  297. defer testKubelet.Cleanup()
  298. kubelet := testKubelet.kubelet
  299. kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
  300. kubelet.kubeClient = nil // ensure only the heartbeat client is used
  301. kubelet.containerManager = &localCM{
  302. ContainerManager: cm.NewStubContainerManager(),
  303. allocatableReservation: v1.ResourceList{
  304. v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
  305. v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
  306. },
  307. capacity: v1.ResourceList{
  308. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  309. v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
  310. v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  311. },
  312. }
  313. // Since this test retroactively overrides the stub container manager,
  314. // we have to regenerate default status setters.
  315. kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
  316. kubeClient := testKubelet.fakeKubeClient
  317. existingNode := v1.Node{
  318. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  319. Spec: v1.NodeSpec{},
  320. Status: v1.NodeStatus{
  321. Conditions: []v1.NodeCondition{
  322. {
  323. Type: v1.NodeMemoryPressure,
  324. Status: v1.ConditionFalse,
  325. Reason: "KubeletHasSufficientMemory",
  326. Message: fmt.Sprintf("kubelet has sufficient memory available"),
  327. LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  328. LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  329. },
  330. {
  331. Type: v1.NodeDiskPressure,
  332. Status: v1.ConditionFalse,
  333. Reason: "KubeletHasSufficientDisk",
  334. Message: fmt.Sprintf("kubelet has sufficient disk space available"),
  335. LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  336. LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  337. },
  338. {
  339. Type: v1.NodePIDPressure,
  340. Status: v1.ConditionFalse,
  341. Reason: "KubeletHasSufficientPID",
  342. Message: fmt.Sprintf("kubelet has sufficient PID available"),
  343. LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  344. LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  345. },
  346. {
  347. Type: v1.NodeReady,
  348. Status: v1.ConditionTrue,
  349. Reason: "KubeletReady",
  350. Message: fmt.Sprintf("kubelet is posting ready status"),
  351. LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  352. LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  353. },
  354. },
  355. Capacity: v1.ResourceList{
  356. v1.ResourceCPU: *resource.NewMilliQuantity(3000, resource.DecimalSI),
  357. v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
  358. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  359. },
  360. Allocatable: v1.ResourceList{
  361. v1.ResourceCPU: *resource.NewMilliQuantity(2800, resource.DecimalSI),
  362. v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
  363. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  364. },
  365. },
  366. }
  367. kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
  368. machineInfo := &cadvisorapi.MachineInfo{
  369. MachineID: "123",
  370. SystemUUID: "abc",
  371. BootID: "1b3",
  372. NumCores: 2,
  373. MemoryCapacity: 20E9,
  374. }
  375. kubelet.machineInfo = machineInfo
  376. expectedNode := &v1.Node{
  377. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  378. Spec: v1.NodeSpec{},
  379. Status: v1.NodeStatus{
  380. Conditions: []v1.NodeCondition{
  381. {
  382. Type: v1.NodeMemoryPressure,
  383. Status: v1.ConditionFalse,
  384. Reason: "KubeletHasSufficientMemory",
  385. Message: fmt.Sprintf("kubelet has sufficient memory available"),
  386. LastHeartbeatTime: metav1.Time{},
  387. LastTransitionTime: metav1.Time{},
  388. },
  389. {
  390. Type: v1.NodeDiskPressure,
  391. Status: v1.ConditionFalse,
  392. Reason: "KubeletHasSufficientDisk",
  393. Message: fmt.Sprintf("kubelet has sufficient disk space available"),
  394. LastHeartbeatTime: metav1.Time{},
  395. LastTransitionTime: metav1.Time{},
  396. },
  397. {
  398. Type: v1.NodePIDPressure,
  399. Status: v1.ConditionFalse,
  400. Reason: "KubeletHasSufficientPID",
  401. Message: fmt.Sprintf("kubelet has sufficient PID available"),
  402. LastHeartbeatTime: metav1.Time{},
  403. LastTransitionTime: metav1.Time{},
  404. },
  405. {
  406. Type: v1.NodeReady,
  407. Status: v1.ConditionTrue,
  408. Reason: "KubeletReady",
  409. Message: fmt.Sprintf("kubelet is posting ready status"),
  410. LastHeartbeatTime: metav1.Time{}, // placeholder
  411. LastTransitionTime: metav1.Time{}, // placeholder
  412. },
  413. },
  414. NodeInfo: v1.NodeSystemInfo{
  415. MachineID: "123",
  416. SystemUUID: "abc",
  417. BootID: "1b3",
  418. KernelVersion: cadvisortest.FakeKernelVersion,
  419. OSImage: cadvisortest.FakeContainerOsVersion,
  420. OperatingSystem: goruntime.GOOS,
  421. Architecture: goruntime.GOARCH,
  422. ContainerRuntimeVersion: "test://1.5.0",
  423. KubeletVersion: version.Get().String(),
  424. KubeProxyVersion: version.Get().String(),
  425. },
  426. Capacity: v1.ResourceList{
  427. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  428. v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
  429. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  430. v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  431. },
  432. Allocatable: v1.ResourceList{
  433. v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
  434. v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
  435. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  436. v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  437. },
  438. Addresses: []v1.NodeAddress{
  439. {Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  440. {Type: v1.NodeHostName, Address: testKubeletHostname},
  441. },
  442. // images will be sorted from max to min in node status.
  443. Images: []v1.ContainerImage{
  444. {
  445. Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
  446. SizeBytes: 123,
  447. },
  448. {
  449. Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
  450. SizeBytes: 456,
  451. },
  452. },
  453. },
  454. }
  455. kubelet.updateRuntimeUp()
  456. assert.NoError(t, kubelet.updateNodeStatus())
  457. actions := kubeClient.Actions()
  458. assert.Len(t, actions, 2)
  459. assert.IsType(t, core.PatchActionImpl{}, actions[1])
  460. patchAction := actions[1].(core.PatchActionImpl)
  461. updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch())
  462. require.NoError(t, err)
  463. for i, cond := range updatedNode.Status.Conditions {
  464. old := metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time
  465. // Expect LastHeartbeatTime to be updated to now, while LastTransitionTime remains the same.
  466. assert.NotEqual(t, old, cond.LastHeartbeatTime.Rfc3339Copy().UTC(), "LastHeartbeatTime for condition %v", cond.Type)
  467. assert.EqualValues(t, old, cond.LastTransitionTime.Rfc3339Copy().UTC(), "LastTransitionTime for condition %v", cond.Type)
  468. updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
  469. updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
  470. }
  471. // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
  472. assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
  473. "NodeReady should be the last condition")
  474. assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
  475. }
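// TestUpdateExistingNodeStatusTimeout points the heartbeat client at a server that accepts connections
// but never responds, and verifies that updateNodeStatus retries, returns an error instead of hanging,
// and triggers the onRepeatedHeartbeatFailure callback.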
  476. func TestUpdateExistingNodeStatusTimeout(t *testing.T) {
  477. attempts := int64(0)
  478. failureCallbacks := int64(0)
  479. // set up a listener that hangs connections
  480. ln, err := net.Listen("tcp", "127.0.0.1:0")
  481. assert.NoError(t, err)
  482. defer ln.Close()
  483. go func() {
  484. // accept connections and just let them hang
  485. for {
  486. _, err := ln.Accept()
  487. if err != nil {
  488. t.Log(err)
  489. return
  490. }
  491. t.Log("accepted connection")
  492. atomic.AddInt64(&attempts, 1)
  493. }
  494. }()
  495. config := &rest.Config{
  496. Host: "http://" + ln.Addr().String(),
  497. QPS: -1,
  498. Timeout: time.Second,
  499. }
  500. assert.NoError(t, err)
  501. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  502. defer testKubelet.Cleanup()
  503. kubelet := testKubelet.kubelet
  504. kubelet.kubeClient = nil // ensure only the heartbeat client is used
  505. kubelet.heartbeatClient, err = clientset.NewForConfig(config)
  506. kubelet.onRepeatedHeartbeatFailure = func() {
  507. atomic.AddInt64(&failureCallbacks, 1)
  508. }
  509. kubelet.containerManager = &localCM{
  510. ContainerManager: cm.NewStubContainerManager(),
  511. allocatableReservation: v1.ResourceList{
  512. v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
  513. v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
  514. },
  515. capacity: v1.ResourceList{
  516. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  517. v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
  518. },
  519. }
  520. // should return an error, but not hang
  521. assert.Error(t, kubelet.updateNodeStatus())
  522. // should have attempted multiple times
  523. if actualAttempts := atomic.LoadInt64(&attempts); actualAttempts < nodeStatusUpdateRetry {
  524. t.Errorf("Expected at least %d attempts, got %d", nodeStatusUpdateRetry, actualAttempts)
  525. }
  526. // should have gotten multiple failure callbacks
  527. if actualFailureCallbacks := atomic.LoadInt64(&failureCallbacks); actualFailureCallbacks < (nodeStatusUpdateRetry - 1) {
  528. t.Errorf("Expected %d failure callbacks, got %d", (nodeStatusUpdateRetry - 1), actualFailureCallbacks)
  529. }
  530. }
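// TestUpdateNodeStatusWithRuntimeStateError verifies how the NodeReady condition responds to stale,
// failing, missing, or partially ready container runtime status.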
  531. func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
  532. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  533. defer testKubelet.Cleanup()
  534. kubelet := testKubelet.kubelet
  535. kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
  536. kubelet.kubeClient = nil // ensure only the heartbeat client is used
  537. kubelet.containerManager = &localCM{
  538. ContainerManager: cm.NewStubContainerManager(),
  539. allocatableReservation: v1.ResourceList{
  540. v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
  541. v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
  542. v1.ResourceEphemeralStorage: *resource.NewQuantity(10E9, resource.BinarySI),
  543. },
  544. capacity: v1.ResourceList{
  545. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  546. v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
  547. v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI),
  548. },
  549. }
  550. // Since this test retroactively overrides the stub container manager,
  551. // we have to regenerate default status setters.
  552. kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
  553. clock := testKubelet.fakeClock
  554. kubeClient := testKubelet.fakeKubeClient
  555. existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
  556. kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
  557. machineInfo := &cadvisorapi.MachineInfo{
  558. MachineID: "123",
  559. SystemUUID: "abc",
  560. BootID: "1b3",
  561. NumCores: 2,
  562. MemoryCapacity: 10E9,
  563. }
  564. kubelet.machineInfo = machineInfo
  565. expectedNode := &v1.Node{
  566. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  567. Spec: v1.NodeSpec{},
  568. Status: v1.NodeStatus{
  569. Conditions: []v1.NodeCondition{
  570. {
  571. Type: v1.NodeMemoryPressure,
  572. Status: v1.ConditionFalse,
  573. Reason: "KubeletHasSufficientMemory",
  574. Message: fmt.Sprintf("kubelet has sufficient memory available"),
  575. LastHeartbeatTime: metav1.Time{},
  576. LastTransitionTime: metav1.Time{},
  577. },
  578. {
  579. Type: v1.NodeDiskPressure,
  580. Status: v1.ConditionFalse,
  581. Reason: "KubeletHasNoDiskPressure",
  582. Message: fmt.Sprintf("kubelet has no disk pressure"),
  583. LastHeartbeatTime: metav1.Time{},
  584. LastTransitionTime: metav1.Time{},
  585. },
  586. {
  587. Type: v1.NodePIDPressure,
  588. Status: v1.ConditionFalse,
  589. Reason: "KubeletHasSufficientPID",
  590. Message: fmt.Sprintf("kubelet has sufficient PID available"),
  591. LastHeartbeatTime: metav1.Time{},
  592. LastTransitionTime: metav1.Time{},
  593. },
  594. {}, // placeholder
  595. },
  596. NodeInfo: v1.NodeSystemInfo{
  597. MachineID: "123",
  598. SystemUUID: "abc",
  599. BootID: "1b3",
  600. KernelVersion: cadvisortest.FakeKernelVersion,
  601. OSImage: cadvisortest.FakeContainerOsVersion,
  602. OperatingSystem: goruntime.GOOS,
  603. Architecture: goruntime.GOARCH,
  604. ContainerRuntimeVersion: "test://1.5.0",
  605. KubeletVersion: version.Get().String(),
  606. KubeProxyVersion: version.Get().String(),
  607. },
  608. Capacity: v1.ResourceList{
  609. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  610. v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
  611. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  612. v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI),
  613. },
  614. Allocatable: v1.ResourceList{
  615. v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
  616. v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
  617. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  618. v1.ResourceEphemeralStorage: *resource.NewQuantity(10E9, resource.BinarySI),
  619. },
  620. Addresses: []v1.NodeAddress{
  621. {Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  622. {Type: v1.NodeHostName, Address: testKubeletHostname},
  623. },
  624. Images: []v1.ContainerImage{
  625. {
  626. Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
  627. SizeBytes: 123,
  628. },
  629. {
  630. Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
  631. SizeBytes: 456,
  632. },
  633. },
  634. },
  635. }
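// checkNodeStatus runs a status update and asserts that the resulting NodeReady condition
// carries the given status and reason.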
  636. checkNodeStatus := func(status v1.ConditionStatus, reason string) {
  637. kubeClient.ClearActions()
  638. assert.NoError(t, kubelet.updateNodeStatus())
  639. actions := kubeClient.Actions()
  640. require.Len(t, actions, 2)
  641. require.True(t, actions[1].Matches("patch", "nodes"))
  642. require.Equal(t, actions[1].GetSubresource(), "status")
  643. updatedNode, err := kubeClient.CoreV1().Nodes().Get(testKubeletHostname, metav1.GetOptions{})
  644. require.NoError(t, err, "can't apply node status patch")
  645. for i, cond := range updatedNode.Status.Conditions {
  646. assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
  647. assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
  648. updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
  649. updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
  650. }
  651. // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
  652. lastIndex := len(updatedNode.Status.Conditions) - 1
  653. assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[lastIndex].Type, "NodeReady should be the last condition")
  654. assert.NotEmpty(t, updatedNode.Status.Conditions[lastIndex].Message)
  655. updatedNode.Status.Conditions[lastIndex].Message = ""
  656. expectedNode.Status.Conditions[lastIndex] = v1.NodeCondition{
  657. Type: v1.NodeReady,
  658. Status: status,
  659. Reason: reason,
  660. LastHeartbeatTime: metav1.Time{},
  661. LastTransitionTime: metav1.Time{},
  662. }
  663. assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
  664. }
  665. // TODO(random-liu): Refactor the unit test to be a table-driven test.
  666. // Should report kubelet not ready if the runtime check is out of date
  667. clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
  668. kubelet.updateRuntimeUp()
  669. checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
  670. // Should report kubelet ready if the runtime check is updated
  671. clock.SetTime(time.Now())
  672. kubelet.updateRuntimeUp()
  673. checkNodeStatus(v1.ConditionTrue, "KubeletReady")
  674. // Should report kubelet not ready if the runtime check is out of date
  675. clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
  676. kubelet.updateRuntimeUp()
  677. checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
  678. // Should report kubelet not ready if the runtime check failed
  679. fakeRuntime := testKubelet.fakeRuntime
  680. // Inject error into fake runtime status check, node should be NotReady
  681. fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error")
  682. clock.SetTime(time.Now())
  683. kubelet.updateRuntimeUp()
  684. checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
  685. fakeRuntime.StatusErr = nil
  686. // Should report node not ready if runtime status is nil.
  687. fakeRuntime.RuntimeStatus = nil
  688. kubelet.updateRuntimeUp()
  689. checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
  690. // Should report node not ready if runtime status is empty.
  691. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{}
  692. kubelet.updateRuntimeUp()
  693. checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
  694. // Should report node not ready if RuntimeReady is false.
  695. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
  696. Conditions: []kubecontainer.RuntimeCondition{
  697. {Type: kubecontainer.RuntimeReady, Status: false},
  698. {Type: kubecontainer.NetworkReady, Status: true},
  699. },
  700. }
  701. kubelet.updateRuntimeUp()
  702. checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
  703. // Should report node ready if RuntimeReady is true.
  704. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
  705. Conditions: []kubecontainer.RuntimeCondition{
  706. {Type: kubecontainer.RuntimeReady, Status: true},
  707. {Type: kubecontainer.NetworkReady, Status: true},
  708. },
  709. }
  710. kubelet.updateRuntimeUp()
  711. checkNodeStatus(v1.ConditionTrue, "KubeletReady")
  712. // Should report node not ready if NetworkReady is false.
  713. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
  714. Conditions: []kubecontainer.RuntimeCondition{
  715. {Type: kubecontainer.RuntimeReady, Status: true},
  716. {Type: kubecontainer.NetworkReady, Status: false},
  717. },
  718. }
  719. kubelet.updateRuntimeUp()
  720. checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
  721. }
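// TestUpdateNodeStatusError verifies that updateNodeStatus returns an error and retries
// nodeStatusUpdateRetry times when no matching node object exists.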
  722. func TestUpdateNodeStatusError(t *testing.T) {
  723. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  724. defer testKubelet.Cleanup()
  725. kubelet := testKubelet.kubelet
  726. kubelet.kubeClient = nil // ensure only the heartbeat client is used
  727. // No matching node for the kubelet
  728. testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain
  729. assert.Error(t, kubelet.updateNodeStatus())
  730. assert.Len(t, testKubelet.fakeKubeClient.Actions(), nodeStatusUpdateRetry)
  731. }
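// TestUpdateNodeStatusWithLease verifies that, with the NodeLease feature enabled, a full status patch
// is sent only when the status changed or nodeStatusReportFrequency has elapsed; otherwise the update
// stops after fetching the node.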
  732. func TestUpdateNodeStatusWithLease(t *testing.T) {
  733. defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeLease, true)()
  734. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  735. defer testKubelet.Cleanup()
  736. clock := testKubelet.fakeClock
  737. kubelet := testKubelet.kubelet
  738. kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
  739. kubelet.kubeClient = nil // ensure only the heartbeat client is used
  740. kubelet.containerManager = &localCM{
  741. ContainerManager: cm.NewStubContainerManager(),
  742. allocatableReservation: v1.ResourceList{
  743. v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
  744. v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
  745. },
  746. capacity: v1.ResourceList{
  747. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  748. v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
  749. v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  750. },
  751. }
  752. // Since this test retroactively overrides the stub container manager,
  753. // we have to regenerate default status setters.
  754. kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
  755. kubelet.nodeStatusReportFrequency = time.Minute
  756. kubeClient := testKubelet.fakeKubeClient
  757. existingNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
  758. kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*existingNode}}).ReactionChain
  759. machineInfo := &cadvisorapi.MachineInfo{
  760. MachineID: "123",
  761. SystemUUID: "abc",
  762. BootID: "1b3",
  763. NumCores: 2,
  764. MemoryCapacity: 20E9,
  765. }
  766. kubelet.machineInfo = machineInfo
  767. now := metav1.NewTime(clock.Now()).Rfc3339Copy()
  768. expectedNode := &v1.Node{
  769. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  770. Spec: v1.NodeSpec{},
  771. Status: v1.NodeStatus{
  772. Conditions: []v1.NodeCondition{
  773. {
  774. Type: v1.NodeMemoryPressure,
  775. Status: v1.ConditionFalse,
  776. Reason: "KubeletHasSufficientMemory",
  777. Message: fmt.Sprintf("kubelet has sufficient memory available"),
  778. LastHeartbeatTime: now,
  779. LastTransitionTime: now,
  780. },
  781. {
  782. Type: v1.NodeDiskPressure,
  783. Status: v1.ConditionFalse,
  784. Reason: "KubeletHasNoDiskPressure",
  785. Message: fmt.Sprintf("kubelet has no disk pressure"),
  786. LastHeartbeatTime: now,
  787. LastTransitionTime: now,
  788. },
  789. {
  790. Type: v1.NodePIDPressure,
  791. Status: v1.ConditionFalse,
  792. Reason: "KubeletHasSufficientPID",
  793. Message: fmt.Sprintf("kubelet has sufficient PID available"),
  794. LastHeartbeatTime: now,
  795. LastTransitionTime: now,
  796. },
  797. {
  798. Type: v1.NodeReady,
  799. Status: v1.ConditionTrue,
  800. Reason: "KubeletReady",
  801. Message: fmt.Sprintf("kubelet is posting ready status"),
  802. LastHeartbeatTime: now,
  803. LastTransitionTime: now,
  804. },
  805. },
  806. NodeInfo: v1.NodeSystemInfo{
  807. MachineID: "123",
  808. SystemUUID: "abc",
  809. BootID: "1b3",
  810. KernelVersion: cadvisortest.FakeKernelVersion,
  811. OSImage: cadvisortest.FakeContainerOsVersion,
  812. OperatingSystem: goruntime.GOOS,
  813. Architecture: goruntime.GOARCH,
  814. ContainerRuntimeVersion: "test://1.5.0",
  815. KubeletVersion: version.Get().String(),
  816. KubeProxyVersion: version.Get().String(),
  817. },
  818. Capacity: v1.ResourceList{
  819. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  820. v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
  821. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  822. v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  823. },
  824. Allocatable: v1.ResourceList{
  825. v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
  826. v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
  827. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  828. v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  829. },
  830. Addresses: []v1.NodeAddress{
  831. {Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  832. {Type: v1.NodeHostName, Address: testKubeletHostname},
  833. },
  834. // images will be sorted from max to min in node status.
  835. Images: []v1.ContainerImage{
  836. {
  837. Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
  838. SizeBytes: 123,
  839. },
  840. {
  841. Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
  842. SizeBytes: 456,
  843. },
  844. },
  845. },
  846. }
  847. // Update node status when node status is created.
  848. // Report node status.
  849. kubelet.updateRuntimeUp()
  850. assert.NoError(t, kubelet.updateNodeStatus())
  851. actions := kubeClient.Actions()
  852. assert.Len(t, actions, 2)
  853. assert.IsType(t, core.GetActionImpl{}, actions[0])
  854. assert.IsType(t, core.PatchActionImpl{}, actions[1])
  855. patchAction := actions[1].(core.PatchActionImpl)
  856. updatedNode, err := applyNodeStatusPatch(existingNode, patchAction.GetPatch())
  857. require.NoError(t, err)
  858. for _, cond := range updatedNode.Status.Conditions {
  859. cond.LastHeartbeatTime = cond.LastHeartbeatTime.Rfc3339Copy()
  860. cond.LastTransitionTime = cond.LastTransitionTime.Rfc3339Copy()
  861. }
  862. assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
  863. // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
  864. assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
  865. "NodeReady should be the last condition")
  866. // Update node status again when nothing is changed (except heartbeat time).
  867. // Report node status if it has exceeded the duration of nodeStatusReportFrequency.
  868. clock.Step(time.Minute)
  869. assert.NoError(t, kubelet.updateNodeStatus())
  870. // 2 more actions (There were 2 actions before).
  871. actions = kubeClient.Actions()
  872. assert.Len(t, actions, 4)
  873. assert.IsType(t, core.GetActionImpl{}, actions[2])
  874. assert.IsType(t, core.PatchActionImpl{}, actions[3])
  875. patchAction = actions[3].(core.PatchActionImpl)
  876. updatedNode, err = applyNodeStatusPatch(updatedNode, patchAction.GetPatch())
  877. require.NoError(t, err)
  878. for _, cond := range updatedNode.Status.Conditions {
  879. cond.LastHeartbeatTime = cond.LastHeartbeatTime.Rfc3339Copy()
  880. cond.LastTransitionTime = cond.LastTransitionTime.Rfc3339Copy()
  881. }
  882. // Expect LastHeartbeatTime updated, other things unchanged.
  883. for i, cond := range expectedNode.Status.Conditions {
  884. expectedNode.Status.Conditions[i].LastHeartbeatTime = metav1.NewTime(cond.LastHeartbeatTime.Time.Add(time.Minute)).Rfc3339Copy()
  885. }
  886. assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
  887. // Update node status again when nothing is changed (except heartbeat time).
  888. // Do not report node status if it is within the duration of nodeStatusReportFrequency.
  889. clock.Step(10 * time.Second)
  890. assert.NoError(t, kubelet.updateNodeStatus())
  891. // Only 1 more action (There were 4 actions before).
  892. actions = kubeClient.Actions()
  893. assert.Len(t, actions, 5)
  894. assert.IsType(t, core.GetActionImpl{}, actions[4])
  895. // Update node status again when something is changed.
  896. // Report node status even if it is still within the duration of nodeStatusReportFrequency.
  897. clock.Step(10 * time.Second)
  898. var newMemoryCapacity int64 = 40E9
  899. kubelet.machineInfo.MemoryCapacity = uint64(newMemoryCapacity)
  900. assert.NoError(t, kubelet.updateNodeStatus())
  901. // 2 more actions (There were 5 actions before).
  902. actions = kubeClient.Actions()
  903. assert.Len(t, actions, 7)
  904. assert.IsType(t, core.GetActionImpl{}, actions[5])
  905. assert.IsType(t, core.PatchActionImpl{}, actions[6])
  906. patchAction = actions[6].(core.PatchActionImpl)
  907. updatedNode, err = applyNodeStatusPatch(updatedNode, patchAction.GetPatch())
  908. require.NoError(t, err)
  909. memCapacity, _ := updatedNode.Status.Capacity[v1.ResourceMemory]
  910. updatedMemoryCapacity, _ := (&memCapacity).AsInt64()
  911. assert.Equal(t, newMemoryCapacity, updatedMemoryCapacity, "Memory capacity")
  912. now = metav1.NewTime(clock.Now()).Rfc3339Copy()
  913. for _, cond := range updatedNode.Status.Conditions {
  914. // Expect LastHeartbeatTime updated, while LastTransitionTime unchanged.
  915. assert.Equal(t, now, cond.LastHeartbeatTime.Rfc3339Copy(),
  916. "LastHeartbeatTime for condition %v", cond.Type)
  917. assert.Equal(t, now, metav1.NewTime(cond.LastTransitionTime.Time.Add(time.Minute+20*time.Second)).Rfc3339Copy(),
  918. "LastTransitionTime for condition %v", cond.Type)
  919. }
  920. // Update node status when changing pod CIDR.
  921. // Report node status if it is still within the duration of nodeStatusReportFrequency.
  922. clock.Step(10 * time.Second)
  923. assert.Equal(t, "", kubelet.runtimeState.podCIDR(), "Pod CIDR should be empty")
  924. podCIDR := "10.0.0.0/24"
  925. updatedNode.Spec.PodCIDR = podCIDR
  926. kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*updatedNode}}).ReactionChain
  927. assert.NoError(t, kubelet.updateNodeStatus())
  928. assert.Equal(t, podCIDR, kubelet.runtimeState.podCIDR(), "Pod CIDR should be updated now")
  929. // 2 more actions (There were 7 actions before).
  930. actions = kubeClient.Actions()
  931. assert.Len(t, actions, 9)
  932. assert.IsType(t, core.GetActionImpl{}, actions[7])
  933. assert.IsType(t, core.PatchActionImpl{}, actions[8])
  934. patchAction = actions[8].(core.PatchActionImpl)
  935. // Update node status when keeping the pod CIDR.
  936. // Do not report node status if it is within the duration of nodeStatusReportFrequency.
  937. clock.Step(10 * time.Second)
  938. assert.Equal(t, podCIDR, kubelet.runtimeState.podCIDR(), "Pod CIDR should already be updated")
  939. assert.NoError(t, kubelet.updateNodeStatus())
  940. // Only 1 more action (There were 9 actions before).
  941. actions = kubeClient.Actions()
  942. assert.Len(t, actions, 10)
  943. assert.IsType(t, core.GetActionImpl{}, actions[9])
  944. }
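// TestUpdateNodeStatusAndVolumesInUseWithoutNodeLease verifies that the VolumesInUse setter reconciles
// the node's volumes-in-use list with the volume manager when the NodeLease feature is disabled.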
  945. func TestUpdateNodeStatusAndVolumesInUseWithoutNodeLease(t *testing.T) {
  946. defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeLease, false)()
  947. cases := []struct {
  948. desc string
  949. existingVolumes []v1.UniqueVolumeName // volumes to initially populate volumeManager
  950. existingNode *v1.Node // existing node object
  951. expectedNode *v1.Node // new node object after patch
  952. expectedReportedInUse []v1.UniqueVolumeName // expected volumes reported in use in volumeManager
  953. }{
  954. {
  955. desc: "no volumes and no update",
  956. existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
  957. expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
  958. },
  959. {
  960. desc: "volumes inuse on node and volumeManager",
  961. existingVolumes: []v1.UniqueVolumeName{"vol1"},
  962. existingNode: &v1.Node{
  963. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  964. Status: v1.NodeStatus{
  965. VolumesInUse: []v1.UniqueVolumeName{"vol1"},
  966. },
  967. },
  968. expectedNode: &v1.Node{
  969. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  970. Status: v1.NodeStatus{
  971. VolumesInUse: []v1.UniqueVolumeName{"vol1"},
  972. },
  973. },
  974. expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
  975. },
  976. {
  977. desc: "volumes inuse on node but not in volumeManager",
  978. existingNode: &v1.Node{
  979. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  980. Status: v1.NodeStatus{
  981. VolumesInUse: []v1.UniqueVolumeName{"vol1"},
  982. },
  983. },
  984. expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
  985. },
  986. {
  987. desc: "volumes inuse in volumeManager but not on node",
  988. existingVolumes: []v1.UniqueVolumeName{"vol1"},
  989. existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
  990. expectedNode: &v1.Node{
  991. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  992. Status: v1.NodeStatus{
  993. VolumesInUse: []v1.UniqueVolumeName{"vol1"},
  994. },
  995. },
  996. expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
  997. },
  998. }
  999. for _, tc := range cases {
  1000. t.Run(tc.desc, func(t *testing.T) {
  1001. // Setup
  1002. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1003. defer testKubelet.Cleanup()
  1004. kubelet := testKubelet.kubelet
  1005. kubelet.kubeClient = nil // ensure only the heartbeat client is used
  1006. kubelet.containerManager = &localCM{ContainerManager: cm.NewStubContainerManager()}
  1007. kubelet.lastStatusReportTime = kubelet.clock.Now()
  1008. kubelet.nodeStatusReportFrequency = time.Hour
  1009. kubelet.machineInfo = &cadvisorapi.MachineInfo{}
  1010. // override test volumeManager
  1011. fakeVolumeManager := kubeletvolume.NewFakeVolumeManager(tc.existingVolumes)
  1012. kubelet.volumeManager = fakeVolumeManager
  1013. // Only test VolumesInUse setter
  1014. kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
  1015. nodestatus.VolumesInUse(kubelet.volumeManager.ReconcilerStatesHasBeenSynced,
  1016. kubelet.volumeManager.GetVolumesInUse),
  1017. }
  1018. kubeClient := testKubelet.fakeKubeClient
  1019. kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*tc.existingNode}}).ReactionChain
  1020. // Execute
  1021. assert.NoError(t, kubelet.updateNodeStatus())
  1022. // Validate
  1023. actions := kubeClient.Actions()
  1024. if tc.expectedNode != nil {
  1025. assert.Len(t, actions, 2)
  1026. assert.IsType(t, core.GetActionImpl{}, actions[0])
  1027. assert.IsType(t, core.PatchActionImpl{}, actions[1])
  1028. patchAction := actions[1].(core.PatchActionImpl)
  1029. updatedNode, err := applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
  1030. require.NoError(t, err)
  1031. assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedNode, updatedNode), "%s", diff.ObjectDiff(tc.expectedNode, updatedNode))
  1032. } else {
  1033. assert.Len(t, actions, 1)
  1034. assert.IsType(t, core.GetActionImpl{}, actions[0])
  1035. }
  1036. reportedInUse := fakeVolumeManager.GetVolumesReportedInUse()
  1037. assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedReportedInUse, reportedInUse), "%s", diff.ObjectDiff(tc.expectedReportedInUse, reportedInUse))
  1038. })
  1039. }
  1040. }
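// TestUpdateNodeStatusAndVolumesInUseWithNodeLease covers the same volume reconciliation with the
// NodeLease feature enabled, where an unchanged status produces no patch.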
func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeLease, true)()
	cases := []struct {
		desc                  string
		existingVolumes       []v1.UniqueVolumeName // volumes to initially populate volumeManager
		existingNode          *v1.Node              // existing node object
		expectedNode          *v1.Node              // new node object after patch
		expectedReportedInUse []v1.UniqueVolumeName // expected volumes reported in use in volumeManager
	}{
		{
			desc:         "no volumes and no update",
			existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
		},
		{
			desc:            "volumes inuse on node and volumeManager",
			existingVolumes: []v1.UniqueVolumeName{"vol1"},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
				},
			},
			expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
		},
		{
			desc: "volumes inuse on node but not in volumeManager",
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
				},
			},
			expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
		},
		{
			desc:            "volumes inuse in volumeManager but not on node",
			existingVolumes: []v1.UniqueVolumeName{"vol1"},
			existingNode:    &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
			expectedNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
				},
			},
			expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
		},
	}
	for _, tc := range cases {
		t.Run(tc.desc, func(t *testing.T) {
			// Setup
			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
			defer testKubelet.Cleanup()
			kubelet := testKubelet.kubelet
			kubelet.kubeClient = nil // ensure only the heartbeat client is used
			kubelet.containerManager = &localCM{ContainerManager: cm.NewStubContainerManager()}
			kubelet.lastStatusReportTime = kubelet.clock.Now()
			kubelet.nodeStatusReportFrequency = time.Hour
			kubelet.machineInfo = &cadvisorapi.MachineInfo{}
			// override test volumeManager
			fakeVolumeManager := kubeletvolume.NewFakeVolumeManager(tc.existingVolumes)
			kubelet.volumeManager = fakeVolumeManager
			// Only test VolumesInUse setter
			kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
				nodestatus.VolumesInUse(kubelet.volumeManager.ReconcilerStatesHasBeenSynced,
					kubelet.volumeManager.GetVolumesInUse),
			}
			kubeClient := testKubelet.fakeKubeClient
			kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*tc.existingNode}}).ReactionChain
			// Execute
			assert.NoError(t, kubelet.updateNodeStatus())
			// Validate
			actions := kubeClient.Actions()
			if tc.expectedNode != nil {
				assert.Len(t, actions, 2)
				assert.IsType(t, core.GetActionImpl{}, actions[0])
				assert.IsType(t, core.PatchActionImpl{}, actions[1])
				patchAction := actions[1].(core.PatchActionImpl)
				updatedNode, err := applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
				require.NoError(t, err)
				assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedNode, updatedNode), "%s", diff.ObjectDiff(tc.expectedNode, updatedNode))
			} else {
				assert.Len(t, actions, 1)
				assert.IsType(t, core.GetActionImpl{}, actions[0])
			}
			reportedInUse := fakeVolumeManager.GetVolumesReportedInUse()
			assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedReportedInUse, reportedInUse), "%s", diff.ObjectDiff(tc.expectedReportedInUse, reportedInUse))
		})
	}
}
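// TestRegisterWithApiServer exercises initial node registration: the fake
// client rejects the create with an AlreadyExists error and serves a matching
// existing node on get, and the test verifies that registerWithAPIServer
// returns instead of retrying forever.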
func TestRegisterWithApiServer(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
		// Return an error on create.
		return true, &v1.Node{}, &apierrors.StatusError{
			ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
		}
	})
	kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
		// Return an existing (matching) node on get.
		return true, &v1.Node{
			ObjectMeta: metav1.ObjectMeta{
				Name: testKubeletHostname,
				Labels: map[string]string{
					v1.LabelHostname:      testKubeletHostname,
					v1.LabelOSStable:      goruntime.GOOS,
					v1.LabelArchStable:    goruntime.GOARCH,
					kubeletapis.LabelOS:   goruntime.GOOS,
					kubeletapis.LabelArch: goruntime.GOARCH,
				},
			},
		}, nil
	})
	addNotImplatedReaction(kubeClient)
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 1024,
	}
	kubelet.machineInfo = machineInfo
	done := make(chan struct{})
	go func() {
		kubelet.registerWithAPIServer()
		done <- struct{}{}
	}()
	select {
	case <-time.After(wait.ForeverTestTimeout):
		assert.Fail(t, "timed out waiting for registration")
	case <-done:
		return
	}
}
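// TestTryRegisterWithApiServer covers the create/get/patch paths of
// tryRegisterWithAPIServer, including reconciliation of the
// controller-managed attach-detach (CMAD) annotation on an existing node and
// the failure cases for each API call.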
func TestTryRegisterWithApiServer(t *testing.T) {
	alreadyExists := &apierrors.StatusError{
		ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
	}
	conflict := &apierrors.StatusError{
		ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict},
	}
	newNode := func(cmad bool) *v1.Node {
		node := &v1.Node{
			ObjectMeta: metav1.ObjectMeta{
				Labels: map[string]string{
					v1.LabelHostname:      testKubeletHostname,
					v1.LabelOSStable:      goruntime.GOOS,
					v1.LabelArchStable:    goruntime.GOARCH,
					kubeletapis.LabelOS:   goruntime.GOOS,
					kubeletapis.LabelArch: goruntime.GOARCH,
				},
			},
		}
		if cmad {
			node.Annotations = make(map[string]string)
			node.Annotations[util.ControllerManagedAttachAnnotation] = "true"
		}
		return node
	}
	cases := []struct {
		name            string
		newNode         *v1.Node
		existingNode    *v1.Node
		createError     error
		getError        error
		patchError      error
		deleteError     error
		expectedResult  bool
		expectedActions int
		testSavedNode   bool
		savedNodeIndex  int
		savedNodeCMAD   bool
	}{
		{
			name:            "success case - new node",
			newNode:         &v1.Node{},
			expectedResult:  true,
			expectedActions: 1,
		},
		{
			name:            "success case - existing node - no change in CMAD",
			newNode:         newNode(true),
			createError:     alreadyExists,
			existingNode:    newNode(true),
			expectedResult:  true,
			expectedActions: 2,
		},
		{
			name:            "success case - existing node - CMAD disabled",
			newNode:         newNode(false),
			createError:     alreadyExists,
			existingNode:    newNode(true),
			expectedResult:  true,
			expectedActions: 3,
			testSavedNode:   true,
			savedNodeIndex:  2,
			savedNodeCMAD:   false,
		},
		{
			name:            "success case - existing node - CMAD enabled",
			newNode:         newNode(true),
			createError:     alreadyExists,
			existingNode:    newNode(false),
			expectedResult:  true,
			expectedActions: 3,
			testSavedNode:   true,
			savedNodeIndex:  2,
			savedNodeCMAD:   true,
		},
		{
			name:            "create failed",
			newNode:         newNode(false),
			createError:     conflict,
			expectedResult:  false,
			expectedActions: 1,
		},
		{
			name:            "get existing node failed",
			newNode:         newNode(false),
			createError:     alreadyExists,
			getError:        conflict,
			expectedResult:  false,
			expectedActions: 2,
		},
		{
			name:            "update existing node failed",
			newNode:         newNode(false),
			createError:     alreadyExists,
			existingNode:    newNode(true),
			patchError:      conflict,
			expectedResult:  false,
			expectedActions: 3,
		},
	}
	for _, tc := range cases {
		testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled is a don't-care for this test */)
		defer testKubelet.Cleanup()
		kubelet := testKubelet.kubelet
		kubeClient := testKubelet.fakeKubeClient
		kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			return true, nil, tc.createError
		})
		kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			// Return an existing (matching) node on get.
			return true, tc.existingNode, tc.getError
		})
		kubeClient.AddReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			if action.GetSubresource() == "status" {
				return true, nil, tc.patchError
			}
			return notImplemented(action)
		})
		kubeClient.AddReactor("delete", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			return true, nil, tc.deleteError
		})
		addNotImplatedReaction(kubeClient)
		result := kubelet.tryRegisterWithAPIServer(tc.newNode)
		require.Equal(t, tc.expectedResult, result, "test [%s]", tc.name)
		actions := kubeClient.Actions()
		assert.Len(t, actions, tc.expectedActions, "test [%s]", tc.name)
		if tc.testSavedNode {
			var savedNode *v1.Node
			t.Logf("actions: %v: %+v", len(actions), actions)
			action := actions[tc.savedNodeIndex]
			if action.GetVerb() == "create" {
				createAction := action.(core.CreateAction)
				obj := createAction.GetObject()
				require.IsType(t, &v1.Node{}, obj)
				savedNode = obj.(*v1.Node)
			} else if action.GetVerb() == "patch" {
				patchAction := action.(core.PatchActionImpl)
				var err error
				savedNode, err = applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
				require.NoError(t, err)
			}
			actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[util.ControllerManagedAttachAnnotation])
			assert.Equal(t, tc.savedNodeCMAD, actualCMAD, "test [%s]", tc.name)
		}
	}
}
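// TestUpdateNewNodeStatusTooLargeReservation checks that allocatable values
// are clamped at zero when the configured resource reservation exceeds the
// node's capacity (here the CPU reservation is larger than the CPU capacity).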
func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
	const nodeStatusMaxImages = 5
	// generate one more in inputImageList than we configure the Kubelet to report
	inputImageList, _ := generateTestingImageLists(nodeStatusMaxImages+1, nodeStatusMaxImages)
	testKubelet := newTestKubeletWithImageList(
		t, inputImageList, false /* controllerAttachDetachEnabled */, true /* initFakeVolumePlugin */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.nodeStatusMaxImages = nodeStatusMaxImages
	kubelet.kubeClient = nil // ensure only the heartbeat client is used
	kubelet.containerManager = &localCM{
		ContainerManager: cm.NewStubContainerManager(),
		allocatableReservation: v1.ResourceList{
			v1.ResourceCPU:              *resource.NewMilliQuantity(40000, resource.DecimalSI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(1000, resource.BinarySI),
		},
		capacity: v1.ResourceList{
			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
			v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
		},
	}
	// Since this test retroactively overrides the stub container manager,
	// we have to regenerate default status setters.
	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
	kubeClient := testKubelet.fakeKubeClient
	existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 10E9, // 10G
	}
	kubelet.machineInfo = machineInfo
	expectedNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Capacity: v1.ResourceList{
				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:              *resource.NewMilliQuantity(0, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
			},
		},
	}
	kubelet.updateRuntimeUp()
	assert.NoError(t, kubelet.updateNodeStatus())
	actions := kubeClient.Actions()
	require.Len(t, actions, 2)
	require.True(t, actions[1].Matches("patch", "nodes"))
	require.Equal(t, actions[1].GetSubresource(), "status")
	updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
	assert.NoError(t, err)
	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable), "%s", diff.ObjectDiff(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable))
}
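// TestUpdateDefaultLabels verifies updateDefaultLabels: kubelet-owned default
// labels on the existing node are created or refreshed from the initial node,
// while labels the kubelet does not manage are left untouched.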
func TestUpdateDefaultLabels(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.kubelet.kubeClient = nil // ensure only the heartbeat client is used
	cases := []struct {
		name         string
		initialNode  *v1.Node
		existingNode *v1.Node
		needsUpdate  bool
		finalLabels  map[string]string
	}{
		{
			name: "make sure default labels exist",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "new-hostname",
						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
						v1.LabelZoneRegion:        "new-zone-region",
						v1.LabelInstanceType:      "new-instance-type",
						kubeletapis.LabelOS:       "new-os",
						kubeletapis.LabelArch:     "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{},
				},
			},
			needsUpdate: true,
			finalLabels: map[string]string{
				v1.LabelHostname:          "new-hostname",
				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
				v1.LabelZoneRegion:        "new-zone-region",
				v1.LabelInstanceType:      "new-instance-type",
				kubeletapis.LabelOS:       "new-os",
				kubeletapis.LabelArch:     "new-arch",
			},
		},
		{
			name: "make sure default labels are up to date",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "new-hostname",
						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
						v1.LabelZoneRegion:        "new-zone-region",
						v1.LabelInstanceType:      "new-instance-type",
						kubeletapis.LabelOS:       "new-os",
						kubeletapis.LabelArch:     "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "old-hostname",
						v1.LabelZoneFailureDomain: "old-zone-failure-domain",
						v1.LabelZoneRegion:        "old-zone-region",
						v1.LabelInstanceType:      "old-instance-type",
						kubeletapis.LabelOS:       "old-os",
						kubeletapis.LabelArch:     "old-arch",
					},
				},
			},
			needsUpdate: true,
			finalLabels: map[string]string{
				v1.LabelHostname:          "new-hostname",
				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
				v1.LabelZoneRegion:        "new-zone-region",
				v1.LabelInstanceType:      "new-instance-type",
				kubeletapis.LabelOS:       "new-os",
				kubeletapis.LabelArch:     "new-arch",
			},
		},
		{
			name: "make sure existing labels do not get deleted",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "new-hostname",
						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
						v1.LabelZoneRegion:        "new-zone-region",
						v1.LabelInstanceType:      "new-instance-type",
						kubeletapis.LabelOS:       "new-os",
						kubeletapis.LabelArch:     "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "new-hostname",
						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
						v1.LabelZoneRegion:        "new-zone-region",
						v1.LabelInstanceType:      "new-instance-type",
						kubeletapis.LabelOS:       "new-os",
						kubeletapis.LabelArch:     "new-arch",
						"please-persist":          "foo",
					},
				},
			},
			needsUpdate: false,
			finalLabels: map[string]string{
				v1.LabelHostname:          "new-hostname",
				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
				v1.LabelZoneRegion:        "new-zone-region",
				v1.LabelInstanceType:      "new-instance-type",
				kubeletapis.LabelOS:       "new-os",
				kubeletapis.LabelArch:     "new-arch",
				"please-persist":          "foo",
			},
		},
		{
			name: "make sure existing labels do not get deleted when initial node has no opinion",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "new-hostname",
						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
						v1.LabelZoneRegion:        "new-zone-region",
						v1.LabelInstanceType:      "new-instance-type",
						kubeletapis.LabelOS:       "new-os",
						kubeletapis.LabelArch:     "new-arch",
						"please-persist":          "foo",
					},
				},
			},
			needsUpdate: false,
			finalLabels: map[string]string{
				v1.LabelHostname:          "new-hostname",
				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
				v1.LabelZoneRegion:        "new-zone-region",
				v1.LabelInstanceType:      "new-instance-type",
				kubeletapis.LabelOS:       "new-os",
				kubeletapis.LabelArch:     "new-arch",
				"please-persist":          "foo",
			},
		},
		{
			name: "no update needed",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "new-hostname",
						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
						v1.LabelZoneRegion:        "new-zone-region",
						v1.LabelInstanceType:      "new-instance-type",
						kubeletapis.LabelOS:       "new-os",
						kubeletapis.LabelArch:     "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "new-hostname",
						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
						v1.LabelZoneRegion:        "new-zone-region",
						v1.LabelInstanceType:      "new-instance-type",
						kubeletapis.LabelOS:       "new-os",
						kubeletapis.LabelArch:     "new-arch",
					},
				},
			},
			needsUpdate: false,
			finalLabels: map[string]string{
				v1.LabelHostname:          "new-hostname",
				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
				v1.LabelZoneRegion:        "new-zone-region",
				v1.LabelInstanceType:      "new-instance-type",
				kubeletapis.LabelOS:       "new-os",
				kubeletapis.LabelArch:     "new-arch",
			},
		},
		{
			name: "not panic when existing node has nil labels",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "new-hostname",
						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
						v1.LabelZoneRegion:        "new-zone-region",
						v1.LabelInstanceType:      "new-instance-type",
						kubeletapis.LabelOS:       "new-os",
						kubeletapis.LabelArch:     "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{},
			},
			needsUpdate: true,
			finalLabels: map[string]string{
				v1.LabelHostname:          "new-hostname",
				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
				v1.LabelZoneRegion:        "new-zone-region",
				v1.LabelInstanceType:      "new-instance-type",
				kubeletapis.LabelOS:       "new-os",
				kubeletapis.LabelArch:     "new-arch",
			},
		},
	}
	for _, tc := range cases {
		defer testKubelet.Cleanup()
		kubelet := testKubelet.kubelet
		needsUpdate := kubelet.updateDefaultLabels(tc.initialNode, tc.existingNode)
		assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
		assert.Equal(t, tc.finalLabels, tc.existingNode.Labels, tc.name)
	}
}
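// TestReconcileExtendedResource verifies reconcileExtendedResource: extended
// resource capacity and allocatable on the existing node are zeroed when the
// container manager asks for a reset, and nodes without extended resources
// are left unchanged.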
func TestReconcileExtendedResource(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.kubelet.kubeClient = nil // ensure only the heartbeat client is used
	testKubelet.kubelet.containerManager = cm.NewStubContainerManagerWithExtendedResource(true /* shouldResetExtendedResourceCapacity*/)
	testKubeletNoReset := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	extendedResourceName1 := v1.ResourceName("test.com/resource1")
	extendedResourceName2 := v1.ResourceName("test.com/resource2")
	cases := []struct {
		name         string
		testKubelet  *TestKubelet
		existingNode *v1.Node
		expectedNode *v1.Node
		needsUpdate  bool
	}{
		{
			name:        "no update needed without extended resource",
			testKubelet: testKubelet,
			existingNode: &v1.Node{
				Status: v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
				},
			},
			expectedNode: &v1.Node{
				Status: v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
				},
			},
			needsUpdate: false,
		},
		{
			name:        "extended resource capacity is not zeroed due to presence of checkpoint file",
			testKubelet: testKubelet,
			existingNode: &v1.Node{
				Status: v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
				},
			},
			expectedNode: &v1.Node{
				Status: v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
				},
			},
			needsUpdate: false,
		},
		{
			name:        "extended resource capacity is zeroed",
			testKubelet: testKubeletNoReset,
			existingNode: &v1.Node{
				Status: v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
					},
				},
			},
			expectedNode: &v1.Node{
				Status: v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
						extendedResourceName1:       *resource.NewQuantity(int64(0), resource.DecimalSI),
						extendedResourceName2:       *resource.NewQuantity(int64(0), resource.DecimalSI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10E9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
						extendedResourceName1:       *resource.NewQuantity(int64(0), resource.DecimalSI),
						extendedResourceName2:       *resource.NewQuantity(int64(0), resource.DecimalSI),
					},
				},
			},
			needsUpdate: true,
		},
	}
	for _, tc := range cases {
		defer testKubelet.Cleanup()
		kubelet := testKubelet.kubelet
		initialNode := &v1.Node{}
		needsUpdate := kubelet.reconcileExtendedResource(initialNode, tc.existingNode)
		assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
		assert.Equal(t, tc.expectedNode, tc.existingNode, tc.name)
	}
}
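// TestValidateNodeIPParam checks validateNodeIP against addresses that must be
// rejected (unset, loopback, multicast, link-local, unspecified, and addresses
// not assigned to this host) and against the host's own interface addresses,
// which must be accepted.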
func TestValidateNodeIPParam(t *testing.T) {
	type test struct {
		nodeIP   string
		success  bool
		testName string
	}
	tests := []test{
		{
			nodeIP:   "",
			success:  false,
			testName: "IP not set",
		},
		{
			nodeIP:   "127.0.0.1",
			success:  false,
			testName: "IPv4 loopback address",
		},
		{
			nodeIP:   "::1",
			success:  false,
			testName: "IPv6 loopback address",
		},
		{
			nodeIP:   "224.0.0.1",
			success:  false,
			testName: "multicast IPv4 address",
		},
		{
			nodeIP:   "ff00::1",
			success:  false,
			testName: "multicast IPv6 address",
		},
		{
			nodeIP:   "169.254.0.1",
			success:  false,
			testName: "IPv4 link-local unicast address",
		},
		{
			nodeIP:   "fe80::0202:b3ff:fe1e:8329",
			success:  false,
			testName: "IPv6 link-local unicast address",
		},
		{
			nodeIP:   "0.0.0.0",
			success:  false,
			testName: "Unspecified IPv4 address",
		},
		{
			nodeIP:   "::",
			success:  false,
			testName: "Unspecified IPv6 address",
		},
		{
			nodeIP:   "1.2.3.4",
			success:  false,
			testName: "IPv4 address that doesn't belong to host",
		},
	}
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		assert.Error(t, err, "Unable to obtain a list of the node's unicast interface addresses.")
	}
	for _, addr := range addrs {
		var ip net.IP
		switch v := addr.(type) {
		case *net.IPNet:
			ip = v.IP
		case *net.IPAddr:
			ip = v.IP
		}
		if ip.IsLoopback() || ip.IsLinkLocalUnicast() {
			break
		}
		successTest := test{
			nodeIP:   ip.String(),
			success:  true,
			testName: fmt.Sprintf("Success test case for address %s", ip.String()),
		}
		tests = append(tests, successTest)
	}
	for _, test := range tests {
		err := validateNodeIP(net.ParseIP(test.nodeIP))
		if test.success {
			assert.NoError(t, err, "test %s", test.testName)
		} else {
			assert.Error(t, err, "test %s", test.testName)
		}
	}
}
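// TestRegisterWithApiServerWithTaint registers an unschedulable kubelet and
// verifies that the schedulerapi.TaintNodeUnschedulable taint is present on
// the created Node object exactly when the TaintNodesByCondition feature gate
// is enabled.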
func TestRegisterWithApiServerWithTaint(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 1024,
	}
	kubelet.machineInfo = machineInfo
	var gotNode runtime.Object
	kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
		createAction := action.(core.CreateAction)
		gotNode = createAction.GetObject()
		return true, gotNode, nil
	})
	addNotImplatedReaction(kubeClient)
	// Mark the node as unschedulable.
	kubelet.registerSchedulable = false
	forEachFeatureGate(t, []featuregate.Feature{features.TaintNodesByCondition}, func(t *testing.T) {
		// Reset kubelet status for each test.
		kubelet.registrationCompleted = false
		// Register node to apiserver.
		kubelet.registerWithAPIServer()
		// Check the unschedulable taint.
		got := gotNode.(*v1.Node)
		unschedulableTaint := &v1.Taint{
			Key:    schedulerapi.TaintNodeUnschedulable,
			Effect: v1.TaintEffectNoSchedule,
		}
		require.Equal(t,
			utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition),
			taintutil.TaintExists(got.Spec.Taints, unschedulableTaint),
			"test unschedulable taint for TaintNodesByCondition")
	})
}
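// TestNodeStatusHasChanged verifies that nodeStatusHasChanged ignores
// differences in condition heartbeat times and in condition ordering, reports
// a change for any other difference, and never mutates its inputs.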
func TestNodeStatusHasChanged(t *testing.T) {
	fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
	fakeFuture := metav1.Time{Time: fakeNow.Time.Add(time.Minute)}
	readyCondition := v1.NodeCondition{
		Type:               v1.NodeReady,
		Status:             v1.ConditionTrue,
		LastHeartbeatTime:  fakeNow,
		LastTransitionTime: fakeNow,
	}
	readyConditionAtDiffHearbeatTime := v1.NodeCondition{
		Type:               v1.NodeReady,
		Status:             v1.ConditionTrue,
		LastHeartbeatTime:  fakeFuture,
		LastTransitionTime: fakeNow,
	}
	readyConditionAtDiffTransitionTime := v1.NodeCondition{
		Type:               v1.NodeReady,
		Status:             v1.ConditionTrue,
		LastHeartbeatTime:  fakeFuture,
		LastTransitionTime: fakeFuture,
	}
	notReadyCondition := v1.NodeCondition{
		Type:               v1.NodeReady,
		Status:             v1.ConditionFalse,
		LastHeartbeatTime:  fakeNow,
		LastTransitionTime: fakeNow,
	}
	memoryPressureCondition := v1.NodeCondition{
		Type:               v1.NodeMemoryPressure,
		Status:             v1.ConditionFalse,
		LastHeartbeatTime:  fakeNow,
		LastTransitionTime: fakeNow,
	}
	testcases := []struct {
		name           string
		originalStatus *v1.NodeStatus
		status         *v1.NodeStatus
		expectChange   bool
	}{
		{
			name:           "Node status does not change with nil status.",
			originalStatus: nil,
			status:         nil,
			expectChange:   false,
		},
		{
			name:           "Node status does not change with default status.",
			originalStatus: &v1.NodeStatus{},
			status:         &v1.NodeStatus{},
			expectChange:   false,
		},
		{
			name:           "Node status changes with nil and default status.",
			originalStatus: nil,
			status:         &v1.NodeStatus{},
			expectChange:   true,
		},
		{
			name:           "Node status changes with nil and status.",
			originalStatus: nil,
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			expectChange: true,
		},
		{
			name:           "Node status does not change with empty conditions.",
			originalStatus: &v1.NodeStatus{Conditions: []v1.NodeCondition{}},
			status:         &v1.NodeStatus{Conditions: []v1.NodeCondition{}},
			expectChange:   false,
		},
		{
			name: "Node status does not change",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			expectChange: false,
		},
		{
			name: "Node status does not change even if heartbeat time changes.",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyConditionAtDiffHearbeatTime, memoryPressureCondition},
			},
			expectChange: false,
		},
		{
			name: "Node status does not change even if the orders of conditions are different.",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{memoryPressureCondition, readyConditionAtDiffHearbeatTime},
			},
			expectChange: false,
		},
		{
			name: "Node status changes if condition status differs.",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{notReadyCondition, memoryPressureCondition},
			},
			expectChange: true,
		},
		{
			name: "Node status changes if transition time changes.",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyConditionAtDiffTransitionTime, memoryPressureCondition},
			},
			expectChange: true,
		},
		{
			name: "Node status changes with different number of conditions.",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			expectChange: true,
		},
		{
			name: "Node status changes with different phase.",
			originalStatus: &v1.NodeStatus{
				Phase:      v1.NodePending,
				Conditions: []v1.NodeCondition{readyCondition},
			},
			status: &v1.NodeStatus{
				Phase:      v1.NodeRunning,
				Conditions: []v1.NodeCondition{readyCondition},
			},
			expectChange: true,
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			originalStatusCopy := tc.originalStatus.DeepCopy()
			statusCopy := tc.status.DeepCopy()
			changed := nodeStatusHasChanged(tc.originalStatus, tc.status)
			assert.Equal(t, tc.expectChange, changed, "Expect node status change to be %t, but got %t.", tc.expectChange, changed)
			assert.True(t, apiequality.Semantic.DeepEqual(originalStatusCopy, tc.originalStatus), "%s", diff.ObjectDiff(originalStatusCopy, tc.originalStatus))
			assert.True(t, apiequality.Semantic.DeepEqual(statusCopy, tc.status), "%s", diff.ObjectDiff(statusCopy, tc.status))
		})
	}
}