kubelet_node_status_test.go

  1. /*
  2. Copyright 2016 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package kubelet
  14. import (
  15. "context"
  16. "encoding/json"
  17. "fmt"
  18. "net"
  19. goruntime "runtime"
  20. "sort"
  21. "strconv"
  22. "strings"
  23. "sync/atomic"
  24. "testing"
  25. "time"
  26. "github.com/stretchr/testify/assert"
  27. "github.com/stretchr/testify/require"
  28. cadvisorapi "github.com/google/cadvisor/info/v1"
  29. v1 "k8s.io/api/core/v1"
  30. apiequality "k8s.io/apimachinery/pkg/api/equality"
  31. apierrors "k8s.io/apimachinery/pkg/api/errors"
  32. "k8s.io/apimachinery/pkg/api/resource"
  33. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  34. "k8s.io/apimachinery/pkg/runtime"
  35. "k8s.io/apimachinery/pkg/util/diff"
  36. "k8s.io/apimachinery/pkg/util/rand"
  37. "k8s.io/apimachinery/pkg/util/strategicpatch"
  38. "k8s.io/apimachinery/pkg/util/uuid"
  39. "k8s.io/apimachinery/pkg/util/wait"
  40. clientset "k8s.io/client-go/kubernetes"
  41. "k8s.io/client-go/kubernetes/fake"
  42. "k8s.io/client-go/rest"
  43. core "k8s.io/client-go/testing"
  44. "k8s.io/component-base/version"
  45. kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
  46. cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
  47. "k8s.io/kubernetes/pkg/kubelet/cm"
  48. kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
  49. "k8s.io/kubernetes/pkg/kubelet/nodestatus"
  50. "k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
  51. kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
  52. taintutil "k8s.io/kubernetes/pkg/util/taints"
  53. "k8s.io/kubernetes/pkg/volume/util"
  54. )
  55. const (
  56. maxImageTagsForTest = 20
  57. )
  58. // generateTestingImageLists generates a random image list and the corresponding expectedImageList.
  59. func generateTestingImageLists(count int, maxImages int) ([]kubecontainer.Image, []v1.ContainerImage) {
  60. // imageList is a randomly generated image list
  61. var imageList []kubecontainer.Image
  62. for ; count > 0; count-- {
  63. imageItem := kubecontainer.Image{
  64. ID: string(uuid.NewUUID()),
  65. RepoTags: generateImageTags(),
  66. Size: rand.Int63nRange(minImgSize, maxImgSize+1),
  67. }
  68. imageList = append(imageList, imageItem)
  69. }
  70. expectedImageList := makeExpectedImageList(imageList, maxImages)
  71. return imageList, expectedImageList
  72. }
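  // makeExpectedImageList converts imageList into the v1.ContainerImage entries the kubelet is
  // expected to report: sorted by size, names truncated to MaxNamesPerImageInNodeStatus, and
  // capped at maxImages entries (-1 means no limit).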
  73. func makeExpectedImageList(imageList []kubecontainer.Image, maxImages int) []v1.ContainerImage {
  74. // expectedImageList is derived from imageList according to image size and maxImages
  75. // 1. sort the imageList by size
  76. sort.Sort(sliceutils.ByImageSize(imageList))
  77. // 2. convert sorted imageList to v1.ContainerImage list
  78. var expectedImageList []v1.ContainerImage
  79. for _, kubeImage := range imageList {
  80. apiImage := v1.ContainerImage{
  81. Names: kubeImage.RepoTags[0:nodestatus.MaxNamesPerImageInNodeStatus],
  82. SizeBytes: kubeImage.Size,
  83. }
  84. expectedImageList = append(expectedImageList, apiImage)
  85. }
  86. // 3. only return the top maxImages images from expectedImageList
  87. if maxImages == -1 { // -1 means no limit
  88. return expectedImageList
  89. }
  90. return expectedImageList[0:maxImages]
  91. }
  92. func generateImageTags() []string {
  93. var tagList []string
  94. // Generate > MaxNamesPerImageInNodeStatus tags so that the test can verify
  95. // that the kubelet reports up to MaxNamesPerImageInNodeStatus tags.
  96. count := rand.IntnRange(nodestatus.MaxNamesPerImageInNodeStatus+1, maxImageTagsForTest+1)
  97. for ; count > 0; count-- {
  98. tagList = append(tagList, "k8s.gcr.io:v"+strconv.Itoa(count))
  99. }
  100. return tagList
  101. }
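  // applyNodeStatusPatch applies a strategic merge patch (as sent by the kubelet's status update)
  // to originalNode and returns the patched node for inspection.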
  102. func applyNodeStatusPatch(originalNode *v1.Node, patch []byte) (*v1.Node, error) {
  103. original, err := json.Marshal(originalNode)
  104. if err != nil {
  105. return nil, fmt.Errorf("failed to marshal original node %#v: %v", originalNode, err)
  106. }
  107. updated, err := strategicpatch.StrategicMergePatch(original, patch, v1.Node{})
  108. if err != nil {
  109. return nil, fmt.Errorf("failed to apply strategic merge patch %q on node %#v: %v",
  110. patch, originalNode, err)
  111. }
  112. updatedNode := &v1.Node{}
  113. if err := json.Unmarshal(updated, updatedNode); err != nil {
  114. return nil, fmt.Errorf("failed to unmarshal updated node %q: %v", updated, err)
  115. }
  116. return updatedNode, nil
  117. }
  118. func notImplemented(action core.Action) (bool, runtime.Object, error) {
  119. return true, nil, fmt.Errorf("no reaction implemented for %s", action)
  120. }
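  // addNotImplatedReaction registers notImplemented as a catch-all reactor on the fake clientset,
  // so any client action a test did not explicitly stub fails with a descriptive error.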
  121. func addNotImplatedReaction(kubeClient *fake.Clientset) {
  122. if kubeClient == nil {
  123. return
  124. }
  125. kubeClient.AddReactor("*", "*", notImplemented)
  126. }
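  // localCM wraps a ContainerManager (typically the stub manager) and overrides the node
  // allocatable reservation and capacity it reports, so tests can control the resources the
  // status setters see.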
  127. type localCM struct {
  128. cm.ContainerManager
  129. allocatableReservation v1.ResourceList
  130. capacity v1.ResourceList
  131. }
  132. func (lcm *localCM) GetNodeAllocatableReservation() v1.ResourceList {
  133. return lcm.allocatableReservation
  134. }
  135. func (lcm *localCM) GetCapacity() v1.ResourceList {
  136. return lcm.capacity
  137. }
  138. func TestUpdateNewNodeStatus(t *testing.T) {
  139. cases := []struct {
  140. desc string
  141. nodeStatusMaxImages int32
  142. }{
  143. {
  144. desc: "5 image limit",
  145. nodeStatusMaxImages: 5,
  146. },
  147. {
  148. desc: "no image limit",
  149. nodeStatusMaxImages: -1,
  150. },
  151. }
  152. for _, tc := range cases {
  153. t.Run(tc.desc, func(t *testing.T) {
  154. // generate one more image in inputImageList than the kubelet is configured to report,
  155. // or 5 images if unlimited
  156. numTestImages := int(tc.nodeStatusMaxImages) + 1
  157. if tc.nodeStatusMaxImages == -1 {
  158. numTestImages = 5
  159. }
  160. inputImageList, expectedImageList := generateTestingImageLists(numTestImages, int(tc.nodeStatusMaxImages))
  161. testKubelet := newTestKubeletWithImageList(
  162. t, inputImageList, false /* controllerAttachDetachEnabled */, true /*initFakeVolumePlugin*/)
  163. defer testKubelet.Cleanup()
  164. kubelet := testKubelet.kubelet
  165. kubelet.nodeStatusMaxImages = tc.nodeStatusMaxImages
  166. kubelet.kubeClient = nil // ensure only the heartbeat client is used
  167. kubelet.containerManager = &localCM{
  168. ContainerManager: cm.NewStubContainerManager(),
  169. allocatableReservation: v1.ResourceList{
  170. v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
  171. v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
  172. v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
  173. },
  174. capacity: v1.ResourceList{
  175. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  176. v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
  177. v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  178. },
  179. }
  180. // Since this test retroactively overrides the stub container manager,
  181. // we have to regenerate default status setters.
  182. kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
  183. kubeClient := testKubelet.fakeKubeClient
  184. existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
  185. kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
  186. machineInfo := &cadvisorapi.MachineInfo{
  187. MachineID: "123",
  188. SystemUUID: "abc",
  189. BootID: "1b3",
  190. NumCores: 2,
  191. MemoryCapacity: 10e9, // 10G
  192. }
  193. kubelet.machineInfo = machineInfo
  194. expectedNode := &v1.Node{
  195. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  196. Spec: v1.NodeSpec{},
  197. Status: v1.NodeStatus{
  198. Conditions: []v1.NodeCondition{
  199. {
  200. Type: v1.NodeMemoryPressure,
  201. Status: v1.ConditionFalse,
  202. Reason: "KubeletHasSufficientMemory",
  203. Message: fmt.Sprintf("kubelet has sufficient memory available"),
  204. LastHeartbeatTime: metav1.Time{},
  205. LastTransitionTime: metav1.Time{},
  206. },
  207. {
  208. Type: v1.NodeDiskPressure,
  209. Status: v1.ConditionFalse,
  210. Reason: "KubeletHasNoDiskPressure",
  211. Message: fmt.Sprintf("kubelet has no disk pressure"),
  212. LastHeartbeatTime: metav1.Time{},
  213. LastTransitionTime: metav1.Time{},
  214. },
  215. {
  216. Type: v1.NodePIDPressure,
  217. Status: v1.ConditionFalse,
  218. Reason: "KubeletHasSufficientPID",
  219. Message: fmt.Sprintf("kubelet has sufficient PID available"),
  220. LastHeartbeatTime: metav1.Time{},
  221. LastTransitionTime: metav1.Time{},
  222. },
  223. {
  224. Type: v1.NodeReady,
  225. Status: v1.ConditionTrue,
  226. Reason: "KubeletReady",
  227. Message: fmt.Sprintf("kubelet is posting ready status"),
  228. LastHeartbeatTime: metav1.Time{},
  229. LastTransitionTime: metav1.Time{},
  230. },
  231. },
  232. NodeInfo: v1.NodeSystemInfo{
  233. MachineID: "123",
  234. SystemUUID: "abc",
  235. BootID: "1b3",
  236. KernelVersion: cadvisortest.FakeKernelVersion,
  237. OSImage: cadvisortest.FakeContainerOSVersion,
  238. OperatingSystem: goruntime.GOOS,
  239. Architecture: goruntime.GOARCH,
  240. ContainerRuntimeVersion: "test://1.5.0",
  241. KubeletVersion: version.Get().String(),
  242. KubeProxyVersion: version.Get().String(),
  243. },
  244. Capacity: v1.ResourceList{
  245. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  246. v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
  247. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  248. v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  249. },
  250. Allocatable: v1.ResourceList{
  251. v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
  252. v1.ResourceMemory: *resource.NewQuantity(9900e6, resource.BinarySI),
  253. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  254. v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
  255. },
  256. Addresses: []v1.NodeAddress{
  257. {Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  258. {Type: v1.NodeHostName, Address: testKubeletHostname},
  259. },
  260. Images: expectedImageList,
  261. },
  262. }
  263. kubelet.updateRuntimeUp()
  264. assert.NoError(t, kubelet.updateNodeStatus())
  265. actions := kubeClient.Actions()
  266. require.Len(t, actions, 2)
  267. require.True(t, actions[1].Matches("patch", "nodes"))
  268. require.Equal(t, actions[1].GetSubresource(), "status")
  269. updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
  270. assert.NoError(t, err)
  271. for i, cond := range updatedNode.Status.Conditions {
  272. assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
  273. assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
  274. updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
  275. updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
  276. }
  277. // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
  278. assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
  279. "NotReady should be last")
  280. assert.Len(t, updatedNode.Status.Images, len(expectedImageList))
  281. assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
  282. })
  283. }
  284. }
  285. func TestUpdateExistingNodeStatus(t *testing.T) {
  286. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  287. defer testKubelet.Cleanup()
  288. kubelet := testKubelet.kubelet
  289. kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
  290. kubelet.kubeClient = nil // ensure only the heartbeat client is used
  291. kubelet.containerManager = &localCM{
  292. ContainerManager: cm.NewStubContainerManager(),
  293. allocatableReservation: v1.ResourceList{
  294. v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
  295. v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
  296. },
  297. capacity: v1.ResourceList{
  298. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  299. v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
  300. v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  301. },
  302. }
  303. // Since this test retroactively overrides the stub container manager,
  304. // we have to regenerate default status setters.
  305. kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
  306. kubeClient := testKubelet.fakeKubeClient
  307. existingNode := v1.Node{
  308. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  309. Spec: v1.NodeSpec{},
  310. Status: v1.NodeStatus{
  311. Conditions: []v1.NodeCondition{
  312. {
  313. Type: v1.NodeMemoryPressure,
  314. Status: v1.ConditionFalse,
  315. Reason: "KubeletHasSufficientMemory",
  316. Message: fmt.Sprintf("kubelet has sufficient memory available"),
  317. LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  318. LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  319. },
  320. {
  321. Type: v1.NodeDiskPressure,
  322. Status: v1.ConditionFalse,
  323. Reason: "KubeletHasSufficientDisk",
  324. Message: fmt.Sprintf("kubelet has sufficient disk space available"),
  325. LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  326. LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  327. },
  328. {
  329. Type: v1.NodePIDPressure,
  330. Status: v1.ConditionFalse,
  331. Reason: "KubeletHasSufficientPID",
  332. Message: fmt.Sprintf("kubelet has sufficient PID available"),
  333. LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  334. LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  335. },
  336. {
  337. Type: v1.NodeReady,
  338. Status: v1.ConditionTrue,
  339. Reason: "KubeletReady",
  340. Message: fmt.Sprintf("kubelet is posting ready status"),
  341. LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  342. LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  343. },
  344. },
  345. Capacity: v1.ResourceList{
  346. v1.ResourceCPU: *resource.NewMilliQuantity(3000, resource.DecimalSI),
  347. v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
  348. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  349. },
  350. Allocatable: v1.ResourceList{
  351. v1.ResourceCPU: *resource.NewMilliQuantity(2800, resource.DecimalSI),
  352. v1.ResourceMemory: *resource.NewQuantity(19900e6, resource.BinarySI),
  353. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  354. },
  355. },
  356. }
  357. kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
  358. machineInfo := &cadvisorapi.MachineInfo{
  359. MachineID: "123",
  360. SystemUUID: "abc",
  361. BootID: "1b3",
  362. NumCores: 2,
  363. MemoryCapacity: 20e9,
  364. }
  365. kubelet.machineInfo = machineInfo
  366. expectedNode := &v1.Node{
  367. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  368. Spec: v1.NodeSpec{},
  369. Status: v1.NodeStatus{
  370. Conditions: []v1.NodeCondition{
  371. {
  372. Type: v1.NodeMemoryPressure,
  373. Status: v1.ConditionFalse,
  374. Reason: "KubeletHasSufficientMemory",
  375. Message: fmt.Sprintf("kubelet has sufficient memory available"),
  376. LastHeartbeatTime: metav1.Time{},
  377. LastTransitionTime: metav1.Time{},
  378. },
  379. {
  380. Type: v1.NodeDiskPressure,
  381. Status: v1.ConditionFalse,
  382. Reason: "KubeletHasSufficientDisk",
  383. Message: fmt.Sprintf("kubelet has sufficient disk space available"),
  384. LastHeartbeatTime: metav1.Time{},
  385. LastTransitionTime: metav1.Time{},
  386. },
  387. {
  388. Type: v1.NodePIDPressure,
  389. Status: v1.ConditionFalse,
  390. Reason: "KubeletHasSufficientPID",
  391. Message: fmt.Sprintf("kubelet has sufficient PID available"),
  392. LastHeartbeatTime: metav1.Time{},
  393. LastTransitionTime: metav1.Time{},
  394. },
  395. {
  396. Type: v1.NodeReady,
  397. Status: v1.ConditionTrue,
  398. Reason: "KubeletReady",
  399. Message: fmt.Sprintf("kubelet is posting ready status"),
  400. LastHeartbeatTime: metav1.Time{}, // placeholder
  401. LastTransitionTime: metav1.Time{}, // placeholder
  402. },
  403. },
  404. NodeInfo: v1.NodeSystemInfo{
  405. MachineID: "123",
  406. SystemUUID: "abc",
  407. BootID: "1b3",
  408. KernelVersion: cadvisortest.FakeKernelVersion,
  409. OSImage: cadvisortest.FakeContainerOSVersion,
  410. OperatingSystem: goruntime.GOOS,
  411. Architecture: goruntime.GOARCH,
  412. ContainerRuntimeVersion: "test://1.5.0",
  413. KubeletVersion: version.Get().String(),
  414. KubeProxyVersion: version.Get().String(),
  415. },
  416. Capacity: v1.ResourceList{
  417. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  418. v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
  419. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  420. v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  421. },
  422. Allocatable: v1.ResourceList{
  423. v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
  424. v1.ResourceMemory: *resource.NewQuantity(19900e6, resource.BinarySI),
  425. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  426. v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  427. },
  428. Addresses: []v1.NodeAddress{
  429. {Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  430. {Type: v1.NodeHostName, Address: testKubeletHostname},
  431. },
  432. // images are sorted from largest to smallest in node status.
  433. Images: []v1.ContainerImage{
  434. {
  435. Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
  436. SizeBytes: 123,
  437. },
  438. {
  439. Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
  440. SizeBytes: 456,
  441. },
  442. },
  443. },
  444. }
  445. kubelet.updateRuntimeUp()
  446. assert.NoError(t, kubelet.updateNodeStatus())
  447. actions := kubeClient.Actions()
  448. assert.Len(t, actions, 2)
  449. assert.IsType(t, core.PatchActionImpl{}, actions[1])
  450. patchAction := actions[1].(core.PatchActionImpl)
  451. updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch())
  452. require.NoError(t, err)
  453. for i, cond := range updatedNode.Status.Conditions {
  454. old := metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time
  455. // Expect LastHeartbeatTime to be updated to now, while LastTransitionTime remains the same.
  456. assert.NotEqual(t, old, cond.LastHeartbeatTime.Rfc3339Copy().UTC(), "LastHeartbeatTime for condition %v", cond.Type)
  457. assert.EqualValues(t, old, cond.LastTransitionTime.Rfc3339Copy().UTC(), "LastTransitionTime for condition %v", cond.Type)
  458. updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
  459. updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
  460. }
  461. // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
  462. assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
  463. "NodeReady should be the last condition")
  464. assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
  465. }
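  // TestUpdateExistingNodeStatusTimeout points the heartbeat client at a TCP listener that accepts
  // connections but never responds, and verifies that updateNodeStatus returns an error rather
  // than hanging, attempts at least nodeStatusUpdateRetry connections, and fires the
  // onRepeatedHeartbeatFailure callback between attempts.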
  466. func TestUpdateExistingNodeStatusTimeout(t *testing.T) {
  467. attempts := int64(0)
  468. failureCallbacks := int64(0)
  469. // set up a listener that hangs connections
  470. ln, err := net.Listen("tcp", "127.0.0.1:0")
  471. assert.NoError(t, err)
  472. defer ln.Close()
  473. go func() {
  474. // accept connections and just let them hang
  475. for {
  476. _, err := ln.Accept()
  477. if err != nil {
  478. t.Log(err)
  479. return
  480. }
  481. t.Log("accepted connection")
  482. atomic.AddInt64(&attempts, 1)
  483. }
  484. }()
  485. config := &rest.Config{
  486. Host: "http://" + ln.Addr().String(),
  487. QPS: -1,
  488. Timeout: time.Second,
  489. }
  490. assert.NoError(t, err)
  491. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  492. defer testKubelet.Cleanup()
  493. kubelet := testKubelet.kubelet
  494. kubelet.kubeClient = nil // ensure only the heartbeat client is used
  495. kubelet.heartbeatClient, err = clientset.NewForConfig(config)
  496. require.NoError(t, err)
  497. kubelet.onRepeatedHeartbeatFailure = func() {
  498. atomic.AddInt64(&failureCallbacks, 1)
  499. }
  500. kubelet.containerManager = &localCM{
  501. ContainerManager: cm.NewStubContainerManager(),
  502. allocatableReservation: v1.ResourceList{
  503. v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
  504. v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
  505. },
  506. capacity: v1.ResourceList{
  507. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  508. v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
  509. },
  510. }
  511. // should return an error, but not hang
  512. assert.Error(t, kubelet.updateNodeStatus())
  513. // should have attempted multiple times
  514. if actualAttempts := atomic.LoadInt64(&attempts); actualAttempts < nodeStatusUpdateRetry {
  515. t.Errorf("Expected at least %d attempts, got %d", nodeStatusUpdateRetry, actualAttempts)
  516. }
  517. // should have gotten multiple failure callbacks
  518. if actualFailureCallbacks := atomic.LoadInt64(&failureCallbacks); actualFailureCallbacks < (nodeStatusUpdateRetry - 1) {
  519. t.Errorf("Expected %d failure callbacks, got %d", (nodeStatusUpdateRetry - 1), actualFailureCallbacks)
  520. }
  521. }
  522. func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
  523. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  524. defer testKubelet.Cleanup()
  525. kubelet := testKubelet.kubelet
  526. kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
  527. kubelet.kubeClient = nil // ensure only the heartbeat client is used
  528. kubelet.containerManager = &localCM{
  529. ContainerManager: cm.NewStubContainerManager(),
  530. allocatableReservation: v1.ResourceList{
  531. v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
  532. v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
  533. v1.ResourceEphemeralStorage: *resource.NewQuantity(10e9, resource.BinarySI),
  534. },
  535. capacity: v1.ResourceList{
  536. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  537. v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
  538. v1.ResourceEphemeralStorage: *resource.NewQuantity(20e9, resource.BinarySI),
  539. },
  540. }
  541. // Since this test retroactively overrides the stub container manager,
  542. // we have to regenerate default status setters.
  543. kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
  544. clock := testKubelet.fakeClock
  545. kubeClient := testKubelet.fakeKubeClient
  546. existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
  547. kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
  548. machineInfo := &cadvisorapi.MachineInfo{
  549. MachineID: "123",
  550. SystemUUID: "abc",
  551. BootID: "1b3",
  552. NumCores: 2,
  553. MemoryCapacity: 10e9,
  554. }
  555. kubelet.machineInfo = machineInfo
  556. expectedNode := &v1.Node{
  557. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  558. Spec: v1.NodeSpec{},
  559. Status: v1.NodeStatus{
  560. Conditions: []v1.NodeCondition{
  561. {
  562. Type: v1.NodeMemoryPressure,
  563. Status: v1.ConditionFalse,
  564. Reason: "KubeletHasSufficientMemory",
  565. Message: fmt.Sprintf("kubelet has sufficient memory available"),
  566. LastHeartbeatTime: metav1.Time{},
  567. LastTransitionTime: metav1.Time{},
  568. },
  569. {
  570. Type: v1.NodeDiskPressure,
  571. Status: v1.ConditionFalse,
  572. Reason: "KubeletHasNoDiskPressure",
  573. Message: fmt.Sprintf("kubelet has no disk pressure"),
  574. LastHeartbeatTime: metav1.Time{},
  575. LastTransitionTime: metav1.Time{},
  576. },
  577. {
  578. Type: v1.NodePIDPressure,
  579. Status: v1.ConditionFalse,
  580. Reason: "KubeletHasSufficientPID",
  581. Message: fmt.Sprintf("kubelet has sufficient PID available"),
  582. LastHeartbeatTime: metav1.Time{},
  583. LastTransitionTime: metav1.Time{},
  584. },
  585. {}, // placeholder
  586. },
  587. NodeInfo: v1.NodeSystemInfo{
  588. MachineID: "123",
  589. SystemUUID: "abc",
  590. BootID: "1b3",
  591. KernelVersion: cadvisortest.FakeKernelVersion,
  592. OSImage: cadvisortest.FakeContainerOSVersion,
  593. OperatingSystem: goruntime.GOOS,
  594. Architecture: goruntime.GOARCH,
  595. ContainerRuntimeVersion: "test://1.5.0",
  596. KubeletVersion: version.Get().String(),
  597. KubeProxyVersion: version.Get().String(),
  598. },
  599. Capacity: v1.ResourceList{
  600. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  601. v1.ResourceMemory: *resource.NewQuantity(10e9, resource.BinarySI),
  602. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  603. v1.ResourceEphemeralStorage: *resource.NewQuantity(20e9, resource.BinarySI),
  604. },
  605. Allocatable: v1.ResourceList{
  606. v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
  607. v1.ResourceMemory: *resource.NewQuantity(9900e6, resource.BinarySI),
  608. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  609. v1.ResourceEphemeralStorage: *resource.NewQuantity(10e9, resource.BinarySI),
  610. },
  611. Addresses: []v1.NodeAddress{
  612. {Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  613. {Type: v1.NodeHostName, Address: testKubeletHostname},
  614. },
  615. Images: []v1.ContainerImage{
  616. {
  617. Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
  618. SizeBytes: 123,
  619. },
  620. {
  621. Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
  622. SizeBytes: 456,
  623. },
  624. },
  625. },
  626. }
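  // checkNodeStatus triggers a status update and verifies that the patched node matches
  // expectedNode with its NodeReady condition set to the given status and reason.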
  627. checkNodeStatus := func(status v1.ConditionStatus, reason string) {
  628. kubeClient.ClearActions()
  629. assert.NoError(t, kubelet.updateNodeStatus())
  630. actions := kubeClient.Actions()
  631. require.Len(t, actions, 2)
  632. require.True(t, actions[1].Matches("patch", "nodes"))
  633. require.Equal(t, actions[1].GetSubresource(), "status")
  634. updatedNode, err := kubeClient.CoreV1().Nodes().Get(context.TODO(), testKubeletHostname, metav1.GetOptions{})
  635. require.NoError(t, err, "can't apply node status patch")
  636. for i, cond := range updatedNode.Status.Conditions {
  637. assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
  638. assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
  639. updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
  640. updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
  641. }
  642. // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
  643. lastIndex := len(updatedNode.Status.Conditions) - 1
  644. assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[lastIndex].Type, "NodeReady should be the last condition")
  645. assert.NotEmpty(t, updatedNode.Status.Conditions[lastIndex].Message)
  646. updatedNode.Status.Conditions[lastIndex].Message = ""
  647. expectedNode.Status.Conditions[lastIndex] = v1.NodeCondition{
  648. Type: v1.NodeReady,
  649. Status: status,
  650. Reason: reason,
  651. LastHeartbeatTime: metav1.Time{},
  652. LastTransitionTime: metav1.Time{},
  653. }
  654. assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
  655. }
  656. // TODO(random-liu): Refactor the unit test to be table driven test.
  657. // Should report kubelet not ready if the runtime check is out of date
  658. clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
  659. kubelet.updateRuntimeUp()
  660. checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
  661. // Should report kubelet ready if the runtime check is updated
  662. clock.SetTime(time.Now())
  663. kubelet.updateRuntimeUp()
  664. checkNodeStatus(v1.ConditionTrue, "KubeletReady")
  665. // Should report kubelet not ready if the runtime check is out of date
  666. clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
  667. kubelet.updateRuntimeUp()
  668. checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
  669. // Should report kubelet not ready if the runtime check failed
  670. fakeRuntime := testKubelet.fakeRuntime
  671. // Inject error into fake runtime status check, node should be NotReady
  672. fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error")
  673. clock.SetTime(time.Now())
  674. kubelet.updateRuntimeUp()
  675. checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
  676. fakeRuntime.StatusErr = nil
  677. // Should report node not ready if runtime status is nil.
  678. fakeRuntime.RuntimeStatus = nil
  679. kubelet.updateRuntimeUp()
  680. checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
  681. // Should report node not ready if runtime status is empty.
  682. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{}
  683. kubelet.updateRuntimeUp()
  684. checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
  685. // Should report node not ready if RuntimeReady is false.
  686. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
  687. Conditions: []kubecontainer.RuntimeCondition{
  688. {Type: kubecontainer.RuntimeReady, Status: false},
  689. {Type: kubecontainer.NetworkReady, Status: true},
  690. },
  691. }
  692. kubelet.updateRuntimeUp()
  693. checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
  694. // Should report node ready if RuntimeReady is true.
  695. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
  696. Conditions: []kubecontainer.RuntimeCondition{
  697. {Type: kubecontainer.RuntimeReady, Status: true},
  698. {Type: kubecontainer.NetworkReady, Status: true},
  699. },
  700. }
  701. kubelet.updateRuntimeUp()
  702. checkNodeStatus(v1.ConditionTrue, "KubeletReady")
  703. // Should report node not ready if NetworkReady is false.
  704. fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
  705. Conditions: []kubecontainer.RuntimeCondition{
  706. {Type: kubecontainer.RuntimeReady, Status: true},
  707. {Type: kubecontainer.NetworkReady, Status: false},
  708. },
  709. }
  710. kubelet.updateRuntimeUp()
  711. checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
  712. }
  713. func TestUpdateNodeStatusError(t *testing.T) {
  714. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  715. defer testKubelet.Cleanup()
  716. kubelet := testKubelet.kubelet
  717. kubelet.kubeClient = nil // ensure only the heartbeat client is used
  718. // No matching node for the kubelet
  719. testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain
  720. assert.Error(t, kubelet.updateNodeStatus())
  721. assert.Len(t, testKubelet.fakeKubeClient.Actions(), nodeStatusUpdateRetry)
  722. }
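  // TestUpdateNodeStatusWithLease verifies the report-frequency behavior: a full status patch is
  // sent on the first update, whenever the status actually changes, or once
  // nodeStatusReportFrequency has elapsed; otherwise only a Get of the existing node is performed.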
  723. func TestUpdateNodeStatusWithLease(t *testing.T) {
  724. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  725. defer testKubelet.Cleanup()
  726. clock := testKubelet.fakeClock
  727. kubelet := testKubelet.kubelet
  728. kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
  729. kubelet.kubeClient = nil // ensure only the heartbeat client is used
  730. kubelet.containerManager = &localCM{
  731. ContainerManager: cm.NewStubContainerManager(),
  732. allocatableReservation: v1.ResourceList{
  733. v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
  734. v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
  735. },
  736. capacity: v1.ResourceList{
  737. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  738. v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
  739. v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  740. },
  741. }
  742. // Since this test retroactively overrides the stub container manager,
  743. // we have to regenerate default status setters.
  744. kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
  745. kubelet.nodeStatusReportFrequency = time.Minute
  746. kubeClient := testKubelet.fakeKubeClient
  747. existingNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
  748. kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*existingNode}}).ReactionChain
  749. machineInfo := &cadvisorapi.MachineInfo{
  750. MachineID: "123",
  751. SystemUUID: "abc",
  752. BootID: "1b3",
  753. NumCores: 2,
  754. MemoryCapacity: 20e9,
  755. }
  756. kubelet.machineInfo = machineInfo
  757. now := metav1.NewTime(clock.Now()).Rfc3339Copy()
  758. expectedNode := &v1.Node{
  759. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  760. Spec: v1.NodeSpec{},
  761. Status: v1.NodeStatus{
  762. Conditions: []v1.NodeCondition{
  763. {
  764. Type: v1.NodeMemoryPressure,
  765. Status: v1.ConditionFalse,
  766. Reason: "KubeletHasSufficientMemory",
  767. Message: fmt.Sprintf("kubelet has sufficient memory available"),
  768. LastHeartbeatTime: now,
  769. LastTransitionTime: now,
  770. },
  771. {
  772. Type: v1.NodeDiskPressure,
  773. Status: v1.ConditionFalse,
  774. Reason: "KubeletHasNoDiskPressure",
  775. Message: fmt.Sprintf("kubelet has no disk pressure"),
  776. LastHeartbeatTime: now,
  777. LastTransitionTime: now,
  778. },
  779. {
  780. Type: v1.NodePIDPressure,
  781. Status: v1.ConditionFalse,
  782. Reason: "KubeletHasSufficientPID",
  783. Message: fmt.Sprintf("kubelet has sufficient PID available"),
  784. LastHeartbeatTime: now,
  785. LastTransitionTime: now,
  786. },
  787. {
  788. Type: v1.NodeReady,
  789. Status: v1.ConditionTrue,
  790. Reason: "KubeletReady",
  791. Message: fmt.Sprintf("kubelet is posting ready status"),
  792. LastHeartbeatTime: now,
  793. LastTransitionTime: now,
  794. },
  795. },
  796. NodeInfo: v1.NodeSystemInfo{
  797. MachineID: "123",
  798. SystemUUID: "abc",
  799. BootID: "1b3",
  800. KernelVersion: cadvisortest.FakeKernelVersion,
  801. OSImage: cadvisortest.FakeContainerOSVersion,
  802. OperatingSystem: goruntime.GOOS,
  803. Architecture: goruntime.GOARCH,
  804. ContainerRuntimeVersion: "test://1.5.0",
  805. KubeletVersion: version.Get().String(),
  806. KubeProxyVersion: version.Get().String(),
  807. },
  808. Capacity: v1.ResourceList{
  809. v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
  810. v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
  811. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  812. v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  813. },
  814. Allocatable: v1.ResourceList{
  815. v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
  816. v1.ResourceMemory: *resource.NewQuantity(19900e6, resource.BinarySI),
  817. v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
  818. v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  819. },
  820. Addresses: []v1.NodeAddress{
  821. {Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  822. {Type: v1.NodeHostName, Address: testKubeletHostname},
  823. },
  824. // images are sorted from largest to smallest in node status.
  825. Images: []v1.ContainerImage{
  826. {
  827. Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
  828. SizeBytes: 123,
  829. },
  830. {
  831. Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
  832. SizeBytes: 456,
  833. },
  834. },
  835. },
  836. }
  837. // Update node status when node status is created.
  838. // Report node status.
  839. kubelet.updateRuntimeUp()
  840. assert.NoError(t, kubelet.updateNodeStatus())
  841. actions := kubeClient.Actions()
  842. assert.Len(t, actions, 2)
  843. assert.IsType(t, core.GetActionImpl{}, actions[0])
  844. assert.IsType(t, core.PatchActionImpl{}, actions[1])
  845. patchAction := actions[1].(core.PatchActionImpl)
  846. updatedNode, err := applyNodeStatusPatch(existingNode, patchAction.GetPatch())
  847. require.NoError(t, err)
  848. for _, cond := range updatedNode.Status.Conditions {
  849. cond.LastHeartbeatTime = cond.LastHeartbeatTime.Rfc3339Copy()
  850. cond.LastTransitionTime = cond.LastTransitionTime.Rfc3339Copy()
  851. }
  852. assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
  853. // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
  854. assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
  855. "NodeReady should be the last condition")
  856. // Update node status again when nothing is changed (except heartbeat time).
  857. // Report node status if it has exceeded the duration of nodeStatusReportFrequency.
  858. clock.Step(time.Minute)
  859. assert.NoError(t, kubelet.updateNodeStatus())
  860. // 2 more actions (There were 2 actions before).
  861. actions = kubeClient.Actions()
  862. assert.Len(t, actions, 4)
  863. assert.IsType(t, core.GetActionImpl{}, actions[2])
  864. assert.IsType(t, core.PatchActionImpl{}, actions[3])
  865. patchAction = actions[3].(core.PatchActionImpl)
  866. updatedNode, err = applyNodeStatusPatch(updatedNode, patchAction.GetPatch())
  867. require.NoError(t, err)
  868. for _, cond := range updatedNode.Status.Conditions {
  869. cond.LastHeartbeatTime = cond.LastHeartbeatTime.Rfc3339Copy()
  870. cond.LastTransitionTime = cond.LastTransitionTime.Rfc3339Copy()
  871. }
  872. // Expect LastHeartbeatTime updated, other things unchanged.
  873. for i, cond := range expectedNode.Status.Conditions {
  874. expectedNode.Status.Conditions[i].LastHeartbeatTime = metav1.NewTime(cond.LastHeartbeatTime.Time.Add(time.Minute)).Rfc3339Copy()
  875. }
  876. assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
  877. // Update node status again when nothing is changed (except heartbeat time).
  878. // Do not report node status if it is within the duration of nodeStatusReportFrequency.
  879. clock.Step(10 * time.Second)
  880. assert.NoError(t, kubelet.updateNodeStatus())
  881. // Only 1 more action (There were 4 actions before).
  882. actions = kubeClient.Actions()
  883. assert.Len(t, actions, 5)
  884. assert.IsType(t, core.GetActionImpl{}, actions[4])
  885. // Update node status again when something is changed.
  886. // Report node status even if it is still within the duration of nodeStatusReportFrequency.
  887. clock.Step(10 * time.Second)
  888. var newMemoryCapacity int64 = 40e9
  889. kubelet.machineInfo.MemoryCapacity = uint64(newMemoryCapacity)
  890. assert.NoError(t, kubelet.updateNodeStatus())
  891. // 2 more actions (There were 5 actions before).
  892. actions = kubeClient.Actions()
  893. assert.Len(t, actions, 7)
  894. assert.IsType(t, core.GetActionImpl{}, actions[5])
  895. assert.IsType(t, core.PatchActionImpl{}, actions[6])
  896. patchAction = actions[6].(core.PatchActionImpl)
  897. updatedNode, err = applyNodeStatusPatch(updatedNode, patchAction.GetPatch())
  898. require.NoError(t, err)
  899. memCapacity := updatedNode.Status.Capacity[v1.ResourceMemory]
  900. updatedMemoryCapacity, _ := (&memCapacity).AsInt64()
  901. assert.Equal(t, newMemoryCapacity, updatedMemoryCapacity, "Memory capacity")
  902. now = metav1.NewTime(clock.Now()).Rfc3339Copy()
  903. for _, cond := range updatedNode.Status.Conditions {
  904. // Expect LastHeartbeatTime updated, while LastTransitionTime unchanged.
  905. assert.Equal(t, now, cond.LastHeartbeatTime.Rfc3339Copy(),
  906. "LastHeartbeatTime for condition %v", cond.Type)
  907. assert.Equal(t, now, metav1.NewTime(cond.LastTransitionTime.Time.Add(time.Minute+20*time.Second)).Rfc3339Copy(),
  908. "LastTransitionTime for condition %v", cond.Type)
  909. }
  910. // Update node status when changing pod CIDR.
  911. // Report node status even if it is still within the duration of nodeStatusReportFrequency.
  912. clock.Step(10 * time.Second)
  913. assert.Equal(t, "", kubelet.runtimeState.podCIDR(), "Pod CIDR should be empty")
  914. podCIDRs := []string{"10.0.0.0/24", "2000::/10"}
  915. updatedNode.Spec.PodCIDR = podCIDRs[0]
  916. updatedNode.Spec.PodCIDRs = podCIDRs
  917. kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*updatedNode}}).ReactionChain
  918. assert.NoError(t, kubelet.updateNodeStatus())
  919. assert.Equal(t, strings.Join(podCIDRs, ","), kubelet.runtimeState.podCIDR(), "Pod CIDR should be updated now")
  920. // 2 more actions (There were 7 actions before).
  921. actions = kubeClient.Actions()
  922. assert.Len(t, actions, 9)
  923. assert.IsType(t, core.GetActionImpl{}, actions[7])
  924. assert.IsType(t, core.PatchActionImpl{}, actions[8])
  925. patchAction = actions[8].(core.PatchActionImpl)
  926. // Update node status when keeping the pod CIDR.
  927. // Do not report node status if it is within the duration of nodeStatusReportFrequency.
  928. clock.Step(10 * time.Second)
  929. assert.Equal(t, strings.Join(podCIDRs, ","), kubelet.runtimeState.podCIDR(), "Pod CIDR should already be updated")
  930. assert.NoError(t, kubelet.updateNodeStatus())
  931. // Only 1 more action (There were 9 actions before).
  932. actions = kubeClient.Actions()
  933. assert.Len(t, actions, 10)
  934. assert.IsType(t, core.GetActionImpl{}, actions[9])
  935. }
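  // TestUpdateNodeStatusAndVolumesInUseWithNodeLease exercises only the nodestatus.VolumesInUse
  // setter and verifies both the VolumesInUse list reported on the node and the volumes marked as
  // reported-in-use in the fake volume manager.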
  936. func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
  937. cases := []struct {
  938. desc string
  939. existingVolumes []v1.UniqueVolumeName // volumes to initially populate volumeManager
  940. existingNode *v1.Node // existing node object
  941. expectedNode *v1.Node // new node object after patch
  942. expectedReportedInUse []v1.UniqueVolumeName // expected volumes reported in use in volumeManager
  943. }{
  944. {
  945. desc: "no volumes and no update",
  946. existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
  947. },
  948. {
  949. desc: "volumes inuse on node and volumeManager",
  950. existingVolumes: []v1.UniqueVolumeName{"vol1"},
  951. existingNode: &v1.Node{
  952. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  953. Status: v1.NodeStatus{
  954. VolumesInUse: []v1.UniqueVolumeName{"vol1"},
  955. },
  956. },
  957. expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
  958. },
  959. {
  960. desc: "volumes inuse on node but not in volumeManager",
  961. existingNode: &v1.Node{
  962. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  963. Status: v1.NodeStatus{
  964. VolumesInUse: []v1.UniqueVolumeName{"vol1"},
  965. },
  966. },
  967. expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
  968. },
  969. {
  970. desc: "volumes inuse in volumeManager but not on node",
  971. existingVolumes: []v1.UniqueVolumeName{"vol1"},
  972. existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
  973. expectedNode: &v1.Node{
  974. ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  975. Status: v1.NodeStatus{
  976. VolumesInUse: []v1.UniqueVolumeName{"vol1"},
  977. },
  978. },
  979. expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
  980. },
  981. }
  982. for _, tc := range cases {
  983. t.Run(tc.desc, func(t *testing.T) {
  984. // Setup
  985. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  986. defer testKubelet.Cleanup()
  987. kubelet := testKubelet.kubelet
  988. kubelet.kubeClient = nil // ensure only the heartbeat client is used
  989. kubelet.containerManager = &localCM{ContainerManager: cm.NewStubContainerManager()}
  990. kubelet.lastStatusReportTime = kubelet.clock.Now()
  991. kubelet.nodeStatusReportFrequency = time.Hour
  992. kubelet.machineInfo = &cadvisorapi.MachineInfo{}
  993. // override test volumeManager
  994. fakeVolumeManager := kubeletvolume.NewFakeVolumeManager(tc.existingVolumes)
  995. kubelet.volumeManager = fakeVolumeManager
  996. // Only test VolumesInUse setter
  997. kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
  998. nodestatus.VolumesInUse(kubelet.volumeManager.ReconcilerStatesHasBeenSynced,
  999. kubelet.volumeManager.GetVolumesInUse),
  1000. }
  1001. kubeClient := testKubelet.fakeKubeClient
  1002. kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*tc.existingNode}}).ReactionChain
  1003. // Execute
  1004. assert.NoError(t, kubelet.updateNodeStatus())
  1005. // Validate
  1006. actions := kubeClient.Actions()
  1007. if tc.expectedNode != nil {
  1008. assert.Len(t, actions, 2)
  1009. assert.IsType(t, core.GetActionImpl{}, actions[0])
  1010. assert.IsType(t, core.PatchActionImpl{}, actions[1])
  1011. patchAction := actions[1].(core.PatchActionImpl)
  1012. updatedNode, err := applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
  1013. require.NoError(t, err)
  1014. assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedNode, updatedNode), "%s", diff.ObjectDiff(tc.expectedNode, updatedNode))
  1015. } else {
  1016. assert.Len(t, actions, 1)
  1017. assert.IsType(t, core.GetActionImpl{}, actions[0])
  1018. }
  1019. reportedInUse := fakeVolumeManager.GetVolumesReportedInUse()
  1020. assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedReportedInUse, reportedInUse), "%s", diff.ObjectDiff(tc.expectedReportedInUse, reportedInUse))
  1021. })
  1022. }
  1023. }
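  // TestRegisterWithApiServer simulates an AlreadyExists error on node create and returns a
  // matching node on get, then verifies that registerWithAPIServer completes within
  // wait.ForeverTestTimeout instead of retrying forever.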
  1024. func TestRegisterWithApiServer(t *testing.T) {
  1025. testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1026. defer testKubelet.Cleanup()
  1027. kubelet := testKubelet.kubelet
  1028. kubeClient := testKubelet.fakeKubeClient
  1029. kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1030. // Return an error on create.
  1031. return true, &v1.Node{}, &apierrors.StatusError{
  1032. ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
  1033. }
  1034. })
  1035. kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1036. // Return an existing (matching) node on get.
  1037. return true, &v1.Node{
  1038. ObjectMeta: metav1.ObjectMeta{
  1039. Name: testKubeletHostname,
  1040. Labels: map[string]string{
  1041. v1.LabelHostname: testKubeletHostname,
  1042. v1.LabelOSStable: goruntime.GOOS,
  1043. v1.LabelArchStable: goruntime.GOARCH,
  1044. kubeletapis.LabelOS: goruntime.GOOS,
  1045. kubeletapis.LabelArch: goruntime.GOARCH,
  1046. },
  1047. },
  1048. }, nil
  1049. })
  1050. addNotImplatedReaction(kubeClient)
  1051. machineInfo := &cadvisorapi.MachineInfo{
  1052. MachineID: "123",
  1053. SystemUUID: "abc",
  1054. BootID: "1b3",
  1055. NumCores: 2,
  1056. MemoryCapacity: 1024,
  1057. }
  1058. kubelet.machineInfo = machineInfo
  1059. done := make(chan struct{})
  1060. go func() {
  1061. kubelet.registerWithAPIServer()
  1062. done <- struct{}{}
  1063. }()
  1064. select {
  1065. case <-time.After(wait.ForeverTestTimeout):
  1066. assert.Fail(t, "timed out waiting for registration")
  1067. case <-done:
  1068. return
  1069. }
  1070. }
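  // TestTryRegisterWithApiServer covers registration when the node may already exist: each case
  // injects create/get/patch errors and checks the expected result, the number of client actions,
  // and (via savedNodeCMAD) whether the controller-managed attach-detach annotation on the
  // existing node is reconciled.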
func TestTryRegisterWithApiServer(t *testing.T) {
	alreadyExists := &apierrors.StatusError{
		ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
	}

	conflict := &apierrors.StatusError{
		ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict},
	}

	newNode := func(cmad bool) *v1.Node {
		node := &v1.Node{
			ObjectMeta: metav1.ObjectMeta{
				Labels: map[string]string{
					v1.LabelHostname:      testKubeletHostname,
					v1.LabelOSStable:      goruntime.GOOS,
					v1.LabelArchStable:    goruntime.GOARCH,
					kubeletapis.LabelOS:   goruntime.GOOS,
					kubeletapis.LabelArch: goruntime.GOARCH,
				},
			},
		}

		if cmad {
			node.Annotations = make(map[string]string)
			node.Annotations[util.ControllerManagedAttachAnnotation] = "true"
		}

		return node
	}

	cases := []struct {
		name            string
		newNode         *v1.Node
		existingNode    *v1.Node
		createError     error
		getError        error
		patchError      error
		deleteError     error
		expectedResult  bool
		expectedActions int
		testSavedNode   bool
		savedNodeIndex  int
		savedNodeCMAD   bool
	}{
		{
			name:            "success case - new node",
			newNode:         &v1.Node{},
			expectedResult:  true,
			expectedActions: 1,
		},
		{
			name:            "success case - existing node - no change in CMAD",
			newNode:         newNode(true),
			createError:     alreadyExists,
			existingNode:    newNode(true),
			expectedResult:  true,
			expectedActions: 2,
		},
		{
			name:            "success case - existing node - CMAD disabled",
			newNode:         newNode(false),
			createError:     alreadyExists,
			existingNode:    newNode(true),
			expectedResult:  true,
			expectedActions: 3,
			testSavedNode:   true,
			savedNodeIndex:  2,
			savedNodeCMAD:   false,
		},
		{
			name:            "success case - existing node - CMAD enabled",
			newNode:         newNode(true),
			createError:     alreadyExists,
			existingNode:    newNode(false),
			expectedResult:  true,
			expectedActions: 3,
			testSavedNode:   true,
			savedNodeIndex:  2,
			savedNodeCMAD:   true,
		},
		{
			name:            "create failed",
			newNode:         newNode(false),
			createError:     conflict,
			expectedResult:  false,
			expectedActions: 1,
		},
		{
			name:            "get existing node failed",
			newNode:         newNode(false),
			createError:     alreadyExists,
			getError:        conflict,
			expectedResult:  false,
			expectedActions: 2,
		},
		{
			name:            "update existing node failed",
			newNode:         newNode(false),
			createError:     alreadyExists,
			existingNode:    newNode(true),
			patchError:      conflict,
			expectedResult:  false,
			expectedActions: 3,
		},
	}

	for _, tc := range cases {
		testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled is a don't-care for this test */)
		defer testKubelet.Cleanup()
		kubelet := testKubelet.kubelet
		kubeClient := testKubelet.fakeKubeClient

		kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			return true, nil, tc.createError
		})
		kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			// Return an existing (matching) node on get.
			return true, tc.existingNode, tc.getError
		})
		kubeClient.AddReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			if action.GetSubresource() == "status" {
				return true, nil, tc.patchError
			}
			return notImplemented(action)
		})
		kubeClient.AddReactor("delete", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			return true, nil, tc.deleteError
		})
		addNotImplatedReaction(kubeClient)

		result := kubelet.tryRegisterWithAPIServer(tc.newNode)
		require.Equal(t, tc.expectedResult, result, "test [%s]", tc.name)

		actions := kubeClient.Actions()
		assert.Len(t, actions, tc.expectedActions, "test [%s]", tc.name)

		if tc.testSavedNode {
			var savedNode *v1.Node

			t.Logf("actions: %v: %+v", len(actions), actions)
			action := actions[tc.savedNodeIndex]
			if action.GetVerb() == "create" {
				createAction := action.(core.CreateAction)
				obj := createAction.GetObject()
				require.IsType(t, &v1.Node{}, obj)
				savedNode = obj.(*v1.Node)
			} else if action.GetVerb() == "patch" {
				patchAction := action.(core.PatchActionImpl)
				var err error
				savedNode, err = applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
				require.NoError(t, err)
			}

			actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[util.ControllerManagedAttachAnnotation])
			assert.Equal(t, tc.savedNodeCMAD, actualCMAD, "test [%s]", tc.name)
		}
	}
}
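
// TestUpdateNewNodeStatusTooLargeReservation reserves more CPU than the node's
// capacity and checks that the reported allocatable CPU is clamped to zero
// rather than going negative, while the other allocatable resources are reduced
// by their reservations as usual.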
func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
	const nodeStatusMaxImages = 5

	// generate one more in inputImageList than we configure the Kubelet to report
	inputImageList, _ := generateTestingImageLists(nodeStatusMaxImages+1, nodeStatusMaxImages)
	testKubelet := newTestKubeletWithImageList(
		t, inputImageList, false /* controllerAttachDetachEnabled */, true /* initFakeVolumePlugin */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.nodeStatusMaxImages = nodeStatusMaxImages
	kubelet.kubeClient = nil // ensure only the heartbeat client is used
	kubelet.containerManager = &localCM{
		ContainerManager: cm.NewStubContainerManager(),
		allocatableReservation: v1.ResourceList{
			v1.ResourceCPU:              *resource.NewMilliQuantity(40000, resource.DecimalSI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(1000, resource.BinarySI),
		},
		capacity: v1.ResourceList{
			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
			v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
		},
	}
	// Since this test retroactively overrides the stub container manager,
	// we have to regenerate default status setters.
	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()

	kubeClient := testKubelet.fakeKubeClient
	existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 10e9, // 10G
	}
	kubelet.machineInfo = machineInfo

	expectedNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
		Spec:       v1.NodeSpec{},
		Status: v1.NodeStatus{
			Capacity: v1.ResourceList{
				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:              *resource.NewMilliQuantity(0, resource.DecimalSI),
				v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
				v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
			},
		},
	}

	kubelet.updateRuntimeUp()
	assert.NoError(t, kubelet.updateNodeStatus())
	actions := kubeClient.Actions()
	require.Len(t, actions, 2)
	require.True(t, actions[1].Matches("patch", "nodes"))
	require.Equal(t, "status", actions[1].GetSubresource())
	updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
	assert.NoError(t, err)
	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable), "%s", diff.ObjectDiff(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable))
}
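
// TestUpdateDefaultLabels exercises updateDefaultLabels: kubelet-owned default
// labels (hostname, zone/region, instance type, OS/arch) are created or
// refreshed on the existing node, stable label variants are backfilled, and
// labels the kubelet does not own (e.g. "please-persist") are left untouched.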
func TestUpdateDefaultLabels(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.kubelet.kubeClient = nil // ensure only the heartbeat client is used

	cases := []struct {
		name         string
		initialNode  *v1.Node
		existingNode *v1.Node
		needsUpdate  bool
		finalLabels  map[string]string
	}{
		{
			name: "make sure default labels exist",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:                "new-hostname",
						v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
						v1.LabelZoneRegionStable:        "new-zone-region",
						v1.LabelZoneFailureDomain:       "new-zone-failure-domain",
						v1.LabelZoneRegion:              "new-zone-region",
						v1.LabelInstanceTypeStable:      "new-instance-type",
						v1.LabelInstanceType:            "new-instance-type",
						kubeletapis.LabelOS:             "new-os",
						kubeletapis.LabelArch:           "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{},
				},
			},
			needsUpdate: true,
			finalLabels: map[string]string{
				v1.LabelHostname:                "new-hostname",
				v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
				v1.LabelZoneRegionStable:        "new-zone-region",
				v1.LabelZoneFailureDomain:       "new-zone-failure-domain",
				v1.LabelZoneRegion:              "new-zone-region",
				v1.LabelInstanceTypeStable:      "new-instance-type",
				v1.LabelInstanceType:            "new-instance-type",
				kubeletapis.LabelOS:             "new-os",
				kubeletapis.LabelArch:           "new-arch",
			},
		},
		{
			name: "make sure default labels are up to date",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:                "new-hostname",
						v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
						v1.LabelZoneRegionStable:        "new-zone-region",
						v1.LabelZoneFailureDomain:       "new-zone-failure-domain",
						v1.LabelZoneRegion:              "new-zone-region",
						v1.LabelInstanceTypeStable:      "new-instance-type",
						v1.LabelInstanceType:            "new-instance-type",
						kubeletapis.LabelOS:             "new-os",
						kubeletapis.LabelArch:           "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:                "old-hostname",
						v1.LabelZoneFailureDomainStable: "old-zone-failure-domain",
						v1.LabelZoneRegionStable:        "old-zone-region",
						v1.LabelZoneFailureDomain:       "old-zone-failure-domain",
						v1.LabelZoneRegion:              "old-zone-region",
						v1.LabelInstanceTypeStable:      "old-instance-type",
						v1.LabelInstanceType:            "old-instance-type",
						kubeletapis.LabelOS:             "old-os",
						kubeletapis.LabelArch:           "old-arch",
					},
				},
			},
			needsUpdate: true,
			finalLabels: map[string]string{
				v1.LabelHostname:                "new-hostname",
				v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
				v1.LabelZoneRegionStable:        "new-zone-region",
				v1.LabelZoneFailureDomain:       "new-zone-failure-domain",
				v1.LabelZoneRegion:              "new-zone-region",
				v1.LabelInstanceTypeStable:      "new-instance-type",
				v1.LabelInstanceType:            "new-instance-type",
				kubeletapis.LabelOS:             "new-os",
				kubeletapis.LabelArch:           "new-arch",
			},
		},
		{
			name: "make sure existing labels do not get deleted",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:                "new-hostname",
						v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
						v1.LabelZoneRegionStable:        "new-zone-region",
						v1.LabelZoneFailureDomain:       "new-zone-failure-domain",
						v1.LabelZoneRegion:              "new-zone-region",
						v1.LabelInstanceTypeStable:      "new-instance-type",
						v1.LabelInstanceType:            "new-instance-type",
						kubeletapis.LabelOS:             "new-os",
						kubeletapis.LabelArch:           "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:                "new-hostname",
						v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
						v1.LabelZoneRegionStable:        "new-zone-region",
						v1.LabelZoneFailureDomain:       "new-zone-failure-domain",
						v1.LabelZoneRegion:              "new-zone-region",
						v1.LabelInstanceTypeStable:      "new-instance-type",
						v1.LabelInstanceType:            "new-instance-type",
						kubeletapis.LabelOS:             "new-os",
						kubeletapis.LabelArch:           "new-arch",
						"please-persist":                "foo",
					},
				},
			},
			needsUpdate: false,
			finalLabels: map[string]string{
				v1.LabelHostname:                "new-hostname",
				v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
				v1.LabelZoneRegionStable:        "new-zone-region",
				v1.LabelZoneFailureDomain:       "new-zone-failure-domain",
				v1.LabelZoneRegion:              "new-zone-region",
				v1.LabelInstanceTypeStable:      "new-instance-type",
				v1.LabelInstanceType:            "new-instance-type",
				kubeletapis.LabelOS:             "new-os",
				kubeletapis.LabelArch:           "new-arch",
				"please-persist":                "foo",
			},
		},
		{
			name: "make sure existing labels do not get deleted when initial node has no opinion",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:                "new-hostname",
						v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
						v1.LabelZoneRegionStable:        "new-zone-region",
						v1.LabelZoneFailureDomain:       "new-zone-failure-domain",
						v1.LabelZoneRegion:              "new-zone-region",
						v1.LabelInstanceTypeStable:      "new-instance-type",
						v1.LabelInstanceType:            "new-instance-type",
						kubeletapis.LabelOS:             "new-os",
						kubeletapis.LabelArch:           "new-arch",
						"please-persist":                "foo",
					},
				},
			},
			needsUpdate: false,
			finalLabels: map[string]string{
				v1.LabelHostname:                "new-hostname",
				v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
				v1.LabelZoneRegionStable:        "new-zone-region",
				v1.LabelZoneFailureDomain:       "new-zone-failure-domain",
				v1.LabelZoneRegion:              "new-zone-region",
				v1.LabelInstanceTypeStable:      "new-instance-type",
				v1.LabelInstanceType:            "new-instance-type",
				kubeletapis.LabelOS:             "new-os",
				kubeletapis.LabelArch:           "new-arch",
				"please-persist":                "foo",
			},
		},
		{
			name: "no update needed",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:                "new-hostname",
						v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
						v1.LabelZoneRegionStable:        "new-zone-region",
						v1.LabelZoneFailureDomain:       "new-zone-failure-domain",
						v1.LabelZoneRegion:              "new-zone-region",
						v1.LabelInstanceTypeStable:      "new-instance-type",
						v1.LabelInstanceType:            "new-instance-type",
						kubeletapis.LabelOS:             "new-os",
						kubeletapis.LabelArch:           "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:                "new-hostname",
						v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
						v1.LabelZoneRegionStable:        "new-zone-region",
						v1.LabelZoneFailureDomain:       "new-zone-failure-domain",
						v1.LabelZoneRegion:              "new-zone-region",
						v1.LabelInstanceTypeStable:      "new-instance-type",
						v1.LabelInstanceType:            "new-instance-type",
						kubeletapis.LabelOS:             "new-os",
						kubeletapis.LabelArch:           "new-arch",
					},
				},
			},
			needsUpdate: false,
			finalLabels: map[string]string{
				v1.LabelHostname:                "new-hostname",
				v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
				v1.LabelZoneRegionStable:        "new-zone-region",
				v1.LabelZoneFailureDomain:       "new-zone-failure-domain",
				v1.LabelZoneRegion:              "new-zone-region",
				v1.LabelInstanceTypeStable:      "new-instance-type",
				v1.LabelInstanceType:            "new-instance-type",
				kubeletapis.LabelOS:             "new-os",
				kubeletapis.LabelArch:           "new-arch",
			},
		},
		{
			name: "not panic when existing node has nil labels",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:                "new-hostname",
						v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
						v1.LabelZoneRegionStable:        "new-zone-region",
						v1.LabelZoneFailureDomain:       "new-zone-failure-domain",
						v1.LabelZoneRegion:              "new-zone-region",
						v1.LabelInstanceTypeStable:      "new-instance-type",
						v1.LabelInstanceType:            "new-instance-type",
						kubeletapis.LabelOS:             "new-os",
						kubeletapis.LabelArch:           "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{},
			},
			needsUpdate: true,
			finalLabels: map[string]string{
				v1.LabelHostname:                "new-hostname",
				v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
				v1.LabelZoneRegionStable:        "new-zone-region",
				v1.LabelZoneFailureDomain:       "new-zone-failure-domain",
				v1.LabelZoneRegion:              "new-zone-region",
				v1.LabelInstanceTypeStable:      "new-instance-type",
				v1.LabelInstanceType:            "new-instance-type",
				kubeletapis.LabelOS:             "new-os",
				kubeletapis.LabelArch:           "new-arch",
			},
		},
		{
			name: "backfill required for new stable labels for os/arch/zones/regions/instance-type",
			initialNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:                "new-hostname",
						v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
						v1.LabelZoneRegionStable:        "new-zone-region",
						v1.LabelZoneFailureDomain:       "new-zone-failure-domain",
						v1.LabelZoneRegion:              "new-zone-region",
						v1.LabelInstanceTypeStable:      "new-instance-type",
						v1.LabelInstanceType:            "new-instance-type",
						kubeletapis.LabelOS:             "new-os",
						kubeletapis.LabelArch:           "new-arch",
						v1.LabelOSStable:                "new-os",
						v1.LabelArchStable:              "new-arch",
					},
				},
			},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1.LabelHostname:          "new-hostname",
						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
						v1.LabelZoneRegion:        "new-zone-region",
						v1.LabelInstanceType:      "new-instance-type",
						kubeletapis.LabelOS:       "new-os",
						kubeletapis.LabelArch:     "new-arch",
					},
				},
			},
			needsUpdate: true,
			finalLabels: map[string]string{
				v1.LabelHostname:                "new-hostname",
				v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
				v1.LabelZoneRegionStable:        "new-zone-region",
				v1.LabelZoneFailureDomain:       "new-zone-failure-domain",
				v1.LabelZoneRegion:              "new-zone-region",
				v1.LabelInstanceTypeStable:      "new-instance-type",
				v1.LabelInstanceType:            "new-instance-type",
				kubeletapis.LabelOS:             "new-os",
				kubeletapis.LabelArch:           "new-arch",
				v1.LabelOSStable:                "new-os",
				v1.LabelArchStable:              "new-arch",
			},
		},
	}

	for _, tc := range cases {
		defer testKubelet.Cleanup()
		kubelet := testKubelet.kubelet

		needsUpdate := kubelet.updateDefaultLabels(tc.initialNode, tc.existingNode)
		assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
		assert.Equal(t, tc.finalLabels, tc.existingNode.Labels, tc.name)
	}
}
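
// TestReconcileExtendedResource checks reconcileExtendedResource: a node
// without extended resources needs no update, while extended resource capacity
// and allocatable are expected to be zeroed (and an update reported) when the
// container manager indicates they should be reset.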
func TestReconcileExtendedResource(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.kubelet.kubeClient = nil // ensure only the heartbeat client is used
	testKubelet.kubelet.containerManager = cm.NewStubContainerManagerWithExtendedResource(true /* shouldResetExtendedResourceCapacity*/)
	testKubeletNoReset := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	extendedResourceName1 := v1.ResourceName("test.com/resource1")
	extendedResourceName2 := v1.ResourceName("test.com/resource2")

	cases := []struct {
		name         string
		testKubelet  *TestKubelet
		existingNode *v1.Node
		expectedNode *v1.Node
		needsUpdate  bool
	}{
		{
			name:        "no update needed without extended resource",
			testKubelet: testKubelet,
			existingNode: &v1.Node{
				Status: v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
				},
			},
			expectedNode: &v1.Node{
				Status: v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
				},
			},
			needsUpdate: false,
		},
		{
			name:        "extended resource capacity is not zeroed due to presence of checkpoint file",
			testKubelet: testKubelet,
			existingNode: &v1.Node{
				Status: v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
				},
			},
			expectedNode: &v1.Node{
				Status: v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
					},
				},
			},
			needsUpdate: false,
		},
		{
			name:        "extended resource capacity is zeroed",
			testKubelet: testKubeletNoReset,
			existingNode: &v1.Node{
				Status: v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
					},
				},
			},
			expectedNode: &v1.Node{
				Status: v1.NodeStatus{
					Capacity: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
						extendedResourceName1:       *resource.NewQuantity(int64(0), resource.DecimalSI),
						extendedResourceName2:       *resource.NewQuantity(int64(0), resource.DecimalSI),
					},
					Allocatable: v1.ResourceList{
						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
						extendedResourceName1:       *resource.NewQuantity(int64(0), resource.DecimalSI),
						extendedResourceName2:       *resource.NewQuantity(int64(0), resource.DecimalSI),
					},
				},
			},
			needsUpdate: true,
		},
	}

	for _, tc := range cases {
		defer testKubelet.Cleanup()
		kubelet := testKubelet.kubelet
		initialNode := &v1.Node{}

		needsUpdate := kubelet.reconcileExtendedResource(initialNode, tc.existingNode)
		assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
		assert.Equal(t, tc.expectedNode, tc.existingNode, tc.name)
	}
}
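
// TestValidateNodeIPParam feeds validateNodeIP a set of addresses that must be
// rejected (empty, loopback, multicast, link-local, unspecified, or not assigned
// to the host) and, for each usable address found on the local interfaces, adds
// a case that must be accepted.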
func TestValidateNodeIPParam(t *testing.T) {
	type test struct {
		nodeIP   string
		success  bool
		testName string
	}
	tests := []test{
		{
			nodeIP:   "",
			success:  false,
			testName: "IP not set",
		},
		{
			nodeIP:   "127.0.0.1",
			success:  false,
			testName: "IPv4 loopback address",
		},
		{
			nodeIP:   "::1",
			success:  false,
			testName: "IPv6 loopback address",
		},
		{
			nodeIP:   "224.0.0.1",
			success:  false,
			testName: "multicast IPv4 address",
		},
		{
			nodeIP:   "ff00::1",
			success:  false,
			testName: "multicast IPv6 address",
		},
		{
			nodeIP:   "169.254.0.1",
			success:  false,
			testName: "IPv4 link-local unicast address",
		},
		{
			nodeIP:   "fe80::0202:b3ff:fe1e:8329",
			success:  false,
			testName: "IPv6 link-local unicast address",
		},
		{
			nodeIP:   "0.0.0.0",
			success:  false,
			testName: "Unspecified IPv4 address",
		},
		{
			nodeIP:   "::",
			success:  false,
			testName: "Unspecified IPv6 address",
		},
		{
			nodeIP:   "1.2.3.4",
			success:  false,
			testName: "IPv4 address that doesn't belong to host",
		},
	}
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		assert.Error(t, err, "Unable to obtain a list of the node's unicast interface addresses.")
	}
	for _, addr := range addrs {
		var ip net.IP
		switch v := addr.(type) {
		case *net.IPNet:
			ip = v.IP
		case *net.IPAddr:
			ip = v.IP
		}
		if ip.IsLoopback() || ip.IsLinkLocalUnicast() {
			// Skip addresses that validateNodeIP is expected to reject.
			continue
		}
		successTest := test{
			nodeIP:   ip.String(),
			success:  true,
			testName: fmt.Sprintf("Success test case for address %s", ip.String()),
		}
		tests = append(tests, successTest)
	}
	for _, test := range tests {
		err := validateNodeIP(net.ParseIP(test.nodeIP))
		if test.success {
			assert.NoError(t, err, "test %s", test.testName)
		} else {
			assert.Error(t, err, "test %s", test.testName)
		}
	}
}
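
// TestRegisterWithApiServerWithTaint registers an unschedulable kubelet
// (registerSchedulable=false) and verifies that the created Node carries the
// TaintNodeUnschedulable taint with the NoSchedule effect.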
func TestRegisterWithApiServerWithTaint(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient

	machineInfo := &cadvisorapi.MachineInfo{
		MachineID:      "123",
		SystemUUID:     "abc",
		BootID:         "1b3",
		NumCores:       2,
		MemoryCapacity: 1024,
	}
	kubelet.machineInfo = machineInfo

	var gotNode runtime.Object
	kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
		createAction := action.(core.CreateAction)
		gotNode = createAction.GetObject()
		return true, gotNode, nil
	})

	addNotImplatedReaction(kubeClient)

	// Make node to be unschedulable.
	kubelet.registerSchedulable = false

	// Reset kubelet status for each test.
	kubelet.registrationCompleted = false

	// Register node to apiserver.
	kubelet.registerWithAPIServer()

	// Check the unschedulable taint.
	got := gotNode.(*v1.Node)
	unschedulableTaint := &v1.Taint{
		Key:    v1.TaintNodeUnschedulable,
		Effect: v1.TaintEffectNoSchedule,
	}

	require.True(t,
		taintutil.TaintExists(got.Spec.Taints, unschedulableTaint),
		"test unschedulable taint for TaintNodesByCondition")
}
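
// TestNodeStatusHasChanged verifies nodeStatusHasChanged's change detection:
// differences in heartbeat time or condition ordering are ignored, while
// changes in condition status, transition time, condition count, or phase are
// reported. It also checks that the inputs are not mutated by the comparison.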
func TestNodeStatusHasChanged(t *testing.T) {
	fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
	fakeFuture := metav1.Time{Time: fakeNow.Time.Add(time.Minute)}
	readyCondition := v1.NodeCondition{
		Type:               v1.NodeReady,
		Status:             v1.ConditionTrue,
		LastHeartbeatTime:  fakeNow,
		LastTransitionTime: fakeNow,
	}
	readyConditionAtDiffHeartbeatTime := v1.NodeCondition{
		Type:               v1.NodeReady,
		Status:             v1.ConditionTrue,
		LastHeartbeatTime:  fakeFuture,
		LastTransitionTime: fakeNow,
	}
	readyConditionAtDiffTransitionTime := v1.NodeCondition{
		Type:               v1.NodeReady,
		Status:             v1.ConditionTrue,
		LastHeartbeatTime:  fakeFuture,
		LastTransitionTime: fakeFuture,
	}
	notReadyCondition := v1.NodeCondition{
		Type:               v1.NodeReady,
		Status:             v1.ConditionFalse,
		LastHeartbeatTime:  fakeNow,
		LastTransitionTime: fakeNow,
	}
	memoryPressureCondition := v1.NodeCondition{
		Type:               v1.NodeMemoryPressure,
		Status:             v1.ConditionFalse,
		LastHeartbeatTime:  fakeNow,
		LastTransitionTime: fakeNow,
	}
	testcases := []struct {
		name           string
		originalStatus *v1.NodeStatus
		status         *v1.NodeStatus
		expectChange   bool
	}{
		{
			name:           "Node status does not change with nil status.",
			originalStatus: nil,
			status:         nil,
			expectChange:   false,
		},
		{
			name:           "Node status does not change with default status.",
			originalStatus: &v1.NodeStatus{},
			status:         &v1.NodeStatus{},
			expectChange:   false,
		},
		{
			name:           "Node status changes with nil and default status.",
			originalStatus: nil,
			status:         &v1.NodeStatus{},
			expectChange:   true,
		},
		{
			name:           "Node status changes with nil and status.",
			originalStatus: nil,
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			expectChange: true,
		},
		{
			name:           "Node status does not change with empty conditions.",
			originalStatus: &v1.NodeStatus{Conditions: []v1.NodeCondition{}},
			status:         &v1.NodeStatus{Conditions: []v1.NodeCondition{}},
			expectChange:   false,
		},
		{
			name: "Node status does not change",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			expectChange: false,
		},
		{
			name: "Node status does not change even if heartbeat time changes.",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyConditionAtDiffHeartbeatTime, memoryPressureCondition},
			},
			expectChange: false,
		},
		{
			name: "Node status does not change even if the orders of conditions are different.",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{memoryPressureCondition, readyConditionAtDiffHeartbeatTime},
			},
			expectChange: false,
		},
		{
			name: "Node status changes if condition status differs.",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{notReadyCondition, memoryPressureCondition},
			},
			expectChange: true,
		},
		{
			name: "Node status changes if transition time changes.",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyConditionAtDiffTransitionTime, memoryPressureCondition},
			},
			expectChange: true,
		},
		{
			name: "Node status changes with different number of conditions.",
			originalStatus: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition},
			},
			status: &v1.NodeStatus{
				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
			},
			expectChange: true,
		},
		{
			name: "Node status changes with different phase.",
			originalStatus: &v1.NodeStatus{
				Phase:      v1.NodePending,
				Conditions: []v1.NodeCondition{readyCondition},
			},
			status: &v1.NodeStatus{
				Phase:      v1.NodeRunning,
				Conditions: []v1.NodeCondition{readyCondition},
			},
			expectChange: true,
		},
	}
	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			originalStatusCopy := tc.originalStatus.DeepCopy()
			statusCopy := tc.status.DeepCopy()
			changed := nodeStatusHasChanged(tc.originalStatus, tc.status)
			assert.Equal(t, tc.expectChange, changed, "Expect node status change to be %t, but got %t.", tc.expectChange, changed)
			assert.True(t, apiequality.Semantic.DeepEqual(originalStatusCopy, tc.originalStatus), "%s", diff.ObjectDiff(originalStatusCopy, tc.originalStatus))
			assert.True(t, apiequality.Semantic.DeepEqual(statusCopy, tc.status), "%s", diff.ObjectDiff(statusCopy, tc.status))
		})
	}
}
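
// TestUpdateNodeAddresses drives updateNodeStatus with a single status setter
// that rewrites Status.Addresses, and verifies that the patched node reflects
// each before/after address transition, including reordered and removed entries.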
func TestUpdateNodeAddresses(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient

	existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain

	tests := []struct {
		Name   string
		Before []v1.NodeAddress
		After  []v1.NodeAddress
	}{
		{
			Name:   "nil to populated",
			Before: nil,
			After: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
		},
		{
			Name:   "empty to populated",
			Before: []v1.NodeAddress{},
			After: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
		},
		{
			Name: "populated to nil",
			Before: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			After: nil,
		},
		{
			Name: "populated to empty",
			Before: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			After: []v1.NodeAddress{},
		},
		{
			Name: "multiple addresses of same type, no change",
			Before: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
				{Type: v1.NodeInternalIP, Address: "127.0.0.3"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			After: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
				{Type: v1.NodeInternalIP, Address: "127.0.0.3"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
		},
		{
			Name: "1 InternalIP to 2 InternalIP",
			Before: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			After: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
		},
		{
			Name: "2 InternalIP to 1 InternalIP",
			Before: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			After: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
		},
		{
			Name: "2 InternalIP to 2 different InternalIP",
			Before: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			After: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.3"},
				{Type: v1.NodeInternalIP, Address: "127.0.0.4"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
		},
		{
			Name: "2 InternalIP to reversed order",
			Before: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			After: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.Name, func(t *testing.T) {
			oldNode := &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Spec:       v1.NodeSpec{},
				Status: v1.NodeStatus{
					Addresses: test.Before,
				},
			}
			expectedNode := &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Spec:       v1.NodeSpec{},
				Status: v1.NodeStatus{
					Addresses: test.After,
				},
			}

			_, err := kubeClient.CoreV1().Nodes().Update(context.TODO(), oldNode, metav1.UpdateOptions{})
			assert.NoError(t, err)
			kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
				func(node *v1.Node) error {
					node.Status.Addresses = expectedNode.Status.Addresses
					return nil
				},
			}
			assert.NoError(t, kubelet.updateNodeStatus())

			actions := kubeClient.Actions()
			lastAction := actions[len(actions)-1]
			assert.IsType(t, core.PatchActionImpl{}, lastAction)
			patchAction := lastAction.(core.PatchActionImpl)

			updatedNode, err := applyNodeStatusPatch(oldNode, patchAction.GetPatch())
			require.NoError(t, err)

			assert.True(t, apiequality.Semantic.DeepEqual(updatedNode, expectedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
		})
	}
}
  2145. }