  1. /*
  2. Copyright 2016 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package cloud
  14. import (
  15. "context"
  16. "errors"
  17. "reflect"
  18. "testing"
  19. "time"
  20. v1 "k8s.io/api/core/v1"
  21. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  22. "k8s.io/apimachinery/pkg/types"
  23. "k8s.io/client-go/informers"
  24. coreinformers "k8s.io/client-go/informers/core/v1"
  25. "k8s.io/client-go/kubernetes/fake"
  26. "k8s.io/client-go/kubernetes/scheme"
  27. "k8s.io/client-go/tools/record"
  28. fakecloud "k8s.io/cloud-provider/fake"
  29. "k8s.io/klog"
  30. "k8s.io/kubernetes/pkg/controller/testutil"
  31. )
  32. func Test_NodesDeleted(t *testing.T) {
  33. testcases := []struct {
  34. name string
  35. fnh *testutil.FakeNodeHandler
  36. fakeCloud *fakecloud.Cloud
  37. deleteNodes []*v1.Node
  38. }{
  39. {
  40. name: "node is not ready and does not exist",
  41. fnh: &testutil.FakeNodeHandler{
  42. Existing: []*v1.Node{
  43. {
  44. ObjectMeta: metav1.ObjectMeta{
  45. Name: "node0",
  46. CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  47. },
  48. Status: v1.NodeStatus{
  49. Conditions: []v1.NodeCondition{
  50. {
  51. Type: v1.NodeReady,
  52. Status: v1.ConditionFalse,
  53. LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
  54. LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
  55. },
  56. },
  57. },
  58. },
  59. },
  60. DeletedNodes: []*v1.Node{},
  61. Clientset: fake.NewSimpleClientset(),
  62. },
  63. fakeCloud: &fakecloud.Cloud{
  64. ExistsByProviderID: false,
  65. },
  66. deleteNodes: []*v1.Node{
  67. testutil.NewNode("node0"),
  68. },
  69. },
  70. {
  71. name: "node is not ready and provider returns err",
  72. fnh: &testutil.FakeNodeHandler{
  73. Existing: []*v1.Node{
  74. {
  75. ObjectMeta: metav1.ObjectMeta{
  76. Name: "node0",
  77. CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  78. },
  79. Spec: v1.NodeSpec{
  80. ProviderID: "node0",
  81. },
  82. Status: v1.NodeStatus{
  83. Conditions: []v1.NodeCondition{
  84. {
  85. Type: v1.NodeReady,
  86. Status: v1.ConditionFalse,
  87. LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
  88. LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
  89. },
  90. },
  91. },
  92. },
  93. },
  94. DeletedNodes: []*v1.Node{},
  95. Clientset: fake.NewSimpleClientset(),
  96. },
  97. fakeCloud: &fakecloud.Cloud{
  98. ExistsByProviderID: false,
  99. ErrByProviderID: errors.New("err!"),
  100. },
  101. deleteNodes: []*v1.Node{},
  102. },
  103. {
  104. name: "node is not ready but still exists",
  105. fnh: &testutil.FakeNodeHandler{
  106. Existing: []*v1.Node{
  107. {
  108. ObjectMeta: metav1.ObjectMeta{
  109. Name: "node0",
  110. CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  111. },
  112. Spec: v1.NodeSpec{
  113. ProviderID: "node0",
  114. },
  115. Status: v1.NodeStatus{
  116. Conditions: []v1.NodeCondition{
  117. {
  118. Type: v1.NodeReady,
  119. Status: v1.ConditionFalse,
  120. LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
  121. LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
  122. },
  123. },
  124. },
  125. },
  126. },
  127. DeletedNodes: []*v1.Node{},
  128. Clientset: fake.NewSimpleClientset(),
  129. },
  130. fakeCloud: &fakecloud.Cloud{
  131. ExistsByProviderID: true,
  132. },
  133. deleteNodes: []*v1.Node{},
  134. },
  135. {
  136. name: "node ready condition is unknown, node doesn't exist",
  137. fnh: &testutil.FakeNodeHandler{
  138. Existing: []*v1.Node{
  139. {
  140. ObjectMeta: metav1.ObjectMeta{
  141. Name: "node0",
  142. CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  143. },
  144. Status: v1.NodeStatus{
  145. Conditions: []v1.NodeCondition{
  146. {
  147. Type: v1.NodeReady,
  148. Status: v1.ConditionUnknown,
  149. LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
  150. LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
  151. },
  152. },
  153. },
  154. },
  155. },
  156. DeletedNodes: []*v1.Node{},
  157. Clientset: fake.NewSimpleClientset(),
  158. },
  159. fakeCloud: &fakecloud.Cloud{
  160. ExistsByProviderID: false,
  161. },
  162. deleteNodes: []*v1.Node{
  163. testutil.NewNode("node0"),
  164. },
  165. },
  166. {
  167. name: "node ready condition is unknown, node exists",
  168. fnh: &testutil.FakeNodeHandler{
  169. Existing: []*v1.Node{
  170. {
  171. ObjectMeta: metav1.ObjectMeta{
  172. Name: "node0",
  173. CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  174. },
  175. Status: v1.NodeStatus{
  176. Conditions: []v1.NodeCondition{
  177. {
  178. Type: v1.NodeReady,
  179. Status: v1.ConditionUnknown,
  180. LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
  181. LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
  182. },
  183. },
  184. },
  185. },
  186. },
  187. DeletedNodes: []*v1.Node{},
  188. Clientset: fake.NewSimpleClientset(),
  189. },
  190. fakeCloud: &fakecloud.Cloud{
  191. NodeShutdown: false,
  192. ExistsByProviderID: true,
  193. ExtID: map[types.NodeName]string{
  194. types.NodeName("node0"): "foo://12345",
  195. },
  196. },
  197. deleteNodes: []*v1.Node{},
  198. },
  199. {
  200. name: "node is ready, but provider said it is deleted (maybe a bug in provider)",
  201. fnh: &testutil.FakeNodeHandler{
  202. Existing: []*v1.Node{
  203. {
  204. ObjectMeta: metav1.ObjectMeta{
  205. Name: "node0",
  206. CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
  207. },
  208. Spec: v1.NodeSpec{
  209. ProviderID: "node0",
  210. },
  211. Status: v1.NodeStatus{
  212. Conditions: []v1.NodeCondition{
  213. {
  214. Type: v1.NodeReady,
  215. Status: v1.ConditionTrue,
  216. LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
  217. LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
  218. },
  219. },
  220. },
  221. },
  222. },
  223. DeletedNodes: []*v1.Node{},
  224. Clientset: fake.NewSimpleClientset(),
  225. },
  226. fakeCloud: &fakecloud.Cloud{
  227. ExistsByProviderID: false,
  228. },
  229. deleteNodes: []*v1.Node{},
  230. },
  231. }
  232. for _, testcase := range testcases {
  233. t.Run(testcase.name, func(t *testing.T) {
  234. informer := informers.NewSharedInformerFactory(testcase.fnh.Clientset, time.Second)
  235. nodeInformer := informer.Core().V1().Nodes()
  236. if err := syncNodeStore(nodeInformer, testcase.fnh); err != nil {
  237. t.Errorf("unexpected error: %v", err)
  238. }
  239. eventBroadcaster := record.NewBroadcaster()
  240. cloudNodeLifecycleController := &CloudNodeLifecycleController{
  241. nodeLister: nodeInformer.Lister(),
  242. kubeClient: testcase.fnh,
  243. cloud: testcase.fakeCloud,
  244. recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-lifecycle-controller"}),
  245. nodeMonitorPeriod: 1 * time.Second,
  246. }
  247. eventBroadcaster.StartLogging(klog.Infof)
  248. cloudNodeLifecycleController.MonitorNodes()
  249. if !reflect.DeepEqual(testcase.fnh.DeletedNodes, testcase.deleteNodes) {
  250. t.Logf("actual nodes: %v", testcase.fnh.DeletedNodes)
  251. t.Logf("expected nodes: %v", testcase.deleteNodes)
  252. t.Error("unexpected deleted nodes")
  253. }
  254. })
  255. }
  256. }
  257. func Test_NodesShutdown(t *testing.T) {
  258. testcases := []struct {
  259. name string
  260. fnh *testutil.FakeNodeHandler
  261. fakeCloud *fakecloud.Cloud
  262. updatedNodes []*v1.Node
  263. }{
  264. {
  265. name: "node is not ready and was shutdown",
  266. fnh: &testutil.FakeNodeHandler{
  267. Existing: []*v1.Node{
  268. {
  269. ObjectMeta: metav1.ObjectMeta{
  270. Name: "node0",
  271. CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local),
  272. },
  273. Status: v1.NodeStatus{
  274. Conditions: []v1.NodeCondition{
  275. {
  276. Type: v1.NodeReady,
  277. Status: v1.ConditionFalse,
  278. LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
  279. LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
  280. },
  281. },
  282. },
  283. },
  284. },
  285. UpdatedNodes: []*v1.Node{},
  286. Clientset: fake.NewSimpleClientset(),
  287. },
  288. fakeCloud: &fakecloud.Cloud{
  289. NodeShutdown: true,
  290. ErrShutdownByProviderID: nil,
  291. },
  292. updatedNodes: []*v1.Node{
  293. {
  294. ObjectMeta: metav1.ObjectMeta{
  295. Name: "node0",
  296. CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local),
  297. },
  298. Spec: v1.NodeSpec{
  299. Taints: []v1.Taint{
  300. *ShutdownTaint,
  301. },
  302. },
  303. Status: v1.NodeStatus{
  304. Conditions: []v1.NodeCondition{
  305. {
  306. Type: v1.NodeReady,
  307. Status: v1.ConditionFalse,
  308. LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
  309. LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
  310. },
  311. },
  312. },
  313. },
  314. },
  315. },
  316. {
  317. name: "node is not ready, but there is error checking if node is shutdown",
  318. fnh: &testutil.FakeNodeHandler{
  319. Existing: []*v1.Node{
  320. {
  321. ObjectMeta: metav1.ObjectMeta{
  322. Name: "node0",
  323. CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local),
  324. },
  325. Status: v1.NodeStatus{
  326. Conditions: []v1.NodeCondition{
  327. {
  328. Type: v1.NodeReady,
  329. Status: v1.ConditionFalse,
  330. LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
  331. LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
  332. },
  333. },
  334. },
  335. },
  336. },
  337. UpdatedNodes: []*v1.Node{},
  338. Clientset: fake.NewSimpleClientset(),
  339. },
  340. fakeCloud: &fakecloud.Cloud{
  341. NodeShutdown: false,
  342. ErrShutdownByProviderID: errors.New("err!"),
  343. },
  344. updatedNodes: []*v1.Node{},
  345. },
  346. {
  347. name: "node is not ready and is not shutdown",
  348. fnh: &testutil.FakeNodeHandler{
  349. Existing: []*v1.Node{
  350. {
  351. ObjectMeta: metav1.ObjectMeta{
  352. Name: "node0",
  353. CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local),
  354. },
  355. Status: v1.NodeStatus{
  356. Conditions: []v1.NodeCondition{
  357. {
  358. Type: v1.NodeReady,
  359. Status: v1.ConditionFalse,
  360. LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
  361. LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
  362. },
  363. },
  364. },
  365. },
  366. },
  367. UpdatedNodes: []*v1.Node{},
  368. Clientset: fake.NewSimpleClientset(),
  369. },
  370. fakeCloud: &fakecloud.Cloud{
  371. NodeShutdown: false,
  372. ErrShutdownByProviderID: nil,
  373. },
  374. updatedNodes: []*v1.Node{},
  375. },
  376. {
  377. name: "node is ready but provider says it's shutdown (maybe a bug by provider)",
  378. fnh: &testutil.FakeNodeHandler{
  379. Existing: []*v1.Node{
  380. {
  381. ObjectMeta: metav1.ObjectMeta{
  382. Name: "node0",
  383. CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local),
  384. },
  385. Status: v1.NodeStatus{
  386. Conditions: []v1.NodeCondition{
  387. {
  388. Type: v1.NodeReady,
  389. Status: v1.ConditionTrue,
  390. LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
  391. LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
  392. },
  393. },
  394. },
  395. },
  396. },
  397. UpdatedNodes: []*v1.Node{},
  398. Clientset: fake.NewSimpleClientset(),
  399. },
  400. fakeCloud: &fakecloud.Cloud{
  401. NodeShutdown: true,
  402. ErrShutdownByProviderID: nil,
  403. },
  404. updatedNodes: []*v1.Node{},
  405. },
  406. }
  407. for _, testcase := range testcases {
  408. t.Run(testcase.name, func(t *testing.T) {
  409. informer := informers.NewSharedInformerFactory(testcase.fnh.Clientset, time.Second)
  410. nodeInformer := informer.Core().V1().Nodes()
  411. if err := syncNodeStore(nodeInformer, testcase.fnh); err != nil {
  412. t.Errorf("unexpected error: %v", err)
  413. }
  414. eventBroadcaster := record.NewBroadcaster()
  415. cloudNodeLifecycleController := &CloudNodeLifecycleController{
  416. nodeLister: nodeInformer.Lister(),
  417. kubeClient: testcase.fnh,
  418. cloud: testcase.fakeCloud,
  419. recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-lifecycle-controller"}),
  420. nodeMonitorPeriod: 1 * time.Second,
  421. }
  422. eventBroadcaster.StartLogging(klog.Infof)
  423. cloudNodeLifecycleController.MonitorNodes()
  424. if !reflect.DeepEqual(testcase.fnh.UpdatedNodes, testcase.updatedNodes) {
  425. t.Logf("actual nodes: %v", testcase.fnh.UpdatedNodes)
  426. t.Logf("expected nodes: %v", testcase.updatedNodes)
  427. t.Error("unexpected updated nodes")
  428. }
  429. })
  430. }
  431. }
  432. func syncNodeStore(nodeinformer coreinformers.NodeInformer, f *testutil.FakeNodeHandler) error {
  433. nodes, err := f.List(context.TODO(), metav1.ListOptions{})
  434. if err != nil {
  435. return err
  436. }
  437. newElems := make([]interface{}, 0, len(nodes.Items))
  438. for i := range nodes.Items {
  439. newElems = append(newElems, &nodes.Items[i])
  440. }
  441. return nodeinformer.Informer().GetStore().Replace(newElems, "newRV")
  442. }