node_lifecycle_controller_test.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloud

import (
	"errors"
	"reflect"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/informers"
	coreinformers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
	fakecloud "k8s.io/cloud-provider/fake"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/controller/testutil"
)
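
// Test_NodesDeleted checks that MonitorNodes deletes a node only when its
// Ready condition is False or Unknown and the cloud provider reports that the
// backing instance no longer exists; a provider error or a still-existing
// instance must leave the node alone.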
func Test_NodesDeleted(t *testing.T) {
	testcases := []struct {
		name        string
		fnh         *testutil.FakeNodeHandler
		fakeCloud   *fakecloud.Cloud
		deleteNodes []*v1.Node
	}{
		{
			name: "node is not ready and does not exist",
			fnh: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: v1.NodeStatus{
							Conditions: []v1.NodeCondition{
								{
									Type:               v1.NodeReady,
									Status:             v1.ConditionFalse,
									LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
				},
				DeletedNodes: []*v1.Node{},
				Clientset:    fake.NewSimpleClientset(),
			},
			fakeCloud: &fakecloud.Cloud{
				ExistsByProviderID: false,
			},
			deleteNodes: []*v1.Node{
				testutil.NewNode("node0"),
			},
		},
		{
			name: "node is not ready and provider returns err",
			fnh: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Spec: v1.NodeSpec{
							ProviderID: "node0",
						},
						Status: v1.NodeStatus{
							Conditions: []v1.NodeCondition{
								{
									Type:               v1.NodeReady,
									Status:             v1.ConditionFalse,
									LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
				},
				DeletedNodes: []*v1.Node{},
				Clientset:    fake.NewSimpleClientset(),
			},
			fakeCloud: &fakecloud.Cloud{
				ExistsByProviderID: false,
				ErrByProviderID:    errors.New("err!"),
			},
			deleteNodes: []*v1.Node{},
		},
		{
			name: "node is not ready but still exists",
			fnh: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Spec: v1.NodeSpec{
							ProviderID: "node0",
						},
						Status: v1.NodeStatus{
							Conditions: []v1.NodeCondition{
								{
									Type:               v1.NodeReady,
									Status:             v1.ConditionFalse,
									LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
				},
				DeletedNodes: []*v1.Node{},
				Clientset:    fake.NewSimpleClientset(),
			},
			fakeCloud: &fakecloud.Cloud{
				ExistsByProviderID: true,
			},
			deleteNodes: []*v1.Node{},
		},
		{
			name: "node ready condition is unknown, node doesn't exist",
			fnh: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: v1.NodeStatus{
							Conditions: []v1.NodeCondition{
								{
									Type:               v1.NodeReady,
									Status:             v1.ConditionUnknown,
									LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
				},
				DeletedNodes: []*v1.Node{},
				Clientset:    fake.NewSimpleClientset(),
			},
			fakeCloud: &fakecloud.Cloud{
				ExistsByProviderID: false,
			},
			deleteNodes: []*v1.Node{
				testutil.NewNode("node0"),
			},
		},
		{
			name: "node ready condition is unknown, node exists",
			fnh: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: v1.NodeStatus{
							Conditions: []v1.NodeCondition{
								{
									Type:               v1.NodeReady,
									Status:             v1.ConditionUnknown,
									LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
				},
				DeletedNodes: []*v1.Node{},
				Clientset:    fake.NewSimpleClientset(),
			},
			fakeCloud: &fakecloud.Cloud{
				NodeShutdown:       false,
				ExistsByProviderID: true,
				ExtID: map[types.NodeName]string{
					types.NodeName("node0"): "foo://12345",
				},
			},
			deleteNodes: []*v1.Node{},
		},
		{
			name: "node is ready, but provider said it is deleted (maybe a bug in provider)",
			fnh: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Spec: v1.NodeSpec{
							ProviderID: "node0",
						},
						Status: v1.NodeStatus{
							Conditions: []v1.NodeCondition{
								{
									Type:               v1.NodeReady,
									Status:             v1.ConditionTrue,
									LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
						},
					},
				},
				DeletedNodes: []*v1.Node{},
				Clientset:    fake.NewSimpleClientset(),
			},
			fakeCloud: &fakecloud.Cloud{
				ExistsByProviderID: false,
			},
			deleteNodes: []*v1.Node{},
		},
	}
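
	// For each case: seed the informer store from the fake handler, run a
	// single MonitorNodes pass, and compare the nodes the controller actually
	// deleted against the expected set.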
	for _, testcase := range testcases {
		t.Run(testcase.name, func(t *testing.T) {
			informer := informers.NewSharedInformerFactory(testcase.fnh.Clientset, time.Second)
			nodeInformer := informer.Core().V1().Nodes()

			if err := syncNodeStore(nodeInformer, testcase.fnh); err != nil {
				t.Errorf("unexpected error: %v", err)
			}

			eventBroadcaster := record.NewBroadcaster()
			cloudNodeLifecycleController := &CloudNodeLifecycleController{
				nodeLister:        nodeInformer.Lister(),
				kubeClient:        testcase.fnh,
				cloud:             testcase.fakeCloud,
				recorder:          eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-lifecycle-controller"}),
				nodeMonitorPeriod: 1 * time.Second,
			}

			eventBroadcaster.StartLogging(klog.Infof)
			cloudNodeLifecycleController.MonitorNodes()

			if !reflect.DeepEqual(testcase.fnh.DeletedNodes, testcase.deleteNodes) {
				t.Logf("actual nodes: %v", testcase.fnh.DeletedNodes)
				t.Logf("expected nodes: %v", testcase.deleteNodes)
				t.Error("unexpected deleted nodes")
			}
		})
	}
}
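
// Test_NodesShutdown checks that MonitorNodes applies ShutdownTaint to a node
// that is not ready and reported as shut down by the cloud provider, and
// leaves the node untouched when the node is ready, the provider errors, or
// the instance is not shut down.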
func Test_NodesShutdown(t *testing.T) {
	testcases := []struct {
		name         string
		fnh          *testutil.FakeNodeHandler
		fakeCloud    *fakecloud.Cloud
		updatedNodes []*v1.Node
	}{
		{
			name: "node is not ready and was shutdown",
			fnh: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local),
						},
						Status: v1.NodeStatus{
							Conditions: []v1.NodeCondition{
								{
									Type:               v1.NodeReady,
									Status:             v1.ConditionFalse,
									LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
									LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
								},
							},
						},
					},
				},
				UpdatedNodes: []*v1.Node{},
				Clientset:    fake.NewSimpleClientset(),
			},
			fakeCloud: &fakecloud.Cloud{
				NodeShutdown:            true,
				ErrShutdownByProviderID: nil,
			},
			updatedNodes: []*v1.Node{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "node0",
						CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local),
					},
					Spec: v1.NodeSpec{
						Taints: []v1.Taint{
							*ShutdownTaint,
						},
					},
					Status: v1.NodeStatus{
						Conditions: []v1.NodeCondition{
							{
								Type:               v1.NodeReady,
								Status:             v1.ConditionFalse,
								LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
								LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
							},
						},
					},
				},
			},
		},
		{
			name: "node is not ready, but there is error checking if node is shutdown",
			fnh: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local),
						},
						Status: v1.NodeStatus{
							Conditions: []v1.NodeCondition{
								{
									Type:               v1.NodeReady,
									Status:             v1.ConditionFalse,
									LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
									LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
								},
							},
						},
					},
				},
				UpdatedNodes: []*v1.Node{},
				Clientset:    fake.NewSimpleClientset(),
			},
			fakeCloud: &fakecloud.Cloud{
				NodeShutdown:            false,
				ErrShutdownByProviderID: errors.New("err!"),
			},
			updatedNodes: []*v1.Node{},
		},
		{
			name: "node is not ready and is not shutdown",
			fnh: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local),
						},
						Status: v1.NodeStatus{
							Conditions: []v1.NodeCondition{
								{
									Type:               v1.NodeReady,
									Status:             v1.ConditionFalse,
									LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
									LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
								},
							},
						},
					},
				},
				UpdatedNodes: []*v1.Node{},
				Clientset:    fake.NewSimpleClientset(),
			},
			fakeCloud: &fakecloud.Cloud{
				NodeShutdown:            false,
				ErrShutdownByProviderID: nil,
			},
			updatedNodes: []*v1.Node{},
		},
		{
			name: "node is ready but provider says it's shutdown (maybe a bug by provider)",
			fnh: &testutil.FakeNodeHandler{
				Existing: []*v1.Node{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.Local),
						},
						Status: v1.NodeStatus{
							Conditions: []v1.NodeCondition{
								{
									Type:               v1.NodeReady,
									Status:             v1.ConditionTrue,
									LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
									LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.Local),
								},
							},
						},
					},
				},
				UpdatedNodes: []*v1.Node{},
				Clientset:    fake.NewSimpleClientset(),
			},
			fakeCloud: &fakecloud.Cloud{
				NodeShutdown:            true,
				ErrShutdownByProviderID: nil,
			},
			updatedNodes: []*v1.Node{},
		},
	}
	for _, testcase := range testcases {
		t.Run(testcase.name, func(t *testing.T) {
			informer := informers.NewSharedInformerFactory(testcase.fnh.Clientset, time.Second)
			nodeInformer := informer.Core().V1().Nodes()

			if err := syncNodeStore(nodeInformer, testcase.fnh); err != nil {
				t.Errorf("unexpected error: %v", err)
			}

			eventBroadcaster := record.NewBroadcaster()
			cloudNodeLifecycleController := &CloudNodeLifecycleController{
				nodeLister:        nodeInformer.Lister(),
				kubeClient:        testcase.fnh,
				cloud:             testcase.fakeCloud,
				recorder:          eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-lifecycle-controller"}),
				nodeMonitorPeriod: 1 * time.Second,
			}

			eventBroadcaster.StartLogging(klog.Infof)
			cloudNodeLifecycleController.MonitorNodes()

			if !reflect.DeepEqual(testcase.fnh.UpdatedNodes, testcase.updatedNodes) {
				t.Logf("actual nodes: %v", testcase.fnh.UpdatedNodes)
				t.Logf("expected nodes: %v", testcase.updatedNodes)
				t.Error("unexpected updated nodes")
			}
		})
	}
}
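
// syncNodeStore lists the nodes held by the fake handler and replaces the
// contents of the informer's store with them, so the controller's lister
// observes the test fixture.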
func syncNodeStore(nodeinformer coreinformers.NodeInformer, f *testutil.FakeNodeHandler) error {
	nodes, err := f.List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	newElems := make([]interface{}, 0, len(nodes.Items))
	for i := range nodes.Items {
		newElems = append(newElems, &nodes.Items[i])
	}
	return nodeinformer.Informer().GetStore().Replace(newElems, "newRV")
}