generic_test.go 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629
  1. /*
  2. Copyright 2015 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package pleg
  14. import (
  15. "errors"
  16. "fmt"
  17. "reflect"
  18. "sort"
  19. "testing"
  20. "time"
  21. "github.com/stretchr/testify/assert"
  22. "k8s.io/apimachinery/pkg/types"
  23. "k8s.io/apimachinery/pkg/util/clock"
  24. "k8s.io/apimachinery/pkg/util/diff"
  25. kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
  26. containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
  27. )
const (
	// testContainerRuntimeType is the fake runtime type stamped onto test container IDs.
	testContainerRuntimeType = "fooRuntime"
	// largeChannelCap is a large enough capacity to hold all events in a single test.
	largeChannelCap = 100
)
// TestGenericPLEG bundles a GenericPLEG under test together with the fake
// runtime and fake clock it was constructed with, so tests can drive both
// the pod list seen by relist and the passage of time.
type TestGenericPLEG struct {
	pleg    *GenericPLEG
	runtime *containertest.FakeRuntime
	clock   *clock.FakeClock
}
// newTestGenericPLEG creates a TestGenericPLEG whose event channel is large
// enough (largeChannelCap) to buffer every event produced in a single test.
func newTestGenericPLEG() *TestGenericPLEG {
	return newTestGenericPLEGWithChannelSize(largeChannelCap)
}
  41. func newTestGenericPLEGWithChannelSize(eventChannelCap int) *TestGenericPLEG {
  42. fakeRuntime := &containertest.FakeRuntime{}
  43. clock := clock.NewFakeClock(time.Time{})
  44. // The channel capacity should be large enough to hold all events in a
  45. // single test.
  46. pleg := &GenericPLEG{
  47. relistPeriod: time.Hour,
  48. runtime: fakeRuntime,
  49. eventChannel: make(chan *PodLifecycleEvent, eventChannelCap),
  50. podRecords: make(podRecords),
  51. clock: clock,
  52. }
  53. return &TestGenericPLEG{pleg: pleg, runtime: fakeRuntime, clock: clock}
  54. }
  55. func getEventsFromChannel(ch <-chan *PodLifecycleEvent) []*PodLifecycleEvent {
  56. events := []*PodLifecycleEvent{}
  57. for len(ch) > 0 {
  58. e := <-ch
  59. events = append(events, e)
  60. }
  61. return events
  62. }
  63. func createTestContainer(ID string, state kubecontainer.ContainerState) *kubecontainer.Container {
  64. return &kubecontainer.Container{
  65. ID: kubecontainer.ContainerID{Type: testContainerRuntimeType, ID: ID},
  66. State: state,
  67. }
  68. }
  69. type sortableEvents []*PodLifecycleEvent
  70. func (a sortableEvents) Len() int { return len(a) }
  71. func (a sortableEvents) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
  72. func (a sortableEvents) Less(i, j int) bool {
  73. if a[i].ID != a[j].ID {
  74. return a[i].ID < a[j].ID
  75. }
  76. return a[i].Data.(string) < a[j].Data.(string)
  77. }
  78. func verifyEvents(t *testing.T, expected, actual []*PodLifecycleEvent) {
  79. sort.Sort(sortableEvents(expected))
  80. sort.Sort(sortableEvents(actual))
  81. if !reflect.DeepEqual(expected, actual) {
  82. t.Errorf("Actual events differ from the expected; diff:\n %v", diff.ObjectDiff(expected, actual))
  83. }
  84. }
  85. func TestRelisting(t *testing.T) {
  86. testPleg := newTestGenericPLEG()
  87. pleg, runtime := testPleg.pleg, testPleg.runtime
  88. ch := pleg.Watch()
  89. // The first relist should send a PodSync event to each pod.
  90. runtime.AllPodList = []*containertest.FakePod{
  91. {Pod: &kubecontainer.Pod{
  92. ID: "1234",
  93. Containers: []*kubecontainer.Container{
  94. createTestContainer("c1", kubecontainer.ContainerStateExited),
  95. createTestContainer("c2", kubecontainer.ContainerStateRunning),
  96. createTestContainer("c3", kubecontainer.ContainerStateUnknown),
  97. },
  98. }},
  99. {Pod: &kubecontainer.Pod{
  100. ID: "4567",
  101. Containers: []*kubecontainer.Container{
  102. createTestContainer("c1", kubecontainer.ContainerStateExited),
  103. },
  104. }},
  105. }
  106. pleg.relist()
  107. // Report every running/exited container if we see them for the first time.
  108. expected := []*PodLifecycleEvent{
  109. {ID: "1234", Type: ContainerStarted, Data: "c2"},
  110. {ID: "4567", Type: ContainerDied, Data: "c1"},
  111. {ID: "1234", Type: ContainerDied, Data: "c1"},
  112. }
  113. actual := getEventsFromChannel(ch)
  114. verifyEvents(t, expected, actual)
  115. // The second relist should not send out any event because no container has
  116. // changed.
  117. pleg.relist()
  118. verifyEvents(t, expected, actual)
  119. runtime.AllPodList = []*containertest.FakePod{
  120. {Pod: &kubecontainer.Pod{
  121. ID: "1234",
  122. Containers: []*kubecontainer.Container{
  123. createTestContainer("c2", kubecontainer.ContainerStateExited),
  124. createTestContainer("c3", kubecontainer.ContainerStateRunning),
  125. },
  126. }},
  127. {Pod: &kubecontainer.Pod{
  128. ID: "4567",
  129. Containers: []*kubecontainer.Container{
  130. createTestContainer("c4", kubecontainer.ContainerStateRunning),
  131. },
  132. }},
  133. }
  134. pleg.relist()
  135. // Only report containers that transitioned to running or exited status.
  136. expected = []*PodLifecycleEvent{
  137. {ID: "1234", Type: ContainerRemoved, Data: "c1"},
  138. {ID: "1234", Type: ContainerDied, Data: "c2"},
  139. {ID: "1234", Type: ContainerStarted, Data: "c3"},
  140. {ID: "4567", Type: ContainerRemoved, Data: "c1"},
  141. {ID: "4567", Type: ContainerStarted, Data: "c4"},
  142. }
  143. actual = getEventsFromChannel(ch)
  144. verifyEvents(t, expected, actual)
  145. }
  146. // TestEventChannelFull test when channel is full, the events will be discard.
  147. func TestEventChannelFull(t *testing.T) {
  148. testPleg := newTestGenericPLEGWithChannelSize(4)
  149. pleg, runtime := testPleg.pleg, testPleg.runtime
  150. ch := pleg.Watch()
  151. // The first relist should send a PodSync event to each pod.
  152. runtime.AllPodList = []*containertest.FakePod{
  153. {Pod: &kubecontainer.Pod{
  154. ID: "1234",
  155. Containers: []*kubecontainer.Container{
  156. createTestContainer("c1", kubecontainer.ContainerStateExited),
  157. createTestContainer("c2", kubecontainer.ContainerStateRunning),
  158. createTestContainer("c3", kubecontainer.ContainerStateUnknown),
  159. },
  160. }},
  161. {Pod: &kubecontainer.Pod{
  162. ID: "4567",
  163. Containers: []*kubecontainer.Container{
  164. createTestContainer("c1", kubecontainer.ContainerStateExited),
  165. },
  166. }},
  167. }
  168. pleg.relist()
  169. // Report every running/exited container if we see them for the first time.
  170. expected := []*PodLifecycleEvent{
  171. {ID: "1234", Type: ContainerStarted, Data: "c2"},
  172. {ID: "4567", Type: ContainerDied, Data: "c1"},
  173. {ID: "1234", Type: ContainerDied, Data: "c1"},
  174. }
  175. actual := getEventsFromChannel(ch)
  176. verifyEvents(t, expected, actual)
  177. runtime.AllPodList = []*containertest.FakePod{
  178. {Pod: &kubecontainer.Pod{
  179. ID: "1234",
  180. Containers: []*kubecontainer.Container{
  181. createTestContainer("c2", kubecontainer.ContainerStateExited),
  182. createTestContainer("c3", kubecontainer.ContainerStateRunning),
  183. },
  184. }},
  185. {Pod: &kubecontainer.Pod{
  186. ID: "4567",
  187. Containers: []*kubecontainer.Container{
  188. createTestContainer("c4", kubecontainer.ContainerStateRunning),
  189. },
  190. }},
  191. }
  192. pleg.relist()
  193. allEvents := []*PodLifecycleEvent{
  194. {ID: "1234", Type: ContainerRemoved, Data: "c1"},
  195. {ID: "1234", Type: ContainerDied, Data: "c2"},
  196. {ID: "1234", Type: ContainerStarted, Data: "c3"},
  197. {ID: "4567", Type: ContainerRemoved, Data: "c1"},
  198. {ID: "4567", Type: ContainerStarted, Data: "c4"},
  199. }
  200. // event channel is full, discard events
  201. actual = getEventsFromChannel(ch)
  202. assert.True(t, len(actual) == 4, "channel length should be 4")
  203. assert.Subsetf(t, allEvents, actual, "actual events should in all events")
  204. }
  205. func TestDetectingContainerDeaths(t *testing.T) {
  206. // Vary the number of relists after the container started and before the
  207. // container died to account for the changes in pleg's internal states.
  208. testReportMissingContainers(t, 1)
  209. testReportMissingPods(t, 1)
  210. testReportMissingContainers(t, 3)
  211. testReportMissingPods(t, 3)
  212. }
  213. func testReportMissingContainers(t *testing.T, numRelists int) {
  214. testPleg := newTestGenericPLEG()
  215. pleg, runtime := testPleg.pleg, testPleg.runtime
  216. ch := pleg.Watch()
  217. runtime.AllPodList = []*containertest.FakePod{
  218. {Pod: &kubecontainer.Pod{
  219. ID: "1234",
  220. Containers: []*kubecontainer.Container{
  221. createTestContainer("c1", kubecontainer.ContainerStateRunning),
  222. createTestContainer("c2", kubecontainer.ContainerStateRunning),
  223. createTestContainer("c3", kubecontainer.ContainerStateExited),
  224. },
  225. }},
  226. }
  227. // Relist and drain the events from the channel.
  228. for i := 0; i < numRelists; i++ {
  229. pleg.relist()
  230. getEventsFromChannel(ch)
  231. }
  232. // Container c2 was stopped and removed between relists. We should report
  233. // the event. The exited container c3 was garbage collected (i.e., removed)
  234. // between relists. We should ignore that event.
  235. runtime.AllPodList = []*containertest.FakePod{
  236. {Pod: &kubecontainer.Pod{
  237. ID: "1234",
  238. Containers: []*kubecontainer.Container{
  239. createTestContainer("c1", kubecontainer.ContainerStateRunning),
  240. },
  241. }},
  242. }
  243. pleg.relist()
  244. expected := []*PodLifecycleEvent{
  245. {ID: "1234", Type: ContainerDied, Data: "c2"},
  246. {ID: "1234", Type: ContainerRemoved, Data: "c2"},
  247. {ID: "1234", Type: ContainerRemoved, Data: "c3"},
  248. }
  249. actual := getEventsFromChannel(ch)
  250. verifyEvents(t, expected, actual)
  251. }
  252. func testReportMissingPods(t *testing.T, numRelists int) {
  253. testPleg := newTestGenericPLEG()
  254. pleg, runtime := testPleg.pleg, testPleg.runtime
  255. ch := pleg.Watch()
  256. runtime.AllPodList = []*containertest.FakePod{
  257. {Pod: &kubecontainer.Pod{
  258. ID: "1234",
  259. Containers: []*kubecontainer.Container{
  260. createTestContainer("c2", kubecontainer.ContainerStateRunning),
  261. },
  262. }},
  263. }
  264. // Relist and drain the events from the channel.
  265. for i := 0; i < numRelists; i++ {
  266. pleg.relist()
  267. getEventsFromChannel(ch)
  268. }
  269. // Container c2 was stopped and removed between relists. We should report
  270. // the event.
  271. runtime.AllPodList = []*containertest.FakePod{}
  272. pleg.relist()
  273. expected := []*PodLifecycleEvent{
  274. {ID: "1234", Type: ContainerDied, Data: "c2"},
  275. {ID: "1234", Type: ContainerRemoved, Data: "c2"},
  276. }
  277. actual := getEventsFromChannel(ch)
  278. verifyEvents(t, expected, actual)
  279. }
  280. func newTestGenericPLEGWithRuntimeMock() (*GenericPLEG, *containertest.Mock) {
  281. runtimeMock := &containertest.Mock{}
  282. pleg := &GenericPLEG{
  283. relistPeriod: time.Hour,
  284. runtime: runtimeMock,
  285. eventChannel: make(chan *PodLifecycleEvent, 100),
  286. podRecords: make(podRecords),
  287. cache: kubecontainer.NewCache(),
  288. clock: clock.RealClock{},
  289. }
  290. return pleg, runtimeMock
  291. }
  292. func createTestPodsStatusesAndEvents(num int) ([]*kubecontainer.Pod, []*kubecontainer.PodStatus, []*PodLifecycleEvent) {
  293. var pods []*kubecontainer.Pod
  294. var statuses []*kubecontainer.PodStatus
  295. var events []*PodLifecycleEvent
  296. for i := 0; i < num; i++ {
  297. id := types.UID(fmt.Sprintf("test-pod-%d", i))
  298. cState := kubecontainer.ContainerStateRunning
  299. container := createTestContainer(fmt.Sprintf("c%d", i), cState)
  300. pod := &kubecontainer.Pod{
  301. ID: id,
  302. Containers: []*kubecontainer.Container{container},
  303. }
  304. status := &kubecontainer.PodStatus{
  305. ID: id,
  306. ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: cState}},
  307. }
  308. event := &PodLifecycleEvent{ID: pod.ID, Type: ContainerStarted, Data: container.ID.ID}
  309. pods = append(pods, pod)
  310. statuses = append(statuses, status)
  311. events = append(events, event)
  312. }
  313. return pods, statuses, events
  314. }
// TestRelistWithCache verifies that relist writes pod statuses into the
// cache, that an inspection error is stored alongside an empty status and
// suppresses event generation for that pod, and that a later successful
// relist recovers and emits the previously suppressed event.
// NOTE: the mock expectations below are order-sensitive via .Once().
func TestRelistWithCache(t *testing.T) {
	pleg, runtimeMock := newTestGenericPLEGWithRuntimeMock()
	ch := pleg.Watch()
	pods, statuses, events := createTestPodsStatusesAndEvents(2)
	runtimeMock.On("GetPods", true).Return(pods, nil)
	runtimeMock.On("GetPodStatus", pods[0].ID, "", "").Return(statuses[0], nil).Once()
	// Inject an error when querying runtime for the pod status for pods[1].
	statusErr := fmt.Errorf("unable to get status")
	runtimeMock.On("GetPodStatus", pods[1].ID, "", "").Return(&kubecontainer.PodStatus{}, statusErr).Once()
	pleg.relist()
	actualEvents := getEventsFromChannel(ch)
	cases := []struct {
		pod    *kubecontainer.Pod
		status *kubecontainer.PodStatus
		error  error
	}{
		{pod: pods[0], status: statuses[0], error: nil},
		{pod: pods[1], status: &kubecontainer.PodStatus{}, error: statusErr},
	}
	// The cache must hold the good status for pods[0] and the error for pods[1].
	for i, c := range cases {
		testStr := fmt.Sprintf("test[%d]", i)
		actualStatus, actualErr := pleg.cache.Get(c.pod.ID)
		assert.Equal(t, c.status, actualStatus, testStr)
		assert.Equal(t, c.error, actualErr, testStr)
	}
	// pleg should not generate any event for pods[1] because of the error.
	assert.Exactly(t, []*PodLifecycleEvent{events[0]}, actualEvents)
	// Return normal status for pods[1].
	runtimeMock.On("GetPodStatus", pods[1].ID, "", "").Return(statuses[1], nil).Once()
	pleg.relist()
	actualEvents = getEventsFromChannel(ch)
	cases = []struct {
		pod    *kubecontainer.Pod
		status *kubecontainer.PodStatus
		error  error
	}{
		{pod: pods[0], status: statuses[0], error: nil},
		{pod: pods[1], status: statuses[1], error: nil},
	}
	// After the retry both pods should have good statuses and no errors cached.
	for i, c := range cases {
		testStr := fmt.Sprintf("test[%d]", i)
		actualStatus, actualErr := pleg.cache.Get(c.pod.ID)
		assert.Equal(t, c.status, actualStatus, testStr)
		assert.Equal(t, c.error, actualErr, testStr)
	}
	// Now that we are able to query status for pods[1], pleg should generate an event.
	assert.Exactly(t, []*PodLifecycleEvent{events[1]}, actualEvents)
}
// TestRemoveCacheEntry verifies that once a pod disappears from the runtime,
// a relist replaces its cache entry with a default (empty) status.
func TestRemoveCacheEntry(t *testing.T) {
	pleg, runtimeMock := newTestGenericPLEGWithRuntimeMock()
	pods, statuses, _ := createTestPodsStatusesAndEvents(1)
	runtimeMock.On("GetPods", true).Return(pods, nil).Once()
	runtimeMock.On("GetPodStatus", pods[0].ID, "", "").Return(statuses[0], nil).Once()
	// Does a relist to populate the cache.
	pleg.relist()
	// Delete the pod from runtime. Verify that the cache entry has been
	// removed after relisting.
	runtimeMock.On("GetPods", true).Return([]*kubecontainer.Pod{}, nil).Once()
	pleg.relist()
	// The cache returns a default status (just the ID) for the removed pod.
	actualStatus, actualErr := pleg.cache.Get(pods[0].ID)
	assert.Equal(t, &kubecontainer.PodStatus{ID: pods[0].ID}, actualStatus)
	assert.Equal(t, nil, actualErr)
}
// TestHealthy exercises the Healthy() check against the fake clock: pleg is
// unhealthy before the first relist, healthy while the last relist is recent,
// and unhealthy again once more than relistThreshold has elapsed.
func TestHealthy(t *testing.T) {
	testPleg := newTestGenericPLEG()
	// pleg should initially be unhealthy
	pleg, _, clock := testPleg.pleg, testPleg.runtime, testPleg.clock
	ok, _ := pleg.Healthy()
	assert.False(t, ok, "pleg should be unhealthy")
	// Advance the clock without any relisting.
	clock.Step(time.Minute * 10)
	ok, _ = pleg.Healthy()
	assert.False(t, ok, "pleg should be unhealthy")
	// Relist and then advance the time by 1 minute. pleg should be healthy
	// because this is within the allowed limit.
	pleg.relist()
	clock.Step(time.Minute * 1)
	ok, _ = pleg.Healthy()
	assert.True(t, ok, "pleg should be healthy")
	// Advance by relistThreshold without any relisting. pleg should be unhealthy
	// because it has been longer than relistThreshold since a relist occurred.
	clock.Step(relistThreshold)
	ok, _ = pleg.Healthy()
	assert.False(t, ok, "pleg should be unhealthy")
}
// TestRelistWithReinspection verifies that when inspecting a pod fails during
// a relist, the error and partial status are cached, the pod is re-inspected
// on the next relist, and recovery does not produce spurious events.
// NOTE: the mock expectations below are order-sensitive via .Once().
func TestRelistWithReinspection(t *testing.T) {
	pleg, runtimeMock := newTestGenericPLEGWithRuntimeMock()
	ch := pleg.Watch()
	infraContainer := createTestContainer("infra", kubecontainer.ContainerStateRunning)
	podID := types.UID("test-pod")
	pods := []*kubecontainer.Pod{{
		ID:         podID,
		Containers: []*kubecontainer.Container{infraContainer},
	}}
	runtimeMock.On("GetPods", true).Return(pods, nil).Once()
	goodStatus := &kubecontainer.PodStatus{
		ID:                podID,
		ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: infraContainer.ID, State: infraContainer.State}},
	}
	runtimeMock.On("GetPodStatus", podID, "", "").Return(goodStatus, nil).Once()
	goodEvent := &PodLifecycleEvent{ID: podID, Type: ContainerStarted, Data: infraContainer.ID.ID}
	// listing 1 - everything ok, infra container set up for pod
	pleg.relist()
	actualEvents := getEventsFromChannel(ch)
	actualStatus, actualErr := pleg.cache.Get(podID)
	assert.Equal(t, goodStatus, actualStatus)
	assert.Equal(t, nil, actualErr)
	assert.Exactly(t, []*PodLifecycleEvent{goodEvent}, actualEvents)
	// listing 2 - pretend runtime was in the middle of creating the non-infra container for the pod
	// and return an error during inspection
	transientContainer := createTestContainer("transient", kubecontainer.ContainerStateUnknown)
	podsWithTransientContainer := []*kubecontainer.Pod{{
		ID:         podID,
		Containers: []*kubecontainer.Container{infraContainer, transientContainer},
	}}
	runtimeMock.On("GetPods", true).Return(podsWithTransientContainer, nil).Once()
	badStatus := &kubecontainer.PodStatus{
		ID:                podID,
		ContainerStatuses: []*kubecontainer.ContainerStatus{},
	}
	runtimeMock.On("GetPodStatus", podID, "", "").Return(badStatus, errors.New("inspection error")).Once()
	pleg.relist()
	// The bad status and the inspection error must both land in the cache,
	// and no events may be emitted for the failed inspection.
	actualEvents = getEventsFromChannel(ch)
	actualStatus, actualErr = pleg.cache.Get(podID)
	assert.Equal(t, badStatus, actualStatus)
	assert.Equal(t, errors.New("inspection error"), actualErr)
	assert.Exactly(t, []*PodLifecycleEvent{}, actualEvents)
	// listing 3 - pretend the transient container has now disappeared, leaving just the infra
	// container. Make sure the pod is reinspected for its status and the cache is updated.
	runtimeMock.On("GetPods", true).Return(pods, nil).Once()
	runtimeMock.On("GetPodStatus", podID, "", "").Return(goodStatus, nil).Once()
	pleg.relist()
	actualEvents = getEventsFromChannel(ch)
	actualStatus, actualErr = pleg.cache.Get(podID)
	assert.Equal(t, goodStatus, actualStatus)
	assert.Equal(t, nil, actualErr)
	// no events are expected because relist #1 set the old pod record which has the infra container
	// running. relist #2 had the inspection error and therefore didn't modify either old or new.
	// relist #3 forced the reinspection of the pod to retrieve its status, but because the list of
	// containers was the same as relist #1, nothing "changed", so there are no new events.
	assert.Exactly(t, []*PodLifecycleEvent{}, actualEvents)
}
  457. // Test detecting sandbox state changes.
  458. func TestRelistingWithSandboxes(t *testing.T) {
  459. testPleg := newTestGenericPLEG()
  460. pleg, runtime := testPleg.pleg, testPleg.runtime
  461. ch := pleg.Watch()
  462. // The first relist should send a PodSync event to each pod.
  463. runtime.AllPodList = []*containertest.FakePod{
  464. {Pod: &kubecontainer.Pod{
  465. ID: "1234",
  466. Sandboxes: []*kubecontainer.Container{
  467. createTestContainer("c1", kubecontainer.ContainerStateExited),
  468. createTestContainer("c2", kubecontainer.ContainerStateRunning),
  469. createTestContainer("c3", kubecontainer.ContainerStateUnknown),
  470. },
  471. }},
  472. {Pod: &kubecontainer.Pod{
  473. ID: "4567",
  474. Sandboxes: []*kubecontainer.Container{
  475. createTestContainer("c1", kubecontainer.ContainerStateExited),
  476. },
  477. }},
  478. }
  479. pleg.relist()
  480. // Report every running/exited container if we see them for the first time.
  481. expected := []*PodLifecycleEvent{
  482. {ID: "1234", Type: ContainerStarted, Data: "c2"},
  483. {ID: "4567", Type: ContainerDied, Data: "c1"},
  484. {ID: "1234", Type: ContainerDied, Data: "c1"},
  485. }
  486. actual := getEventsFromChannel(ch)
  487. verifyEvents(t, expected, actual)
  488. // The second relist should not send out any event because no container has
  489. // changed.
  490. pleg.relist()
  491. verifyEvents(t, expected, actual)
  492. runtime.AllPodList = []*containertest.FakePod{
  493. {Pod: &kubecontainer.Pod{
  494. ID: "1234",
  495. Sandboxes: []*kubecontainer.Container{
  496. createTestContainer("c2", kubecontainer.ContainerStateExited),
  497. createTestContainer("c3", kubecontainer.ContainerStateRunning),
  498. },
  499. }},
  500. {Pod: &kubecontainer.Pod{
  501. ID: "4567",
  502. Sandboxes: []*kubecontainer.Container{
  503. createTestContainer("c4", kubecontainer.ContainerStateRunning),
  504. },
  505. }},
  506. }
  507. pleg.relist()
  508. // Only report containers that transitioned to running or exited status.
  509. expected = []*PodLifecycleEvent{
  510. {ID: "1234", Type: ContainerRemoved, Data: "c1"},
  511. {ID: "1234", Type: ContainerDied, Data: "c2"},
  512. {ID: "1234", Type: ContainerStarted, Data: "c3"},
  513. {ID: "4567", Type: ContainerRemoved, Data: "c1"},
  514. {ID: "4567", Type: ContainerStarted, Data: "c4"},
  515. }
  516. actual = getEventsFromChannel(ch)
  517. verifyEvents(t, expected, actual)
  518. }
// TestRelistIPChange verifies that when a pod's containers stop running (and
// the runtime therefore reports an empty pod IP), the cache preserves the IP
// recorded from the previous status rather than clearing it.
func TestRelistIPChange(t *testing.T) {
	pleg, runtimeMock := newTestGenericPLEGWithRuntimeMock()
	ch := pleg.Watch()
	id := types.UID("test-pod-0")
	cState := kubecontainer.ContainerStateRunning
	container := createTestContainer("c0", cState)
	pod := &kubecontainer.Pod{
		ID:         id,
		Containers: []*kubecontainer.Container{container},
	}
	ipAddr := "192.168.1.5/24"
	status := &kubecontainer.PodStatus{
		ID:                id,
		IP:                ipAddr,
		ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: cState}},
	}
	event := &PodLifecycleEvent{ID: pod.ID, Type: ContainerStarted, Data: container.ID.ID}
	runtimeMock.On("GetPods", true).Return([]*kubecontainer.Pod{pod}, nil).Once()
	runtimeMock.On("GetPodStatus", pod.ID, "", "").Return(status, nil).Once()
	pleg.relist()
	// First relist: running container, IP present, ContainerStarted emitted.
	actualEvents := getEventsFromChannel(ch)
	actualStatus, actualErr := pleg.cache.Get(pod.ID)
	assert.Equal(t, status, actualStatus, "test0")
	assert.Nil(t, actualErr, "test0")
	assert.Exactly(t, []*PodLifecycleEvent{event}, actualEvents)
	// Clear the IP address and mark the container terminated
	container = createTestContainer("c0", kubecontainer.ContainerStateExited)
	pod = &kubecontainer.Pod{
		ID:         id,
		Containers: []*kubecontainer.Container{container},
	}
	status = &kubecontainer.PodStatus{
		ID:                id,
		ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: kubecontainer.ContainerStateExited}},
	}
	event = &PodLifecycleEvent{ID: pod.ID, Type: ContainerDied, Data: container.ID.ID}
	runtimeMock.On("GetPods", true).Return([]*kubecontainer.Pod{pod}, nil).Once()
	runtimeMock.On("GetPodStatus", pod.ID, "", "").Return(status, nil).Once()
	pleg.relist()
	actualEvents = getEventsFromChannel(ch)
	actualStatus, actualErr = pleg.cache.Get(pod.ID)
	// Must copy status to compare since its pointer gets passed through all
	// the way to the event
	statusCopy := *status
	// The cached status is expected to keep the IP from the previous relist.
	statusCopy.IP = ipAddr
	assert.Equal(t, &statusCopy, actualStatus, "test0")
	assert.Nil(t, actualErr, "test0")
	assert.Exactly(t, []*PodLifecycleEvent{event}, actualEvents)
}