garbagecollector_test.go 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972
  1. /*
  2. Copyright 2016 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package garbagecollector
  14. import (
  15. "fmt"
  16. "net/http"
  17. "net/http/httptest"
  18. "reflect"
  19. "strings"
  20. "sync"
  21. "testing"
  22. "time"
  23. "github.com/stretchr/testify/assert"
  24. _ "k8s.io/kubernetes/pkg/apis/core/install"
  25. v1 "k8s.io/api/core/v1"
  26. "k8s.io/apimachinery/pkg/api/meta"
  27. "k8s.io/apimachinery/pkg/api/meta/testrestmapper"
  28. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  29. "k8s.io/apimachinery/pkg/runtime/schema"
  30. "k8s.io/apimachinery/pkg/types"
  31. "k8s.io/apimachinery/pkg/util/json"
  32. "k8s.io/apimachinery/pkg/util/sets"
  33. "k8s.io/apimachinery/pkg/util/strategicpatch"
  34. "k8s.io/client-go/discovery"
  35. "k8s.io/client-go/informers"
  36. "k8s.io/client-go/kubernetes"
  37. "k8s.io/client-go/kubernetes/fake"
  38. "k8s.io/client-go/metadata"
  39. "k8s.io/client-go/metadata/metadatainformer"
  40. restclient "k8s.io/client-go/rest"
  41. "k8s.io/client-go/util/workqueue"
  42. "k8s.io/kubernetes/pkg/api/legacyscheme"
  43. "k8s.io/kubernetes/pkg/controller"
  44. )
// testRESTMapper wraps a meta.RESTMapper and adds a no-op Reset method so it
// can be handed to the garbage collector where a resettable mapper is expected.
type testRESTMapper struct {
	meta.RESTMapper
}

// Reset is a no-op; the underlying test mapper has no cached state to clear.
func (*testRESTMapper) Reset() {}
// TestGarbageCollectorConstruction verifies that NewGarbageCollector tolerates
// resources the RESTMapper cannot resolve (no monitor is built for them, but
// construction succeeds) and that resyncMonitors can add and remove monitors
// both before and after Run() has been called.
func TestGarbageCollectorConstruction(t *testing.T) {
	config := &restclient.Config{}
	// tweakableRM lets the test register the tpr.io kind later; until then the
	// multi-mapper only resolves the built-in scheme types.
	tweakableRM := meta.NewDefaultRESTMapper(nil)
	rm := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}}
	metadataClient, err := metadata.NewForConfig(config)
	if err != nil {
		t.Fatal(err)
	}
	podResource := map[schema.GroupVersionResource]struct{}{
		{Version: "v1", Resource: "pods"}: {},
	}
	twoResources := map[schema.GroupVersionResource]struct{}{
		{Version: "v1", Resource: "pods"}:                     {},
		{Group: "tpr.io", Version: "v1", Resource: "unknown"}: {},
	}
	client := fake.NewSimpleClientset()
	sharedInformers := informers.NewSharedInformerFactory(client, 0)
	metadataInformers := metadatainformer.NewSharedInformerFactory(metadataClient, 0)
	// No monitor will be constructed for the non-core resource, but the GC
	// construction will not fail.
	alwaysStarted := make(chan struct{})
	close(alwaysStarted)
	gc, err := NewGarbageCollector(metadataClient, rm, twoResources, map[schema.GroupResource]struct{}{},
		controller.NewInformerFactory(sharedInformers, metadataInformers), alwaysStarted)
	if err != nil {
		t.Fatal(err)
	}
	// Only the pods resource was mappable, so exactly one monitor exists.
	assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
	// Make sure resource monitor syncing creates and stops resource monitors.
	tweakableRM.Add(schema.GroupVersionKind{Group: "tpr.io", Version: "v1", Kind: "unknown"}, nil)
	err = gc.resyncMonitors(twoResources)
	if err != nil {
		t.Errorf("Failed adding a monitor: %v", err)
	}
	assert.Equal(t, 2, len(gc.dependencyGraphBuilder.monitors))
	err = gc.resyncMonitors(podResource)
	if err != nil {
		t.Errorf("Failed removing a monitor: %v", err)
	}
	assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
	// Make sure the syncing mechanism also works after Run() has been called
	stopCh := make(chan struct{})
	defer close(stopCh)
	go gc.Run(1, stopCh)
	err = gc.resyncMonitors(twoResources)
	if err != nil {
		t.Errorf("Failed adding a monitor: %v", err)
	}
	assert.Equal(t, 2, len(gc.dependencyGraphBuilder.monitors))
	err = gc.resyncMonitors(podResource)
	if err != nil {
		t.Errorf("Failed removing a monitor: %v", err)
	}
	assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
}
  104. // fakeAction records information about requests to aid in testing.
  105. type fakeAction struct {
  106. method string
  107. path string
  108. query string
  109. }
  110. // String returns method=path to aid in testing
  111. func (f *fakeAction) String() string {
  112. return strings.Join([]string{f.method, f.path}, "=")
  113. }
// FakeResponse is a canned HTTP response (status code plus body bytes) that
// fakeActionHandler serves for a registered method+path key.
type FakeResponse struct {
	statusCode int
	content    []byte
}
// fakeActionHandler holds a list of fakeActions received
type fakeActionHandler struct {
	// statusCode and content returned by this handler for different method + path.
	response map[string]FakeResponse

	// lock guards actions, which ServeHTTP appends to on every request and may
	// be read concurrently by test assertions.
	lock    sync.Mutex
	actions []fakeAction
}
// ServeHTTP logs the action that occurred and always returns the associated status code.
// Requests with no registered response get a 200 with an empty v1 List body so
// client list machinery keeps working.
func (f *fakeActionHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
	// The anonymous func scopes the lock to the bookkeeping and response write;
	// the watch simulation below must not run while holding the lock.
	func() {
		f.lock.Lock()
		defer f.lock.Unlock()
		f.actions = append(f.actions, fakeAction{method: request.Method, path: request.URL.Path, query: request.URL.RawQuery})
		fakeResponse, ok := f.response[request.Method+request.URL.Path]
		if !ok {
			// Default response for unregistered paths.
			fakeResponse.statusCode = 200
			fakeResponse.content = []byte(`{"apiVersion": "v1", "kind": "List"}`)
		}
		response.Header().Set("Content-Type", "application/json")
		response.WriteHeader(fakeResponse.statusCode)
		response.Write(fakeResponse.content)
	}()
	// This is to allow the fakeActionHandler to simulate a watch being opened
	if strings.Contains(request.URL.RawQuery, "watch=true") {
		hijacker, ok := response.(http.Hijacker)
		if !ok {
			return
		}
		connection, _, err := hijacker.Hijack()
		if err != nil {
			return
		}
		defer connection.Close()
		// Hold the hijacked connection open so the client sees a long-lived watch.
		time.Sleep(30 * time.Second)
	}
}
  154. // testServerAndClientConfig returns a server that listens and a config that can reference it
  155. func testServerAndClientConfig(handler func(http.ResponseWriter, *http.Request)) (*httptest.Server, *restclient.Config) {
  156. srv := httptest.NewServer(http.HandlerFunc(handler))
  157. config := &restclient.Config{
  158. Host: srv.URL,
  159. }
  160. return srv, config
  161. }
// garbageCollector bundles a GarbageCollector with the stop channel that
// controls its shared informers; tests close stop to tear everything down.
type garbageCollector struct {
	*GarbageCollector
	stop chan struct{}
}
// setupGC constructs a GarbageCollector wired to the given client config,
// monitoring only the core/v1 pods resource, with shared informers started.
// The caller must close the returned stop channel when done.
func setupGC(t *testing.T, config *restclient.Config) garbageCollector {
	metadataClient, err := metadata.NewForConfig(config)
	if err != nil {
		t.Fatal(err)
	}
	podResource := map[schema.GroupVersionResource]struct{}{{Version: "v1", Resource: "pods"}: {}}
	client := fake.NewSimpleClientset()
	sharedInformers := informers.NewSharedInformerFactory(client, 0)
	// Signal "informers started" immediately so the graph builder never blocks.
	alwaysStarted := make(chan struct{})
	close(alwaysStarted)
	gc, err := NewGarbageCollector(metadataClient, &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}, podResource, ignoredResources, sharedInformers, alwaysStarted)
	if err != nil {
		t.Fatal(err)
	}
	stop := make(chan struct{})
	go sharedInformers.Start(stop)
	return garbageCollector{gc, stop}
}
  184. func getPod(podName string, ownerReferences []metav1.OwnerReference) *v1.Pod {
  185. return &v1.Pod{
  186. TypeMeta: metav1.TypeMeta{
  187. Kind: "Pod",
  188. APIVersion: "v1",
  189. },
  190. ObjectMeta: metav1.ObjectMeta{
  191. Name: podName,
  192. Namespace: "ns1",
  193. UID: "456",
  194. OwnerReferences: ownerReferences,
  195. },
  196. }
  197. }
  198. func serilizeOrDie(t *testing.T, object interface{}) []byte {
  199. data, err := json.Marshal(object)
  200. if err != nil {
  201. t.Fatal(err)
  202. }
  203. return data
  204. }
// TestAttemptToDeleteItem verifies that attemptToDeleteItem re-fetches the
// object, looks up its owner, and — because the owner returns 404 — issues a
// DELETE for the orphaned pod, producing exactly the expected request set.
func TestAttemptToDeleteItem(t *testing.T) {
	pod := getPod("ToBeDeletedPod", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "owner1",
			UID:        "123",
			APIVersion: "v1",
		},
	})
	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			// Owner lookup fails with 404, so the pod is considered garbage.
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/owner1": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/pods/ToBeDeletedPod": {
				200,
				serilizeOrDie(t, pod),
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	gc := setupGC(t, clientConfig)
	defer close(gc.stop)
	item := &node{
		identity: objectReference{
			OwnerReference: metav1.OwnerReference{
				Kind:       pod.Kind,
				APIVersion: pod.APIVersion,
				Name:       pod.Name,
				UID:        pod.UID,
			},
			Namespace: pod.Namespace,
		},
		// owners are intentionally left empty. The attemptToDeleteItem routine should get the latest item from the server.
		owners: nil,
	}
	err := gc.attemptToDeleteItem(item)
	if err != nil {
		t.Errorf("Unexpected Error: %v", err)
	}
	expectedActionSet := sets.NewString()
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/replicationcontrollers/owner1")
	expectedActionSet.Insert("DELETE=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")
	actualActionSet := sets.NewString()
	for _, action := range testHandler.actions {
		actualActionSet.Insert(action.String())
	}
	// Set comparison: order of requests doesn't matter, only which were made.
	if !expectedActionSet.Equal(actualActionSet) {
		t.Errorf("expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet,
			actualActionSet, expectedActionSet.Difference(actualActionSet))
	}
}
  261. // verifyGraphInvariants verifies that all of a node's owners list the node as a
  262. // dependent and vice versa. uidToNode has all the nodes in the graph.
  263. func verifyGraphInvariants(scenario string, uidToNode map[types.UID]*node, t *testing.T) {
  264. for myUID, node := range uidToNode {
  265. for dependentNode := range node.dependents {
  266. found := false
  267. for _, owner := range dependentNode.owners {
  268. if owner.UID == myUID {
  269. found = true
  270. break
  271. }
  272. }
  273. if !found {
  274. t.Errorf("scenario: %s: node %s has node %s as a dependent, but it's not present in the latter node's owners list", scenario, node.identity, dependentNode.identity)
  275. }
  276. }
  277. for _, owner := range node.owners {
  278. ownerNode, ok := uidToNode[owner.UID]
  279. if !ok {
  280. // It's possible that the owner node doesn't exist
  281. continue
  282. }
  283. if _, ok := ownerNode.dependents[node]; !ok {
  284. t.Errorf("node %s has node %s as an owner, but it's not present in the latter node's dependents list", node.identity, ownerNode.identity)
  285. }
  286. }
  287. }
  288. }
  289. func createEvent(eventType eventType, selfUID string, owners []string) event {
  290. var ownerReferences []metav1.OwnerReference
  291. for i := 0; i < len(owners); i++ {
  292. ownerReferences = append(ownerReferences, metav1.OwnerReference{UID: types.UID(owners[i])})
  293. }
  294. return event{
  295. eventType: eventType,
  296. obj: &v1.Pod{
  297. ObjectMeta: metav1.ObjectMeta{
  298. UID: types.UID(selfUID),
  299. OwnerReferences: ownerReferences,
  300. },
  301. },
  302. }
  303. }
// TestProcessEvent feeds scripted add/update/delete event sequences through
// GraphBuilder.processGraphChanges and checks that the owner/dependent graph
// stays internally consistent after every single event.
func TestProcessEvent(t *testing.T) {
	var testScenarios = []struct {
		name string
		// a series of events that will be supplied to the
		// GraphBuilder.graphChanges.
		events []event
	}{
		{
			name: "test1",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
			},
		},
		{
			name: "test2",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "4", []string{"2"}),
				createEvent(deleteEvent, "2", []string{"doesn't matter"}),
			},
		},
		{
			name: "test3",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "4", []string{"3"}),
				createEvent(updateEvent, "2", []string{"4"}),
			},
		},
		{
			// Same as test2 but adds arrive before their owners exist.
			name: "reverse test2",
			events: []event{
				createEvent(addEvent, "4", []string{"2"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "1", []string{}),
				createEvent(deleteEvent, "2", []string{"doesn't matter"}),
			},
		},
	}
	alwaysStarted := make(chan struct{})
	close(alwaysStarted)
	for _, scenario := range testScenarios {
		// A fresh builder per scenario so graph state doesn't leak across cases.
		dependencyGraphBuilder := &GraphBuilder{
			informersStarted: alwaysStarted,
			graphChanges:     workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
			uidToNode: &concurrentUIDToNode{
				uidToNodeLock: sync.RWMutex{},
				uidToNode:     make(map[types.UID]*node),
			},
			attemptToDelete:  workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
			absentOwnerCache: NewUIDCache(2),
		}
		for i := 0; i < len(scenario.events); i++ {
			dependencyGraphBuilder.graphChanges.Add(&scenario.events[i])
			dependencyGraphBuilder.processGraphChanges()
			// Invariants must hold after each event, not just at the end.
			verifyGraphInvariants(scenario.name, dependencyGraphBuilder.uidToNode.uidToNode, t)
		}
	}
}
  370. func BenchmarkReferencesDiffs(t *testing.B) {
  371. t.ReportAllocs()
  372. t.ResetTimer()
  373. for n := 0; n < t.N; n++ {
  374. old := []metav1.OwnerReference{{UID: "1"}, {UID: "2"}}
  375. new := []metav1.OwnerReference{{UID: "2"}, {UID: "3"}}
  376. referencesDiffs(old, new)
  377. }
  378. }
  379. // TestDependentsRace relies on golang's data race detector to check if there is
  380. // data race among in the dependents field.
  381. func TestDependentsRace(t *testing.T) {
  382. gc := setupGC(t, &restclient.Config{})
  383. defer close(gc.stop)
  384. const updates = 100
  385. owner := &node{dependents: make(map[*node]struct{})}
  386. ownerUID := types.UID("owner")
  387. gc.dependencyGraphBuilder.uidToNode.Write(owner)
  388. go func() {
  389. for i := 0; i < updates; i++ {
  390. dependent := &node{}
  391. gc.dependencyGraphBuilder.addDependentToOwners(dependent, []metav1.OwnerReference{{UID: ownerUID}})
  392. gc.dependencyGraphBuilder.removeDependentFromOwners(dependent, []metav1.OwnerReference{{UID: ownerUID}})
  393. }
  394. }()
  395. go func() {
  396. gc.attemptToOrphan.Add(owner)
  397. for i := 0; i < updates; i++ {
  398. gc.attemptToOrphanWorker()
  399. }
  400. }()
  401. }
  402. func podToGCNode(pod *v1.Pod) *node {
  403. return &node{
  404. identity: objectReference{
  405. OwnerReference: metav1.OwnerReference{
  406. Kind: pod.Kind,
  407. APIVersion: pod.APIVersion,
  408. Name: pod.Name,
  409. UID: pod.UID,
  410. },
  411. Namespace: pod.Namespace,
  412. },
  413. // owners are intentionally left empty. The attemptToDeleteItem routine should get the latest item from the server.
  414. owners: nil,
  415. }
  416. }
// TestAbsentUIDCache verifies the LRU behavior of the absent-owner cache: a
// 404'd owner UID is cached (suppressing repeat GETs), a cache hit promotes
// the entry, and inserting beyond capacity (2) evicts the least recently used.
func TestAbsentUIDCache(t *testing.T) {
	rc1Pod1 := getPod("rc1Pod1", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc1",
			UID:        "1",
			APIVersion: "v1",
		},
	})
	rc1Pod2 := getPod("rc1Pod2", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc1",
			UID:        "1",
			APIVersion: "v1",
		},
	})
	rc2Pod1 := getPod("rc2Pod1", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc2",
			UID:        "2",
			APIVersion: "v1",
		},
	})
	rc3Pod1 := getPod("rc3Pod1", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc3",
			UID:        "3",
			APIVersion: "v1",
		},
	})
	// Pods resolve normally; every owner RC returns 404 so its UID is cached
	// as absent.
	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"GET" + "/api/v1/namespaces/ns1/pods/rc1Pod1": {
				200,
				serilizeOrDie(t, rc1Pod1),
			},
			"GET" + "/api/v1/namespaces/ns1/pods/rc1Pod2": {
				200,
				serilizeOrDie(t, rc1Pod2),
			},
			"GET" + "/api/v1/namespaces/ns1/pods/rc2Pod1": {
				200,
				serilizeOrDie(t, rc2Pod1),
			},
			"GET" + "/api/v1/namespaces/ns1/pods/rc3Pod1": {
				200,
				serilizeOrDie(t, rc3Pod1),
			},
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc1": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc2": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc3": {
				404,
				[]byte{},
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	gc := setupGC(t, clientConfig)
	defer close(gc.stop)
	// Capacity 2 forces an eviction when the third absent owner is recorded.
	gc.absentOwnerCache = NewUIDCache(2)
	gc.attemptToDeleteItem(podToGCNode(rc1Pod1))
	gc.attemptToDeleteItem(podToGCNode(rc2Pod1))
	// rc1 should already be in the cache, no request should be sent. rc1 should be promoted in the UIDCache
	gc.attemptToDeleteItem(podToGCNode(rc1Pod2))
	// after this call, rc2 should be evicted from the UIDCache
	gc.attemptToDeleteItem(podToGCNode(rc3Pod1))
	// check cache
	if !gc.absentOwnerCache.Has(types.UID("1")) {
		t.Errorf("expected rc1 to be in the cache")
	}
	if gc.absentOwnerCache.Has(types.UID("2")) {
		t.Errorf("expected rc2 to not exist in the cache")
	}
	if !gc.absentOwnerCache.Has(types.UID("3")) {
		t.Errorf("expected rc3 to be in the cache")
	}
	// check the request sent to the server
	count := 0
	for _, action := range testHandler.actions {
		if action.String() == "GET=/api/v1/namespaces/ns1/replicationcontrollers/rc1" {
			count++
		}
	}
	// The second rc1 lookup must have been served from the cache.
	if count != 1 {
		t.Errorf("expected only 1 GET rc1 request, got %d", count)
	}
}
// TestDeleteOwnerRefPatch verifies that the strategic merge patch produced by
// deleteOwnerRefStrategicMergePatch removes exactly the named owner UIDs,
// leaving the remaining references untouched.
func TestDeleteOwnerRefPatch(t *testing.T) {
	original := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: "100",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1"},
				{UID: "2"},
				{UID: "3"},
			},
		},
	}
	originalData := serilizeOrDie(t, original)
	// Deleting owners "2" and "3" must leave only owner "1".
	expected := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: "100",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1"},
			},
		},
	}
	patch := deleteOwnerRefStrategicMergePatch("100", "2", "3")
	patched, err := strategicpatch.StrategicMergePatch(originalData, patch, v1.Pod{})
	if err != nil {
		t.Fatal(err)
	}
	var got v1.Pod
	if err := json.Unmarshal(patched, &got); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(expected, got) {
		t.Errorf("expected: %#v,\ngot: %#v", expected, got)
	}
}
// TestUnblockOwnerReference verifies that the strategic merge patch from
// unblockOwnerReferencesStrategicMergePatch flips BlockOwnerDeletion to false
// on every reference that had it set, without disturbing the others.
func TestUnblockOwnerReference(t *testing.T) {
	trueVar := true
	falseVar := false
	original := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: "100",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1", BlockOwnerDeletion: &trueVar},
				{UID: "2", BlockOwnerDeletion: &falseVar},
				{UID: "3"},
			},
		},
	}
	originalData := serilizeOrDie(t, original)
	// Only owner "1" was blocking; after the patch it reads false while "2"
	// stays false and "3" stays unset.
	expected := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: "100",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1", BlockOwnerDeletion: &falseVar},
				{UID: "2", BlockOwnerDeletion: &falseVar},
				{UID: "3"},
			},
		},
	}
	accessor, err := meta.Accessor(&original)
	if err != nil {
		t.Fatal(err)
	}
	n := node{
		owners: accessor.GetOwnerReferences(),
	}
	patch, err := n.unblockOwnerReferencesStrategicMergePatch()
	if err != nil {
		t.Fatal(err)
	}
	patched, err := strategicpatch.StrategicMergePatch(originalData, patch, v1.Pod{})
	if err != nil {
		t.Fatal(err)
	}
	var got v1.Pod
	if err := json.Unmarshal(patched, &got); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(expected, got) {
		t.Errorf("expected: %#v,\ngot: %#v", expected, got)
		t.Errorf("expected: %#v,\ngot: %#v", expected.OwnerReferences, got.OwnerReferences)
		// Dump each reference individually to make pointer-field diffs readable.
		for _, ref := range got.OwnerReferences {
			t.Errorf("ref.UID=%s, ref.BlockOwnerDeletion=%v", ref.UID, *ref.BlockOwnerDeletion)
		}
	}
}
// TestOrphanDependentsFailure verifies that orphanDependents surfaces a
// conflict error when the PATCH removing the owner reference is rejected
// with HTTP 409.
func TestOrphanDependentsFailure(t *testing.T) {
	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			// The orphaning PATCH always conflicts.
			"PATCH" + "/api/v1/namespaces/ns1/pods/pod": {
				409,
				[]byte{},
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	gc := setupGC(t, clientConfig)
	defer close(gc.stop)
	dependents := []*node{
		{
			identity: objectReference{
				OwnerReference: metav1.OwnerReference{
					Kind:       "Pod",
					APIVersion: "v1",
					Name:       "pod",
				},
				Namespace: "ns1",
			},
		},
	}
	err := gc.orphanDependents(objectReference{}, dependents)
	expected := `the server reported a conflict`
	if err == nil || !strings.Contains(err.Error(), expected) {
		if err != nil {
			t.Errorf("expected error contains text %q, got %q", expected, err.Error())
		} else {
			t.Errorf("expected error contains text %q, got nil", expected)
		}
	}
}
  633. // TestGetDeletableResources ensures GetDeletableResources always returns
  634. // something usable regardless of discovery output.
  635. func TestGetDeletableResources(t *testing.T) {
  636. tests := map[string]struct {
  637. serverResources []*metav1.APIResourceList
  638. err error
  639. deletableResources map[schema.GroupVersionResource]struct{}
  640. }{
  641. "no error": {
  642. serverResources: []*metav1.APIResourceList{
  643. {
  644. // Valid GroupVersion
  645. GroupVersion: "apps/v1",
  646. APIResources: []metav1.APIResource{
  647. {Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
  648. {Name: "services", Namespaced: true, Kind: "Service"},
  649. },
  650. },
  651. {
  652. // Invalid GroupVersion, should be ignored
  653. GroupVersion: "foo//whatever",
  654. APIResources: []metav1.APIResource{
  655. {Name: "bars", Namespaced: true, Kind: "Bar", Verbs: metav1.Verbs{"delete", "list", "watch"}},
  656. },
  657. },
  658. {
  659. // Valid GroupVersion, missing required verbs, should be ignored
  660. GroupVersion: "acme/v1",
  661. APIResources: []metav1.APIResource{
  662. {Name: "widgets", Namespaced: true, Kind: "Widget", Verbs: metav1.Verbs{"delete"}},
  663. },
  664. },
  665. },
  666. err: nil,
  667. deletableResources: map[schema.GroupVersionResource]struct{}{
  668. {Group: "apps", Version: "v1", Resource: "pods"}: {},
  669. },
  670. },
  671. "nonspecific failure, includes usable results": {
  672. serverResources: []*metav1.APIResourceList{
  673. {
  674. GroupVersion: "apps/v1",
  675. APIResources: []metav1.APIResource{
  676. {Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
  677. {Name: "services", Namespaced: true, Kind: "Service"},
  678. },
  679. },
  680. },
  681. err: fmt.Errorf("internal error"),
  682. deletableResources: map[schema.GroupVersionResource]struct{}{
  683. {Group: "apps", Version: "v1", Resource: "pods"}: {},
  684. },
  685. },
  686. "partial discovery failure, includes usable results": {
  687. serverResources: []*metav1.APIResourceList{
  688. {
  689. GroupVersion: "apps/v1",
  690. APIResources: []metav1.APIResource{
  691. {Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
  692. {Name: "services", Namespaced: true, Kind: "Service"},
  693. },
  694. },
  695. },
  696. err: &discovery.ErrGroupDiscoveryFailed{
  697. Groups: map[schema.GroupVersion]error{
  698. {Group: "foo", Version: "v1"}: fmt.Errorf("discovery failure"),
  699. },
  700. },
  701. deletableResources: map[schema.GroupVersionResource]struct{}{
  702. {Group: "apps", Version: "v1", Resource: "pods"}: {},
  703. },
  704. },
  705. "discovery failure, no results": {
  706. serverResources: nil,
  707. err: fmt.Errorf("internal error"),
  708. deletableResources: map[schema.GroupVersionResource]struct{}{},
  709. },
  710. }
  711. for name, test := range tests {
  712. t.Logf("testing %q", name)
  713. client := &fakeServerResources{
  714. PreferredResources: test.serverResources,
  715. Error: test.err,
  716. }
  717. actual := GetDeletableResources(client)
  718. if !reflect.DeepEqual(test.deletableResources, actual) {
  719. t.Errorf("expected resources:\n%v\ngot:\n%v", test.deletableResources, actual)
  720. }
  721. }
  722. }
// TestGarbageCollectorSync ensures that a discovery client error
// will not cause the garbage collector to block infinitely.
func TestGarbageCollectorSync(t *testing.T) {
	serverResources := []*metav1.APIResourceList{
		{
			GroupVersion: "v1",
			APIResources: []metav1.APIResource{
				{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
			},
		},
	}
	// secrets is advertised by discovery but its list endpoint 404s below, so
	// its monitor cache can never sync.
	unsyncableServerResources := []*metav1.APIResourceList{
		{
			GroupVersion: "v1",
			APIResources: []metav1.APIResource{
				{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
				{Name: "secrets", Namespaced: true, Kind: "Secret", Verbs: metav1.Verbs{"delete", "list", "watch"}},
			},
		},
	}
	fakeDiscoveryClient := &fakeServerResources{
		PreferredResources: serverResources,
		Error:              nil,
		Lock:               sync.Mutex{},
		InterfaceUsedCount: 0,
	}
	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"GET" + "/api/v1/pods": {
				200,
				[]byte("{}"),
			},
			"GET" + "/api/v1/secrets": {
				404,
				[]byte("{}"),
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	clientConfig.ContentConfig.NegotiatedSerializer = nil
	client, err := kubernetes.NewForConfig(clientConfig)
	if err != nil {
		t.Fatal(err)
	}
	rm := &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}
	metadataClient, err := metadata.NewForConfig(clientConfig)
	if err != nil {
		t.Fatal(err)
	}
	podResource := map[schema.GroupVersionResource]struct{}{
		{Group: "", Version: "v1", Resource: "pods"}: {},
	}
	sharedInformers := informers.NewSharedInformerFactory(client, 0)
	alwaysStarted := make(chan struct{})
	close(alwaysStarted)
	gc, err := NewGarbageCollector(metadataClient, rm, podResource, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
	if err != nil {
		t.Fatal(err)
	}
	stopCh := make(chan struct{})
	defer close(stopCh)
	go gc.Run(1, stopCh)
	// The pseudo-code of GarbageCollector.Sync():
	// GarbageCollector.Sync(client, period, stopCh):
	//    wait.Until() loops with `period` until the `stopCh` is closed :
	//    wait.PollImmediateUntil() loops with 100ms (hardcode) util the `stopCh` is closed:
	//    GetDeletableResources()
	//    gc.resyncMonitors()
	//    cache.WaitForNamedCacheSync() loops with `syncedPollPeriod` (hardcoded to 100ms), until either its stop channel is closed after `period`, or all caches synced.
	//
	// Setting the period to 200ms allows the WaitForCacheSync() to check
	// for cache sync ~2 times in every wait.PollImmediateUntil() loop.
	//
	// The 1s sleep in the test allows GetDeletableResources and
	// gc.resyncMonitors to run ~5 times to ensure the changes to the
	// fakeDiscoveryClient are picked up.
	go gc.Sync(fakeDiscoveryClient, 200*time.Millisecond, stopCh)
	// Wait until the sync discovers the initial resources
	time.Sleep(1 * time.Second)
	err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
	if err != nil {
		t.Fatalf("Expected garbagecollector.Sync to be running but it is blocked: %v", err)
	}
	// Simulate the discovery client returning an error
	fakeDiscoveryClient.setPreferredResources(nil)
	fakeDiscoveryClient.setError(fmt.Errorf("error calling discoveryClient.ServerPreferredResources()"))
	// Wait until sync discovers the change
	time.Sleep(1 * time.Second)
	// Remove the error from being returned and see if the garbage collector sync is still working
	fakeDiscoveryClient.setPreferredResources(serverResources)
	fakeDiscoveryClient.setError(nil)
	err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
	if err != nil {
		t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
	}
	// Simulate the discovery client returning a resource the restmapper can resolve, but will not sync caches
	fakeDiscoveryClient.setPreferredResources(unsyncableServerResources)
	fakeDiscoveryClient.setError(nil)
	// Wait until sync discovers the change
	time.Sleep(1 * time.Second)
	// Put the resources back to normal and ensure garbage collector sync recovers
	fakeDiscoveryClient.setPreferredResources(serverResources)
	fakeDiscoveryClient.setError(nil)
	err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
	if err != nil {
		t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
	}
}
  832. func expectSyncNotBlocked(fakeDiscoveryClient *fakeServerResources, workerLock *sync.RWMutex) error {
  833. before := fakeDiscoveryClient.getInterfaceUsedCount()
  834. t := 1 * time.Second
  835. time.Sleep(t)
  836. after := fakeDiscoveryClient.getInterfaceUsedCount()
  837. if before == after {
  838. return fmt.Errorf("discoveryClient.ServerPreferredResources() called %d times over %v", after-before, t)
  839. }
  840. workerLockAcquired := make(chan struct{})
  841. go func() {
  842. workerLock.Lock()
  843. workerLock.Unlock()
  844. close(workerLockAcquired)
  845. }()
  846. select {
  847. case <-workerLockAcquired:
  848. return nil
  849. case <-time.After(t):
  850. return fmt.Errorf("workerLock blocked for at least %v", t)
  851. }
  852. }
// fakeServerResources is a stub discovery client whose preferred-resource
// list and returned error can be swapped while the GC sync loop is running.
// All fields are guarded by Lock, since the test goroutine mutates them
// while the sync goroutine reads them concurrently.
type fakeServerResources struct {
	// PreferredResources is returned by ServerPreferredResources (guarded by Lock).
	PreferredResources []*metav1.APIResourceList
	// Error is returned alongside PreferredResources (guarded by Lock).
	Error error
	// Lock guards every other field of this struct.
	Lock sync.Mutex
	// InterfaceUsedCount counts calls to ServerPreferredResources (guarded by Lock).
	InterfaceUsedCount int
}
// ServerResourcesForGroupVersion is an unused stub needed to satisfy the
// discovery interface; it always reports no resources and no error.
func (*fakeServerResources) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
	return nil, nil
}
// ServerResources is an unused stub; it always returns (nil, nil).
//
// Deprecated: use ServerGroupsAndResources instead.
func (*fakeServerResources) ServerResources() ([]*metav1.APIResourceList, error) {
	return nil, nil
}
// ServerGroupsAndResources is an unused stub needed to satisfy the discovery
// interface; it always returns nil groups, nil resources, and no error.
func (*fakeServerResources) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) {
	return nil, nil, nil
}
  869. func (f *fakeServerResources) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
  870. f.Lock.Lock()
  871. defer f.Lock.Unlock()
  872. f.InterfaceUsedCount++
  873. return f.PreferredResources, f.Error
  874. }
  875. func (f *fakeServerResources) setPreferredResources(resources []*metav1.APIResourceList) {
  876. f.Lock.Lock()
  877. defer f.Lock.Unlock()
  878. f.PreferredResources = resources
  879. }
  880. func (f *fakeServerResources) setError(err error) {
  881. f.Lock.Lock()
  882. defer f.Lock.Unlock()
  883. f.Error = err
  884. }
  885. func (f *fakeServerResources) getInterfaceUsedCount() int {
  886. f.Lock.Lock()
  887. defer f.Lock.Unlock()
  888. return f.InterfaceUsedCount
  889. }
// ServerPreferredNamespacedResources is an unused stub needed to satisfy the
// discovery interface; it always reports no resources and no error.
func (*fakeServerResources) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
	return nil, nil
}