disruption_test.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package disruption

import (
	"context"
	"flag"
	"fmt"
	"os"
	"runtime/debug"
	"sync"
	"testing"
	"time"

	apps "k8s.io/api/apps/v1"
	autoscalingapi "k8s.io/api/autoscaling/v1"
	v1 "k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1beta1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	scalefake "k8s.io/client-go/scale/fake"
	core "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog"
	_ "k8s.io/kubernetes/pkg/apis/core/install"
	"k8s.io/kubernetes/pkg/controller"
	utilpointer "k8s.io/utils/pointer"
)
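
// pdbStates records the PodDisruptionBudgets written through the fake
// updater, keyed by namespace/name, so tests can assert on the status the
// controller would have written without a real API server.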
type pdbStates map[string]policy.PodDisruptionBudget

var alwaysReady = func() bool { return true }

func (ps *pdbStates) Set(pdb *policy.PodDisruptionBudget) error {
	key, err := controller.KeyFunc(pdb)
	if err != nil {
		return err
	}
	(*ps)[key] = *pdb.DeepCopy()
	return nil
}

func (ps *pdbStates) Get(key string) policy.PodDisruptionBudget {
	return (*ps)[key]
}

func (ps *pdbStates) VerifyPdbStatus(t *testing.T, key string, disruptionsAllowed, currentHealthy, desiredHealthy, expectedPods int32,
	disruptedPodMap map[string]metav1.Time) {
	actualPDB := ps.Get(key)
	expectedStatus := policy.PodDisruptionBudgetStatus{
		DisruptionsAllowed: disruptionsAllowed,
		CurrentHealthy:     currentHealthy,
		DesiredHealthy:     desiredHealthy,
		ExpectedPods:       expectedPods,
		DisruptedPods:      disruptedPodMap,
		ObservedGeneration: actualPDB.Generation,
	}
	actualStatus := actualPDB.Status
	if !apiequality.Semantic.DeepEqual(actualStatus, expectedStatus) {
		debug.PrintStack()
		t.Fatalf("PDB %q status mismatch. Expected %+v but got %+v.", key, expectedStatus, actualStatus)
	}
}

func (ps *pdbStates) VerifyDisruptionAllowed(t *testing.T, key string, disruptionsAllowed int32) {
	pdb := ps.Get(key)
	if pdb.Status.DisruptionsAllowed != disruptionsAllowed {
		debug.PrintStack()
		t.Fatalf("PodDisruptionAllowed mismatch for PDB %q. Expected %v but got %v.", key, disruptionsAllowed, pdb.Status.DisruptionsAllowed)
	}
}

type disruptionController struct {
	*DisruptionController

	podStore cache.Store
	pdbStore cache.Store
	rcStore  cache.Store
	rsStore  cache.Store
	dStore   cache.Store
	ssStore  cache.Store

	coreClient  *fake.Clientset
	scaleClient *scalefake.FakeScaleClient
}

var customGVK = schema.GroupVersionKind{
	Group:   "custom.k8s.io",
	Version: "v1",
	Kind:    "customresource",
}
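
// newFakeDisruptionController wires a DisruptionController to fake clients,
// shared informers, and the pdbStates updater above. A typical test adds
// objects directly to the returned informer stores, calls dc.sync, and then
// asserts on the recorded status, for example (sketch):
//
//	dc, ps := newFakeDisruptionController()
//	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
//	add(t, dc.pdbStore, pdb)
//	dc.sync(pdbName)
//	ps.VerifyDisruptionAllowed(t, pdbName, 0)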
func newFakeDisruptionController() (*disruptionController, *pdbStates) {
	ps := &pdbStates{}

	coreClient := fake.NewSimpleClientset()
	informerFactory := informers.NewSharedInformerFactory(coreClient, controller.NoResyncPeriodFunc())

	scheme := runtime.NewScheme()
	scheme.AddKnownTypeWithName(customGVK, &v1.Service{})
	fakeScaleClient := &scalefake.FakeScaleClient{}

	dc := NewDisruptionController(
		informerFactory.Core().V1().Pods(),
		informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
		informerFactory.Core().V1().ReplicationControllers(),
		informerFactory.Apps().V1().ReplicaSets(),
		informerFactory.Apps().V1().Deployments(),
		informerFactory.Apps().V1().StatefulSets(),
		coreClient,
		testrestmapper.TestOnlyStaticRESTMapper(scheme),
		fakeScaleClient,
	)
	dc.getUpdater = func() updater { return ps.Set }
	dc.podListerSynced = alwaysReady
	dc.pdbListerSynced = alwaysReady
	dc.rcListerSynced = alwaysReady
	dc.rsListerSynced = alwaysReady
	dc.dListerSynced = alwaysReady
	dc.ssListerSynced = alwaysReady
	informerFactory.Start(context.TODO().Done())
	informerFactory.WaitForCacheSync(nil)

	return &disruptionController{
		dc,
		informerFactory.Core().V1().Pods().Informer().GetStore(),
		informerFactory.Policy().V1beta1().PodDisruptionBudgets().Informer().GetStore(),
		informerFactory.Core().V1().ReplicationControllers().Informer().GetStore(),
		informerFactory.Apps().V1().ReplicaSets().Informer().GetStore(),
		informerFactory.Apps().V1().Deployments().Informer().GetStore(),
		informerFactory.Apps().V1().StatefulSets().Informer().GetStore(),
		coreClient,
		fakeScaleClient,
	}, ps
}
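
// The helpers below build the fixture objects used throughout these tests.
// Every object carries (or selects) the foo=bar label, so it matches the
// default selector produced by newSelFooBar and used by the PDB constructors.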

func fooBar() map[string]string {
	return map[string]string{"foo": "bar"}
}

func newSel(labels map[string]string) *metav1.LabelSelector {
	return &metav1.LabelSelector{MatchLabels: labels}
}

func newSelFooBar() *metav1.LabelSelector {
	return newSel(map[string]string{"foo": "bar"})
}

func newMinAvailablePodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {
	pdb := &policy.PodDisruptionBudget{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			UID:             uuid.NewUUID(),
			Name:            "foobar",
			Namespace:       metav1.NamespaceDefault,
			ResourceVersion: "18",
		},
		Spec: policy.PodDisruptionBudgetSpec{
			MinAvailable: &minAvailable,
			Selector:     newSelFooBar(),
		},
	}

	pdbName, err := controller.KeyFunc(pdb)
	if err != nil {
		t.Fatalf("Unexpected error naming pdb %q: %v", pdb.Name, err)
	}

	return pdb, pdbName
}

func newMaxUnavailablePodDisruptionBudget(t *testing.T, maxUnavailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {
	pdb := &policy.PodDisruptionBudget{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			UID:             uuid.NewUUID(),
			Name:            "foobar",
			Namespace:       metav1.NamespaceDefault,
			ResourceVersion: "18",
		},
		Spec: policy.PodDisruptionBudgetSpec{
			MaxUnavailable: &maxUnavailable,
			Selector:       newSelFooBar(),
		},
	}

	pdbName, err := controller.KeyFunc(pdb)
	if err != nil {
		t.Fatalf("Unexpected error naming pdb %q: %v", pdb.Name, err)
	}

	return pdb, pdbName
}

func updatePodOwnerToRc(t *testing.T, pod *v1.Pod, rc *v1.ReplicationController) {
	var controllerReference metav1.OwnerReference
	var trueVar = true
	controllerReference = metav1.OwnerReference{UID: rc.UID, APIVersion: controllerKindRC.GroupVersion().String(), Kind: controllerKindRC.Kind, Name: rc.Name, Controller: &trueVar}
	pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
}

func updatePodOwnerToRs(t *testing.T, pod *v1.Pod, rs *apps.ReplicaSet) {
	var controllerReference metav1.OwnerReference
	var trueVar = true
	controllerReference = metav1.OwnerReference{UID: rs.UID, APIVersion: controllerKindRS.GroupVersion().String(), Kind: controllerKindRS.Kind, Name: rs.Name, Controller: &trueVar}
	pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
}

// pod, podName := newPod(t, name)
func updatePodOwnerToSs(t *testing.T, pod *v1.Pod, ss *apps.StatefulSet) {
	var controllerReference metav1.OwnerReference
	var trueVar = true
	controllerReference = metav1.OwnerReference{UID: ss.UID, APIVersion: controllerKindSS.GroupVersion().String(), Kind: controllerKindSS.Kind, Name: ss.Name, Controller: &trueVar}
	pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
}

func newPod(t *testing.T, name string) (*v1.Pod, string) {
	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			UID:             uuid.NewUUID(),
			Annotations:     make(map[string]string),
			Name:            name,
			Namespace:       metav1.NamespaceDefault,
			ResourceVersion: "18",
			Labels:          fooBar(),
		},
		Spec: v1.PodSpec{},
		Status: v1.PodStatus{
			Conditions: []v1.PodCondition{
				{Type: v1.PodReady, Status: v1.ConditionTrue},
			},
		},
	}

	podName, err := controller.KeyFunc(pod)
	if err != nil {
		t.Fatalf("Unexpected error naming pod %q: %v", pod.Name, err)
	}

	return pod, podName
}

func newReplicationController(t *testing.T, size int32) (*v1.ReplicationController, string) {
	rc := &v1.ReplicationController{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			UID:             uuid.NewUUID(),
			Name:            "foobar",
			Namespace:       metav1.NamespaceDefault,
			ResourceVersion: "18",
			Labels:          fooBar(),
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &size,
			Selector: fooBar(),
		},
	}

	rcName, err := controller.KeyFunc(rc)
	if err != nil {
		t.Fatalf("Unexpected error naming RC %q", rc.Name)
	}

	return rc, rcName
}

func newDeployment(t *testing.T, size int32) (*apps.Deployment, string) {
	d := &apps.Deployment{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			UID:             uuid.NewUUID(),
			Name:            "foobar",
			Namespace:       metav1.NamespaceDefault,
			ResourceVersion: "18",
			Labels:          fooBar(),
		},
		Spec: apps.DeploymentSpec{
			Replicas: &size,
			Selector: newSelFooBar(),
		},
	}

	dName, err := controller.KeyFunc(d)
	if err != nil {
		t.Fatalf("Unexpected error naming Deployment %q: %v", d.Name, err)
	}

	return d, dName
}

func newReplicaSet(t *testing.T, size int32) (*apps.ReplicaSet, string) {
	rs := &apps.ReplicaSet{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			UID:             uuid.NewUUID(),
			Name:            "foobar",
			Namespace:       metav1.NamespaceDefault,
			ResourceVersion: "18",
			Labels:          fooBar(),
		},
		Spec: apps.ReplicaSetSpec{
			Replicas: &size,
			Selector: newSelFooBar(),
		},
	}

	rsName, err := controller.KeyFunc(rs)
	if err != nil {
		t.Fatalf("Unexpected error naming ReplicaSet %q: %v", rs.Name, err)
	}

	return rs, rsName
}

func newStatefulSet(t *testing.T, size int32) (*apps.StatefulSet, string) {
	ss := &apps.StatefulSet{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			UID:             uuid.NewUUID(),
			Name:            "foobar",
			Namespace:       metav1.NamespaceDefault,
			ResourceVersion: "18",
			Labels:          fooBar(),
		},
		Spec: apps.StatefulSetSpec{
			Replicas: &size,
			Selector: newSelFooBar(),
		},
	}

	ssName, err := controller.KeyFunc(ss)
	if err != nil {
		t.Fatalf("Unexpected error naming StatefulSet %q: %v", ss.Name, err)
	}

	return ss, ssName
}
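
// update and add write an object directly into an informer store, failing the
// test immediately if the store operation errors.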
func update(t *testing.T, store cache.Store, obj interface{}) {
	if err := store.Update(obj); err != nil {
		t.Fatalf("Could not update %+v in %+v: %v", obj, store, err)
	}
}

func add(t *testing.T, store cache.Store, obj interface{}) {
	if err := store.Add(obj); err != nil {
		t.Fatalf("Could not add %+v to %+v: %v", obj, store, err)
	}
}

// Create one with no selector. Verify it matches 0 pods.
func TestNoSelector(t *testing.T) {
	dc, ps := newFakeDisruptionController()
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
	pdb.Spec.Selector = &metav1.LabelSelector{}
	pod, _ := newPod(t, "yo-yo-yo")

	add(t, dc.pdbStore, pdb)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0, map[string]metav1.Time{})

	add(t, dc.podStore, pod)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0, map[string]metav1.Time{})
}

// Verify that available/expected counts go up as we add pods, then verify that
// available count goes down when we make a pod unavailable.
func TestUnavailable(t *testing.T) {
	dc, ps := newFakeDisruptionController()
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
	add(t, dc.pdbStore, pdb)
	dc.sync(pdbName)
	// Add four pods, one at a time, verifying that the counts go up at each step.
	pods := []*v1.Pod{}
	for i := int32(0); i < 4; i++ {
		ps.VerifyPdbStatus(t, pdbName, 0, i, 3, i, map[string]metav1.Time{})
		pod, _ := newPod(t, fmt.Sprintf("yo-yo-yo %d", i))
		pods = append(pods, pod)
		add(t, dc.podStore, pod)
		dc.sync(pdbName)
	}
	ps.VerifyPdbStatus(t, pdbName, 1, 4, 3, 4, map[string]metav1.Time{})

	// Now set one pod as unavailable
	pods[0].Status.Conditions = []v1.PodCondition{}
	update(t, dc.podStore, pods[0])
	dc.sync(pdbName)

	// Verify expected update
	ps.VerifyPdbStatus(t, pdbName, 0, 3, 3, 4, map[string]metav1.Time{})
}

// Verify that an integer MaxUnavailable won't
// allow a disruption for pods with no controller.
func TestIntegerMaxUnavailable(t *testing.T) {
	dc, ps := newFakeDisruptionController()
	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(1))
	add(t, dc.pdbStore, pdb)
	dc.sync(pdbName)
	// This verifies that when a PDB has 0 pods, disruptions are not allowed.
	ps.VerifyDisruptionAllowed(t, pdbName, 0)

	pod, _ := newPod(t, "naked")
	add(t, dc.podStore, pod)
	dc.sync(pdbName)

	ps.VerifyDisruptionAllowed(t, pdbName, 0)
}

// Verify that an integer MaxUnavailable will recompute allowed disruptions when the scale of
// the selected pod's controller is modified.
func TestIntegerMaxUnavailableWithScaling(t *testing.T) {
	dc, ps := newFakeDisruptionController()
	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(2))
	add(t, dc.pdbStore, pdb)

	rs, _ := newReplicaSet(t, 7)
	add(t, dc.rsStore, rs)

	pod, _ := newPod(t, "pod")
	updatePodOwnerToRs(t, pod, rs)
	add(t, dc.podStore, pod)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 5, 7, map[string]metav1.Time{})

	// Update scale of ReplicaSet and check PDB
	rs.Spec.Replicas = utilpointer.Int32Ptr(5)
	update(t, dc.rsStore, rs)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 3, 5, map[string]metav1.Time{})
}

// Verify that a percentage MaxUnavailable will recompute allowed disruptions when the scale of
// the selected pod's controller is modified.
func TestPercentageMaxUnavailableWithScaling(t *testing.T) {
	dc, ps := newFakeDisruptionController()
	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromString("30%"))
	add(t, dc.pdbStore, pdb)

	rs, _ := newReplicaSet(t, 7)
	add(t, dc.rsStore, rs)

	pod, _ := newPod(t, "pod")
	updatePodOwnerToRs(t, pod, rs)
	add(t, dc.podStore, pod)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 4, 7, map[string]metav1.Time{})

	// Update scale of ReplicaSet and check PDB
	rs.Spec.Replicas = utilpointer.Int32Ptr(3)
	update(t, dc.rsStore, rs)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 3, map[string]metav1.Time{})
}

// Create a pod with no controller, and verify that a PDB with a percentage
// specified won't allow a disruption.
func TestNakedPod(t *testing.T) {
	dc, ps := newFakeDisruptionController()
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
	add(t, dc.pdbStore, pdb)
	dc.sync(pdbName)
	// This verifies that when a PDB has 0 pods, disruptions are not allowed.
	ps.VerifyDisruptionAllowed(t, pdbName, 0)

	pod, _ := newPod(t, "naked")
	add(t, dc.podStore, pod)
	dc.sync(pdbName)

	ps.VerifyDisruptionAllowed(t, pdbName, 0)
}

// Verify that we count the scale of a ReplicaSet even when it has no Deployment.
func TestReplicaSet(t *testing.T) {
	dc, ps := newFakeDisruptionController()
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("20%"))
	add(t, dc.pdbStore, pdb)

	rs, _ := newReplicaSet(t, 10)
	add(t, dc.rsStore, rs)

	pod, _ := newPod(t, "pod")
	updatePodOwnerToRs(t, pod, rs)
	add(t, dc.podStore, pod)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 10, map[string]metav1.Time{})
}
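
// TestScaleResource verifies that the expected pod count for a custom
// resource owner is read through the scale subresource (via the fake scale
// client) rather than from one of the built-in workload listers.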
func TestScaleResource(t *testing.T) {
	customResourceUID := uuid.NewUUID()
	replicas := int32(10)
	pods := int32(4)
	maxUnavailable := int32(5)

	dc, ps := newFakeDisruptionController()
	dc.scaleClient.AddReactor("get", "customresources", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		obj := &autoscalingapi.Scale{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: metav1.NamespaceDefault,
				UID:       customResourceUID,
			},
			Spec: autoscalingapi.ScaleSpec{
				Replicas: replicas,
			},
		}
		return true, obj, nil
	})

	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(int(maxUnavailable)))
	add(t, dc.pdbStore, pdb)

	trueVal := true
	for i := 0; i < int(pods); i++ {
		pod, _ := newPod(t, fmt.Sprintf("pod-%d", i))
		pod.SetOwnerReferences([]metav1.OwnerReference{
			{
				Kind:       customGVK.Kind,
				APIVersion: customGVK.GroupVersion().String(),
				Controller: &trueVal,
				UID:        customResourceUID,
			},
		})
		add(t, dc.podStore, pod)
	}
	dc.sync(pdbName)

	disruptionsAllowed := int32(0)
	if replicas-pods < maxUnavailable {
		disruptionsAllowed = maxUnavailable - (replicas - pods)
	}
	ps.VerifyPdbStatus(t, pdbName, disruptionsAllowed, pods, replicas-maxUnavailable, replicas, map[string]metav1.Time{})
}

// Verify that pods owned by multiple controllers don't allow the PDB to permit a disruption.
func TestMultipleControllers(t *testing.T) {
	const podCount = 2

	dc, ps := newFakeDisruptionController()
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("1%"))
	add(t, dc.pdbStore, pdb)

	pods := []*v1.Pod{}
	for i := 0; i < podCount; i++ {
		pod, _ := newPod(t, fmt.Sprintf("pod %d", i))
		pods = append(pods, pod)
		add(t, dc.podStore, pod)
	}
	dc.sync(pdbName)

	// No controllers yet => no disruption allowed
	ps.VerifyDisruptionAllowed(t, pdbName, 0)

	rc, _ := newReplicationController(t, 1)
	rc.Name = "rc 1"
	for i := 0; i < podCount; i++ {
		updatePodOwnerToRc(t, pods[i], rc)
	}
	add(t, dc.rcStore, rc)
	dc.sync(pdbName)
	// One RC and 200%>1% healthy => disruption allowed
	ps.VerifyDisruptionAllowed(t, pdbName, 1)

	rc, _ = newReplicationController(t, 1)
	rc.Name = "rc 2"
	for i := 0; i < podCount; i++ {
		updatePodOwnerToRc(t, pods[i], rc)
	}
	add(t, dc.rcStore, rc)
	dc.sync(pdbName)

	// 100%>1% healthy BUT two RCs => no disruption allowed
	// TODO: Find out if this assert is still needed
	//ps.VerifyDisruptionAllowed(t, pdbName, 0)
}

func TestReplicationController(t *testing.T) {
	// The budget in this test matches foo=bar, but the RC and its pods match
	// {foo=bar, baz=quux}. Later, when we add a rogue pod with only a foo=bar
	// label, it will match the budget but have no controllers, which should
	// trigger the controller to set PodDisruptionAllowed to false.
	labels := map[string]string{
		"foo": "bar",
		"baz": "quux",
	}

	dc, ps := newFakeDisruptionController()

	// 34% should round up to 2
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("34%"))
	add(t, dc.pdbStore, pdb)
	rc, _ := newReplicationController(t, 3)
	rc.Spec.Selector = labels
	add(t, dc.rcStore, rc)
	dc.sync(pdbName)

	// It starts out at 0 expected because, with no pods, the PDB doesn't know
	// about the RC. This is a known bug. TODO(mml): file issue
	ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})

	for i := int32(0); i < 3; i++ {
		pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
		updatePodOwnerToRc(t, pod, rc)
		pod.Labels = labels
		add(t, dc.podStore, pod)
		dc.sync(pdbName)
		if i < 2 {
			ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3, map[string]metav1.Time{})
		} else {
			ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3, map[string]metav1.Time{})
		}
	}

	rogue, _ := newPod(t, "rogue")
	add(t, dc.podStore, rogue)
	dc.sync(pdbName)
	ps.VerifyDisruptionAllowed(t, pdbName, 0)
}
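
// TestStatefulSetController mirrors TestReplicationController but with pods
// owned by a StatefulSet: expected/healthy counts grow as pods are added, and
// a disruption is allowed only once all three replicas are ready.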
func TestStatefulSetController(t *testing.T) {
	labels := map[string]string{
		"foo": "bar",
		"baz": "quux",
	}

	dc, ps := newFakeDisruptionController()

	// 34% should round up to 2
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("34%"))
	add(t, dc.pdbStore, pdb)
	ss, _ := newStatefulSet(t, 3)
	add(t, dc.ssStore, ss)
	dc.sync(pdbName)

	// It starts out at 0 expected because, with no pods, the PDB doesn't know
	// about the SS. This is a known bug. TODO(mml): file issue
	ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})

	for i := int32(0); i < 3; i++ {
		pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
		updatePodOwnerToSs(t, pod, ss)
		pod.Labels = labels
		add(t, dc.podStore, pod)
		dc.sync(pdbName)
		if i < 2 {
			ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3, map[string]metav1.Time{})
		} else {
			ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3, map[string]metav1.Time{})
		}
	}
}

func TestTwoControllers(t *testing.T) {
	// Most of this test is in verifying intermediate cases as we define the
	// three controllers and create the pods.
	rcLabels := map[string]string{
		"foo": "bar",
		"baz": "quux",
	}
	dLabels := map[string]string{
		"foo": "bar",
		"baz": "quuux",
	}
	dc, ps := newFakeDisruptionController()

	// These constants are related, but I avoid calculating the correct values in
	// code. If you update a parameter here, recalculate the correct values for
	// all of them. Further down in the test, we use these to control loops, and
	// that level of logic is enough complexity for me.
	const collectionSize int32 = 11 // How big each collection is
	const minimumOne int32 = 4      // integer minimum with one controller
	const minimumTwo int32 = 7      // integer minimum with two controllers

	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
	add(t, dc.pdbStore, pdb)
	rc, _ := newReplicationController(t, collectionSize)
	rc.Spec.Selector = rcLabels
	add(t, dc.rcStore, rc)
	dc.sync(pdbName)

	ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})

	pods := []*v1.Pod{}

	unavailablePods := collectionSize - minimumOne - 1
	for i := int32(1); i <= collectionSize; i++ {
		pod, _ := newPod(t, fmt.Sprintf("quux %d", i))
		updatePodOwnerToRc(t, pod, rc)
		pods = append(pods, pod)
		pod.Labels = rcLabels
		if i <= unavailablePods {
			pod.Status.Conditions = []v1.PodCondition{}
		}
		add(t, dc.podStore, pod)
		dc.sync(pdbName)
		if i <= unavailablePods {
			ps.VerifyPdbStatus(t, pdbName, 0, 0, minimumOne, collectionSize, map[string]metav1.Time{})
		} else if i-unavailablePods <= minimumOne {
			ps.VerifyPdbStatus(t, pdbName, 0, i-unavailablePods, minimumOne, collectionSize, map[string]metav1.Time{})
		} else {
			ps.VerifyPdbStatus(t, pdbName, 1, i-unavailablePods, minimumOne, collectionSize, map[string]metav1.Time{})
		}
	}

	d, _ := newDeployment(t, collectionSize)
	d.Spec.Selector = newSel(dLabels)
	add(t, dc.dStore, d)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize, map[string]metav1.Time{})

	rs, _ := newReplicaSet(t, collectionSize)
	rs.Spec.Selector = newSel(dLabels)
	rs.Labels = dLabels
	add(t, dc.rsStore, rs)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize, map[string]metav1.Time{})

	// By the end of this loop, the number of ready pods should be N+2 (hence minimumTwo+2).
	unavailablePods = 2*collectionSize - (minimumTwo + 2) - unavailablePods
	for i := int32(1); i <= collectionSize; i++ {
		pod, _ := newPod(t, fmt.Sprintf("quuux %d", i))
		updatePodOwnerToRs(t, pod, rs)
		pods = append(pods, pod)
		pod.Labels = dLabels
		if i <= unavailablePods {
			pod.Status.Conditions = []v1.PodCondition{}
		}
		add(t, dc.podStore, pod)
		dc.sync(pdbName)
		if i <= unavailablePods {
			ps.VerifyPdbStatus(t, pdbName, 0, minimumOne+1, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
		} else if i-unavailablePods <= minimumTwo-(minimumOne+1) {
			ps.VerifyPdbStatus(t, pdbName, 0, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize, map[string]metav1.Time{})
		} else {
			ps.VerifyPdbStatus(t, pdbName, i-unavailablePods-(minimumTwo-(minimumOne+1)),
				(minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize, map[string]metav1.Time{})
		}
	}

	// Now we verify we can bring down 1 pod and a disruption is still permitted,
	// but if we bring down two, it's not. Then we make the pod ready again and
	// verify that a disruption is permitted again.
	ps.VerifyPdbStatus(t, pdbName, 2, 2+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
	pods[collectionSize-1].Status.Conditions = []v1.PodCondition{}
	update(t, dc.podStore, pods[collectionSize-1])
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})

	pods[collectionSize-2].Status.Conditions = []v1.PodCondition{}
	update(t, dc.podStore, pods[collectionSize-2])
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})

	pods[collectionSize-1].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
	update(t, dc.podStore, pods[collectionSize-1])
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
}

// Verify that syncing a PDB key that does not exist returns no error.
func TestPDBNotExist(t *testing.T) {
	dc, _ := newFakeDisruptionController()
	pdb, _ := newMinAvailablePodDisruptionBudget(t, intstr.FromString("67%"))
	add(t, dc.pdbStore, pdb)
	if err := dc.sync("notExist"); err != nil {
		t.Errorf("Unexpected error: %v, expect nil", err)
	}
}
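
// TestUpdateDisruptedPods verifies that stale entries are pruned from
// Status.DisruptedPods: entries for pods whose deletion has already started,
// entries older than the timeout, and entries for pods that no longer exist
// are removed, while entries for untouched pods are kept.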
func TestUpdateDisruptedPods(t *testing.T) {
	dc, ps := newFakeDisruptionController()
	dc.recheckQueue = workqueue.NewNamedDelayingQueue("pdb_queue")
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(1))
	currentTime := time.Now()
	pdb.Status.DisruptedPods = map[string]metav1.Time{
		"p1":       {Time: currentTime},                       // Should be removed, pod deletion started.
		"p2":       {Time: currentTime.Add(-5 * time.Minute)}, // Should be removed, expired.
		"p3":       {Time: currentTime},                       // Should remain, pod untouched.
		"notthere": {Time: currentTime},                       // Should be removed, pod deleted.
	}
	add(t, dc.pdbStore, pdb)

	pod1, _ := newPod(t, "p1")
	pod1.DeletionTimestamp = &metav1.Time{Time: time.Now()}
	pod2, _ := newPod(t, "p2")
	pod3, _ := newPod(t, "p3")
	add(t, dc.podStore, pod1)
	add(t, dc.podStore, pod2)
	add(t, dc.podStore, pod3)

	dc.sync(pdbName)

	ps.VerifyPdbStatus(t, pdbName, 0, 1, 1, 3, map[string]metav1.Time{"p3": {Time: currentTime}})
}
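
// TestBasicFinderFunctions exercises the per-workload finder functions
// (getPodReplicaSet, getPodReplicationController, getPodStatefulSet),
// checking that a scale is returned only when the owner reference's group
// and kind match the workload handled by that finder.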
func TestBasicFinderFunctions(t *testing.T) {
	dc, _ := newFakeDisruptionController()

	rs, _ := newReplicaSet(t, 10)
	add(t, dc.rsStore, rs)
	rc, _ := newReplicationController(t, 12)
	add(t, dc.rcStore, rc)
	ss, _ := newStatefulSet(t, 14)
	add(t, dc.ssStore, ss)

	testCases := map[string]struct {
		finderFunc    podControllerFinder
		apiVersion    string
		kind          string
		name          string
		uid           types.UID
		findsScale    bool
		expectedScale int32
	}{
		"replicaset controller with apps group": {
			finderFunc:    dc.getPodReplicaSet,
			apiVersion:    "apps/v1",
			kind:          controllerKindRS.Kind,
			name:          rs.Name,
			uid:           rs.UID,
			findsScale:    true,
			expectedScale: 10,
		},
		"replicaset controller with invalid group": {
			finderFunc: dc.getPodReplicaSet,
			apiVersion: "invalid/v1",
			kind:       controllerKindRS.Kind,
			name:       rs.Name,
			uid:        rs.UID,
			findsScale: false,
		},
		"replicationcontroller with empty group": {
			finderFunc:    dc.getPodReplicationController,
			apiVersion:    "/v1",
			kind:          controllerKindRC.Kind,
			name:          rc.Name,
			uid:           rc.UID,
			findsScale:    true,
			expectedScale: 12,
		},
		"replicationcontroller with invalid group": {
			finderFunc: dc.getPodReplicationController,
			apiVersion: "apps/v1",
			kind:       controllerKindRC.Kind,
			name:       rc.Name,
			uid:        rc.UID,
			findsScale: false,
		},
		"statefulset controller with extensions group": {
			finderFunc:    dc.getPodStatefulSet,
			apiVersion:    "apps/v1",
			kind:          controllerKindSS.Kind,
			name:          ss.Name,
			uid:           ss.UID,
			findsScale:    true,
			expectedScale: 14,
		},
		"statefulset controller with invalid kind": {
			finderFunc: dc.getPodStatefulSet,
			apiVersion: "apps/v1",
			kind:       controllerKindRS.Kind,
			name:       ss.Name,
			uid:        ss.UID,
			findsScale: false,
		},
	}

	for tn, tc := range testCases {
		t.Run(tn, func(t *testing.T) {
			controllerRef := &metav1.OwnerReference{
				APIVersion: tc.apiVersion,
				Kind:       tc.kind,
				Name:       tc.name,
				UID:        tc.uid,
			}

			controllerAndScale, _ := tc.finderFunc(controllerRef, metav1.NamespaceDefault)

			if controllerAndScale == nil {
				if tc.findsScale {
					t.Error("Expected scale, but got nil")
				}
				return
			}

			if got, want := controllerAndScale.scale, tc.expectedScale; got != want {
				t.Errorf("Expected scale %d, but got %d", want, got)
			}

			if got, want := controllerAndScale.UID, tc.uid; got != want {
				t.Errorf("Expected uid %s, but got %s", want, got)
			}
		})
	}
}
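
// TestDeploymentFinderFunction checks getPodDeployment, which walks from the
// pod's ReplicaSet owner reference up to the owning Deployment; a scale is
// found only when both links use a supported group and kind.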
func TestDeploymentFinderFunction(t *testing.T) {
	labels := map[string]string{
		"foo": "bar",
	}

	testCases := map[string]struct {
		rsApiVersion  string
		rsKind        string
		depApiVersion string
		depKind       string
		findsScale    bool
		expectedScale int32
	}{
		"happy path": {
			rsApiVersion:  "apps/v1",
			rsKind:        controllerKindRS.Kind,
			depApiVersion: "extensions/v1",
			depKind:       controllerKindDep.Kind,
			findsScale:    true,
			expectedScale: 10,
		},
		"invalid rs apiVersion": {
			rsApiVersion:  "invalid/v1",
			rsKind:        controllerKindRS.Kind,
			depApiVersion: "apps/v1",
			depKind:       controllerKindDep.Kind,
			findsScale:    false,
		},
		"invalid rs kind": {
			rsApiVersion:  "apps/v1",
			rsKind:        "InvalidKind",
			depApiVersion: "apps/v1",
			depKind:       controllerKindDep.Kind,
			findsScale:    false,
		},
		"invalid deployment apiVersion": {
			rsApiVersion:  "extensions/v1",
			rsKind:        controllerKindRS.Kind,
			depApiVersion: "deployment/v1",
			depKind:       controllerKindDep.Kind,
			findsScale:    false,
		},
		"invalid deployment kind": {
			rsApiVersion:  "apps/v1",
			rsKind:        controllerKindRS.Kind,
			depApiVersion: "extensions/v1",
			depKind:       "InvalidKind",
			findsScale:    false,
		},
	}

	for tn, tc := range testCases {
		t.Run(tn, func(t *testing.T) {
			dc, _ := newFakeDisruptionController()

			dep, _ := newDeployment(t, 10)
			dep.Spec.Selector = newSel(labels)
			add(t, dc.dStore, dep)

			rs, _ := newReplicaSet(t, 5)
			rs.Labels = labels
			trueVal := true
			rs.OwnerReferences = append(rs.OwnerReferences, metav1.OwnerReference{
				APIVersion: tc.depApiVersion,
				Kind:       tc.depKind,
				Name:       dep.Name,
				UID:        dep.UID,
				Controller: &trueVal,
			})
			add(t, dc.rsStore, rs)

			controllerRef := &metav1.OwnerReference{
				APIVersion: tc.rsApiVersion,
				Kind:       tc.rsKind,
				Name:       rs.Name,
				UID:        rs.UID,
			}

			controllerAndScale, _ := dc.getPodDeployment(controllerRef, metav1.NamespaceDefault)

			if controllerAndScale == nil {
				if tc.findsScale {
					t.Error("Expected scale, but got nil")
				}
				return
			}

			if got, want := controllerAndScale.scale, tc.expectedScale; got != want {
				t.Errorf("Expected scale %d, but got %d", want, got)
			}

			if got, want := controllerAndScale.UID, dep.UID; got != want {
				t.Errorf("Expected uid %s, but got %s", want, got)
			}
		})
	}
}

// This test checks that the disruption controller does not write stale data to
// a PDB status during race conditions with the eviction handler. Specifically,
// failed updates due to ResourceVersion conflict should not cause a stale value
// of DisruptionsAllowed to be written.
//
// In this test, DisruptionsAllowed starts at 2.
// (A) We will delete 1 pod and trigger DisruptionController to set
// DisruptionsAllowed to 1.
// (B) As the DisruptionController attempts this write, we will evict the
// remaining 2 pods and update DisruptionsAllowed to 0. (The real eviction
// handler would allow this because it still sees DisruptionsAllowed=2.)
// (C) If the DisruptionController writes DisruptionsAllowed=1 despite the
// resource conflict error, then there is a bug.
func TestUpdatePDBStatusRetries(t *testing.T) {
	dc, _ := newFakeDisruptionController()
	// Inject the production code over our fake impl
	dc.getUpdater = func() updater { return dc.writePdbStatus }

	// Create a PDB and 3 pods that match it.
	pdb, pdbKey := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(1))
	pdb, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace).Create(context.TODO(), pdb, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Failed to create PDB: %v", err)
	}
	podNames := []string{"moe", "larry", "curly"}
	for _, name := range podNames {
		pod, _ := newPod(t, name)
		_, err := dc.coreClient.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Failed to create pod: %v", err)
		}
	}

	// Block until the fake clientset writes are observable in the informer caches.
	// FUN FACT: This guarantees that the informer caches have updated, but it does
	// not guarantee that informer event handlers have completed. Fortunately,
	// DisruptionController does most of its logic by reading from informer
	// listers, so this guarantee is sufficient.
	if err := waitForCacheCount(dc.pdbStore, 1); err != nil {
		t.Fatalf("Failed to verify PDB in informer cache: %v", err)
	}
	if err := waitForCacheCount(dc.podStore, len(podNames)); err != nil {
		t.Fatalf("Failed to verify pods in informer cache: %v", err)
	}

	// Sync DisruptionController once to update PDB status.
	if err := dc.sync(pdbKey); err != nil {
		t.Fatalf("Failed initial sync: %v", err)
	}

	// Evict simulates the visible effects of eviction in our fake client.
	evict := func(podNames ...string) {
		// These GVRs are copied from the generated fake code because they are not exported.
		var (
			podsResource                 = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
			poddisruptionbudgetsResource = schema.GroupVersionResource{Group: "policy", Version: "v1beta1", Resource: "poddisruptionbudgets"}
		)

		// Bypass the coreClient.Fake and write directly to the ObjectTracker, because
		// this helper will be called while the Fake is holding a lock.
		obj, err := dc.coreClient.Tracker().Get(poddisruptionbudgetsResource, pdb.Namespace, pdb.Name)
		if err != nil {
			t.Fatalf("Failed to get PDB: %v", err)
		}
		updatedPDB := obj.(*policy.PodDisruptionBudget)
		// Each eviction,
		// - decrements DisruptionsAllowed
		// - adds the pod to DisruptedPods
		// - deletes the pod
		updatedPDB.Status.DisruptionsAllowed -= int32(len(podNames))
		updatedPDB.Status.DisruptedPods = make(map[string]metav1.Time)
		for _, name := range podNames {
			updatedPDB.Status.DisruptedPods[name] = metav1.NewTime(time.Now())
		}
		if err := dc.coreClient.Tracker().Update(poddisruptionbudgetsResource, updatedPDB, updatedPDB.Namespace); err != nil {
			t.Fatalf("Eviction (PDB update) failed: %v", err)
		}
		for _, name := range podNames {
			if err := dc.coreClient.Tracker().Delete(podsResource, "default", name); err != nil {
				t.Fatalf("Eviction (pod delete) failed: %v", err)
			}
		}
	}

	// The fake kube client does not update ResourceVersion or check for conflicts.
	// Instead, we add a reactor that returns a conflict error on the first PDB
	// update and success after that.
	var failOnce sync.Once
	dc.coreClient.Fake.PrependReactor("update", "poddisruptionbudgets", func(a core.Action) (handled bool, obj runtime.Object, err error) {
		failOnce.Do(func() {
			// (B) Evict two pods and fail this update.
			evict(podNames[1], podNames[2])
			handled = true
			err = errors.NewConflict(a.GetResource().GroupResource(), pdb.Name, fmt.Errorf("conflict"))
		})
		return handled, obj, err
	})

	// (A) Delete one pod
	if err := dc.coreClient.CoreV1().Pods("default").Delete(context.TODO(), podNames[0], &metav1.DeleteOptions{}); err != nil {
		t.Fatal(err)
	}
	if err := waitForCacheCount(dc.podStore, len(podNames)-1); err != nil {
		t.Fatalf("Failed to verify pods in informer cache: %v", err)
	}

	// The sync() function should either write a correct status which takes the
	// evictions into account, or re-queue the PDB for another sync (by returning
	// an error)
	if err := dc.sync(pdbKey); err != nil {
		t.Logf("sync() returned with error: %v", err)
	} else {
		t.Logf("sync() returned with no error")
	}

	// (C) Whether or not sync() returned an error, the PDB status should reflect
	// the evictions that took place.
	finalPDB, err := dc.coreClient.PolicyV1beta1().PodDisruptionBudgets("default").Get(context.TODO(), pdb.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to get PDB: %v", err)
	}
	if expected, actual := int32(0), finalPDB.Status.DisruptionsAllowed; expected != actual {
		t.Errorf("DisruptionsAllowed should be %d, got %d", expected, actual)
	}
}

// waitForCacheCount blocks until the given cache store has the desired number
// of items in it. This will return an error if the condition is not met after a
// 10 second timeout.
func waitForCacheCount(store cache.Store, n int) error {
	return wait.Poll(10*time.Millisecond, 10*time.Second, func() (bool, error) {
		return len(store.List()) == n, nil
	})
}

// TestMain adds klog flags to make debugging tests easier.
func TestMain(m *testing.M) {
	klog.InitFlags(flag.CommandLine)
	os.Exit(m.Run())
}