// disruption_test.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package disruption

import (
	"fmt"
	"runtime/debug"
	"testing"
	"time"

	apps "k8s.io/api/apps/v1"
	autoscalingapi "k8s.io/api/autoscaling/v1"
	"k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1beta1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/client-go/informers"
	scalefake "k8s.io/client-go/scale/fake"
	core "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
	_ "k8s.io/kubernetes/pkg/apis/core/install"
	"k8s.io/kubernetes/pkg/controller"

	"github.com/Azure/go-autorest/autorest/to"
)

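// pdbStates captures the PodDisruptionBudget objects that the controller's
// status updater would have written, keyed by namespace/name, so tests can
// assert on the resulting status without a real API server.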
type pdbStates map[string]policy.PodDisruptionBudget

var alwaysReady = func() bool { return true }

func (ps *pdbStates) Set(pdb *policy.PodDisruptionBudget) error {
	key, err := controller.KeyFunc(pdb)
	if err != nil {
		return err
	}
	(*ps)[key] = *pdb.DeepCopy()
	return nil
}

func (ps *pdbStates) Get(key string) policy.PodDisruptionBudget {
	return (*ps)[key]
}

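// VerifyPdbStatus and VerifyDisruptionAllowed fail the test (with a stack
// trace) unless the most recently recorded status for the given key matches
// the expected values; ObservedGeneration is taken from the stored PDB.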
func (ps *pdbStates) VerifyPdbStatus(t *testing.T, key string, disruptionsAllowed, currentHealthy, desiredHealthy, expectedPods int32,
	disruptedPodMap map[string]metav1.Time) {
	actualPDB := ps.Get(key)
	expectedStatus := policy.PodDisruptionBudgetStatus{
		PodDisruptionsAllowed: disruptionsAllowed,
		CurrentHealthy:        currentHealthy,
		DesiredHealthy:        desiredHealthy,
		ExpectedPods:          expectedPods,
		DisruptedPods:         disruptedPodMap,
		ObservedGeneration:    actualPDB.Generation,
	}
	actualStatus := actualPDB.Status
	if !apiequality.Semantic.DeepEqual(actualStatus, expectedStatus) {
		debug.PrintStack()
		t.Fatalf("PDB %q status mismatch. Expected %+v but got %+v.", key, expectedStatus, actualStatus)
	}
}

func (ps *pdbStates) VerifyDisruptionAllowed(t *testing.T, key string, disruptionsAllowed int32) {
	pdb := ps.Get(key)
	if pdb.Status.PodDisruptionsAllowed != disruptionsAllowed {
		debug.PrintStack()
		t.Fatalf("PodDisruptionAllowed mismatch for PDB %q. Expected %v but got %v.", key, disruptionsAllowed, pdb.Status.PodDisruptionsAllowed)
	}
}

type disruptionController struct {
	*DisruptionController

	podStore cache.Store
	pdbStore cache.Store
	rcStore  cache.Store
	rsStore  cache.Store
	dStore   cache.Store
	ssStore  cache.Store

	scaleClient *scalefake.FakeScaleClient
}

var customGVK = schema.GroupVersionKind{
	Group:   "custom.k8s.io",
	Version: "v1",
	Kind:    "customresource",
}

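// newFakeDisruptionController wires a DisruptionController to in-memory
// informer stores and a fake scale client. Status updates are captured in the
// returned pdbStates rather than being sent to an API server, and all lister
// "synced" checks are stubbed to return true.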
func newFakeDisruptionController() (*disruptionController, *pdbStates) {
	ps := &pdbStates{}

	informerFactory := informers.NewSharedInformerFactory(nil, controller.NoResyncPeriodFunc())

	scheme := runtime.NewScheme()
	scheme.AddKnownTypeWithName(customGVK, &v1.Service{})
	fakeScaleClient := &scalefake.FakeScaleClient{}

	dc := NewDisruptionController(
		informerFactory.Core().V1().Pods(),
		informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
		informerFactory.Core().V1().ReplicationControllers(),
		informerFactory.Apps().V1().ReplicaSets(),
		informerFactory.Apps().V1().Deployments(),
		informerFactory.Apps().V1().StatefulSets(),
		nil,
		testrestmapper.TestOnlyStaticRESTMapper(scheme),
		fakeScaleClient,
	)
	dc.getUpdater = func() updater { return ps.Set }
	dc.podListerSynced = alwaysReady
	dc.pdbListerSynced = alwaysReady
	dc.rcListerSynced = alwaysReady
	dc.rsListerSynced = alwaysReady
	dc.dListerSynced = alwaysReady
	dc.ssListerSynced = alwaysReady

	return &disruptionController{
		dc,
		informerFactory.Core().V1().Pods().Informer().GetStore(),
		informerFactory.Policy().V1beta1().PodDisruptionBudgets().Informer().GetStore(),
		informerFactory.Core().V1().ReplicationControllers().Informer().GetStore(),
		informerFactory.Apps().V1().ReplicaSets().Informer().GetStore(),
		informerFactory.Apps().V1().Deployments().Informer().GetStore(),
		informerFactory.Apps().V1().StatefulSets().Informer().GetStore(),
		fakeScaleClient,
	}, ps
}

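// Label and selector helpers shared by the fixtures below: every fixture
// carries (or selects) the foo=bar label, so pods, workloads, and PDBs match
// each other by default.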
func fooBar() map[string]string {
	return map[string]string{"foo": "bar"}
}

func newSel(labels map[string]string) *metav1.LabelSelector {
	return &metav1.LabelSelector{MatchLabels: labels}
}

func newSelFooBar() *metav1.LabelSelector {
	return newSel(map[string]string{"foo": "bar"})
}

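// newMinAvailablePodDisruptionBudget returns a PDB in the default namespace
// that selects foo=bar pods with the given minAvailable, along with its
// namespace/name store key.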
func newMinAvailablePodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {
	pdb := &policy.PodDisruptionBudget{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			UID:             uuid.NewUUID(),
			Name:            "foobar",
			Namespace:       metav1.NamespaceDefault,
			ResourceVersion: "18",
		},
		Spec: policy.PodDisruptionBudgetSpec{
			MinAvailable: &minAvailable,
			Selector:     newSelFooBar(),
		},
	}

	pdbName, err := controller.KeyFunc(pdb)
	if err != nil {
		t.Fatalf("Unexpected error naming pdb %q: %v", pdb.Name, err)
	}

	return pdb, pdbName
}

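// newMaxUnavailablePodDisruptionBudget is the maxUnavailable counterpart of
// the fixture above.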
func newMaxUnavailablePodDisruptionBudget(t *testing.T, maxUnavailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {
	pdb := &policy.PodDisruptionBudget{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			UID:             uuid.NewUUID(),
			Name:            "foobar",
			Namespace:       metav1.NamespaceDefault,
			ResourceVersion: "18",
		},
		Spec: policy.PodDisruptionBudgetSpec{
			MaxUnavailable: &maxUnavailable,
			Selector:       newSelFooBar(),
		},
	}

	pdbName, err := controller.KeyFunc(pdb)
	if err != nil {
		t.Fatalf("Unexpected error naming pdb %q: %v", pdb.Name, err)
	}

	return pdb, pdbName
}

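// The updatePodOwnerTo* helpers append a controller OwnerReference so that a
// pod is attributed to the given ReplicationController, ReplicaSet, or
// StatefulSet when the controller computes the expected scale.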
func updatePodOwnerToRc(t *testing.T, pod *v1.Pod, rc *v1.ReplicationController) {
	var controllerReference metav1.OwnerReference
	var trueVar = true
	controllerReference = metav1.OwnerReference{UID: rc.UID, APIVersion: controllerKindRC.GroupVersion().String(), Kind: controllerKindRC.Kind, Name: rc.Name, Controller: &trueVar}
	pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
}

func updatePodOwnerToRs(t *testing.T, pod *v1.Pod, rs *apps.ReplicaSet) {
	var controllerReference metav1.OwnerReference
	var trueVar = true
	controllerReference = metav1.OwnerReference{UID: rs.UID, APIVersion: controllerKindRS.GroupVersion().String(), Kind: controllerKindRS.Kind, Name: rs.Name, Controller: &trueVar}
	pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
}

func updatePodOwnerToSs(t *testing.T, pod *v1.Pod, ss *apps.StatefulSet) {
	var controllerReference metav1.OwnerReference
	var trueVar = true
	controllerReference = metav1.OwnerReference{UID: ss.UID, APIVersion: controllerKindSS.GroupVersion().String(), Kind: controllerKindSS.Kind, Name: ss.Name, Controller: &trueVar}
	pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
}

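// newPod returns a Ready pod labeled foo=bar in the default namespace, plus
// its namespace/name store key.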
func newPod(t *testing.T, name string) (*v1.Pod, string) {
	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			UID:             uuid.NewUUID(),
			Annotations:     make(map[string]string),
			Name:            name,
			Namespace:       metav1.NamespaceDefault,
			ResourceVersion: "18",
			Labels:          fooBar(),
		},
		Spec: v1.PodSpec{},
		Status: v1.PodStatus{
			Conditions: []v1.PodCondition{
				{Type: v1.PodReady, Status: v1.ConditionTrue},
			},
		},
	}

	podName, err := controller.KeyFunc(pod)
	if err != nil {
		t.Fatalf("Unexpected error naming pod %q: %v", pod.Name, err)
	}

	return pod, podName
}

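// The new* workload fixtures below (ReplicationController, Deployment,
// ReplicaSet, StatefulSet) all use the foo=bar selector and the requested
// replica count, and return the object together with its store key.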
func newReplicationController(t *testing.T, size int32) (*v1.ReplicationController, string) {
	rc := &v1.ReplicationController{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			UID:             uuid.NewUUID(),
			Name:            "foobar",
			Namespace:       metav1.NamespaceDefault,
			ResourceVersion: "18",
			Labels:          fooBar(),
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &size,
			Selector: fooBar(),
		},
	}

	rcName, err := controller.KeyFunc(rc)
	if err != nil {
		t.Fatalf("Unexpected error naming RC %q: %v", rc.Name, err)
	}

	return rc, rcName
}

func newDeployment(t *testing.T, size int32) (*apps.Deployment, string) {
	d := &apps.Deployment{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			UID:             uuid.NewUUID(),
			Name:            "foobar",
			Namespace:       metav1.NamespaceDefault,
			ResourceVersion: "18",
			Labels:          fooBar(),
		},
		Spec: apps.DeploymentSpec{
			Replicas: &size,
			Selector: newSelFooBar(),
		},
	}

	dName, err := controller.KeyFunc(d)
	if err != nil {
		t.Fatalf("Unexpected error naming Deployment %q: %v", d.Name, err)
	}

	return d, dName
}

func newReplicaSet(t *testing.T, size int32) (*apps.ReplicaSet, string) {
	rs := &apps.ReplicaSet{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			UID:             uuid.NewUUID(),
			Name:            "foobar",
			Namespace:       metav1.NamespaceDefault,
			ResourceVersion: "18",
			Labels:          fooBar(),
		},
		Spec: apps.ReplicaSetSpec{
			Replicas: &size,
			Selector: newSelFooBar(),
		},
	}

	rsName, err := controller.KeyFunc(rs)
	if err != nil {
		t.Fatalf("Unexpected error naming ReplicaSet %q: %v", rs.Name, err)
	}

	return rs, rsName
}

func newStatefulSet(t *testing.T, size int32) (*apps.StatefulSet, string) {
	ss := &apps.StatefulSet{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{
			UID:             uuid.NewUUID(),
			Name:            "foobar",
			Namespace:       metav1.NamespaceDefault,
			ResourceVersion: "18",
			Labels:          fooBar(),
		},
		Spec: apps.StatefulSetSpec{
			Replicas: &size,
			Selector: newSelFooBar(),
		},
	}

	ssName, err := controller.KeyFunc(ss)
	if err != nil {
		t.Fatalf("Unexpected error naming StatefulSet %q: %v", ss.Name, err)
	}

	return ss, ssName
}

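// add and update push objects into the fake informer stores and fail the test
// immediately if the store rejects them.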
func update(t *testing.T, store cache.Store, obj interface{}) {
	if err := store.Update(obj); err != nil {
		t.Fatalf("Could not update %+v in %+v: %v", obj, store, err)
	}
}

func add(t *testing.T, store cache.Store, obj interface{}) {
	if err := store.Add(obj); err != nil {
		t.Fatalf("Could not add %+v to %+v: %v", obj, store, err)
	}
}

// Create a PDB with no selector. Verify it matches 0 pods.
func TestNoSelector(t *testing.T) {
	dc, ps := newFakeDisruptionController()

	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
	pdb.Spec.Selector = &metav1.LabelSelector{}
	pod, _ := newPod(t, "yo-yo-yo")

	add(t, dc.pdbStore, pdb)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0, map[string]metav1.Time{})

	add(t, dc.podStore, pod)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0, map[string]metav1.Time{})
}

// Verify that available/expected counts go up as we add pods, then verify that
// available count goes down when we make a pod unavailable.
func TestUnavailable(t *testing.T) {
	dc, ps := newFakeDisruptionController()

	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
	add(t, dc.pdbStore, pdb)
	dc.sync(pdbName)

	// Add four pods, verifying that the counts go up at each step.
	pods := []*v1.Pod{}
	for i := int32(0); i < 4; i++ {
		ps.VerifyPdbStatus(t, pdbName, 0, i, 3, i, map[string]metav1.Time{})
		pod, _ := newPod(t, fmt.Sprintf("yo-yo-yo %d", i))
		pods = append(pods, pod)
		add(t, dc.podStore, pod)
		dc.sync(pdbName)
	}
	ps.VerifyPdbStatus(t, pdbName, 1, 4, 3, 4, map[string]metav1.Time{})

	// Now set one pod as unavailable
	pods[0].Status.Conditions = []v1.PodCondition{}
	update(t, dc.podStore, pods[0])
	dc.sync(pdbName)

	// Verify expected update
	ps.VerifyPdbStatus(t, pdbName, 0, 3, 3, 4, map[string]metav1.Time{})
}

// Verify that an integer MaxUnavailable won't
// allow a disruption for pods with no controller.
func TestIntegerMaxUnavailable(t *testing.T) {
	dc, ps := newFakeDisruptionController()

	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(1))
	add(t, dc.pdbStore, pdb)
	dc.sync(pdbName)
	// This verifies that when a PDB has 0 pods, disruptions are not allowed.
	ps.VerifyDisruptionAllowed(t, pdbName, 0)

	pod, _ := newPod(t, "naked")
	add(t, dc.podStore, pod)
	dc.sync(pdbName)

	ps.VerifyDisruptionAllowed(t, pdbName, 0)
}

// Verify that an integer MaxUnavailable will recompute allowed disruptions when the scale of
// the selected pod's controller is modified.
func TestIntegerMaxUnavailableWithScaling(t *testing.T) {
	dc, ps := newFakeDisruptionController()

	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(2))
	add(t, dc.pdbStore, pdb)

	rs, _ := newReplicaSet(t, 7)
	add(t, dc.rsStore, rs)

	pod, _ := newPod(t, "pod")
	updatePodOwnerToRs(t, pod, rs)
	add(t, dc.podStore, pod)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 5, 7, map[string]metav1.Time{})

	// Update scale of ReplicaSet and check PDB
	rs.Spec.Replicas = to.Int32Ptr(5)
	update(t, dc.rsStore, rs)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 3, 5, map[string]metav1.Time{})
}

// Verify that a percentage MaxUnavailable will recompute allowed disruptions when the scale of
// the selected pod's controller is modified.
func TestPercentageMaxUnavailableWithScaling(t *testing.T) {
	dc, ps := newFakeDisruptionController()

	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromString("30%"))
	add(t, dc.pdbStore, pdb)

	rs, _ := newReplicaSet(t, 7)
	add(t, dc.rsStore, rs)

	pod, _ := newPod(t, "pod")
	updatePodOwnerToRs(t, pod, rs)
	add(t, dc.podStore, pod)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 4, 7, map[string]metav1.Time{})

	// Update scale of ReplicaSet and check PDB
	rs.Spec.Replicas = to.Int32Ptr(3)
	update(t, dc.rsStore, rs)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 3, map[string]metav1.Time{})
}

// Create a pod with no controller, and verify that a PDB with a percentage
// specified won't allow a disruption.
func TestNakedPod(t *testing.T) {
	dc, ps := newFakeDisruptionController()

	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
	add(t, dc.pdbStore, pdb)
	dc.sync(pdbName)
	// This verifies that when a PDB has 0 pods, disruptions are not allowed.
	ps.VerifyDisruptionAllowed(t, pdbName, 0)

	pod, _ := newPod(t, "naked")
	add(t, dc.podStore, pod)
	dc.sync(pdbName)

	ps.VerifyDisruptionAllowed(t, pdbName, 0)
}

// Verify that we count the scale of a ReplicaSet even when it has no Deployment.
func TestReplicaSet(t *testing.T) {
	dc, ps := newFakeDisruptionController()

	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("20%"))
	add(t, dc.pdbStore, pdb)

	rs, _ := newReplicaSet(t, 10)
	add(t, dc.rsStore, rs)

	pod, _ := newPod(t, "pod")
	updatePodOwnerToRs(t, pod, rs)
	add(t, dc.podStore, pod)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 10, map[string]metav1.Time{})
}

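// TestScaleResource exercises the generic scale-client path: the pods are
// owned by a custom resource with no built-in finder, so the controller reads
// the expected scale (10 replicas) from the /scale subresource served by the
// fake scale client.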
func TestScaleResource(t *testing.T) {
	customResourceUID := uuid.NewUUID()
	replicas := int32(10)
	pods := int32(4)
	maxUnavailable := int32(5)

	dc, ps := newFakeDisruptionController()

	dc.scaleClient.AddReactor("get", "customresources", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		obj := &autoscalingapi.Scale{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: metav1.NamespaceDefault,
				UID:       customResourceUID,
			},
			Spec: autoscalingapi.ScaleSpec{
				Replicas: replicas,
			},
		}
		return true, obj, nil
	})

	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(int(maxUnavailable)))
	add(t, dc.pdbStore, pdb)

	trueVal := true
	for i := 0; i < int(pods); i++ {
		pod, _ := newPod(t, fmt.Sprintf("pod-%d", i))
		pod.SetOwnerReferences([]metav1.OwnerReference{
			{
				Kind:       customGVK.Kind,
				APIVersion: customGVK.GroupVersion().String(),
				Controller: &trueVal,
				UID:        customResourceUID,
			},
		})
		add(t, dc.podStore, pod)
	}
	dc.sync(pdbName)

	disruptionsAllowed := int32(0)
	if replicas-pods < maxUnavailable {
		disruptionsAllowed = maxUnavailable - (replicas - pods)
	}
	ps.VerifyPdbStatus(t, pdbName, disruptionsAllowed, pods, replicas-maxUnavailable, replicas, map[string]metav1.Time{})
}

// Verify that multiple controllers don't allow the PDB to be set true.
func TestMultipleControllers(t *testing.T) {
	const podCount = 2

	dc, ps := newFakeDisruptionController()

	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("1%"))
	add(t, dc.pdbStore, pdb)

	pods := []*v1.Pod{}
	for i := 0; i < podCount; i++ {
		pod, _ := newPod(t, fmt.Sprintf("pod %d", i))
		pods = append(pods, pod)
		add(t, dc.podStore, pod)
	}
	dc.sync(pdbName)

	// No controllers yet => no disruption allowed
	ps.VerifyDisruptionAllowed(t, pdbName, 0)

	rc, _ := newReplicationController(t, 1)
	rc.Name = "rc 1"
	for i := 0; i < podCount; i++ {
		updatePodOwnerToRc(t, pods[i], rc)
	}
	add(t, dc.rcStore, rc)
	dc.sync(pdbName)
	// One RC and 200%>1% healthy => disruption allowed
	ps.VerifyDisruptionAllowed(t, pdbName, 1)

	rc, _ = newReplicationController(t, 1)
	rc.Name = "rc 2"
	for i := 0; i < podCount; i++ {
		updatePodOwnerToRc(t, pods[i], rc)
	}
	add(t, dc.rcStore, rc)
	dc.sync(pdbName)

	// 100%>1% healthy BUT two RCs => no disruption allowed
	// TODO: Find out if this assert is still needed
	//ps.VerifyDisruptionAllowed(t, pdbName, 0)
}

func TestReplicationController(t *testing.T) {
	// The budget in this test matches foo=bar, but the RC and its pods match
	// {foo=bar, baz=quux}. Later, when we add a rogue pod with only a foo=bar
	// label, it will match the budget but have no controllers, which should
	// trigger the controller to set PodDisruptionAllowed to false.
	labels := map[string]string{
		"foo": "bar",
		"baz": "quux",
	}

	dc, ps := newFakeDisruptionController()

	// 34% should round up to 2
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("34%"))
	add(t, dc.pdbStore, pdb)

	rc, _ := newReplicationController(t, 3)
	rc.Spec.Selector = labels
	add(t, dc.rcStore, rc)
	dc.sync(pdbName)

	// It starts out at 0 expected because, with no pods, the PDB doesn't know
	// about the RC. This is a known bug. TODO(mml): file issue
	ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})

	pods := []*v1.Pod{}

	for i := int32(0); i < 3; i++ {
		pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
		updatePodOwnerToRc(t, pod, rc)
		pods = append(pods, pod)
		pod.Labels = labels
		add(t, dc.podStore, pod)
		dc.sync(pdbName)
		if i < 2 {
			ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3, map[string]metav1.Time{})
		} else {
			ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3, map[string]metav1.Time{})
		}
	}

	rogue, _ := newPod(t, "rogue")
	add(t, dc.podStore, rogue)
	dc.sync(pdbName)
	ps.VerifyDisruptionAllowed(t, pdbName, 0)
}

func TestStatefulSetController(t *testing.T) {
	labels := map[string]string{
		"foo": "bar",
		"baz": "quux",
	}

	dc, ps := newFakeDisruptionController()

	// 34% should round up to 2
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("34%"))
	add(t, dc.pdbStore, pdb)

	ss, _ := newStatefulSet(t, 3)
	add(t, dc.ssStore, ss)
	dc.sync(pdbName)

	// It starts out at 0 expected because, with no pods, the PDB doesn't know
	// about the SS. This is a known bug. TODO(mml): file issue
	ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})

	pods := []*v1.Pod{}

	for i := int32(0); i < 3; i++ {
		pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
		updatePodOwnerToSs(t, pod, ss)
		pods = append(pods, pod)
		pod.Labels = labels
		add(t, dc.podStore, pod)
		dc.sync(pdbName)
		if i < 2 {
			ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3, map[string]metav1.Time{})
		} else {
			ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3, map[string]metav1.Time{})
		}
	}
}

func TestTwoControllers(t *testing.T) {
	// Most of this test is in verifying intermediate cases as we define the
	// three controllers and create the pods.
	rcLabels := map[string]string{
		"foo": "bar",
		"baz": "quux",
	}
	dLabels := map[string]string{
		"foo": "bar",
		"baz": "quuux",
	}

	dc, ps := newFakeDisruptionController()

	// These constants are related, but I avoid calculating the correct values in
	// code. If you update a parameter here, recalculate the correct values for
	// all of them. Further down in the test, we use these to control loops, and
	// that level of logic is enough complexity for me.
	const collectionSize int32 = 11 // How big each collection is
	const minimumOne int32 = 4      // integer minimum with one controller
	const minimumTwo int32 = 7      // integer minimum with two controllers

	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
	add(t, dc.pdbStore, pdb)

	rc, _ := newReplicationController(t, collectionSize)
	rc.Spec.Selector = rcLabels
	add(t, dc.rcStore, rc)
	dc.sync(pdbName)

	ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})

	pods := []*v1.Pod{}

	unavailablePods := collectionSize - minimumOne - 1
	for i := int32(1); i <= collectionSize; i++ {
		pod, _ := newPod(t, fmt.Sprintf("quux %d", i))
		updatePodOwnerToRc(t, pod, rc)
		pods = append(pods, pod)
		pod.Labels = rcLabels
		if i <= unavailablePods {
			pod.Status.Conditions = []v1.PodCondition{}
		}
		add(t, dc.podStore, pod)
		dc.sync(pdbName)
		if i <= unavailablePods {
			ps.VerifyPdbStatus(t, pdbName, 0, 0, minimumOne, collectionSize, map[string]metav1.Time{})
		} else if i-unavailablePods <= minimumOne {
			ps.VerifyPdbStatus(t, pdbName, 0, i-unavailablePods, minimumOne, collectionSize, map[string]metav1.Time{})
		} else {
			ps.VerifyPdbStatus(t, pdbName, 1, i-unavailablePods, minimumOne, collectionSize, map[string]metav1.Time{})
		}
	}

	d, _ := newDeployment(t, collectionSize)
	d.Spec.Selector = newSel(dLabels)
	add(t, dc.dStore, d)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize, map[string]metav1.Time{})

	rs, _ := newReplicaSet(t, collectionSize)
	rs.Spec.Selector = newSel(dLabels)
	rs.Labels = dLabels
	add(t, dc.rsStore, rs)
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize, map[string]metav1.Time{})

	// By the end of this loop, the number of ready pods should be N+2 (hence minimumTwo+2).
	unavailablePods = 2*collectionSize - (minimumTwo + 2) - unavailablePods
	for i := int32(1); i <= collectionSize; i++ {
		pod, _ := newPod(t, fmt.Sprintf("quuux %d", i))
		updatePodOwnerToRs(t, pod, rs)
		pods = append(pods, pod)
		pod.Labels = dLabels
		if i <= unavailablePods {
			pod.Status.Conditions = []v1.PodCondition{}
		}
		add(t, dc.podStore, pod)
		dc.sync(pdbName)
		if i <= unavailablePods {
			ps.VerifyPdbStatus(t, pdbName, 0, minimumOne+1, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
		} else if i-unavailablePods <= minimumTwo-(minimumOne+1) {
			ps.VerifyPdbStatus(t, pdbName, 0, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize, map[string]metav1.Time{})
		} else {
			ps.VerifyPdbStatus(t, pdbName, i-unavailablePods-(minimumTwo-(minimumOne+1)),
				(minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize, map[string]metav1.Time{})
		}
	}

	// Now we verify we can bring down 1 pod and a disruption is still permitted,
	// but if we bring down two, it's not. Then we make the pod ready again and
	// verify that a disruption is permitted again.
	ps.VerifyPdbStatus(t, pdbName, 2, 2+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
	pods[collectionSize-1].Status.Conditions = []v1.PodCondition{}
	update(t, dc.podStore, pods[collectionSize-1])
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})

	pods[collectionSize-2].Status.Conditions = []v1.PodCondition{}
	update(t, dc.podStore, pods[collectionSize-2])
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})

	pods[collectionSize-1].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
	update(t, dc.podStore, pods[collectionSize-1])
	dc.sync(pdbName)
	ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
}

// Verify that syncing a PDB key that doesn't exist returns no error.
func TestPDBNotExist(t *testing.T) {
	dc, _ := newFakeDisruptionController()
	pdb, _ := newMinAvailablePodDisruptionBudget(t, intstr.FromString("67%"))
	add(t, dc.pdbStore, pdb)
	if err := dc.sync("notExist"); err != nil {
		t.Errorf("Unexpected error: %v, expect nil", err)
	}
}

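// TestUpdateDisruptedPods verifies that stale entries in Status.DisruptedPods
// are pruned on sync: pods whose deletion has started, entries old enough to
// have expired, and pods that no longer exist are dropped, while a fresh
// entry for an untouched pod is kept.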
func TestUpdateDisruptedPods(t *testing.T) {
	dc, ps := newFakeDisruptionController()
	dc.recheckQueue = workqueue.NewNamedDelayingQueue("pdb_queue")
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(1))
	currentTime := time.Now()
	pdb.Status.DisruptedPods = map[string]metav1.Time{
		"p1":       {Time: currentTime},                       // Should be removed, pod deletion started.
		"p2":       {Time: currentTime.Add(-5 * time.Minute)}, // Should be removed, expired.
		"p3":       {Time: currentTime},                       // Should remain, pod untouched.
		"notthere": {Time: currentTime},                       // Should be removed, pod deleted.
	}
	add(t, dc.pdbStore, pdb)

	pod1, _ := newPod(t, "p1")
	pod1.DeletionTimestamp = &metav1.Time{Time: time.Now()}
	pod2, _ := newPod(t, "p2")
	pod3, _ := newPod(t, "p3")
	add(t, dc.podStore, pod1)
	add(t, dc.podStore, pod2)
	add(t, dc.podStore, pod3)

	dc.sync(pdbName)

	ps.VerifyPdbStatus(t, pdbName, 0, 1, 1, 3, map[string]metav1.Time{"p3": {Time: currentTime}})
}

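// TestBasicFinderFunctions checks that each built-in finder (ReplicaSet,
// ReplicationController, StatefulSet) only resolves an OwnerReference whose
// group and kind match the workload it handles, and that a successful lookup
// returns the workload's UID and replica count.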
func TestBasicFinderFunctions(t *testing.T) {
	dc, _ := newFakeDisruptionController()

	rs, _ := newReplicaSet(t, 10)
	add(t, dc.rsStore, rs)
	rc, _ := newReplicationController(t, 12)
	add(t, dc.rcStore, rc)
	ss, _ := newStatefulSet(t, 14)
	add(t, dc.ssStore, ss)

	testCases := map[string]struct {
		finderFunc    podControllerFinder
		apiVersion    string
		kind          string
		name          string
		uid           types.UID
		findsScale    bool
		expectedScale int32
	}{
		"replicaset controller with apps group": {
			finderFunc:    dc.getPodReplicaSet,
			apiVersion:    "apps/v1",
			kind:          controllerKindRS.Kind,
			name:          rs.Name,
			uid:           rs.UID,
			findsScale:    true,
			expectedScale: 10,
		},
		"replicaset controller with invalid group": {
			finderFunc: dc.getPodReplicaSet,
			apiVersion: "invalid/v1",
			kind:       controllerKindRS.Kind,
			name:       rs.Name,
			uid:        rs.UID,
			findsScale: false,
		},
		"replicationcontroller with empty group": {
			finderFunc:    dc.getPodReplicationController,
			apiVersion:    "/v1",
			kind:          controllerKindRC.Kind,
			name:          rc.Name,
			uid:           rc.UID,
			findsScale:    true,
			expectedScale: 12,
		},
		"replicationcontroller with invalid group": {
			finderFunc: dc.getPodReplicationController,
			apiVersion: "apps/v1",
			kind:       controllerKindRC.Kind,
			name:       rc.Name,
			uid:        rc.UID,
			findsScale: false,
		},
		"statefulset controller with apps group": {
			finderFunc:    dc.getPodStatefulSet,
			apiVersion:    "apps/v1",
			kind:          controllerKindSS.Kind,
			name:          ss.Name,
			uid:           ss.UID,
			findsScale:    true,
			expectedScale: 14,
		},
		"statefulset controller with invalid kind": {
			finderFunc: dc.getPodStatefulSet,
			apiVersion: "apps/v1",
			kind:       controllerKindRS.Kind,
			name:       ss.Name,
			uid:        ss.UID,
			findsScale: false,
		},
	}

	for tn, tc := range testCases {
		t.Run(tn, func(t *testing.T) {
			controllerRef := &metav1.OwnerReference{
				APIVersion: tc.apiVersion,
				Kind:       tc.kind,
				Name:       tc.name,
				UID:        tc.uid,
			}

			controllerAndScale, _ := tc.finderFunc(controllerRef, metav1.NamespaceDefault)

			if controllerAndScale == nil {
				if tc.findsScale {
					t.Error("Expected scale, but got nil")
				}
				return
			}

			if got, want := controllerAndScale.scale, tc.expectedScale; got != want {
				t.Errorf("Expected scale %d, but got %d", want, got)
			}

			if got, want := controllerAndScale.UID, tc.uid; got != want {
				t.Errorf("Expected uid %s, but got %s", want, got)
			}
		})
	}
}

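// TestDeploymentFinderFunction resolves a pod's OwnerReference through the
// owning ReplicaSet up to its Deployment; the lookup should return the
// Deployment's UID and scale only when both links use a valid group and kind.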
func TestDeploymentFinderFunction(t *testing.T) {
	labels := map[string]string{
		"foo": "bar",
	}

	testCases := map[string]struct {
		rsApiVersion  string
		rsKind        string
		depApiVersion string
		depKind       string
		findsScale    bool
		expectedScale int32
	}{
		"happy path": {
			rsApiVersion:  "apps/v1",
			rsKind:        controllerKindRS.Kind,
			depApiVersion: "extensions/v1",
			depKind:       controllerKindDep.Kind,
			findsScale:    true,
			expectedScale: 10,
		},
		"invalid rs apiVersion": {
			rsApiVersion:  "invalid/v1",
			rsKind:        controllerKindRS.Kind,
			depApiVersion: "apps/v1",
			depKind:       controllerKindDep.Kind,
			findsScale:    false,
		},
		"invalid rs kind": {
			rsApiVersion:  "apps/v1",
			rsKind:        "InvalidKind",
			depApiVersion: "apps/v1",
			depKind:       controllerKindDep.Kind,
			findsScale:    false,
		},
		"invalid deployment apiVersion": {
			rsApiVersion:  "extensions/v1",
			rsKind:        controllerKindRS.Kind,
			depApiVersion: "deployment/v1",
			depKind:       controllerKindDep.Kind,
			findsScale:    false,
		},
		"invalid deployment kind": {
			rsApiVersion:  "apps/v1",
			rsKind:        controllerKindRS.Kind,
			depApiVersion: "extensions/v1",
			depKind:       "InvalidKind",
			findsScale:    false,
		},
	}

	for tn, tc := range testCases {
		t.Run(tn, func(t *testing.T) {
			dc, _ := newFakeDisruptionController()

			dep, _ := newDeployment(t, 10)
			dep.Spec.Selector = newSel(labels)
			add(t, dc.dStore, dep)

			rs, _ := newReplicaSet(t, 5)
			rs.Labels = labels
			trueVal := true
			rs.OwnerReferences = append(rs.OwnerReferences, metav1.OwnerReference{
				APIVersion: tc.depApiVersion,
				Kind:       tc.depKind,
				Name:       dep.Name,
				UID:        dep.UID,
				Controller: &trueVal,
			})
			add(t, dc.rsStore, rs)

			controllerRef := &metav1.OwnerReference{
				APIVersion: tc.rsApiVersion,
				Kind:       tc.rsKind,
				Name:       rs.Name,
				UID:        rs.UID,
			}

			controllerAndScale, _ := dc.getPodDeployment(controllerRef, metav1.NamespaceDefault)

			if controllerAndScale == nil {
				if tc.findsScale {
					t.Error("Expected scale, but got nil")
				}
				return
			}

			if got, want := controllerAndScale.scale, tc.expectedScale; got != want {
				t.Errorf("Expected scale %d, but got %d", want, got)
			}

			if got, want := controllerAndScale.UID, dep.UID; got != want {
				t.Errorf("Expected uid %s, but got %s", want, got)
			}
		})
	}
}