disruption.go 22 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620
  1. /*
  2. Copyright 2016 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package apps
  14. import (
  15. "context"
  16. "fmt"
  17. "time"
  18. jsonpatch "github.com/evanphx/json-patch"
  19. "github.com/onsi/ginkgo"
  20. "github.com/onsi/gomega"
  21. appsv1 "k8s.io/api/apps/v1"
  22. v1 "k8s.io/api/core/v1"
  23. policyv1beta1 "k8s.io/api/policy/v1beta1"
  24. apierrors "k8s.io/apimachinery/pkg/api/errors"
  25. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  26. "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
  27. "k8s.io/apimachinery/pkg/labels"
  28. "k8s.io/apimachinery/pkg/runtime"
  29. "k8s.io/apimachinery/pkg/types"
  30. "k8s.io/apimachinery/pkg/util/intstr"
  31. "k8s.io/apimachinery/pkg/util/json"
  32. "k8s.io/apimachinery/pkg/util/wait"
  33. "k8s.io/client-go/dynamic"
  34. "k8s.io/client-go/kubernetes"
  35. clientscheme "k8s.io/client-go/kubernetes/scheme"
  36. "k8s.io/client-go/util/retry"
  37. podutil "k8s.io/kubernetes/pkg/api/v1/pod"
  38. "k8s.io/kubernetes/test/e2e/framework"
  39. e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
  40. imageutils "k8s.io/kubernetes/test/utils/image"
  41. )
// schedulingTimeout is longer specifically because sometimes we need to wait
// awhile to guarantee that we've been patient waiting for something ordinary
// to happen: a pod to get scheduled and move into Ready
const (
	// bigClusterSize is the node count at or above which the "exclusive"
	// eviction cases are skipped (they assume fewer nodes than replicas).
	bigClusterSize = 7
	// schedulingTimeout bounds waits for pods to schedule and become Ready.
	schedulingTimeout = 10 * time.Minute
	// timeout bounds waits on the PDB controller observing state changes.
	timeout = 60 * time.Second
	// defaultName is the name used for most PDBs created by these tests.
	defaultName = "foo"
)

// defaultLabels is the label set shared by the test pods, replica sets, and
// PDB selectors in this file.
var defaultLabels = map[string]string{"foo": "bar"}
// E2E suite for the DisruptionController: exercises PodDisruptionBudget (PDB)
// creation, status update/patch, listing/deletion, and the eviction
// subresource's enforcement of budgets.
var _ = SIGDescribe("DisruptionController", func() {
	f := framework.NewDefaultFramework("disruption")
	var ns string
	var cs kubernetes.Interface
	var dc dynamic.Interface

	ginkgo.BeforeEach(func() {
		// Capture the per-test clients and namespace created by the framework.
		cs = f.ClientSet
		ns = f.Namespace.Name
		dc = f.DynamicClient
	})

	ginkgo.Context("Listing PodDisruptionBudgets for all namespaces", func() {
		// A second framework provides a second namespace so that the
		// all-namespaces listing can be distinguished from the
		// single-namespace listing below.
		anotherFramework := framework.NewDefaultFramework("disruption-2")

		ginkgo.It("should list and delete a collection of PodDisruptionBudgets", func() {
			specialLabels := map[string]string{"foo_pdb": "bar_pdb"}
			labelSelector := labels.SelectorFromSet(specialLabels).String()
			createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(2), specialLabels)
			createPDBMinAvailableOrDie(cs, ns, "foo2", intstr.FromString("1%"), specialLabels)
			createPDBMinAvailableOrDie(anotherFramework.ClientSet, anotherFramework.Namespace.Name, "foo3", intstr.FromInt(2), specialLabels)
			ginkgo.By("listing a collection of PDBs across all namespaces")
			listPDBs(cs, metav1.NamespaceAll, labelSelector, 3, []string{defaultName, "foo2", "foo3"})
			ginkgo.By("listing a collection of PDBs in namespace " + ns)
			listPDBs(cs, ns, labelSelector, 2, []string{defaultName, "foo2"})
			deletePDBCollection(cs, ns)
		})
	})

	ginkgo.It("should create a PodDisruptionBudget", func() {
		createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromString("1%"), defaultLabels)
	})

	ginkgo.It("should observe PodDisruptionBudget status updated", func() {
		createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(1), defaultLabels)
		createPodsOrDie(cs, ns, 3)
		waitForPodsOrDie(cs, ns, 3)
		// Since disruptionAllowed starts out 0, if we see it ever become positive,
		// that means the controller is working.
		err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
			pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.TODO(), defaultName, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			return pdb.Status.DisruptionsAllowed > 0, nil
		})
		framework.ExpectNoError(err)
	})

	ginkgo.It("should update/patch PodDisruptionBudget status", func() {
		createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(1), defaultLabels)

		ginkgo.By("Updating PodDisruptionBudget status")
		// PDB status can be updated by both PDB controller and the status API. The test selects `DisruptedPods` field to show immediate update via API.
		// The pod has to exist, otherwise will be removed by the controller. Other fields may not reflect the change from API.
		createPodsOrDie(cs, ns, 1)
		waitForPodsOrDie(cs, ns, 1)
		// NOTE(review): locateRunningPod's error is ignored here; the
		// waitForPodsOrDie call above makes a miss unlikely, but a nil pod
		// would panic below — consider checking the error.
		pod, _ := locateRunningPod(cs, ns)
		updatePDBOrDie(cs, ns, defaultName, func(old *policyv1beta1.PodDisruptionBudget) *policyv1beta1.PodDisruptionBudget {
			old.Status.DisruptedPods = make(map[string]metav1.Time)
			old.Status.DisruptedPods[pod.Name] = metav1.NewTime(time.Now())
			return old
		}, cs.PolicyV1beta1().PodDisruptionBudgets(ns).UpdateStatus)
		// fetch again to make sure the update from API was effective
		updated := getPDBStatusOrDie(dc, ns, defaultName)
		framework.ExpectHaveKey(updated.Status.DisruptedPods, pod.Name, "Expecting the DisruptedPods have %s", pod.Name)

		ginkgo.By("Patching PodDisruptionBudget status")
		patched, _ := patchPDBOrDie(cs, dc, ns, defaultName, func(old *policyv1beta1.PodDisruptionBudget) (bytes []byte, err error) {
			// Build a merge patch that clears DisruptedPods.
			// NOTE(review): Marshal errors are silently discarded here.
			oldBytes, _ := json.Marshal(old)
			old.Status.DisruptedPods = make(map[string]metav1.Time)
			newBytes, _ := json.Marshal(old)
			return jsonpatch.CreateMergePatch(oldBytes, newBytes)
		}, "status")
		framework.ExpectEmpty(patched.Status.DisruptedPods, "Expecting the PodDisruptionBudget's be empty")
	})

	// Table of eviction scenarios: each entry describes a pod / replica-set
	// population plus an optional PDB (minAvailable or maxUnavailable — an
	// empty string means "no PDB of that kind") and whether the eviction API
	// should deny the request.
	evictionCases := []struct {
		description        string
		minAvailable       intstr.IntOrString
		maxUnavailable     intstr.IntOrString
		podCount           int
		replicaSetSize     int32
		shouldDeny         bool
		exclusive          bool
		skipForBigClusters bool
	}{
		{
			description:    "no PDB",
			minAvailable:   intstr.FromString(""),
			maxUnavailable: intstr.FromString(""),
			podCount:       1,
			shouldDeny:     false,
		}, {
			description:    "too few pods, absolute",
			minAvailable:   intstr.FromInt(2),
			maxUnavailable: intstr.FromString(""),
			podCount:       2,
			shouldDeny:     true,
		}, {
			description:    "enough pods, absolute",
			minAvailable:   intstr.FromInt(2),
			maxUnavailable: intstr.FromString(""),
			podCount:       3,
			shouldDeny:     false,
		}, {
			description:    "enough pods, replicaSet, percentage",
			minAvailable:   intstr.FromString("90%"),
			maxUnavailable: intstr.FromString(""),
			replicaSetSize: 10,
			exclusive:      false,
			shouldDeny:     false,
		}, {
			description:    "too few pods, replicaSet, percentage",
			minAvailable:   intstr.FromString("90%"),
			maxUnavailable: intstr.FromString(""),
			replicaSetSize: 10,
			exclusive:      true,
			shouldDeny:     true,
			// This tests assumes that there is less than replicaSetSize nodes in the cluster.
			skipForBigClusters: true,
		},
		{
			description:    "maxUnavailable allow single eviction, percentage",
			minAvailable:   intstr.FromString(""),
			maxUnavailable: intstr.FromString("10%"),
			replicaSetSize: 10,
			exclusive:      false,
			shouldDeny:     false,
		},
		{
			description:    "maxUnavailable deny evictions, integer",
			minAvailable:   intstr.FromString(""),
			maxUnavailable: intstr.FromInt(1),
			replicaSetSize: 10,
			exclusive:      true,
			shouldDeny:     true,
			// This tests assumes that there is less than replicaSetSize nodes in the cluster.
			skipForBigClusters: true,
		},
	}
	for i := range evictionCases {
		// Copy the case so the ginkgo.It closure below does not share the
		// loop variable across iterations.
		c := evictionCases[i]
		expectation := "should allow an eviction"
		if c.shouldDeny {
			expectation = "should not allow an eviction"
		}
		// tests with exclusive set to true relies on HostPort to make sure
		// only one pod from the replicaset is assigned to each node. This
		// requires these tests to be run serially.
		var serial string
		if c.exclusive {
			serial = " [Serial]"
		}
		ginkgo.It(fmt.Sprintf("evictions: %s => %s%s", c.description, expectation, serial), func() {
			if c.skipForBigClusters {
				e2eskipper.SkipUnlessNodeCountIsAtMost(bigClusterSize - 1)
			}
			createPodsOrDie(cs, ns, c.podCount)
			if c.replicaSetSize > 0 {
				createReplicaSetOrDie(cs, ns, c.replicaSetSize, c.exclusive)
			}
			if c.minAvailable.String() != "" {
				createPDBMinAvailableOrDie(cs, ns, defaultName, c.minAvailable, defaultLabels)
			}
			if c.maxUnavailable.String() != "" {
				createPDBMaxUnavailableOrDie(cs, ns, defaultName, c.maxUnavailable)
			}
			// Locate a running pod.
			pod, err := locateRunningPod(cs, ns)
			framework.ExpectNoError(err)
			e := &policyv1beta1.Eviction{
				ObjectMeta: metav1.ObjectMeta{
					Name:      pod.Name,
					Namespace: ns,
				},
			}
			if c.shouldDeny {
				err = cs.CoreV1().Pods(ns).Evict(e)
				gomega.Expect(err).Should(gomega.MatchError("Cannot evict pod as it would violate the pod's disruption budget."))
			} else {
				// Only wait for running pods in the "allow" case
				// because one of shouldDeny cases relies on the
				// replicaSet not fitting on the cluster.
				waitForPodsOrDie(cs, ns, c.podCount+int(c.replicaSetSize))
				// Since disruptionAllowed starts out false, if an eviction is ever allowed,
				// that means the controller is working.
				err = wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
					err = cs.CoreV1().Pods(ns).Evict(e)
					if err != nil {
						return false, nil
					}
					return true, nil
				})
				framework.ExpectNoError(err)
			}
		})
	}

	ginkgo.It("should block an eviction until the PDB is updated to allow it", func() {
		ginkgo.By("Creating a pdb that targets all three pods in a test replica set")
		createPDBMinAvailableOrDie(cs, ns, defaultName, intstr.FromInt(3), defaultLabels)
		createReplicaSetOrDie(cs, ns, 3, false)

		ginkgo.By("First trying to evict a pod which shouldn't be evictable")
		pod, err := locateRunningPod(cs, ns)
		framework.ExpectNoError(err)
		waitForPodsOrDie(cs, ns, 3) // make sure that they are running and so would be evictable with a different pdb
		e := &policyv1beta1.Eviction{
			ObjectMeta: metav1.ObjectMeta{
				Name:      pod.Name,
				Namespace: ns,
			},
		}
		err = cs.CoreV1().Pods(ns).Evict(e)
		gomega.Expect(err).Should(gomega.MatchError("Cannot evict pod as it would violate the pod's disruption budget."))

		ginkgo.By("Updating the pdb to allow a pod to be evicted")
		updatePDBOrDie(cs, ns, defaultName, func(pdb *policyv1beta1.PodDisruptionBudget) *policyv1beta1.PodDisruptionBudget {
			newMinAvailable := intstr.FromInt(2)
			pdb.Spec.MinAvailable = &newMinAvailable
			return pdb
		}, cs.PolicyV1beta1().PodDisruptionBudgets(ns).Update)

		ginkgo.By("Trying to evict the same pod we tried earlier which should now be evictable")
		waitForPodsOrDie(cs, ns, 3)
		waitForPdbToObserveHealthyPods(cs, ns, 3)
		err = cs.CoreV1().Pods(ns).Evict(e)
		framework.ExpectNoError(err) // the eviction is now allowed

		ginkgo.By("Patching the pdb to disallow a pod to be evicted")
		patchPDBOrDie(cs, dc, ns, defaultName, func(old *policyv1beta1.PodDisruptionBudget) (bytes []byte, err error) {
			// Switch the budget from minAvailable to maxUnavailable=0,
			// which disallows all evictions.
			// NOTE(review): the first Marshal's error is never checked.
			oldData, err := json.Marshal(old)
			old.Spec.MinAvailable = nil
			maxUnavailable := intstr.FromInt(0)
			old.Spec.MaxUnavailable = &maxUnavailable
			newData, _ := json.Marshal(old)
			return jsonpatch.CreateMergePatch(oldData, newData)
		})
		pod, err = locateRunningPod(cs, ns) // locate a new running pod
		framework.ExpectNoError(err)
		waitForPodsOrDie(cs, ns, 3)
		e = &policyv1beta1.Eviction{
			ObjectMeta: metav1.ObjectMeta{
				Name:      pod.Name,
				Namespace: ns,
			},
		}
		err = cs.CoreV1().Pods(ns).Evict(e)
		gomega.Expect(err).Should(gomega.MatchError("Cannot evict pod as it would violate the pod's disruption budget."))

		ginkgo.By("Deleting the pdb to allow a pod to be evicted")
		deletePDBOrDie(cs, ns, defaultName)

		ginkgo.By("Trying to evict the same pod we tried earlier which should now be evictable")
		waitForPodsOrDie(cs, ns, 3)
		err = cs.CoreV1().Pods(ns).Evict(e)
		framework.ExpectNoError(err) // the eviction is now allowed
	})
})
  296. func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, name string, minAvailable intstr.IntOrString, labels map[string]string) {
  297. pdb := policyv1beta1.PodDisruptionBudget{
  298. ObjectMeta: metav1.ObjectMeta{
  299. Name: name,
  300. Namespace: ns,
  301. Labels: labels,
  302. },
  303. Spec: policyv1beta1.PodDisruptionBudgetSpec{
  304. Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
  305. MinAvailable: &minAvailable,
  306. },
  307. }
  308. _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(context.TODO(), &pdb, metav1.CreateOptions{})
  309. framework.ExpectNoError(err, "Waiting for the pdb to be created with minAvailable %d in namespace %s", minAvailable.IntVal, ns)
  310. waitForPdbToBeProcessed(cs, ns, name)
  311. }
  312. func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, name string, maxUnavailable intstr.IntOrString) {
  313. pdb := policyv1beta1.PodDisruptionBudget{
  314. ObjectMeta: metav1.ObjectMeta{
  315. Name: name,
  316. Namespace: ns,
  317. },
  318. Spec: policyv1beta1.PodDisruptionBudgetSpec{
  319. Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
  320. MaxUnavailable: &maxUnavailable,
  321. },
  322. }
  323. _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(context.TODO(), &pdb, metav1.CreateOptions{})
  324. framework.ExpectNoError(err, "Waiting for the pdb to be created with maxUnavailable %d in namespace %s", maxUnavailable.IntVal, ns)
  325. waitForPdbToBeProcessed(cs, ns, name)
  326. }
// updateFunc mutates a fetched PDB in place and returns the object to submit.
type updateFunc func(pdb *policyv1beta1.PodDisruptionBudget) *policyv1beta1.PodDisruptionBudget

// updateRestAPI is the client-go Update/UpdateStatus method shape, letting
// updatePDBOrDie target either the main resource or the status subresource.
type updateRestAPI func(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudget, opts metav1.UpdateOptions) (*policyv1beta1.PodDisruptionBudget, error)

// patchFunc produces the patch bytes to apply against the given PDB.
type patchFunc func(pdb *policyv1beta1.PodDisruptionBudget) ([]byte, error)
  330. func updatePDBOrDie(cs kubernetes.Interface, ns string, name string, f updateFunc, api updateRestAPI) (updated *policyv1beta1.PodDisruptionBudget, err error) {
  331. err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
  332. old, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.TODO(), name, metav1.GetOptions{})
  333. if err != nil {
  334. return err
  335. }
  336. old = f(old)
  337. if updated, err = api(context.TODO(), old, metav1.UpdateOptions{}); err != nil {
  338. return err
  339. }
  340. return nil
  341. })
  342. framework.ExpectNoError(err, "Waiting for the PDB update to be processed in namespace %s", ns)
  343. waitForPdbToBeProcessed(cs, ns, name)
  344. return updated, err
  345. }
  346. func patchPDBOrDie(cs kubernetes.Interface, dc dynamic.Interface, ns string, name string, f patchFunc, subresources ...string) (updated *policyv1beta1.PodDisruptionBudget, err error) {
  347. err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
  348. old := getPDBStatusOrDie(dc, ns, name)
  349. patchBytes, err := f(old)
  350. if updated, err = cs.PolicyV1beta1().PodDisruptionBudgets(ns).Patch(context.TODO(), old.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, subresources...); err != nil {
  351. return err
  352. }
  353. return nil
  354. })
  355. framework.ExpectNoError(err, "Waiting for the pdb update to be processed in namespace %s", ns)
  356. waitForPdbToBeProcessed(cs, ns, name)
  357. return updated, err
  358. }
  359. func deletePDBOrDie(cs kubernetes.Interface, ns string, name string) {
  360. err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Delete(context.TODO(), name, &metav1.DeleteOptions{})
  361. framework.ExpectNoError(err, "Deleting pdb in namespace %s", ns)
  362. waitForPdbToBeDeleted(cs, ns, name)
  363. }
  364. func listPDBs(cs kubernetes.Interface, ns string, labelSelector string, count int, expectedPDBNames []string) {
  365. pdbList, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector})
  366. framework.ExpectNoError(err, "Listing PDB set in namespace %s", ns)
  367. framework.ExpectEqual(len(pdbList.Items), count, "Expecting %d PDBs returned in namespace %s", count, ns)
  368. pdbNames := make([]string, 0)
  369. for _, item := range pdbList.Items {
  370. pdbNames = append(pdbNames, item.Name)
  371. }
  372. framework.ExpectConsistOf(pdbNames, expectedPDBNames, "Expecting returned PDBs '%s' in namespace %s", expectedPDBNames, ns)
  373. }
  374. func deletePDBCollection(cs kubernetes.Interface, ns string) {
  375. ginkgo.By("deleting a collection of PDBs")
  376. err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).DeleteCollection(context.TODO(), &metav1.DeleteOptions{}, metav1.ListOptions{})
  377. framework.ExpectNoError(err, "Deleting PDB set in namespace %s", ns)
  378. waitForPDBCollectionToBeDeleted(cs, ns)
  379. }
  380. func waitForPDBCollectionToBeDeleted(cs kubernetes.Interface, ns string) {
  381. ginkgo.By("Waiting for the PDB collection to be deleted")
  382. wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
  383. pdbList, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).List(context.TODO(), metav1.ListOptions{})
  384. if err != nil {
  385. return false, err
  386. }
  387. framework.ExpectNoError(err, "Listing PDB set in namespace %s", ns)
  388. framework.ExpectEqual(len(pdbList.Items), 0, "Expecting No PDBs returned in namespace %s", ns)
  389. return true, nil
  390. })
  391. }
  392. func createPodsOrDie(cs kubernetes.Interface, ns string, n int) {
  393. for i := 0; i < n; i++ {
  394. pod := &v1.Pod{
  395. ObjectMeta: metav1.ObjectMeta{
  396. Name: fmt.Sprintf("pod-%d", i),
  397. Namespace: ns,
  398. Labels: map[string]string{"foo": "bar"},
  399. },
  400. Spec: v1.PodSpec{
  401. Containers: []v1.Container{
  402. {
  403. Name: "busybox",
  404. Image: imageutils.GetE2EImage(imageutils.EchoServer),
  405. },
  406. },
  407. RestartPolicy: v1.RestartPolicyAlways,
  408. },
  409. }
  410. _, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
  411. framework.ExpectNoError(err, "Creating pod %q in namespace %q", pod.Name, ns)
  412. }
  413. }
  414. func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) {
  415. ginkgo.By("Waiting for all pods to be running")
  416. err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
  417. pods, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: "foo=bar"})
  418. if err != nil {
  419. return false, err
  420. }
  421. if pods == nil {
  422. return false, fmt.Errorf("pods is nil")
  423. }
  424. if len(pods.Items) < n {
  425. framework.Logf("pods: %v < %v", len(pods.Items), n)
  426. return false, nil
  427. }
  428. ready := 0
  429. for i := range pods.Items {
  430. pod := pods.Items[i]
  431. if podutil.IsPodReady(&pod) {
  432. ready++
  433. }
  434. }
  435. if ready < n {
  436. framework.Logf("running pods: %v < %v", ready, n)
  437. return false, nil
  438. }
  439. return true, nil
  440. })
  441. framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns)
  442. }
  443. func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclusive bool) {
  444. container := v1.Container{
  445. Name: "busybox",
  446. Image: imageutils.GetE2EImage(imageutils.EchoServer),
  447. }
  448. if exclusive {
  449. container.Ports = []v1.ContainerPort{
  450. {HostPort: 5555, ContainerPort: 5555},
  451. }
  452. }
  453. rs := &appsv1.ReplicaSet{
  454. ObjectMeta: metav1.ObjectMeta{
  455. Name: "rs",
  456. Namespace: ns,
  457. },
  458. Spec: appsv1.ReplicaSetSpec{
  459. Replicas: &size,
  460. Selector: &metav1.LabelSelector{
  461. MatchLabels: map[string]string{"foo": "bar"},
  462. },
  463. Template: v1.PodTemplateSpec{
  464. ObjectMeta: metav1.ObjectMeta{
  465. Labels: map[string]string{"foo": "bar"},
  466. },
  467. Spec: v1.PodSpec{
  468. Containers: []v1.Container{container},
  469. },
  470. },
  471. },
  472. }
  473. _, err := cs.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{})
  474. framework.ExpectNoError(err, "Creating replica set %q in namespace %q", rs.Name, ns)
  475. }
  476. func locateRunningPod(cs kubernetes.Interface, ns string) (pod *v1.Pod, err error) {
  477. ginkgo.By("locating a running pod")
  478. err = wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
  479. podList, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
  480. if err != nil {
  481. return false, err
  482. }
  483. for i := range podList.Items {
  484. p := podList.Items[i]
  485. if podutil.IsPodReady(&p) {
  486. pod = &p
  487. return true, nil
  488. }
  489. }
  490. return false, nil
  491. })
  492. return pod, err
  493. }
  494. func waitForPdbToBeProcessed(cs kubernetes.Interface, ns string, name string) {
  495. ginkgo.By("Waiting for the pdb to be processed")
  496. err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
  497. pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.TODO(), name, metav1.GetOptions{})
  498. if err != nil {
  499. return false, err
  500. }
  501. if pdb.Status.ObservedGeneration < pdb.Generation {
  502. return false, nil
  503. }
  504. return true, nil
  505. })
  506. framework.ExpectNoError(err, "Waiting for the pdb to be processed in namespace %s", ns)
  507. }
  508. func waitForPdbToBeDeleted(cs kubernetes.Interface, ns string, name string) {
  509. ginkgo.By("Waiting for the pdb to be deleted")
  510. err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
  511. _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.TODO(), name, metav1.GetOptions{})
  512. if apierrors.IsNotFound(err) {
  513. return true, nil // done
  514. }
  515. if err != nil {
  516. return false, err
  517. }
  518. return false, nil
  519. })
  520. framework.ExpectNoError(err, "Waiting for the pdb to be deleted in namespace %s", ns)
  521. }
  522. func waitForPdbToObserveHealthyPods(cs kubernetes.Interface, ns string, healthyCount int32) {
  523. ginkgo.By("Waiting for the pdb to observed all healthy pods")
  524. err := wait.PollImmediate(framework.Poll, wait.ForeverTestTimeout, func() (bool, error) {
  525. pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.TODO(), "foo", metav1.GetOptions{})
  526. if err != nil {
  527. return false, err
  528. }
  529. if pdb.Status.CurrentHealthy != healthyCount {
  530. return false, nil
  531. }
  532. return true, nil
  533. })
  534. framework.ExpectNoError(err, "Waiting for the pdb in namespace %s to observed %d healthy pods", ns, healthyCount)
  535. }
  536. func getPDBStatusOrDie(dc dynamic.Interface, ns string, name string) *policyv1beta1.PodDisruptionBudget {
  537. pdbStatusResource := policyv1beta1.SchemeGroupVersion.WithResource("poddisruptionbudgets")
  538. unstruct, err := dc.Resource(pdbStatusResource).Namespace(ns).Get(name, metav1.GetOptions{}, "status")
  539. pdb, err := unstructuredToPDB(unstruct)
  540. framework.ExpectNoError(err, "Getting the status of the pdb %s in namespace %s", name, ns)
  541. return pdb
  542. }
  543. func unstructuredToPDB(obj *unstructured.Unstructured) (*policyv1beta1.PodDisruptionBudget, error) {
  544. json, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
  545. if err != nil {
  546. return nil, err
  547. }
  548. pdb := &policyv1beta1.PodDisruptionBudget{}
  549. err = runtime.DecodeInto(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), json, pdb)
  550. pdb.Kind = ""
  551. pdb.APIVersion = ""
  552. return pdb, err
  553. }