etcd_failure.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/test/e2e/apps"
	"k8s.io/kubernetes/test/e2e/framework"
	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
	testutils "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"

	"github.com/onsi/ginkgo"
)
var _ = SIGDescribe("Etcd failure [Disruptive]", func() {

	f := framework.NewDefaultFramework("etcd-failure")

	ginkgo.BeforeEach(func() {
		// This test requires:
		// - SSH
		// - master access
		// ... so the provider check should be identical to the intersection of
		// providers that provide those capabilities.
		e2eskipper.SkipUnlessProviderIs("gce")
		e2eskipper.SkipUnlessSSHKeyPresent()
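
		// Start a replication controller named "baz" so that checkExistingRCRecovers
		// has a pre-existing workload to verify after etcd is disrupted.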
		err := e2erc.RunRC(testutils.RCConfig{
			Client:    f.ClientSet,
			Name:      "baz",
			Namespace: f.Namespace.Name,
			Image:     imageutils.GetPauseImageName(),
			Replicas:  1,
		})
		framework.ExpectNoError(err)
	})

	ginkgo.It("should recover from network partition with master", func() {
		etcdFailTest(
			f,
			"sudo iptables -A INPUT -p tcp --destination-port 2379 -j DROP",
			"sudo iptables -D INPUT -p tcp --destination-port 2379 -j DROP",
		)
	})

	ginkgo.It("should recover from SIGKILL", func() {
		etcdFailTest(
			f,
			"pgrep etcd | xargs -I {} sudo kill -9 {}",
			"echo 'do nothing. monit should restart etcd.'",
		)
	})
})
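
// etcdFailTest injects the given failure on the master, verifies that the
// pre-existing replication controller recovers, and then runs a basic
// serve-hostname RC test to confirm the cluster is functional again.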
func etcdFailTest(f *framework.Framework, failCommand, fixCommand string) {
	doEtcdFailure(failCommand, fixCommand)

	checkExistingRCRecovers(f)

	apps.TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
}

// For this duration, etcd will be failed by executing failCommand on the master.
// After this duration, we execute fixCommand on the master and go on to assert
// that etcd and the kubernetes components recover.
const etcdFailureDuration = 20 * time.Second

func doEtcdFailure(failCommand, fixCommand string) {
	ginkgo.By("failing etcd")

	masterExec(failCommand)
	time.Sleep(etcdFailureDuration)
	masterExec(fixCommand)
}
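
// masterExec runs the given command on the master node over SSH and fails the
// test if the SSH connection errors or the command exits non-zero.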
func masterExec(cmd string) {
	host := framework.GetMasterHost() + ":22"
	result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
	framework.ExpectNoError(err, "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd)
	if result.Code != 0 {
		e2essh.LogResult(result)
		framework.Failf("master exec command returned non-zero")
	}
}
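
// checkExistingRCRecovers deletes the pods of the pre-existing "baz" replication
// controller (polling until the apiserver responds again), then waits for the
// controller to bring up a ready replacement pod.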
func checkExistingRCRecovers(f *framework.Framework) {
	ginkgo.By("assert that the pre-existing replication controller recovers")
	podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
	rcSelector := labels.Set{"name": "baz"}.AsSelector()

	ginkgo.By("deleting pods from existing replication controller")
	framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		options := metav1.ListOptions{LabelSelector: rcSelector.String()}
		pods, err := podClient.List(context.TODO(), options)
		if err != nil {
			framework.Logf("apiserver returned error, as expected before recovery: %v", err)
			return false, nil
		}
		if len(pods.Items) == 0 {
			return false, nil
		}
		for _, pod := range pods.Items {
			err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
			framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
		}
		framework.Logf("apiserver has recovered")
		return true, nil
	}))

	ginkgo.By("waiting for replication controller to recover")
	framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		options := metav1.ListOptions{LabelSelector: rcSelector.String()}
		pods, err := podClient.List(context.TODO(), options)
		framework.ExpectNoError(err, "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String())
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp == nil && podutil.IsPodReady(&pod) {
				return true, nil
			}
		}
		return false, nil
	}))
}