etcd_failure.go
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery

import (
	"time"

	"github.com/onsi/ginkgo"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/test/e2e/apps"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
	testutils "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
	f := framework.NewDefaultFramework("etcd-failure")

	ginkgo.BeforeEach(func() {
		// This test requires:
		// - SSH
		// - master access
		// ... so the provider check should be identical to the intersection of
		// providers that provide those capabilities.
		framework.SkipUnlessProviderIs("gce")
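
		// Start a single-replica pause-container RC named "baz";
		// checkExistingRCRecovers later locates its pods via the name=baz label.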
		err := framework.RunRC(testutils.RCConfig{
			Client:    f.ClientSet,
			Name:      "baz",
			Namespace: f.Namespace.Name,
			Image:     imageutils.GetPauseImageName(),
			Replicas:  1,
		})
		framework.ExpectNoError(err)
	})
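
	// Both cases below drive the same fail/heal cycle; they differ only in the
	// fault injected: dropping traffic to etcd's client port (2379), or
	// SIGKILLing the etcd process and relying on the master's process
	// supervisor to restart it.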
	ginkgo.It("should recover from network partition with master", func() {
		etcdFailTest(
			f,
			"sudo iptables -A INPUT -p tcp --destination-port 2379 -j DROP",
			"sudo iptables -D INPUT -p tcp --destination-port 2379 -j DROP",
		)
	})

	ginkgo.It("should recover from SIGKILL", func() {
		etcdFailTest(
			f,
			"pgrep etcd | xargs -I {} sudo kill -9 {}",
			"echo 'do nothing. monit should restart etcd.'",
		)
	})
})
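
// etcdFailTest breaks etcd with failCommand, verifies that the pre-existing
// replication controller recovers once fixCommand has healed etcd, and then
// runs a fresh serve-hostname RC end to end as a final sanity check.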
func etcdFailTest(f *framework.Framework, failCommand, fixCommand string) {
	doEtcdFailure(failCommand, fixCommand)

	checkExistingRCRecovers(f)

	apps.TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
}

// etcdFailureDuration is how long etcd is left broken before the fix command runs.
const etcdFailureDuration = 20 * time.Second

// doEtcdFailure fails etcd by executing failCommand once on the master. After
// etcdFailureDuration it executes fixCommand on the master, and callers go on
// to assert that etcd and the kubernetes components recover.
func doEtcdFailure(failCommand, fixCommand string) {
	ginkgo.By("failing etcd")

	masterExec(failCommand)
	time.Sleep(etcdFailureDuration)
	masterExec(fixCommand)
}
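
// masterExec runs cmd on the master node over SSH and fails the test if the
// SSH connection fails or the command exits non-zero.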
func masterExec(cmd string) {
	host := framework.GetMasterHost() + ":22"
	result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
	framework.ExpectNoError(err, "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd)
	if result.Code != 0 {
		e2essh.LogResult(result)
		framework.Failf("master exec command %q returned non-zero exit code: %d", cmd, result.Code)
	}
}
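
// checkExistingRCRecovers force-deletes the pods of the pre-existing "baz" RC
// and waits for the RC to replace them, which proves both that the apiserver
// is reachable again and that the controller manager is reconciling.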
func checkExistingRCRecovers(f *framework.Framework) {
	ginkgo.By("assert that the pre-existing replication controller recovers")
	podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
	rcSelector := labels.Set{"name": "baz"}.AsSelector()

	ginkgo.By("deleting pods from existing replication controller")
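	// List errors are expected while etcd is still recovering, so they are
	// logged and the poll retries; the first successful List marks the
	// apiserver as reachable, and its pods are deleted with a zero grace period.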
	framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		options := metav1.ListOptions{LabelSelector: rcSelector.String()}
		pods, err := podClient.List(options)
		if err != nil {
			e2elog.Logf("apiserver returned error, as expected before recovery: %v", err)
			return false, nil
		}
		if len(pods.Items) == 0 {
			return false, nil
		}
		for _, pod := range pods.Items {
			err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
			framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
		}
		e2elog.Logf("apiserver has recovered")
		return true, nil
	}))
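
	// A pod counts as recovered only if it is Ready and not already marked for
	// deletion, i.e. it is a fresh replacement created by the RC rather than
	// one of the pods deleted above.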
	ginkgo.By("waiting for replication controller to recover")
	framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		options := metav1.ListOptions{LabelSelector: rcSelector.String()}
		pods, err := podClient.List(options)
		framework.ExpectNoError(err, "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String())
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp == nil && podutil.IsPodReady(&pod) {
				return true, nil
			}
		}
		return false, nil
	}))
}