wait.go

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package node

import (
	"context"
	"fmt"
	"regexp"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	"k8s.io/kubernetes/test/e2e/system"
	testutils "k8s.io/kubernetes/test/utils"
)

const sleepTime = 20 * time.Second

var requiredPerNodePods = []*regexp.Regexp{
	regexp.MustCompile(".*kube-proxy.*"),
	regexp.MustCompile(".*fluentd-elasticsearch.*"),
	regexp.MustCompile(".*node-problem-detector.*"),
}

// WaitForReadyNodes waits up to timeout for the cluster to reach the desired
// size and for all nodes in it to be ready. By cluster size we mean the number
// of Nodes excluding the Master Node.
func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) error {
	_, err := CheckReady(c, size, timeout)
	return err
}
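
// Example (sketch): gating a test on a three-node cluster becoming ready,
// assuming `c` is a configured clientset; the node count and timeout are
// illustrative values, not defaults from this package:
//
//	if err := WaitForReadyNodes(c, 3, 10*time.Minute); err != nil {
//		e2elog.Logf("cluster did not reach the desired ready size: %v", err)
//	}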

// WaitForTotalHealthy checks whether all registered nodes are ready and all required Pods are running on them.
func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
	e2elog.Logf("Waiting up to %v for all nodes to be ready", timeout)

	var notReady []v1.Node
	var missingPodsPerNode map[string][]string
	err := wait.PollImmediate(poll, timeout, func() (bool, error) {
		notReady = nil
		// It should be OK to list unschedulable Nodes here.
		nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"})
		if err != nil {
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		for _, node := range nodes.Items {
			if !IsConditionSetAsExpected(&node, v1.NodeReady, true) {
				notReady = append(notReady, node)
			}
		}
		pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"})
		if err != nil {
			return false, err
		}

		systemPodsPerNode := make(map[string][]string)
		for _, pod := range pods.Items {
			if pod.Namespace == metav1.NamespaceSystem && pod.Status.Phase == v1.PodRunning {
				if pod.Spec.NodeName != "" {
					systemPodsPerNode[pod.Spec.NodeName] = append(systemPodsPerNode[pod.Spec.NodeName], pod.Name)
				}
			}
		}
		missingPodsPerNode = make(map[string][]string)
		for _, node := range nodes.Items {
			if !system.DeprecatedMightBeMasterNode(node.Name) {
				for _, requiredPod := range requiredPerNodePods {
					foundRequired := false
					for _, presentPod := range systemPodsPerNode[node.Name] {
						if requiredPod.MatchString(presentPod) {
							foundRequired = true
							break
						}
					}
					if !foundRequired {
						missingPodsPerNode[node.Name] = append(missingPodsPerNode[node.Name], requiredPod.String())
					}
				}
			}
		}
		return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
	})

	if err != nil && err != wait.ErrWaitTimeout {
		return err
	}

	if len(notReady) > 0 {
		return fmt.Errorf("Not ready nodes: %v", notReady)
	}
	if len(missingPodsPerNode) > 0 {
		return fmt.Errorf("Not running system Pods: %v", missingPodsPerNode)
	}
	return nil
}
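
// Example (sketch): using WaitForTotalHealthy as a pre-test health gate,
// assuming `c` is a configured clientset; the timeout is an illustrative
// value. The returned error reports not-ready nodes and missing required
// system pods separately:
//
//	if err := WaitForTotalHealthy(c, 5*time.Minute); err != nil {
//		e2elog.Logf("cluster not fully healthy: %v", err)
//	}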

// WaitConditionToBe returns whether the condition of node "name" matches
// wantTrue within timeout. If wantTrue is true, it ensures the node condition
// status is ConditionTrue; if wantTrue is false, it ensures the node condition
// is in any state other than ConditionTrue (e.g. false or unknown).
func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
	e2elog.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
		node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			e2elog.Logf("Couldn't get node %s", name)
			continue
		}
		if IsConditionSetAsExpected(node, conditionType, wantTrue) {
			return true
		}
	}
	e2elog.Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
	return false
}

// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the
// readiness condition is anything but ready, e.g. false or unknown) within
// timeout.
func WaitForNodeToBeNotReady(c clientset.Interface, name string, timeout time.Duration) bool {
	return WaitConditionToBe(c, name, v1.NodeReady, false, timeout)
}

// WaitForNodeToBeReady returns whether node name is ready within timeout.
func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool {
	return WaitConditionToBe(c, name, v1.NodeReady, true, timeout)
}
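
// Example (sketch): the condition helpers above can gate a test on a node
// transition; `c` is assumed to be a configured clientset and "node-1" an
// illustrative node name:
//
//	if !WaitForNodeToBeReady(c, "node-1", 2*time.Minute) {
//		e2elog.Logf("node-1 never became ready")
//	}
//	// WaitConditionToBe also covers other conditions, e.g. waiting for
//	// memory pressure to clear:
//	if !WaitConditionToBe(c, "node-1", v1.NodeMemoryPressure, false, time.Minute) {
//		e2elog.Logf("node-1 still reports memory pressure")
//	}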

// CheckReady waits up to timeout for the cluster to reach the desired size and
// for all nodes in it to be ready, and returns the ready nodes. By cluster
// size we mean the number of Nodes excluding the Master Node.
func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(sleepTime) {
		nodes, err := waitListSchedulableNodes(c)
		if err != nil {
			e2elog.Logf("Failed to list nodes: %v", err)
			continue
		}
		numNodes := len(nodes.Items)

		// Filter out not-ready nodes.
		Filter(nodes, func(node v1.Node) bool {
			nodeReady := IsConditionSetAsExpected(&node, v1.NodeReady, true)
			networkReady := isConditionUnset(&node, v1.NodeNetworkUnavailable) || IsConditionSetAsExpected(&node, v1.NodeNetworkUnavailable, false)
			return nodeReady && networkReady
		})
		numReady := len(nodes.Items)

		if numNodes == size && numReady == size {
			e2elog.Logf("Cluster has reached the desired number of ready nodes %d", size)
			return nodes.Items, nil
		}
		e2elog.Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady)
	}
	return nil, fmt.Errorf("timeout waiting %v for number of ready nodes to be %d", timeout, size)
}
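
// Example (sketch): unlike WaitForReadyNodes, CheckReady also returns the
// ready nodes, which helps when a test needs to pick one to operate on;
// `c` is assumed to be a configured clientset and the values are illustrative:
//
//	nodes, err := CheckReady(c, 3, 10*time.Minute)
//	if err != nil {
//		return err
//	}
//	e2elog.Logf("targeting node %s", nodes[0].Name)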

// waitListSchedulableNodes is a wrapper around listing nodes supporting retries.
func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
	var nodes *v1.NodeList
	var err error
	if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
		nodes, err = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector().String()})
		if err != nil {
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		return true, nil
	}) != nil {
		return nodes, err
	}
	return nodes, nil
}

// checkWaitListSchedulableNodes is a wrapper around listing nodes supporting retries.
func checkWaitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
	nodes, err := waitListSchedulableNodes(c)
	if err != nil {
		return nil, fmt.Errorf("error: %s. Non-retryable failure or timed out while listing nodes for e2e cluster", err)
	}
	return nodes, nil
}

// CheckReadyForTests returns a function, usable in polling methods, which
// checks that the nodes are in a testable state based on schedulability.
func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowedNotReadyNodes, largeClusterThreshold int) func() (bool, error) {
	attempt := 0
	var notSchedulable []*v1.Node
	return func() (bool, error) {
		attempt++
		notSchedulable = nil
		opts := metav1.ListOptions{
			ResourceVersion: "0",
			FieldSelector:   fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
		}
		nodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
		if err != nil {
			e2elog.Logf("Unexpected error listing nodes: %v", err)
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		for i := range nodes.Items {
			node := &nodes.Items[i]
			if !readyForTests(node, nonblockingTaints) {
				notSchedulable = append(notSchedulable, node)
			}
		}
		// The framework allows <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
		// e.g. to tolerate incorrect deployment of some small percentage of nodes
		// (which we allow in cluster validation). Some nodes that are not
		// provisioned correctly at startup will never become ready (e.g. when something
		// won't install correctly), so we can't expect them to be ready at any point.
		//
		// However, we only allow non-ready nodes with some specific reasons.
		if len(notSchedulable) > 0 {
			// In large clusters, log them only every 10th pass.
			if len(nodes.Items) < largeClusterThreshold || attempt%10 == 0 {
				e2elog.Logf("Unschedulable nodes:")
				for i := range notSchedulable {
					e2elog.Logf("-> %s Ready=%t Network=%t Taints=%v NonblockingTaints:%v",
						notSchedulable[i].Name,
						IsConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true),
						IsConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false),
						notSchedulable[i].Spec.Taints,
						nonblockingTaints,
					)
				}
				e2elog.Logf("================================")
			}
		}
		return len(notSchedulable) <= allowedNotReadyNodes, nil
	}
}
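
// Example (sketch): CheckReadyForTests is meant to be handed to a poll loop
// such as wait.PollImmediate. nonblockingTaints is a comma-separated list of
// taint keys to tolerate; the key, thresholds, and timeout below are
// illustrative values:
//
//	checkFn := CheckReadyForTests(c, "node-role.kubernetes.io/master", 1, 100)
//	if err := wait.PollImmediate(poll, 10*time.Minute, checkFn); err != nil {
//		e2elog.Logf("nodes never reached a testable state: %v", err)
//	}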

// readyForTests determines whether or not we should continue waiting for the nodes
// to enter a testable state. By default this means it is schedulable, NodeReady, and untainted.
// Nodes with nonblocking taints are permitted to have those taints and
// also have their node.Spec.Unschedulable field ignored for the purposes of this function.
func readyForTests(node *v1.Node, nonblockingTaints string) bool {
	if hasNonblockingTaint(node, nonblockingTaints) {
		// If the node has one of the nonblocking taints, just check that it is ready
		// and don't require node.Spec.Unschedulable to be set either way.
		if !IsNodeReady(node) || !isNodeUntaintedWithNonblocking(node, nonblockingTaints) {
			return false
		}
	} else {
		if !IsNodeSchedulable(node) || !isNodeUntainted(node) {
			return false
		}
	}
	return true
}