predicates.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduling

import (
	"fmt"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/uuid"
	utilversion "k8s.io/apimachinery/pkg/util/version"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/common"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	testutils "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"

	// ensure libs have a chance to initialize
	_ "github.com/stretchr/testify/assert"
)

const maxNumberOfPods int64 = 10

var localStorageVersion = utilversion.MustParseSemantic("v1.8.0-beta.0")

// variable set in BeforeEach, never modified afterwards
var masterNodes sets.String

type pausePodConfig struct {
	Name string
	Namespace string
	Affinity *v1.Affinity
	Annotations, Labels, NodeSelector map[string]string
	Resources *v1.ResourceRequirements
	Tolerations []v1.Toleration
	NodeName string
	Ports []v1.ContainerPort
	OwnerReferences []metav1.OwnerReference
	PriorityClassName string
	DeletionGracePeriodSeconds *int64
}
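// Example use of pausePodConfig (illustrative sketch only; the pod name, label
// and node label below are made up for the example and do not appear in these
// tests): a config for a pause pod that requests 100m of CPU and must land on
// nodes labeled "node=node-1" would look like:
//
//	conf := pausePodConfig{
//		Name: "example-pod",
//		Labels: map[string]string{"name": "example"},
//		NodeSelector: map[string]string{"node": "node-1"},
//		Resources: &v1.ResourceRequirements{
//			Requests: v1.ResourceList{
//				v1.ResourceCPU: *resource.NewMilliQuantity(100, "DecimalSI"),
//			},
//		},
//	}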
var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
	var cs clientset.Interface
	var nodeList *v1.NodeList
	var totalPodCapacity int64
	var RCName string
	var ns string
	f := framework.NewDefaultFramework("sched-pred")

	ginkgo.AfterEach(func() {
		rc, err := cs.CoreV1().ReplicationControllers(ns).Get(RCName, metav1.GetOptions{})
		if err == nil && *(rc.Spec.Replicas) != 0 {
			ginkgo.By("Cleaning up the replication controller")
			err := framework.DeleteRCAndWaitForGC(f.ClientSet, ns, RCName)
			framework.ExpectNoError(err)
		}
	})

	ginkgo.BeforeEach(func() {
		cs = f.ClientSet
		ns = f.Namespace.Name
		nodeList = &v1.NodeList{}

		framework.AllNodesReady(cs, time.Minute)
		masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)

		err := framework.CheckTestingNSDeletedExcept(cs, ns)
		framework.ExpectNoError(err)

		for _, node := range nodeList.Items {
			e2elog.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
			framework.PrintAllKubeletPods(cs, node.Name)
		}
	})
	// This test verifies that the max-pods flag works as advertised. It assumes that cluster add-on pods stay stable
	// and cannot be run in parallel with any other test that touches Nodes or Pods. This is because, to check
	// whether max-pods is working, we need to fully saturate the cluster and keep it in this state for a few seconds.
	//
	// Slow PR #13315 (8 min)
	ginkgo.It("validates MaxPods limit number of pods that are allowed to run [Slow]", func() {
		totalPodCapacity = 0

		for _, node := range nodeList.Items {
			e2elog.Logf("Node: %v", node)
			podCapacity, found := node.Status.Capacity[v1.ResourcePods]
			gomega.Expect(found).To(gomega.Equal(true))
			totalPodCapacity += podCapacity.Value()
		}

		currentlyScheduledPods := framework.WaitForStableCluster(cs, masterNodes)
		podsNeededForSaturation := int(totalPodCapacity) - currentlyScheduledPods

		ginkgo.By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))

		// As the pods are distributed randomly among nodes,
		// it can easily happen that all nodes are saturated
		// and there is no need to create additional pods.
		// StartPods requires at least one pod to replicate.
		if podsNeededForSaturation > 0 {
			framework.ExpectNoError(testutils.StartPods(cs, podsNeededForSaturation, ns, "maxp",
				*initPausePod(f, pausePodConfig{
					Name: "",
					Labels: map[string]string{"name": ""},
				}), true, e2elog.Logf))
		}

		podName := "additional-pod"
		WaitForSchedulerAfterAction(f, createPausePodAction(f, pausePodConfig{
			Name: podName,
			Labels: map[string]string{"name": "additional"},
		}), ns, podName, false)
		verifyResult(cs, podsNeededForSaturation, 1, ns)
	})
	// This test verifies that we don't allow scheduling of pods in a way that the sum of the local ephemeral storage limits of pods is greater than the machine's capacity.
	// It assumes that cluster add-on pods stay stable and cannot be run in parallel with any other test that touches Nodes or Pods.
	// This is because we need precise control over what's running in the cluster.
	ginkgo.It("validates local ephemeral storage resource limits of pods that are allowed to run [Feature:LocalStorageCapacityIsolation]", func() {
		framework.SkipUnlessServerVersionGTE(localStorageVersion, f.ClientSet.Discovery())

		nodeMaxAllocatable := int64(0)

		nodeToAllocatableMap := make(map[string]int64)
		for _, node := range nodeList.Items {
			allocatable, found := node.Status.Allocatable[v1.ResourceEphemeralStorage]
			gomega.Expect(found).To(gomega.Equal(true))
			nodeToAllocatableMap[node.Name] = allocatable.MilliValue()
			if nodeMaxAllocatable < allocatable.MilliValue() {
				nodeMaxAllocatable = allocatable.MilliValue()
			}
		}
		framework.WaitForStableCluster(cs, masterNodes)

		pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
		framework.ExpectNoError(err)
		for _, pod := range pods.Items {
			_, found := nodeToAllocatableMap[pod.Spec.NodeName]
			if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
				e2elog.Logf("Pod %v requesting local ephemeral resource =%vm on Node %v", pod.Name, getRequestedStorageEphemeralStorage(pod), pod.Spec.NodeName)
				nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedStorageEphemeralStorage(pod)
			}
		}

		var podsNeededForSaturation int

		milliEphemeralStoragePerPod := nodeMaxAllocatable / maxNumberOfPods

		e2elog.Logf("Using pod capacity: %vm", milliEphemeralStoragePerPod)
		for name, leftAllocatable := range nodeToAllocatableMap {
			e2elog.Logf("Node: %v has local ephemeral resource allocatable: %vm", name, leftAllocatable)
			podsNeededForSaturation += (int)(leftAllocatable / milliEphemeralStoragePerPod)
		}

		ginkgo.By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster local ephemeral resource and trying to start another one", podsNeededForSaturation))

		// As the pods are distributed randomly among nodes,
		// it can easily happen that all nodes are saturated
		// and there is no need to create additional pods.
		// StartPods requires at least one pod to replicate.
		if podsNeededForSaturation > 0 {
			framework.ExpectNoError(testutils.StartPods(cs, podsNeededForSaturation, ns, "overcommit",
				*initPausePod(f, pausePodConfig{
					Name: "",
					Labels: map[string]string{"name": ""},
					Resources: &v1.ResourceRequirements{
						Limits: v1.ResourceList{
							v1.ResourceEphemeralStorage: *resource.NewMilliQuantity(milliEphemeralStoragePerPod, "DecimalSI"),
						},
						Requests: v1.ResourceList{
							v1.ResourceEphemeralStorage: *resource.NewMilliQuantity(milliEphemeralStoragePerPod, "DecimalSI"),
						},
					},
				}), true, e2elog.Logf))
		}

		podName := "additional-pod"
		conf := pausePodConfig{
			Name: podName,
			Labels: map[string]string{"name": "additional"},
			Resources: &v1.ResourceRequirements{
				Limits: v1.ResourceList{
					v1.ResourceEphemeralStorage: *resource.NewMilliQuantity(milliEphemeralStoragePerPod, "DecimalSI"),
				},
			},
		}
		WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
		verifyResult(cs, podsNeededForSaturation, 1, ns)
	})
	// This test verifies that we don't allow scheduling of pods in a way that the sum of
	// the limits of pods is greater than the machine's capacity.
	// It assumes that cluster add-on pods stay stable and cannot be run in parallel
	// with any other test that touches Nodes or Pods.
	// This is because we need precise control over what's running in the cluster.
	// Test scenario:
	// 1. Find the amount of CPU resources on each node.
	// 2. Create one pod with affinity to each node that uses 70% of the node CPU.
	// 3. Wait for the pods to be scheduled.
	// 4. Create another pod with no affinity to any node that needs 50% of the largest node CPU.
	// 5. Make sure this additional pod is not scheduled.
	/*
		Release : v1.9
		Testname: Scheduler, resource limits
		Description: Scheduling Pods MUST fail if the resource limits exceed Machine capacity.
	*/
	framework.ConformanceIt("validates resource limits of pods that are allowed to run ", func() {
		framework.WaitForStableCluster(cs, masterNodes)
		nodeMaxAllocatable := int64(0)
		nodeToAllocatableMap := make(map[string]int64)
		for _, node := range nodeList.Items {
			nodeReady := false
			for _, condition := range node.Status.Conditions {
				if condition.Type == v1.NodeReady && condition.Status == v1.ConditionTrue {
					nodeReady = true
					break
				}
			}
			if !nodeReady {
				continue
			}
			// Apply node label to each node
			framework.AddOrUpdateLabelOnNode(cs, node.Name, "node", node.Name)
			framework.ExpectNodeHasLabel(cs, node.Name, "node", node.Name)
			// Find allocatable amount of CPU.
			allocatable, found := node.Status.Allocatable[v1.ResourceCPU]
			gomega.Expect(found).To(gomega.Equal(true))
			nodeToAllocatableMap[node.Name] = allocatable.MilliValue()
			if nodeMaxAllocatable < allocatable.MilliValue() {
				nodeMaxAllocatable = allocatable.MilliValue()
			}
		}
		// Clean up added labels after this test.
		defer func() {
			for nodeName := range nodeToAllocatableMap {
				framework.RemoveLabelOffNode(cs, nodeName, "node")
			}
		}()

		pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
		framework.ExpectNoError(err)
		for _, pod := range pods.Items {
			_, found := nodeToAllocatableMap[pod.Spec.NodeName]
			if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
				e2elog.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
				nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
			}
		}

		ginkgo.By("Starting Pods to consume most of the cluster CPU.")
		// Create one pod per node that requires 70% of the node remaining CPU.
		fillerPods := []*v1.Pod{}
		for nodeName, cpu := range nodeToAllocatableMap {
			requestedCPU := cpu * 7 / 10
			fillerPods = append(fillerPods, createPausePod(f, pausePodConfig{
				Name: "filler-pod-" + string(uuid.NewUUID()),
				Resources: &v1.ResourceRequirements{
					Limits: v1.ResourceList{
						v1.ResourceCPU: *resource.NewMilliQuantity(requestedCPU, "DecimalSI"),
					},
					Requests: v1.ResourceList{
						v1.ResourceCPU: *resource.NewMilliQuantity(requestedCPU, "DecimalSI"),
					},
				},
				Affinity: &v1.Affinity{
					NodeAffinity: &v1.NodeAffinity{
						RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
							NodeSelectorTerms: []v1.NodeSelectorTerm{
								{
									MatchExpressions: []v1.NodeSelectorRequirement{
										{
											Key: "node",
											Operator: v1.NodeSelectorOpIn,
											Values: []string{nodeName},
										},
									},
								},
							},
						},
					},
				},
			}))
		}

		// Wait for filler pods to schedule.
		for _, pod := range fillerPods {
			framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
		}

		ginkgo.By("Creating another pod that requires unavailable amount of CPU.")
		// Create another pod that requires 50% of the largest node CPU resources.
		// This pod should remain pending as at least 70% of CPU of other nodes in
		// the cluster are already consumed.
		podName := "additional-pod"
		conf := pausePodConfig{
			Name: podName,
			Labels: map[string]string{"name": "additional"},
			Resources: &v1.ResourceRequirements{
				Limits: v1.ResourceList{
					v1.ResourceCPU: *resource.NewMilliQuantity(nodeMaxAllocatable*5/10, "DecimalSI"),
				},
			},
		}
		WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
		verifyResult(cs, len(fillerPods), 1, ns)
	})
	// Test nodes do not carry the label used below, hence it should be impossible to
	// schedule a Pod with a non-empty NodeSelector set.
	/*
		Release : v1.9
		Testname: Scheduler, node selector not matching
		Description: Create a Pod with a NodeSelector set to a value that does not match a node in the cluster. Since there are no nodes matching the criteria the Pod MUST not be scheduled.
	*/
	framework.ConformanceIt("validates that NodeSelector is respected if not matching ", func() {
		ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.")
		podName := "restricted-pod"

		framework.WaitForStableCluster(cs, masterNodes)

		conf := pausePodConfig{
			Name: podName,
			Labels: map[string]string{"name": "restricted"},
			NodeSelector: map[string]string{
				"label": "nonempty",
			},
		}

		WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
		verifyResult(cs, 0, 1, ns)
	})
	/*
		Release : v1.9
		Testname: Scheduler, node selector matching
		Description: Create a label on the node {k: v}. Then create a Pod with a NodeSelector set to {k: v}. Check to see if the Pod is scheduled. When the NodeSelector matches then Pod MUST be scheduled on that node.
	*/
	framework.ConformanceIt("validates that NodeSelector is respected if matching ", func() {
		nodeName := GetNodeThatCanRunPod(f)

		ginkgo.By("Trying to apply a random label on the found node.")
		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
		v := "42"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		ginkgo.By("Trying to relaunch the pod, now with labels.")
		labelPodName := "with-labels"
		createPausePod(f, pausePodConfig{
			Name: labelPodName,
			NodeSelector: map[string]string{
				k: v,
			},
		})

		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new label yet. The
		// kubelet will then refuse to launch the pod.
		framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName))
		labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
		framework.ExpectNoError(err)
		gomega.Expect(labelPod.Spec.NodeName).To(gomega.Equal(nodeName))
	})
	// Test nodes do not carry the labels used below, hence it should be impossible to
	// schedule a Pod with a non-nil NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.
	ginkgo.It("validates that NodeAffinity is respected if not matching", func() {
		ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.")
		podName := "restricted-pod"

		framework.WaitForStableCluster(cs, masterNodes)

		conf := pausePodConfig{
			Name: podName,
			Affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key: "foo",
										Operator: v1.NodeSelectorOpIn,
										Values: []string{"bar", "value2"},
									},
								},
							}, {
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key: "diffkey",
										Operator: v1.NodeSelectorOpIn,
										Values: []string{"wrong", "value2"},
									},
								},
							},
						},
					},
				},
			},
			Labels: map[string]string{"name": "restricted"},
		}
		WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
		verifyResult(cs, 0, 1, ns)
	})
	// Keep the same steps as the NodeSelector test above,
	// but specify the requirement via Pod.Spec.Affinity instead of NodeSelector.
	ginkgo.It("validates that required NodeAffinity setting is respected if matching", func() {
		nodeName := GetNodeThatCanRunPod(f)

		ginkgo.By("Trying to apply a random label on the found node.")
		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
		v := "42"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		ginkgo.By("Trying to relaunch the pod, now with labels.")
		labelPodName := "with-labels"
		createPausePod(f, pausePodConfig{
			Name: labelPodName,
			Affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key: k,
										Operator: v1.NodeSelectorOpIn,
										Values: []string{v},
									},
								},
							},
						},
					},
				},
			},
		})

		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new label yet. The
		// kubelet will then refuse to launch the pod.
		framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName))
		labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
		framework.ExpectNoError(err)
		gomega.Expect(labelPod.Spec.NodeName).To(gomega.Equal(nodeName))
	})
	// 1. Run a pod to get an available node, then delete the pod
	// 2. Taint the node with a random taint
	// 3. Try to relaunch the pod with a toleration that tolerates the taint on the node,
	//    and a nodeSelector matching the node found in step 1
	ginkgo.It("validates that taints-tolerations is respected if matching", func() {
		nodeName := getNodeThatCanRunPodWithoutToleration(f)

		ginkgo.By("Trying to apply a random taint on the found node.")
		testTaint := v1.Taint{
			Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())),
			Value: "testing-taint-value",
			Effect: v1.TaintEffectNoSchedule,
		}
		framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
		framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
		defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)

		ginkgo.By("Trying to apply a random label on the found node.")
		labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
		labelValue := "testing-label-value"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
		framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
		defer framework.RemoveLabelOffNode(cs, nodeName, labelKey)

		ginkgo.By("Trying to relaunch the pod, now with tolerations.")
		tolerationPodName := "with-tolerations"
		createPausePod(f, pausePodConfig{
			Name: tolerationPodName,
			Tolerations: []v1.Toleration{{Key: testTaint.Key, Value: testTaint.Value, Effect: testTaint.Effect}},
			NodeSelector: map[string]string{labelKey: labelValue},
		})

		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new taint yet. The
		// kubelet will then refuse to launch the pod.
		framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, tolerationPodName))
		deployedPod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
		framework.ExpectNoError(err)
		gomega.Expect(deployedPod.Spec.NodeName).To(gomega.Equal(nodeName))
	})
	// 1. Run a pod to get an available node, then delete the pod
	// 2. Taint the node with a random taint
	// 3. Try to relaunch the pod, still with no tolerations,
	//    and a nodeSelector matching the node found in step 1
	ginkgo.It("validates that taints-tolerations is respected if not matching", func() {
		nodeName := getNodeThatCanRunPodWithoutToleration(f)

		ginkgo.By("Trying to apply a random taint on the found node.")
		testTaint := v1.Taint{
			Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())),
			Value: "testing-taint-value",
			Effect: v1.TaintEffectNoSchedule,
		}
		framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
		framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
		defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)

		ginkgo.By("Trying to apply a random label on the found node.")
		labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
		labelValue := "testing-label-value"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
		framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
		defer framework.RemoveLabelOffNode(cs, nodeName, labelKey)

		ginkgo.By("Trying to relaunch the pod, still no tolerations.")
		podNameNoTolerations := "still-no-tolerations"
		conf := pausePodConfig{
			Name: podNameNoTolerations,
			NodeSelector: map[string]string{labelKey: labelValue},
		}

		WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podNameNoTolerations, false)
		verifyResult(cs, 0, 1, ns)

		ginkgo.By("Removing taint off the node")
		WaitForSchedulerAfterAction(f, removeTaintFromNodeAction(cs, nodeName, testTaint), ns, podNameNoTolerations, true)
		verifyResult(cs, 1, 0, ns)
	})
	ginkgo.It("validates that there is no conflict between pods with same hostPort but different hostIP and protocol", func() {
		nodeName := GetNodeThatCanRunPod(f)

		// use nodeSelector to make sure the testing pods get assigned to the same node,
		// so we can verify explicitly whether or not there is a conflict
		ginkgo.By("Trying to apply a random label on the found node.")
		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
		v := "90"

		nodeSelector := make(map[string]string)
		nodeSelector[k] = v

		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		port := int32(54321)
		ginkgo.By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP 127.0.0.1 and expect scheduled", port))
		createHostPortPodOnNode(f, "pod1", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, true)

		ginkgo.By(fmt.Sprintf("Trying to create another pod(pod2) with hostport %v but hostIP 127.0.0.2 on the node which pod1 resides and expect scheduled", port))
		createHostPortPodOnNode(f, "pod2", ns, "127.0.0.2", port, v1.ProtocolTCP, nodeSelector, true)

		ginkgo.By(fmt.Sprintf("Trying to create a third pod(pod3) with hostport %v, hostIP 127.0.0.2 but use UDP protocol on the node which pod2 resides", port))
		createHostPortPodOnNode(f, "pod3", ns, "127.0.0.2", port, v1.ProtocolUDP, nodeSelector, true)
	})

	ginkgo.It("validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP", func() {
		nodeName := GetNodeThatCanRunPod(f)

		// use nodeSelector to make sure the testing pods get assigned to the same node,
		// so we can verify explicitly whether or not there is a conflict
		ginkgo.By("Trying to apply a random label on the found node.")
		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
		v := "95"

		nodeSelector := make(map[string]string)
		nodeSelector[k] = v

		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		port := int32(54322)
		ginkgo.By(fmt.Sprintf("Trying to create a pod(pod4) with hostport %v and hostIP 0.0.0.0(empty string here) and expect scheduled", port))
		createHostPortPodOnNode(f, "pod4", ns, "", port, v1.ProtocolTCP, nodeSelector, true)

		ginkgo.By(fmt.Sprintf("Trying to create another pod(pod5) with hostport %v but hostIP 127.0.0.1 on the node which pod4 resides and expect not scheduled", port))
		createHostPortPodOnNode(f, "pod5", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, false)
	})
})
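// initPausePod builds (but does not create) a pause-container Pod object from the
// given pausePodConfig, using a one-second termination grace period.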
func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
	var gracePeriod = int64(1)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: conf.Name,
			Namespace: conf.Namespace,
			Labels: conf.Labels,
			Annotations: conf.Annotations,
			OwnerReferences: conf.OwnerReferences,
		},
		Spec: v1.PodSpec{
			NodeSelector: conf.NodeSelector,
			Affinity: conf.Affinity,
			Containers: []v1.Container{
				{
					Name: conf.Name,
					Image: imageutils.GetPauseImageName(),
					Ports: conf.Ports,
				},
			},
			Tolerations: conf.Tolerations,
			NodeName: conf.NodeName,
			PriorityClassName: conf.PriorityClassName,
			TerminationGracePeriodSeconds: &gracePeriod,
		},
	}
	if conf.Resources != nil {
		pod.Spec.Containers[0].Resources = *conf.Resources
	}
	if conf.DeletionGracePeriodSeconds != nil {
		pod.ObjectMeta.DeletionGracePeriodSeconds = conf.DeletionGracePeriodSeconds
	}
	return pod
}
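// createPausePod creates the pause pod described by conf in conf.Namespace,
// falling back to the framework's namespace when none is set, and fails the
// test on any API error.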
func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
	namespace := conf.Namespace
	if len(namespace) == 0 {
		namespace = f.Namespace.Name
	}
	pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(initPausePod(f, conf))
	framework.ExpectNoError(err)
	return pod
}
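// runPausePod creates the pause pod described by conf, waits for it to be
// running, and returns the freshly fetched Pod object.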
func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
	pod := createPausePod(f, conf)
	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
	pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(conf.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	return pod
}
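// runPodAndGetNodeName runs the pod described by conf, records the node it was
// scheduled onto, deletes the pod to free its resources, and returns that
// node's name.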
func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string {
	// launch a pod to find a node which can launch a pod. We intentionally do
	// not just take the node list and choose the first of them. Depending on the
	// cluster and the scheduler it might be that a "normal" pod cannot be
	// scheduled onto it.
	pod := runPausePod(f, conf)

	ginkgo.By("Explicitly delete pod here to free the resource it takes.")
	err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
	framework.ExpectNoError(err)

	return pod.Spec.NodeName
}
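// getRequestedCPU returns the sum of the CPU requests of all containers in the
// pod, in millicores.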
func getRequestedCPU(pod v1.Pod) int64 {
	var result int64
	for _, container := range pod.Spec.Containers {
		result += container.Resources.Requests.Cpu().MilliValue()
	}
	return result
}
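// getRequestedStorageEphemeralStorage returns the sum of the ephemeral-storage
// requests of all containers in the pod, as milli-quantities.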
func getRequestedStorageEphemeralStorage(pod v1.Pod) int64 {
	var result int64
	for _, container := range pod.Spec.Containers {
		result += container.Resources.Requests.StorageEphemeral().MilliValue()
	}
	return result
}

// removeTaintFromNodeAction returns a closure that removes the given taint
// from the given node upon invocation.
func removeTaintFromNodeAction(cs clientset.Interface, nodeName string, testTaint v1.Taint) common.Action {
	return func() error {
		framework.RemoveTaintOffNode(cs, nodeName, testTaint)
		return nil
	}
}

// createPausePodAction returns a closure that creates a pause pod upon invocation.
func createPausePodAction(f *framework.Framework, conf pausePodConfig) common.Action {
	return func() error {
		_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(initPausePod(f, conf))
		return err
	}
}

// WaitForSchedulerAfterAction performs the provided action and then waits for
// scheduler to act on the given pod.
func WaitForSchedulerAfterAction(f *framework.Framework, action common.Action, ns, podName string, expectSuccess bool) {
	predicate := scheduleFailureEvent(podName)
	if expectSuccess {
		predicate = scheduleSuccessEvent(ns, podName, "" /* any node */)
	}
	success, err := common.ObserveEventAfterAction(f, predicate, action)
	framework.ExpectNoError(err)
	gomega.Expect(success).To(gomega.Equal(true))
}

// TODO: upgrade calls in PodAffinity tests when we're able to run them
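// verifyResult checks that the namespace ns contains exactly expectedScheduled
// scheduled pods and expectedNotScheduled pods that are not scheduled.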
func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) {
	allPods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
	framework.ExpectNoError(err)
	scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods)

	printed := false
	printOnce := func(msg string) string {
		if !printed {
			printed = true
			return msg
		}
		return ""
	}

	gomega.Expect(len(notScheduledPods)).To(gomega.Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
	gomega.Expect(len(scheduledPods)).To(gomega.Equal(expectedScheduled), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
}

// verifyReplicasResult is a wrapper of verifyResult for a group of pods that share
// the same "name: labelName" label, which means they belong to the same RC.
func verifyReplicasResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string, labelName string) {
	allPods := getPodsByLabels(c, ns, map[string]string{"name": labelName})
	scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods)

	printed := false
	printOnce := func(msg string) string {
		if !printed {
			printed = true
			return msg
		}
		return ""
	}

	gomega.Expect(len(notScheduledPods)).To(gomega.Equal(expectedNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
	gomega.Expect(len(scheduledPods)).To(gomega.Equal(expectedScheduled), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
}
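// getPodsByLabels lists the pods in namespace ns that match the given label
// set, failing the test on any API error.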
func getPodsByLabels(c clientset.Interface, ns string, labelsMap map[string]string) *v1.PodList {
	selector := labels.SelectorFromSet(labels.Set(labelsMap))
	allPods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})
	framework.ExpectNoError(err)
	return allPods
}
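// runAndKeepPodWithLabelAndGetNodeName launches a labeled pause pod, keeps it
// running, and returns the node it landed on together with the pod's name.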
func runAndKeepPodWithLabelAndGetNodeName(f *framework.Framework) (string, string) {
	// launch a pod to find a node which can launch a pod. We intentionally do
	// not just take the node list and choose the first of them. Depending on the
	// cluster and the scheduler it might be that a "normal" pod cannot be
	// scheduled onto it.
	ginkgo.By("Trying to launch a pod with a label to get a node which can launch it.")
	pod := runPausePod(f, pausePodConfig{
		Name: "with-label-" + string(uuid.NewUUID()),
		Labels: map[string]string{"security": "S1"},
	})
	return pod.Spec.NodeName, pod.Name
}

// GetNodeThatCanRunPod launches a pod without a label and returns the name of a
// node that was able to run it.
func GetNodeThatCanRunPod(f *framework.Framework) string {
	ginkgo.By("Trying to launch a pod without a label to get a node which can launch it.")
	return runPodAndGetNodeName(f, pausePodConfig{Name: "without-label"})
}
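// getNodeThatCanRunPodWithoutToleration launches a pod without any toleration
// and returns the name of a node that was able to run it.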
func getNodeThatCanRunPodWithoutToleration(f *framework.Framework) string {
	ginkgo.By("Trying to launch a pod without a toleration to get a node which can launch it.")
	return runPodAndGetNodeName(f, pausePodConfig{Name: "without-toleration"})
}

// CreateHostPortPods creates an RC whose pods reserve host port 4321.
func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
	ginkgo.By("Running RC which reserves host port")
	config := &testutils.RCConfig{
		Client: f.ClientSet,
		Name: id,
		Namespace: f.Namespace.Name,
		Timeout: defaultTimeout,
		Image: imageutils.GetPauseImageName(),
		Replicas: replicas,
		HostPorts: map[string]int{"port1": 4321},
	}
	err := framework.RunRC(*config)
	if expectRunning {
		framework.ExpectNoError(err)
	}
}

// createHostPortPodOnNode creates a pod that uses the given hostPort, hostIP and
// protocol, pins it to a node with the given nodeSelector, and checks whether it
// gets scheduled as expected.
func createHostPortPodOnNode(f *framework.Framework, podName, ns, hostIP string, port int32, protocol v1.Protocol, nodeSelector map[string]string, expectScheduled bool) {
	createPausePod(f, pausePodConfig{
		Name: podName,
		Ports: []v1.ContainerPort{
			{
				HostPort: port,
				ContainerPort: 80,
				Protocol: protocol,
				HostIP: hostIP,
			},
		},
		NodeSelector: nodeSelector,
	})
	err := framework.WaitForPodNotPending(f.ClientSet, ns, podName)
	if expectScheduled {
		framework.ExpectNoError(err)
	}
}