predicates.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduling

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	nodev1beta1 "k8s.io/api/node/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/uuid"
	utilversion "k8s.io/apimachinery/pkg/util/version"
	clientset "k8s.io/client-go/kubernetes"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	testutils "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
	k8utilnet "k8s.io/utils/net"

	"github.com/onsi/ginkgo"

	// ensure libs have a chance to initialize
	_ "github.com/stretchr/testify/assert"
)
const (
	maxNumberOfPods int64 = 10
	defaultTimeout        = 3 * time.Minute
)

var localStorageVersion = utilversion.MustParseSemantic("v1.8.0-beta.0")

// variable set in BeforeEach, never modified afterwards
var masterNodes sets.String
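
// pausePodConfig describes a pause pod to be built by initPausePod below;
// every field is an optional override of the pod's metadata and spec.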
type pausePodConfig struct {
	Name                              string
	Namespace                         string
	Affinity                          *v1.Affinity
	Annotations, Labels, NodeSelector map[string]string
	Resources                         *v1.ResourceRequirements
	RuntimeClassHandler               *string
	Tolerations                       []v1.Toleration
	NodeName                          string
	Ports                             []v1.ContainerPort
	OwnerReferences                   []metav1.OwnerReference
	PriorityClassName                 string
	DeletionGracePeriodSeconds        *int64
	TopologySpreadConstraints         []v1.TopologySpreadConstraint
}
var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
	var cs clientset.Interface
	var nodeList *v1.NodeList
	var RCName string
	var ns string
	f := framework.NewDefaultFramework("sched-pred")

	ginkgo.AfterEach(func() {
		rc, err := cs.CoreV1().ReplicationControllers(ns).Get(context.TODO(), RCName, metav1.GetOptions{})
		if err == nil && *(rc.Spec.Replicas) != 0 {
			ginkgo.By("Cleaning up the replication controller")
			err := e2erc.DeleteRCAndWaitForGC(f.ClientSet, ns, RCName)
			framework.ExpectNoError(err)
		}
	})

	ginkgo.BeforeEach(func() {
		cs = f.ClientSet
		ns = f.Namespace.Name
		nodeList = &v1.NodeList{}
		var err error

		framework.AllNodesReady(cs, time.Minute)

		// NOTE: nodeList is intentionally not restricted to worker nodes here, so that
		// master nodes which are able to host workload pods are also covered.
		masterNodes, _, err = e2enode.GetMasterAndWorkerNodes(cs)
		if err != nil {
			framework.Logf("Unexpected error occurred: %v", err)
		}
		nodeList, err = e2enode.GetReadySchedulableNodes(cs)
		if err != nil {
			framework.Logf("Unexpected error occurred: %v", err)
		}
		// TODO: write a wrapper for ExpectNoErrorWithOffset()
		framework.ExpectNoErrorWithOffset(0, err)
		err = framework.CheckTestingNSDeletedExcept(cs, ns)
		framework.ExpectNoError(err)

		for _, node := range nodeList.Items {
			framework.Logf("\nLogging pods the kubelet thinks are on node %v before test", node.Name)
			printAllKubeletPods(cs, node.Name)
		}
	})
	// This test verifies we don't allow scheduling of pods in a way that the sum of
	// local ephemeral storage resource requests of pods is greater than the machine's capacity.
	// It assumes that cluster add-on pods stay stable and cannot be run in parallel
	// with any other test that touches Nodes or Pods.
	// This is because we need precise control over what's running in the cluster.
	ginkgo.It("validates local ephemeral storage resource limits of pods that are allowed to run [Feature:LocalStorageCapacityIsolation]", func() {
		e2eskipper.SkipUnlessServerVersionGTE(localStorageVersion, f.ClientSet.Discovery())

		nodeMaxAllocatable := int64(0)
		nodeToAllocatableMap := make(map[string]int64)
		for _, node := range nodeList.Items {
			allocatable, found := node.Status.Allocatable[v1.ResourceEphemeralStorage]
			framework.ExpectEqual(found, true)
			nodeToAllocatableMap[node.Name] = allocatable.Value()
			if nodeMaxAllocatable < allocatable.Value() {
				nodeMaxAllocatable = allocatable.Value()
			}
		}
		WaitForStableCluster(cs, masterNodes)

		pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
		framework.ExpectNoError(err)
		for _, pod := range pods.Items {
			_, found := nodeToAllocatableMap[pod.Spec.NodeName]
			if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
				framework.Logf("Pod %v requesting local ephemeral resource =%v on Node %v", pod.Name, getRequestedStorageEphemeralStorage(pod), pod.Spec.NodeName)
				nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedStorageEphemeralStorage(pod)
			}
		}

		var podsNeededForSaturation int
		ephemeralStoragePerPod := nodeMaxAllocatable / maxNumberOfPods
		framework.Logf("Using pod capacity: %v", ephemeralStoragePerPod)
		for name, leftAllocatable := range nodeToAllocatableMap {
			framework.Logf("Node: %v has local ephemeral resource allocatable: %v", name, leftAllocatable)
			podsNeededForSaturation += (int)(leftAllocatable / ephemeralStoragePerPod)
		}
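
		// Each filler pod requests 1/10 of the largest node's allocatable ephemeral storage,
		// so podsNeededForSaturation is the number of such pods that still fit into the
		// remaining allocatable storage summed across all nodes.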
		ginkgo.By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster local ephemeral resource and trying to start another one", podsNeededForSaturation))

		// As the pods are distributed randomly among nodes,
		// it can easily happen that all nodes are saturated
		// and there is no need to create additional pods.
		// StartPods requires at least one pod to replicate.
		if podsNeededForSaturation > 0 {
			framework.ExpectNoError(testutils.StartPods(cs, podsNeededForSaturation, ns, "overcommit",
				*initPausePod(f, pausePodConfig{
					Name:   "",
					Labels: map[string]string{"name": ""},
					Resources: &v1.ResourceRequirements{
						Limits: v1.ResourceList{
							v1.ResourceEphemeralStorage: *resource.NewQuantity(ephemeralStoragePerPod, "DecimalSI"),
						},
						Requests: v1.ResourceList{
							v1.ResourceEphemeralStorage: *resource.NewQuantity(ephemeralStoragePerPod, "DecimalSI"),
						},
					},
				}), true, framework.Logf))
		}
		podName := "additional-pod"
		conf := pausePodConfig{
			Name:   podName,
			Labels: map[string]string{"name": "additional"},
			Resources: &v1.ResourceRequirements{
				Limits: v1.ResourceList{
					v1.ResourceEphemeralStorage: *resource.NewQuantity(ephemeralStoragePerPod, "DecimalSI"),
				},
				Requests: v1.ResourceList{
					v1.ResourceEphemeralStorage: *resource.NewQuantity(ephemeralStoragePerPod, "DecimalSI"),
				},
			},
		}
		WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
		verifyResult(cs, podsNeededForSaturation, 1, ns)
	})
	// This test verifies we don't allow scheduling of pods in a way that the sum of limits +
	// associated overhead is greater than the machine's capacity.
	// It assumes that cluster add-on pods stay stable and cannot be run in parallel
	// with any other test that touches Nodes or Pods.
	// Because of this we need to have precise control on what's running in the cluster.
	// Test scenario:
	// 1. Find the first ready node on the system, and add a fake resource for test
	// 2. Create one pod with affinity to the particular node that uses 70% of the fake resource.
	// 3. Wait for the pod to be scheduled.
	// 4. Create another pod with affinity to the particular node that needs 20% of the fake resource and
	//    an overhead set as 25% of the fake resource.
	// 5. Make sure this additional pod is not scheduled.
	ginkgo.Context("validates pod overhead is considered along with resource limits of pods that are allowed to run", func() {
		var testNodeName string
		var handler string
		var beardsecond v1.ResourceName = "example.com/beardsecond"

		ginkgo.BeforeEach(func() {
			WaitForStableCluster(cs, masterNodes)
			ginkgo.By("Add RuntimeClass and fake resource")

			// find a node which can run a pod:
			testNodeName = GetNodeThatCanRunPod(f)

			// Get node object:
			node, err := cs.CoreV1().Nodes().Get(context.TODO(), testNodeName, metav1.GetOptions{})
			framework.ExpectNoError(err, "unable to get node object for node %v", testNodeName)

			// update Node API object with a fake resource
			nodeCopy := node.DeepCopy()
			nodeCopy.ResourceVersion = "0"
			nodeCopy.Status.Capacity[beardsecond] = resource.MustParse("1000")
			_, err = cs.CoreV1().Nodes().UpdateStatus(context.TODO(), nodeCopy, metav1.UpdateOptions{})
			framework.ExpectNoError(err, "unable to apply fake resource to %v", testNodeName)

			// Register a runtimeClass with overhead set as 25% of the available beard-seconds
			handler = e2enode.PreconfiguredRuntimeClassHandler(framework.TestContext.ContainerRuntime)
			rc := &nodev1beta1.RuntimeClass{
				ObjectMeta: metav1.ObjectMeta{Name: handler},
				Handler:    handler,
				Overhead: &nodev1beta1.Overhead{
					PodFixed: v1.ResourceList{
						beardsecond: resource.MustParse("250"),
					},
				},
			}
			_, err = cs.NodeV1beta1().RuntimeClasses().Create(context.TODO(), rc, metav1.CreateOptions{})
			framework.ExpectNoError(err, "failed to create RuntimeClass resource")
		})

		ginkgo.AfterEach(func() {
			ginkgo.By("Remove fake resource and RuntimeClass")
			// remove fake resource:
			if testNodeName != "" {
				// Get node object:
				node, err := cs.CoreV1().Nodes().Get(context.TODO(), testNodeName, metav1.GetOptions{})
				framework.ExpectNoError(err, "unable to get node object for node %v", testNodeName)

				nodeCopy := node.DeepCopy()
				// force it to update
				nodeCopy.ResourceVersion = "0"
				delete(nodeCopy.Status.Capacity, beardsecond)
				_, err = cs.CoreV1().Nodes().UpdateStatus(context.TODO(), nodeCopy, metav1.UpdateOptions{})
				framework.ExpectNoError(err, "unable to update node %v", testNodeName)
			}

			// remove RuntimeClass
			cs.NodeV1beta1().RuntimeClasses().Delete(context.TODO(), e2enode.PreconfiguredRuntimeClassHandler(framework.TestContext.ContainerRuntime), nil)
		})
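
		// The arithmetic below: the node advertises 1000 beardseconds, the filler pod
		// consumes 700, and the second pod needs 200 plus the RuntimeClass overhead of
		// 250 (1150 in total), so the second pod must stay pending.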
		ginkgo.It("verify pod overhead is accounted for", func() {
			framework.ExpectEqual(testNodeName != "", true)

			ginkgo.By("Starting Pod to consume most of the node's resource.")

			// Create pod which requires 70% of the available beard-seconds.
			fillerPod := createPausePod(f, pausePodConfig{
				Name: "filler-pod-" + string(uuid.NewUUID()),
				Resources: &v1.ResourceRequirements{
					Requests: v1.ResourceList{beardsecond: resource.MustParse("700")},
					Limits:   v1.ResourceList{beardsecond: resource.MustParse("700")},
				},
			})

			// Wait for filler pod to schedule.
			framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, fillerPod))

			ginkgo.By("Creating another pod that requires unavailable amount of resources.")
			// Create another pod that requires 20% of the available beard-seconds, but uses
			// the RuntimeClass, which defines a pod overhead requiring an additional 25%.
			// This pod should remain pending because at least 70% of the node's
			// beard-seconds are already consumed.
			podName := "additional-pod" + string(uuid.NewUUID())
			conf := pausePodConfig{
				RuntimeClassHandler: &handler,
				Name:                podName,
				Labels:              map[string]string{"name": "additional"},
				Resources: &v1.ResourceRequirements{
					Limits: v1.ResourceList{beardsecond: resource.MustParse("200")},
				},
			}
			WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
			verifyResult(cs, 1, 1, ns)
		})
	})
	// This test verifies we don't allow scheduling of pods in a way that the sum of
	// resource requests of pods is greater than the machine's capacity.
	// It assumes that cluster add-on pods stay stable and cannot be run in parallel
	// with any other test that touches Nodes or Pods.
	// This is because we need precise control over what's running in the cluster.
	// Test scenario:
	// 1. Find the amount of CPU resources on each node.
	// 2. Create one pod with affinity to each node that uses 70% of the node CPU.
	// 3. Wait for the pods to be scheduled.
	// 4. Create another pod with no affinity to any node that needs 50% of the largest node CPU.
	// 5. Make sure this additional pod is not scheduled.
	/*
		Release : v1.9
		Testname: Scheduler, resource limits
		Description: Scheduling Pods MUST fail if the resource requests exceed Machine capacity.
	*/
	framework.ConformanceIt("validates resource limits of pods that are allowed to run ", func() {
		WaitForStableCluster(cs, masterNodes)
		nodeMaxAllocatable := int64(0)
		nodeToAllocatableMap := make(map[string]int64)
		for _, node := range nodeList.Items {
			nodeReady := false
			for _, condition := range node.Status.Conditions {
				if condition.Type == v1.NodeReady && condition.Status == v1.ConditionTrue {
					nodeReady = true
					break
				}
			}
			if !nodeReady {
				continue
			}
			// Apply node label to each node
			framework.AddOrUpdateLabelOnNode(cs, node.Name, "node", node.Name)
			framework.ExpectNodeHasLabel(cs, node.Name, "node", node.Name)
			// Find allocatable amount of CPU.
			allocatable, found := node.Status.Allocatable[v1.ResourceCPU]
			framework.ExpectEqual(found, true)
			nodeToAllocatableMap[node.Name] = allocatable.MilliValue()
			if nodeMaxAllocatable < allocatable.MilliValue() {
				nodeMaxAllocatable = allocatable.MilliValue()
			}
		}
		// Clean up added labels after this test.
		defer func() {
			for nodeName := range nodeToAllocatableMap {
				framework.RemoveLabelOffNode(cs, nodeName, "node")
			}
		}()

		pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
		framework.ExpectNoError(err)
		for _, pod := range pods.Items {
			_, found := nodeToAllocatableMap[pod.Spec.NodeName]
			if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
				framework.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
				nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
			}
		}
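
		// At this point nodeToAllocatableMap holds each node's remaining allocatable CPU
		// (in millicores) after subtracting the requests of pods already running on it.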
		ginkgo.By("Starting Pods to consume most of the cluster CPU.")
		// Create one pod per node that requires 70% of the node's remaining CPU.
		fillerPods := []*v1.Pod{}
		for nodeName, cpu := range nodeToAllocatableMap {
			requestedCPU := cpu * 7 / 10
			framework.Logf("Creating a pod which consumes cpu=%vm on Node %v", requestedCPU, nodeName)
			fillerPods = append(fillerPods, createPausePod(f, pausePodConfig{
				Name: "filler-pod-" + string(uuid.NewUUID()),
				Resources: &v1.ResourceRequirements{
					Limits: v1.ResourceList{
						v1.ResourceCPU: *resource.NewMilliQuantity(requestedCPU, "DecimalSI"),
					},
					Requests: v1.ResourceList{
						v1.ResourceCPU: *resource.NewMilliQuantity(requestedCPU, "DecimalSI"),
					},
				},
				Affinity: &v1.Affinity{
					NodeAffinity: &v1.NodeAffinity{
						RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
							NodeSelectorTerms: []v1.NodeSelectorTerm{
								{
									MatchExpressions: []v1.NodeSelectorRequirement{
										{
											Key:      "node",
											Operator: v1.NodeSelectorOpIn,
											Values:   []string{nodeName},
										},
									},
								},
							},
						},
					},
				},
			}))
		}

		// Wait for filler pods to schedule.
		for _, pod := range fillerPods {
			framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod))
		}

		ginkgo.By("Creating another pod that requires unavailable amount of CPU.")
		// Create another pod that requires 50% of the largest node's CPU resources.
		// This pod should remain pending, as at least 70% of every node's CPU is
		// already consumed.
		podName := "additional-pod"
		conf := pausePodConfig{
			Name:   podName,
			Labels: map[string]string{"name": "additional"},
			Resources: &v1.ResourceRequirements{
				Limits: v1.ResourceList{
					v1.ResourceCPU: *resource.NewMilliQuantity(nodeMaxAllocatable*5/10, "DecimalSI"),
				},
				Requests: v1.ResourceList{
					v1.ResourceCPU: *resource.NewMilliQuantity(nodeMaxAllocatable*5/10, "DecimalSI"),
				},
			},
		}
		WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
		verifyResult(cs, len(fillerPods), 1, ns)
	})
	// The test nodes do not carry the requested label, hence it should be impossible
	// to schedule a Pod with a nonempty Selector set.
	/*
		Release : v1.9
		Testname: Scheduler, node selector not matching
		Description: Create a Pod with a NodeSelector set to a value that does not match a node in the cluster. Since there are no nodes matching the criteria the Pod MUST not be scheduled.
	*/
	framework.ConformanceIt("validates that NodeSelector is respected if not matching ", func() {
		ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.")
		podName := "restricted-pod"
		WaitForStableCluster(cs, masterNodes)
		conf := pausePodConfig{
			Name:   podName,
			Labels: map[string]string{"name": "restricted"},
			NodeSelector: map[string]string{
				"label": "nonempty",
			},
		}
		WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
		verifyResult(cs, 0, 1, ns)
	})
	/*
		Release : v1.9
		Testname: Scheduler, node selector matching
		Description: Create a label on the node {k: v}. Then create a Pod with a NodeSelector set to {k: v}. Check to see if the Pod is scheduled. When the NodeSelector matches then Pod MUST be scheduled on that node.
	*/
	framework.ConformanceIt("validates that NodeSelector is respected if matching ", func() {
		nodeName := GetNodeThatCanRunPod(f)

		ginkgo.By("Trying to apply a random label on the found node.")
		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
		v := "42"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		ginkgo.By("Trying to relaunch the pod, now with labels.")
		labelPodName := "with-labels"
		createPausePod(f, pausePodConfig{
			Name: labelPodName,
			NodeSelector: map[string]string{
				k: v,
			},
		})

		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new label yet. The
		// kubelet will then refuse to launch the pod.
		framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, labelPodName))
		labelPod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), labelPodName, metav1.GetOptions{})
		framework.ExpectNoError(err)
		framework.ExpectEqual(labelPod.Spec.NodeName, nodeName)
	})
	// The test nodes do not carry the requested label, hence it should be impossible to
	// schedule a Pod with a non-nil NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.
	ginkgo.It("validates that NodeAffinity is respected if not matching", func() {
		ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.")
		podName := "restricted-pod"
		WaitForStableCluster(cs, masterNodes)
		conf := pausePodConfig{
			Name: podName,
			Affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key:      "foo",
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"bar", "value2"},
									},
								},
							}, {
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key:      "diffkey",
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{"wrong", "value2"},
									},
								},
							},
						},
					},
				},
			},
			Labels: map[string]string{"name": "restricted"},
		}
		WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podName, false)
		verifyResult(cs, 0, 1, ns)
	})
	// Keep the same steps as the test on NodeSelector,
	// but specify Affinity in Pod.Spec.Affinity, instead of NodeSelector.
	ginkgo.It("validates that required NodeAffinity setting is respected if matching", func() {
		nodeName := GetNodeThatCanRunPod(f)

		ginkgo.By("Trying to apply a random label on the found node.")
		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
		v := "42"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		ginkgo.By("Trying to relaunch the pod, now with labels.")
		labelPodName := "with-labels"
		createPausePod(f, pausePodConfig{
			Name: labelPodName,
			Affinity: &v1.Affinity{
				NodeAffinity: &v1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
						NodeSelectorTerms: []v1.NodeSelectorTerm{
							{
								MatchExpressions: []v1.NodeSelectorRequirement{
									{
										Key:      k,
										Operator: v1.NodeSelectorOpIn,
										Values:   []string{v},
									},
								},
							},
						},
					},
				},
			},
		})

		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new label yet. The
		// kubelet will then refuse to launch the pod.
		framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, labelPodName))
		labelPod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), labelPodName, metav1.GetOptions{})
		framework.ExpectNoError(err)
		framework.ExpectEqual(labelPod.Spec.NodeName, nodeName)
	})
	// 1. Run a pod to get an available node, then delete the pod
	// 2. Taint the node with a random taint
	// 3. Try to relaunch the pod with a toleration that tolerates the taint on the node,
	//    and a node selector targeting the node found in step 1
	ginkgo.It("validates that taints-tolerations is respected if matching", func() {
		nodeName := getNodeThatCanRunPodWithoutToleration(f)

		ginkgo.By("Trying to apply a random taint on the found node.")
		testTaint := v1.Taint{
			Key:    fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())),
			Value:  "testing-taint-value",
			Effect: v1.TaintEffectNoSchedule,
		}
		framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
		framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
		defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)

		ginkgo.By("Trying to apply a random label on the found node.")
		labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
		labelValue := "testing-label-value"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
		framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
		defer framework.RemoveLabelOffNode(cs, nodeName, labelKey)

		ginkgo.By("Trying to relaunch the pod, now with tolerations.")
		tolerationPodName := "with-tolerations"
		createPausePod(f, pausePodConfig{
			Name:         tolerationPodName,
			Tolerations:  []v1.Toleration{{Key: testTaint.Key, Value: testTaint.Value, Effect: testTaint.Effect}},
			NodeSelector: map[string]string{labelKey: labelValue},
		})

		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new taint yet. The
		// kubelet will then refuse to launch the pod.
		framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, tolerationPodName))
		deployedPod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), tolerationPodName, metav1.GetOptions{})
		framework.ExpectNoError(err)
		framework.ExpectEqual(deployedPod.Spec.NodeName, nodeName)
	})
	// 1. Run a pod to get an available node, then delete the pod
	// 2. Taint the node with a random taint
	// 3. Try to relaunch the pod, still with no tolerations,
	//    and a node selector targeting the node found in step 1
	ginkgo.It("validates that taints-tolerations is respected if not matching", func() {
		nodeName := getNodeThatCanRunPodWithoutToleration(f)

		ginkgo.By("Trying to apply a random taint on the found node.")
		testTaint := v1.Taint{
			Key:    fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())),
			Value:  "testing-taint-value",
			Effect: v1.TaintEffectNoSchedule,
		}
		framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
		framework.ExpectNodeHasTaint(cs, nodeName, &testTaint)
		defer framework.RemoveTaintOffNode(cs, nodeName, testTaint)

		ginkgo.By("Trying to apply a random label on the found node.")
		labelKey := fmt.Sprintf("kubernetes.io/e2e-label-key-%s", string(uuid.NewUUID()))
		labelValue := "testing-label-value"
		framework.AddOrUpdateLabelOnNode(cs, nodeName, labelKey, labelValue)
		framework.ExpectNodeHasLabel(cs, nodeName, labelKey, labelValue)
		defer framework.RemoveLabelOffNode(cs, nodeName, labelKey)

		ginkgo.By("Trying to relaunch the pod, still no tolerations.")
		podNameNoTolerations := "still-no-tolerations"
		conf := pausePodConfig{
			Name:         podNameNoTolerations,
			NodeSelector: map[string]string{labelKey: labelValue},
		}

		WaitForSchedulerAfterAction(f, createPausePodAction(f, conf), ns, podNameNoTolerations, false)
		verifyResult(cs, 0, 1, ns)

		ginkgo.By("Removing taint off the node")
		WaitForSchedulerAfterAction(f, removeTaintFromNodeAction(cs, nodeName, testTaint), ns, podNameNoTolerations, true)
		verifyResult(cs, 1, 0, ns)
	})
	/*
		Release : v1.16
		Testname: Scheduling, HostPort matching and HostIP and Protocol not-matching
		Description: Pods with the same HostPort value MUST be able to be scheduled to the same node
		if the HostIP or Protocol is different.
	*/
	framework.ConformanceIt("validates that there is no conflict between pods with same hostPort but different hostIP and protocol", func() {
		nodeName := GetNodeThatCanRunPod(f)

		// use a nodeSelector to make sure the test pods land on the same node, so that we can
		// explicitly verify whether a hostPort conflict exists or not
		ginkgo.By("Trying to apply a random label on the found node.")
		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
		v := "90"

		nodeSelector := make(map[string]string)
		nodeSelector[k] = v

		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		port := int32(54321)
		ginkgo.By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP 127.0.0.1 and expect scheduled", port))
		createHostPortPodOnNode(f, "pod1", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, true)

		ginkgo.By(fmt.Sprintf("Trying to create another pod(pod2) with hostport %v but hostIP 127.0.0.2 on the node which pod1 resides and expect scheduled", port))
		createHostPortPodOnNode(f, "pod2", ns, "127.0.0.2", port, v1.ProtocolTCP, nodeSelector, true)

		ginkgo.By(fmt.Sprintf("Trying to create a third pod(pod3) with hostport %v, hostIP 127.0.0.2 but use UDP protocol on the node which pod2 resides", port))
		createHostPortPodOnNode(f, "pod3", ns, "127.0.0.2", port, v1.ProtocolUDP, nodeSelector, true)
	})
	/*
		Release : v1.16
		Testname: Scheduling, HostPort and Protocol match, HostIPs different but one is default HostIP (0.0.0.0)
		Description: Pods with the same HostPort and Protocol, but different HostIPs, MUST NOT schedule to the
		same node if one of those IPs is the default HostIP of 0.0.0.0, which represents all IPs on the host.
	*/
	framework.ConformanceIt("validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP", func() {
		nodeName := GetNodeThatCanRunPod(f)

		// use a nodeSelector to make sure the test pods land on the same node, so that we can
		// explicitly verify whether a hostPort conflict exists or not
		ginkgo.By("Trying to apply a random label on the found node.")
		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
		v := "95"

		nodeSelector := make(map[string]string)
		nodeSelector[k] = v

		framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
		framework.ExpectNodeHasLabel(cs, nodeName, k, v)
		defer framework.RemoveLabelOffNode(cs, nodeName, k)

		port := int32(54322)
		ginkgo.By(fmt.Sprintf("Trying to create a pod(pod4) with hostport %v and hostIP 0.0.0.0(empty string here) and expect scheduled", port))
		createHostPortPodOnNode(f, "pod4", ns, "", port, v1.ProtocolTCP, nodeSelector, true)

		ginkgo.By(fmt.Sprintf("Trying to create another pod(pod5) with hostport %v but hostIP 127.0.0.1 on the node which pod4 resides and expect not scheduled", port))
		createHostPortPodOnNode(f, "pod5", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, false)
	})
	ginkgo.Context("PodTopologySpread Filtering", func() {
		var nodeNames []string
		topologyKey := "kubernetes.io/e2e-pts-filter"

		ginkgo.BeforeEach(func() {
			ginkgo.By("Trying to get 2 available nodes which can run pod")
			nodeNames = Get2NodesThatCanRunPod(f)
			ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey))
			for _, nodeName := range nodeNames {
				framework.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName)
			}
		})
		ginkgo.AfterEach(func() {
			for _, nodeName := range nodeNames {
				framework.RemoveLabelOffNode(cs, nodeName, topologyKey)
			}
		})
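
		// With maxSkew=1, whenUnsatisfiable=DoNotSchedule and a label selector that matches
		// every replica, the 4 pods below must end up split evenly (2 and 2) across the two
		// labelled nodes; the assertions at the end verify that distribution.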
		ginkgo.It("validates 4 pods with MaxSkew=1 are evenly distributed into 2 nodes", func() {
			podLabel := "e2e-pts-filter"
			replicas := 4
			rsConfig := pauseRSConfig{
				Replicas: int32(replicas),
				PodConfig: pausePodConfig{
					Name:      podLabel,
					Namespace: ns,
					Labels:    map[string]string{podLabel: ""},
					Affinity: &v1.Affinity{
						NodeAffinity: &v1.NodeAffinity{
							RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
								NodeSelectorTerms: []v1.NodeSelectorTerm{
									{
										MatchExpressions: []v1.NodeSelectorRequirement{
											{
												Key:      topologyKey,
												Operator: v1.NodeSelectorOpIn,
												Values:   nodeNames,
											},
										},
									},
								},
							},
						},
					},
					TopologySpreadConstraints: []v1.TopologySpreadConstraint{
						{
							MaxSkew:           1,
							TopologyKey:       topologyKey,
							WhenUnsatisfiable: v1.DoNotSchedule,
							LabelSelector: &metav1.LabelSelector{
								MatchExpressions: []metav1.LabelSelectorRequirement{
									{
										Key:      podLabel,
										Operator: metav1.LabelSelectorOpExists,
									},
								},
							},
						},
					},
				},
			}
			runPauseRS(f, rsConfig)
			podList, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
			framework.ExpectNoError(err)
			numInNode1, numInNode2 := 0, 0
			for _, pod := range podList.Items {
				if pod.Spec.NodeName == nodeNames[0] {
					numInNode1++
				} else if pod.Spec.NodeName == nodeNames[1] {
					numInNode2++
				}
			}
			expected := replicas / len(nodeNames)
			framework.ExpectEqual(numInNode1, expected, fmt.Sprintf("Pods are not distributed as expected on node %q", nodeNames[0]))
			framework.ExpectEqual(numInNode2, expected, fmt.Sprintf("Pods are not distributed as expected on node %q", nodeNames[1]))
		})
	})
})
// printAllKubeletPods outputs status of all kubelet pods into log.
func printAllKubeletPods(c clientset.Interface, nodeName string) {
	podList, err := e2ekubelet.GetKubeletPods(c, nodeName)
	if err != nil {
		framework.Logf("Unable to retrieve kubelet pods for node %v: %v", nodeName, err)
		return
	}
	for _, p := range podList.Items {
		framework.Logf("%v from %v started at %v (%d container statuses recorded)", p.Name, p.Namespace, p.Status.StartTime, len(p.Status.ContainerStatuses))
		for _, c := range p.Status.ContainerStatuses {
			framework.Logf("\tContainer %v ready: %v, restart count %v",
				c.Name, c.Ready, c.RestartCount)
		}
	}
}
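
// initPausePod builds (but does not create) a pause pod object from the given pausePodConfig.
// A short termination grace period is used so that test cleanup stays fast.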
func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
	var gracePeriod = int64(1)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:            conf.Name,
			Namespace:       conf.Namespace,
			Labels:          conf.Labels,
			Annotations:     conf.Annotations,
			OwnerReferences: conf.OwnerReferences,
		},
		Spec: v1.PodSpec{
			NodeSelector:              conf.NodeSelector,
			Affinity:                  conf.Affinity,
			TopologySpreadConstraints: conf.TopologySpreadConstraints,
			RuntimeClassName:          conf.RuntimeClassHandler,
			Containers: []v1.Container{
				{
					Name:  conf.Name,
					Image: imageutils.GetPauseImageName(),
					Ports: conf.Ports,
				},
			},
			Tolerations:                   conf.Tolerations,
			NodeName:                      conf.NodeName,
			PriorityClassName:             conf.PriorityClassName,
			TerminationGracePeriodSeconds: &gracePeriod,
		},
	}
	if conf.Resources != nil {
		pod.Spec.Containers[0].Resources = *conf.Resources
	}
	if conf.DeletionGracePeriodSeconds != nil {
		pod.ObjectMeta.DeletionGracePeriodSeconds = conf.DeletionGracePeriodSeconds
	}
	return pod
}
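
// createPausePod creates the pause pod described by conf, defaulting to the
// framework's test namespace when conf.Namespace is empty.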
func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
	namespace := conf.Namespace
	if len(namespace) == 0 {
		namespace = f.Namespace.Name
	}
	pod, err := f.ClientSet.CoreV1().Pods(namespace).Create(context.TODO(), initPausePod(f, conf), metav1.CreateOptions{})
	framework.ExpectNoError(err)
	return pod
}
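
// runPausePod creates a pause pod, waits until it is running, and returns the
// freshly fetched pod object.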
func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
	pod := createPausePod(f, conf)
	framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PollShortTimeout))
	pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), conf.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	return pod
}
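
// runPodAndGetNodeName runs the pod described by conf, records which node it was
// scheduled to, deletes the pod to free its resources, and returns the node name.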
func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string {
	// launch a pod to find a node which can launch a pod. We intentionally do
	// not just take the node list and choose the first of them. Depending on the
	// cluster and the scheduler it might be that a "normal" pod cannot be
	// scheduled onto it.
	pod := runPausePod(f, conf)

	ginkgo.By("Explicitly delete pod here to free the resource it takes.")
	err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
	framework.ExpectNoError(err)
	return pod.Spec.NodeName
}
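
// getRequestedCPU sums the CPU requests, in millicores, of all containers in the pod.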
func getRequestedCPU(pod v1.Pod) int64 {
	var result int64
	for _, container := range pod.Spec.Containers {
		result += container.Resources.Requests.Cpu().MilliValue()
	}
	return result
}
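
// getRequestedStorageEphemeralStorage sums the ephemeral-storage requests of all
// containers in the pod.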
func getRequestedStorageEphemeralStorage(pod v1.Pod) int64 {
	var result int64
	for _, container := range pod.Spec.Containers {
		result += container.Resources.Requests.StorageEphemeral().Value()
	}
	return result
}
// removeTaintFromNodeAction returns a closure that removes the given taint
// from the given node upon invocation.
func removeTaintFromNodeAction(cs clientset.Interface, nodeName string, testTaint v1.Taint) Action {
	return func() error {
		framework.RemoveTaintOffNode(cs, nodeName, testTaint)
		return nil
	}
}

// createPausePodAction returns a closure that creates a pause pod upon invocation.
func createPausePodAction(f *framework.Framework, conf pausePodConfig) Action {
	return func() error {
		_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), initPausePod(f, conf), metav1.CreateOptions{})
		return err
	}
}

// WaitForSchedulerAfterAction performs the provided action and then waits for
// scheduler to act on the given pod.
func WaitForSchedulerAfterAction(f *framework.Framework, action Action, ns, podName string, expectSuccess bool) {
	predicate := scheduleFailureEvent(podName)
	if expectSuccess {
		predicate = scheduleSuccessEvent(ns, podName, "" /* any node */)
	}
	success, err := observeEventAfterAction(f.ClientSet, f.Namespace.Name, predicate, action)
	framework.ExpectNoError(err)
	framework.ExpectEqual(success, true)
}

// TODO: upgrade calls in PodAffinity tests when we're able to run them
func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) {
	allPods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
	framework.ExpectNoError(err)
	scheduledPods, notScheduledPods := GetPodsScheduled(masterNodes, allPods)

	framework.ExpectEqual(len(notScheduledPods), expectedNotScheduled, fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))
	framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
}
// GetNodeThatCanRunPod launches a pod without any label and returns the name of the node it is scheduled to.
func GetNodeThatCanRunPod(f *framework.Framework) string {
	ginkgo.By("Trying to launch a pod without a label to get a node which can launch it.")
	return runPodAndGetNodeName(f, pausePodConfig{Name: "without-label"})
}

// Get2NodesThatCanRunPod returns the names of two different nodes that can run a pod.
func Get2NodesThatCanRunPod(f *framework.Framework) []string {
	firstNode := GetNodeThatCanRunPod(f)
	ginkgo.By("Trying to launch a pod without a label to get a node which can launch it.")
	pod := pausePodConfig{
		Name: "without-label",
		Affinity: &v1.Affinity{
			NodeAffinity: &v1.NodeAffinity{
				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{
						{
							MatchFields: []v1.NodeSelectorRequirement{
								{Key: "metadata.name", Operator: v1.NodeSelectorOpNotIn, Values: []string{firstNode}},
							},
						},
					},
				},
			},
		},
	}
	secondNode := runPodAndGetNodeName(f, pod)
	return []string{firstNode, secondNode}
}
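
// getNodeThatCanRunPodWithoutToleration launches a pod without any toleration and
// returns the name of the node it is scheduled to.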
func getNodeThatCanRunPodWithoutToleration(f *framework.Framework) string {
	ginkgo.By("Trying to launch a pod without a toleration to get a node which can launch it.")
	return runPodAndGetNodeName(f, pausePodConfig{Name: "without-toleration"})
}
// CreateHostPortPods creates an RC with pods that reserve host port 4321.
func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
	ginkgo.By("Running RC which reserves host port")
	config := &testutils.RCConfig{
		Client:    f.ClientSet,
		Name:      id,
		Namespace: f.Namespace.Name,
		Timeout:   defaultTimeout,
		Image:     imageutils.GetPauseImageName(),
		Replicas:  replicas,
		HostPorts: map[string]int{"port1": 4321},
	}
	err := e2erc.RunRC(*config)
	if expectRunning {
		framework.ExpectNoError(err)
	}
}

// CreateNodeSelectorPods creates an RC with pods that reserve host port 4321 and use the given node selector.
func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) error {
	ginkgo.By("Running RC which reserves host port and defines node selector")
	config := &testutils.RCConfig{
		Client:       f.ClientSet,
		Name:         id,
		Namespace:    f.Namespace.Name,
		Timeout:      defaultTimeout,
		Image:        imageutils.GetPauseImageName(),
		Replicas:     replicas,
		HostPorts:    map[string]int{"port1": 4321},
		NodeSelector: nodeSelector,
	}
	err := e2erc.RunRC(*config)
	if expectRunning {
		return err
	}
	return nil
}
// createHostPortPodOnNode creates a pod that uses the given host port and hostIP on the node
// selected by nodeSelector, and optionally asserts that it gets scheduled.
func createHostPortPodOnNode(f *framework.Framework, podName, ns, hostIP string, port int32, protocol v1.Protocol, nodeSelector map[string]string, expectScheduled bool) {
	hostIP = translateIPv4ToIPv6(hostIP)
	createPausePod(f, pausePodConfig{
		Name: podName,
		Ports: []v1.ContainerPort{
			{
				HostPort:      port,
				ContainerPort: 80,
				Protocol:      protocol,
				HostIP:        hostIP,
			},
		},
		NodeSelector: nodeSelector,
	})

	err := e2epod.WaitForPodNotPending(f.ClientSet, ns, podName)
	if expectScheduled {
		framework.ExpectNoError(err)
	}
}
// translateIPv4ToIPv6 maps an IPv4 address into a valid IPv6 address by adding the
// well-known prefix "0::ffff:" (https://tools.ietf.org/html/rfc2765) when the IP is
// IPv4 and the cluster IP family is IPv6; otherwise it returns the IP unchanged.
func translateIPv4ToIPv6(ip string) string {
	if framework.TestContext.IPFamily == "ipv6" && !k8utilnet.IsIPv6String(ip) && ip != "" {
		ip = "0::ffff:" + ip
	}
	return ip
}
// GetPodsScheduled returns the currently scheduled and not-yet-scheduled pods in the given
// list, ignoring pods that are assigned to master nodes.
func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
	for _, pod := range pods.Items {
		if !masterNodes.Has(pod.Spec.NodeName) {
			if pod.Spec.NodeName != "" {
				_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
				framework.ExpectEqual(scheduledCondition != nil, true)
				framework.ExpectEqual(scheduledCondition.Status, v1.ConditionTrue)
				scheduledPods = append(scheduledPods, pod)
			} else {
				_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
				framework.ExpectEqual(scheduledCondition != nil, true)
				framework.ExpectEqual(scheduledCondition.Status, v1.ConditionFalse)
				if scheduledCondition.Reason == "Unschedulable" {
					notScheduledPods = append(notScheduledPods, pod)
				}
			}
		}
	}
	return
}