priorities_test.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduler

import (
	"context"
	"fmt"
	"strings"
	"testing"

	"k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
	testutils "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// This file tests the scheduler priority functions.
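//
// The helpers used below (initTest, cleanupTest, createNodes, runPausePod,
// waitForNodeLabels, podScheduled, and similar) are shared test utilities
// defined elsewhere in this test package.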

// TestNodeAffinity verifies that scheduler's node affinity priority function
// works correctly.
func TestNodeAffinity(t *testing.T) {
	testCtx := initTest(t, "node-affinity")
	defer cleanupTest(t, testCtx)
	// Add a few nodes.
	nodes, err := createNodes(testCtx.clientSet, "testnode", nil, 5)
	if err != nil {
		t.Fatalf("Cannot create nodes: %v", err)
	}
	// Add a label to one of the nodes.
	labeledNode := nodes[1]
	labelKey := "kubernetes.io/node-topologyKey"
	labelValue := "topologyvalue"
	labels := map[string]string{
		labelKey: labelValue,
	}
	if err = testutils.AddLabelsToNode(testCtx.clientSet, labeledNode.Name, labels); err != nil {
		t.Fatalf("Cannot add labels to node: %v", err)
	}
	if err = waitForNodeLabels(testCtx.clientSet, labeledNode.Name, labels); err != nil {
		t.Fatalf("Adding labels to node didn't succeed: %v", err)
	}
	// Create a pod with node affinity.
	podName := "pod-with-node-affinity"
	pod, err := runPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{
		Name:      podName,
		Namespace: testCtx.ns.Name,
		Affinity: &v1.Affinity{
			NodeAffinity: &v1.NodeAffinity{
				PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
					{
						Preference: v1.NodeSelectorTerm{
							MatchExpressions: []v1.NodeSelectorRequirement{
								{
									Key:      labelKey,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{labelValue},
								},
							},
						},
						Weight: 20,
					},
				},
			},
		},
	}))
	if err != nil {
		t.Fatalf("Error running pause pod: %v", err)
	}
	if pod.Spec.NodeName != labeledNode.Name {
		t.Errorf("Pod %v got scheduled on an unexpected node: %v. Expected node: %v.", podName, pod.Spec.NodeName, labeledNode.Name)
	} else {
		t.Logf("Pod %v got successfully scheduled on node %v.", podName, pod.Spec.NodeName)
	}
}

// TestPodAffinity verifies that scheduler's pod affinity priority function
// works correctly.
func TestPodAffinity(t *testing.T) {
	testCtx := initTest(t, "pod-affinity")
	defer cleanupTest(t, testCtx)
	// Add a few nodes.
	nodesInTopology, err := createNodes(testCtx.clientSet, "in-topology", nil, 5)
	if err != nil {
		t.Fatalf("Cannot create nodes: %v", err)
	}
	topologyKey := "node-topologykey"
	topologyValue := "topologyvalue"
	nodeLabels := map[string]string{
		topologyKey: topologyValue,
	}
	for _, node := range nodesInTopology {
		// Add topology key to all the nodes.
		if err = testutils.AddLabelsToNode(testCtx.clientSet, node.Name, nodeLabels); err != nil {
			t.Fatalf("Cannot add labels to node %v: %v", node.Name, err)
		}
		if err = waitForNodeLabels(testCtx.clientSet, node.Name, nodeLabels); err != nil {
			t.Fatalf("Adding labels to node %v didn't succeed: %v", node.Name, err)
		}
	}
	// Add a pod with a label and wait for it to schedule.
	labelKey := "service"
	labelValue := "S1"
	_, err = runPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{
		Name:      "attractor-pod",
		Namespace: testCtx.ns.Name,
		Labels:    map[string]string{labelKey: labelValue},
	}))
	if err != nil {
		t.Fatalf("Error running the attractor pod: %v", err)
	}
	// Add a few more nodes without the topology label.
	_, err = createNodes(testCtx.clientSet, "other-node", nil, 5)
	if err != nil {
		t.Fatalf("Cannot create the second set of nodes: %v", err)
	}
	// Add a new pod with affinity to the attractor pod.
	podName := "pod-with-podaffinity"
	pod, err := runPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{
		Name:      podName,
		Namespace: testCtx.ns.Name,
		Affinity: &v1.Affinity{
			PodAffinity: &v1.PodAffinity{
				PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
					{
						PodAffinityTerm: v1.PodAffinityTerm{
							LabelSelector: &metav1.LabelSelector{
								MatchExpressions: []metav1.LabelSelectorRequirement{
									{
										Key:      labelKey,
										Operator: metav1.LabelSelectorOpIn,
										Values:   []string{labelValue, "S3"},
									},
									{
										Key:      labelKey,
										Operator: metav1.LabelSelectorOpNotIn,
										Values:   []string{"S2"},
									},
									{
										Key:      labelKey,
										Operator: metav1.LabelSelectorOpExists,
									},
								},
							},
							TopologyKey: topologyKey,
							Namespaces:  []string{testCtx.ns.Name},
						},
						Weight: 50,
					},
				},
			},
		},
	}))
	if err != nil {
		t.Fatalf("Error running pause pod: %v", err)
	}
	// The new pod must be scheduled on one of the nodes with the same topology
	// key-value as the attractor pod.
	for _, node := range nodesInTopology {
		if node.Name == pod.Spec.NodeName {
			t.Logf("Pod %v got successfully scheduled on node %v.", podName, pod.Spec.NodeName)
			return
		}
	}
	t.Errorf("Pod %v got scheduled on an unexpected node: %v.", podName, pod.Spec.NodeName)
}

// TestImageLocality verifies that the scheduler's image locality priority function
// works correctly, i.e., the pod gets scheduled to the node where its container images are ready.
func TestImageLocality(t *testing.T) {
	testCtx := initTest(t, "image-locality")
	defer cleanupTest(t, testCtx)
	// Use a fake image with a relatively large size as the pod's container image.
	image := v1.ContainerImage{
		Names: []string{
			"fake-large-image:v1",
		},
		SizeBytes: 3000 * 1024 * 1024,
	}
	// Create a node with the large image.
	nodeWithLargeImage, err := createNodeWithImages(testCtx.clientSet, "testnode-large-image", nil, []v1.ContainerImage{image})
	if err != nil {
		t.Fatalf("cannot create node with a large image: %v", err)
	}
	// Add a few nodes.
	_, err = createNodes(testCtx.clientSet, "testnode", nil, 10)
	if err != nil {
		t.Fatalf("cannot create nodes: %v", err)
	}
	// Create a pod with containers each having the specified image.
	podName := "pod-using-large-image"
	pod, err := runPodWithContainers(testCtx.clientSet, initPodWithContainers(testCtx.clientSet, &podWithContainersConfig{
		Name:       podName,
		Namespace:  testCtx.ns.Name,
		Containers: makeContainersWithImages(image.Names),
	}))
	if err != nil {
		t.Fatalf("error running pod with images: %v", err)
	}
	if pod.Spec.NodeName != nodeWithLargeImage.Name {
		t.Errorf("pod %v got scheduled on an unexpected node: %v. Expected node: %v.", podName, pod.Spec.NodeName, nodeWithLargeImage.Name)
	} else {
		t.Logf("pod %v got successfully scheduled on node %v.", podName, pod.Spec.NodeName)
	}
}

// makeContainersWithImages returns a list of v1.Container objects, one for each given image.
// Duplicates of an image are ignored, i.e., each image is used only once.
func makeContainersWithImages(images []string) []v1.Container {
	var containers []v1.Container
	usedImages := make(map[string]struct{})
	for _, image := range images {
		if _, ok := usedImages[image]; !ok {
			containers = append(containers, v1.Container{
				Name:  strings.Replace(image, ":", "-", -1) + "-container",
				Image: image,
			})
			usedImages[image] = struct{}{}
		}
	}
	return containers
}
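
// For example, makeContainersWithImages([]string{"fake-large-image:v1", "fake-large-image:v1"})
// yields a single container named "fake-large-image-v1-container" using that image.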

// TestEvenPodsSpreadPriority verifies that the EvenPodsSpread priority works as expected.
func TestEvenPodsSpreadPriority(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EvenPodsSpread, true)()
	testCtx := initTest(t, "eps-priority")
	cs := testCtx.clientSet
	ns := testCtx.ns.Name
	defer cleanupTest(t, testCtx)
	// Add 4 nodes.
	nodes, err := createNodes(cs, "node", nil, 4)
	if err != nil {
		t.Fatalf("Cannot create nodes: %v", err)
	}
	for i, node := range nodes {
		// Apply labels "zone: zone-{0,1}" and "node: <node name>" to each node.
		labels := map[string]string{
			"zone": fmt.Sprintf("zone-%d", i/2),
			"node": node.Name,
		}
		if err = testutils.AddLabelsToNode(cs, node.Name, labels); err != nil {
			t.Fatalf("Cannot add labels to node: %v", err)
		}
		if err = waitForNodeLabels(cs, node.Name, labels); err != nil {
			t.Fatalf("Adding labels to node failed: %v", err)
		}
	}
	// Taint the 0th node.
	taint := v1.Taint{
		Key:    "k1",
		Value:  "v1",
		Effect: v1.TaintEffectNoSchedule,
	}
	if err = addTaintToNode(cs, nodes[0].Name, taint); err != nil {
		t.Fatalf("Adding taint to node failed: %v", err)
	}
	if err = waitForNodeTaints(cs, nodes[0], []v1.Taint{taint}); err != nil {
		t.Fatalf("Taint not seen on node: %v", err)
	}
	pause := imageutils.GetPauseImageName()
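	// Note: softSpread and hardSpread used in the cases below are shared test helpers
	// defined elsewhere in this package; they are assumed to correspond to
	// v1.ScheduleAnyway and v1.DoNotSchedule, the two UnsatisfiableConstraintAction values.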
	tests := []struct {
		name         string
		incomingPod  *v1.Pod
		existingPods []*v1.Pod
		fits         bool
		want         []string // nodes expected to schedule onto
	}{
		// note: naming starts at index 0
		// the symbol ~X~ means that node is infeasible
		{
			name: "place pod on a ~0~/1/2/3 cluster with MaxSkew=1, node-1 is the preferred fit",
			incomingPod: st.MakePod().Namespace(ns).Name("p").Label("foo", "").Container(pause).
				SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()).
				Obj(),
			existingPods: []*v1.Pod{
				st.MakePod().Namespace(ns).Name("p1").Node("node-1").Label("foo", "").Container(pause).Obj(),
				st.MakePod().Namespace(ns).Name("p2a").Node("node-2").Label("foo", "").Container(pause).Obj(),
				st.MakePod().Namespace(ns).Name("p2b").Node("node-2").Label("foo", "").Container(pause).Obj(),
				st.MakePod().Namespace(ns).Name("p3a").Node("node-3").Label("foo", "").Container(pause).Obj(),
				st.MakePod().Namespace(ns).Name("p3b").Node("node-3").Label("foo", "").Container(pause).Obj(),
				st.MakePod().Namespace(ns).Name("p3c").Node("node-3").Label("foo", "").Container(pause).Obj(),
			},
			fits: true,
			want: []string{"node-1"},
		},
		{
			name: "combined with hardSpread constraint on a ~4~/0/1/2 cluster",
			incomingPod: st.MakePod().Namespace(ns).Name("p").Label("foo", "").Container(pause).
				SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()).
				SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
				Obj(),
			existingPods: []*v1.Pod{
				st.MakePod().Namespace(ns).Name("p0a").Node("node-0").Label("foo", "").Container(pause).Obj(),
				st.MakePod().Namespace(ns).Name("p0b").Node("node-0").Label("foo", "").Container(pause).Obj(),
				st.MakePod().Namespace(ns).Name("p0c").Node("node-0").Label("foo", "").Container(pause).Obj(),
				st.MakePod().Namespace(ns).Name("p0d").Node("node-0").Label("foo", "").Container(pause).Obj(),
				st.MakePod().Namespace(ns).Name("p2").Node("node-2").Label("foo", "").Container(pause).Obj(),
				st.MakePod().Namespace(ns).Name("p3a").Node("node-3").Label("foo", "").Container(pause).Obj(),
				st.MakePod().Namespace(ns).Name("p3b").Node("node-3").Label("foo", "").Container(pause).Obj(),
			},
			fits: true,
			want: []string{"node-2"},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			allPods := append(tt.existingPods, tt.incomingPod)
			defer cleanupPods(cs, t, allPods)
			for _, pod := range tt.existingPods {
				createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
				if err != nil {
					t.Fatalf("Test Failed: error while creating pod during test: %v", err)
				}
				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduled(cs, createdPod.Namespace, createdPod.Name))
				if err != nil {
					t.Errorf("Test Failed: error while waiting for pod during test: %v", err)
				}
			}
			testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(context.TODO(), tt.incomingPod, metav1.CreateOptions{})
			if err != nil && !apierrors.IsInvalid(err) {
				t.Fatalf("Test Failed: error while creating pod during test: %v", err)
			}
			if tt.fits {
				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduledIn(cs, testPod.Namespace, testPod.Name, tt.want))
			} else {
				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
			}
			if err != nil {
				t.Errorf("Test Failed: %v", err)
			}
		})
	}
}