balanced_allocation_test.go 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406
  1. /*
  2. Copyright 2019 The Kubernetes Authors.
  3. Licensed under the Apache License, Version 2.0 (the "License");
  4. you may not use this file except in compliance with the License.
  5. You may obtain a copy of the License at
  6. http://www.apache.org/licenses/LICENSE-2.0
  7. Unless required by applicable law or agreed to in writing, software
  8. distributed under the License is distributed on an "AS IS" BASIS,
  9. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. See the License for the specific language governing permissions and
  11. limitations under the License.
  12. */
  13. package noderesources
  14. import (
  15. "context"
  16. "reflect"
  17. "testing"
  18. v1 "k8s.io/api/core/v1"
  19. "k8s.io/apimachinery/pkg/api/resource"
  20. metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  21. utilfeature "k8s.io/apiserver/pkg/util/feature"
  22. featuregatetesting "k8s.io/component-base/featuregate/testing"
  23. "k8s.io/kubernetes/pkg/features"
  24. framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
  25. "k8s.io/kubernetes/pkg/scheduler/internal/cache"
  26. )
  27. // getExistingVolumeCountForNode gets the current number of volumes on node.
  28. func getExistingVolumeCountForNode(pods []*v1.Pod, maxVolumes int) int {
  29. volumeCount := 0
  30. for _, pod := range pods {
  31. volumeCount += len(pod.Spec.Volumes)
  32. }
  33. if maxVolumes-volumeCount > 0 {
  34. return maxVolumes - volumeCount
  35. }
  36. return 0
  37. }
// TestNodeResourcesBalancedAllocation exercises the BalancedAllocation score
// plugin: a node scores higher the closer its CPU and memory utilization
// fractions are to each other (and, with the BalanceAttachedNodeVolumes
// feature gate enabled, its volume utilization as well). Scores are on the
// 0..framework.MaxNodeScore (0-100) scale.
func TestNodeResourcesBalancedAllocation(t *testing.T) {
	// Enable volumesOnNodeForBalancing so volume counts participate in the
	// balanced node resource allocation; restored when the test ends.
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
	// Pod on machine4 requesting 3000m CPU / 5000 memory with one EBS volume.
	podwithVol1 := v1.PodSpec{
		Containers: []v1.Container{
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("1000m"),
						v1.ResourceMemory: resource.MustParse("2000"),
					},
				},
			},
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("2000m"),
						v1.ResourceMemory: resource.MustParse("3000"),
					},
				},
			},
		},
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"},
				},
			},
		},
		NodeName: "machine4",
	}
	// Pod on machine4 with zero resource requests but one EBS volume.
	podwithVol2 := v1.PodSpec{
		Containers: []v1.Container{
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("0m"),
						v1.ResourceMemory: resource.MustParse("0"),
					},
				},
			},
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("0m"),
						v1.ResourceMemory: resource.MustParse("0"),
					},
				},
			},
		},
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp1"},
				},
			},
		},
		NodeName: "machine4",
	}
	// Same shape as podwithVol2: zero requests, one EBS volume, machine4.
	podwithVol3 := v1.PodSpec{
		Containers: []v1.Container{
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("0m"),
						v1.ResourceMemory: resource.MustParse("0"),
					},
				},
			},
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("0m"),
						v1.ResourceMemory: resource.MustParse("0"),
					},
				},
			},
		},
		Volumes: []v1.Volume{
			{
				VolumeSource: v1.VolumeSource{
					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp1"},
				},
			},
		},
		NodeName: "machine4",
	}
	labels1 := map[string]string{
		"foo": "bar",
		"baz": "blah",
	}
	labels2 := map[string]string{
		"bar": "foo",
		"baz": "blah",
	}
	machine1Spec := v1.PodSpec{
		NodeName: "machine1",
	}
	machine2Spec := v1.PodSpec{
		NodeName: "machine2",
	}
	noResources := v1.PodSpec{
		Containers: []v1.Container{},
	}
	// Pod on machine1 requesting 3000m CPU and no memory.
	cpuOnly := v1.PodSpec{
		NodeName: "machine1",
		Containers: []v1.Container{
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("1000m"),
						v1.ResourceMemory: resource.MustParse("0"),
					},
				},
			},
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("2000m"),
						v1.ResourceMemory: resource.MustParse("0"),
					},
				},
			},
		},
	}
	// Copy of cpuOnly placed on machine2 (PodSpec copy is safe here; the
	// shared Containers slice is never mutated by the test).
	cpuOnly2 := cpuOnly
	cpuOnly2.NodeName = "machine2"
	// Pod on machine2 requesting 3000m CPU / 5000 memory.
	cpuAndMemory := v1.PodSpec{
		NodeName: "machine2",
		Containers: []v1.Container{
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("1000m"),
						v1.ResourceMemory: resource.MustParse("2000"),
					},
				},
			},
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("2000m"),
						v1.ResourceMemory: resource.MustParse("3000"),
					},
				},
			},
		},
	}
	// Same requests as cpuAndMemory but placed on machine3.
	cpuAndMemory3 := v1.PodSpec{
		NodeName: "machine3",
		Containers: []v1.Container{
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("1000m"),
						v1.ResourceMemory: resource.MustParse("2000"),
					},
				},
			},
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("2000m"),
						v1.ResourceMemory: resource.MustParse("3000"),
					},
				},
			},
		},
	}
	tests := []struct {
		pod          *v1.Pod          // pod being scored
		pods         []*v1.Pod        // pods already scheduled on the nodes
		nodes        []*v1.Node       // candidate nodes
		expectedList framework.NodeScoreList
		name         string
	}{
		{
			// Node1 scores (remaining resources) on 0-100 scale
			// CPU Fraction: 0 / 4000 = 0%
			// Memory Fraction: 0 / 10000 = 0%
			// Node1 Score: 100 - (0-0)*100 = 100
			// Node2 scores (remaining resources) on 0-100 scale
			// CPU Fraction: 0 / 4000 = 0%
			// Memory Fraction: 0 / 10000 = 0%
			// Node2 Score: 100 - (0-0)*100 = 100
			pod:          &v1.Pod{Spec: noResources},
			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
			name:         "nothing scheduled, nothing requested",
		},
		{
			// Node1 scores on 0-100 scale
			// CPU Fraction: 3000 / 4000 = 75%
			// Memory Fraction: 5000 / 10000 = 50%
			// Node1 Score: 100 - (0.75-0.5)*100 = 75
			// Node2 scores on 0-100 scale
			// CPU Fraction: 3000 / 6000 = 50%
			// Memory Fraction: 5000 / 10000 = 50%
			// Node2 Score: 100 - (0.5-0.5)*100 = 100
			pod:          &v1.Pod{Spec: cpuAndMemory},
			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 75}, {Name: "machine2", Score: framework.MaxNodeScore}},
			name:         "nothing scheduled, resources requested, differently sized machines",
		},
		{
			// Node1 scores on 0-100 scale
			// CPU Fraction: 0 / 4000 = 0%
			// Memory Fraction: 0 / 10000 = 0%
			// Node1 Score: 100 - (0-0)*100 = 100
			// Node2 scores on 0-100 scale
			// CPU Fraction: 0 / 4000 = 0%
			// Memory Fraction: 0 / 10000 = 0%
			// Node2 Score: 100 - (0-0)*100 = 100
			pod:          &v1.Pod{Spec: noResources},
			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
			name:         "no resources requested, pods scheduled",
			pods: []*v1.Pod{
				{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: machine2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: machine2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			},
		},
		{
			// Node1 scores on 0-100 scale
			// CPU Fraction: 6000 / 10000 = 60%
			// Memory Fraction: 0 / 20000 = 0%
			// Node1 Score: 100 - (0.6-0)*100 = 40
			// Node2 scores on 0-100 scale
			// CPU Fraction: 6000 / 10000 = 60%
			// Memory Fraction: 5000 / 20000 = 25%
			// Node2 Score: 100 - (0.6-0.25)*100 = 65
			pod:          &v1.Pod{Spec: noResources},
			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 40}, {Name: "machine2", Score: 65}},
			name:         "no resources requested, pods scheduled with resources",
			pods: []*v1.Pod{
				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: cpuOnly2, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: cpuAndMemory, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			},
		},
		{
			// Node1 scores on 0-100 scale
			// CPU Fraction: 6000 / 10000 = 60%
			// Memory Fraction: 5000 / 20000 = 25%
			// Node1 Score: 100 - (0.6-0.25)*100 = 65
			// Node2 scores on 0-100 scale
			// CPU Fraction: 6000 / 10000 = 60%
			// Memory Fraction: 10000 / 20000 = 50%
			// Node2 Score: 100 - (0.6-0.5)*100 = 90
			pod:          &v1.Pod{Spec: cpuAndMemory},
			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 65}, {Name: "machine2", Score: 90}},
			name:         "resources requested, pods scheduled with resources",
			pods: []*v1.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			// Node1 scores on 0-100 scale
			// CPU Fraction: 6000 / 10000 = 60%
			// Memory Fraction: 5000 / 20000 = 25%
			// Node1 Score: 100 - (0.6-0.25)*100 = 65
			// Node2 scores on 0-100 scale
			// CPU Fraction: 6000 / 10000 = 60%
			// Memory Fraction: 10000 / 50000 = 20%
			// Node2 Score: 100 - (0.6-0.2)*100 = 60
			pod:          &v1.Pod{Spec: cpuAndMemory},
			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 65}, {Name: "machine2", Score: 60}},
			name:         "resources requested, pods scheduled with resources, differently sized machines",
			pods: []*v1.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			// Node1 scores on 0-100 scale
			// CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
			// Memory Fraction: 0 / 10000 = 0
			// Node1 Score: 0
			// Node2 scores on 0-100 scale
			// CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
			// Memory Fraction 5000 / 10000 = 50%
			// Node2 Score: 0
			pod:          &v1.Pod{Spec: cpuOnly},
			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name:         "requested resources exceed node capacity",
			pods: []*v1.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			pod:          &v1.Pod{Spec: noResources},
			nodes:        []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name:         "zero node resources, pods scheduled with resources",
			pods: []*v1.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			// Machine4 will be chosen here because it already has a existing volume making the variance
			// of volume count, CPU usage, memory usage closer.
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					Volumes: []v1.Volume{
						{
							VolumeSource: v1.VolumeSource{
								AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp2"},
							},
						},
					},
				},
			},
			nodes:        []*v1.Node{makeNode("machine3", 3500, 40000), makeNode("machine4", 4000, 10000)},
			expectedList: []framework.NodeScore{{Name: "machine3", Score: 89}, {Name: "machine4", Score: 98}},
			name:         "Include volume count on a node for balanced resource allocation",
			pods: []*v1.Pod{
				{Spec: cpuAndMemory3},
				{Spec: podwithVol1},
				{Spec: podwithVol2},
				{Spec: podwithVol3},
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			snapshot := cache.NewSnapshot(test.pods, test.nodes)
			if len(test.pod.Spec.Volumes) > 0 {
				// Only the volume test case reaches here: seed each node's
				// transient info with its free volume slots (capped at 5)
				// and the number of volumes the incoming pod requests.
				maxVolumes := 5
				nodeInfoList, _ := snapshot.NodeInfos().List()
				for _, info := range nodeInfoList {
					info.TransientInfo.TransNodeInfo.AllocatableVolumesCount = getExistingVolumeCountForNode(info.Pods(), maxVolumes)
					info.TransientInfo.TransNodeInfo.RequestedVolumes = len(test.pod.Spec.Volumes)
				}
			}
			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
			p, _ := NewBalancedAllocation(nil, fh)
			// Score each candidate node independently and compare with the
			// precomputed expected score at the same index.
			for i := range test.nodes {
				hostResult, err := p.(framework.ScorePlugin).Score(context.Background(), nil, test.pod, test.nodes[i].Name)
				if err != nil {
					t.Errorf("unexpected error: %v", err)
				}
				if !reflect.DeepEqual(test.expectedList[i].Score, hostResult) {
					t.Errorf("expected %#v, got %#v", test.expectedList[i].Score, hostResult)
				}
			}
		})
	}
}