scheduler_bench_test.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package benchmark

import (
	"fmt"
	"sync/atomic"
	"testing"
	"time"

	v1 "k8s.io/api/core/v1"
	storagev1beta1 "k8s.io/api/storage/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/tools/cache"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/csi-translation-lib/plugins"
	csilibplugins "k8s.io/csi-translation-lib/plugins"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/test/integration/framework"
	testutils "k8s.io/kubernetes/test/utils"
)

var (
	defaultNodeStrategy = &testutils.TrivialNodePrepareStrategy{}
	testCSIDriver       = plugins.AWSEBSDriverName
	// From PV controller
	annBindCompleted = "pv.kubernetes.io/bind-completed"
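
	// Each case pre-creates existingPods scheduled pods across the given
	// number of nodes, then schedules at least minPods additional pods
	// during the timed phase.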
	defaultTests = []struct{ nodes, existingPods, minPods int }{
		{nodes: 500, existingPods: 500, minPods: 1000},
		{nodes: 5000, existingPods: 5000, minPods: 1000},
	}

	testNamespace  = "sched-test"
	setupNamespace = "sched-setup"
)
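
// The benchmarks below run under Go's standard benchmark runner; assuming the
// usual Kubernetes repo layout for this package, an invocation looks like:
//
//	go test -bench=BenchmarkScheduling ./test/integration/scheduler_perf/...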

// BenchmarkScheduling benchmarks the scheduling rate when the cluster has
// various quantities of nodes and scheduled pods.
func BenchmarkScheduling(b *testing.B) {
	testStrategy := testutils.NewSimpleWithControllerCreatePodStrategy("rc1")
	for _, test := range defaultTests {
		name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
		b.Run(name, func(b *testing.B) {
			nodeStrategies := []testutils.CountToStrategy{{Count: test.nodes, Strategy: defaultNodeStrategy}}
			benchmarkScheduling(test.existingPods, test.minPods, nodeStrategies, testStrategy, b)
		})
	}
}

// BenchmarkSchedulingPodAntiAffinity benchmarks the scheduling rate of pods with
// PodAntiAffinity rules when the cluster has various quantities of nodes and
// scheduled pods.
func BenchmarkSchedulingPodAntiAffinity(b *testing.B) {
	// Since the pods have anti-affinity to each other, the number of pods to
	// schedule can't exceed the number of nodes (the node is the topology
	// domain used in the test).
	tests := []struct{ nodes, existingPods, minPods int }{
		{nodes: 500, existingPods: 100, minPods: 400},
		{nodes: 5000, existingPods: 1000, minPods: 1000},
	}
	testBasePod := makeBasePodWithPodAntiAffinity(
		map[string]string{"name": "test", "color": "green"},
		map[string]string{"color": "green"})
	// The test strategy creates pods with anti-affinity to each other, so each
	// pod ends up on a separate node.
	testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod)
	for _, test := range tests {
		name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
		b.Run(name, func(b *testing.B) {
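			// Give every node a unique hostname label so that the
			// anti-affinity term, keyed on the hostname topology, can place
			// at most one pod per node.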
			var nodeStrategies []testutils.CountToStrategy
			for i := 0; i < test.nodes; i++ {
				nodeStrategy := testutils.NewLabelNodePrepareStrategy(v1.LabelHostname, fmt.Sprintf("node-%d", i))
				nodeStrategies = append(nodeStrategies, testutils.CountToStrategy{Count: 1, Strategy: nodeStrategy})
			}
			benchmarkScheduling(test.existingPods, test.minPods, nodeStrategies, testStrategy, b)
		})
	}
}

// BenchmarkSchedulingSecrets benchmarks the scheduling rate of pods with
// volumes that don't require any special handling, such as Secrets.
// It can be used to compare scheduler efficiency with the other benchmarks
// that use volume scheduling predicates.
func BenchmarkSchedulingSecrets(b *testing.B) {
	// The test strategy creates pods with a secret.
	testBasePod := makeBasePodWithSecret()
	testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod)
	for _, test := range defaultTests {
		name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
		b.Run(name, func(b *testing.B) {
			nodeStrategies := []testutils.CountToStrategy{{Count: test.nodes, Strategy: defaultNodeStrategy}}
			benchmarkScheduling(test.existingPods, test.minPods, nodeStrategies, testStrategy, b)
		})
	}
}

// BenchmarkSchedulingInTreePVs benchmarks the scheduling rate of pods with
// in-tree volumes (used via PV/PVC). Nodes have the default hardcoded attach
// limits (39 for AWS EBS).
func BenchmarkSchedulingInTreePVs(b *testing.B) {
	// The test strategy creates pods with an AWS EBS volume used via PV.
	baseClaim := makeBasePersistentVolumeClaim()
	basePod := makeBasePod()
	testStrategy := testutils.NewCreatePodWithPersistentVolumeStrategy(baseClaim, awsVolumeFactory, basePod)
	for _, test := range defaultTests {
		name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
		b.Run(name, func(b *testing.B) {
			nodeStrategies := []testutils.CountToStrategy{{Count: test.nodes, Strategy: defaultNodeStrategy}}
			benchmarkScheduling(test.existingPods, test.minPods, nodeStrategies, testStrategy, b)
		})
	}
}

// BenchmarkSchedulingWaitForFirstConsumerPVs benchmarks the scheduling rate
// of pods with volumes with VolumeBindingMode set to WaitForFirstConsumer.
func BenchmarkSchedulingWaitForFirstConsumerPVs(b *testing.B) {
	tests := []struct{ nodes, existingPods, minPods int }{
		{nodes: 500, existingPods: 500, minPods: 1000},
		// The default of 5000 existingPods is way too much for now.
	}
	basePod := makeBasePod()
	testStrategy := testutils.NewCreatePodWithPersistentVolumeWithFirstConsumerStrategy(gceVolumeFactory, basePod)
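
	// Place every node in zone1 so that it satisfies the node affinity that
	// gceVolumeFactory sets on its volumes.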
	nodeStrategy := testutils.NewLabelNodePrepareStrategy(v1.LabelZoneFailureDomain, "zone1")
	for _, test := range tests {
		name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
		b.Run(name, func(b *testing.B) {
			nodeStrategies := []testutils.CountToStrategy{{Count: test.nodes, Strategy: nodeStrategy}}
			benchmarkScheduling(test.existingPods, test.minPods, nodeStrategies, testStrategy, b)
		})
	}
}

// BenchmarkSchedulingMigratedInTreePVs benchmarks the scheduling rate of pods
// with in-tree volumes (used via PV/PVC) that are migrated to CSI. CSINode
// instances exist for all nodes and carry the annotation marking AWS as
// migrated.
func BenchmarkSchedulingMigratedInTreePVs(b *testing.B) {
	// The test strategy creates pods with an AWS EBS volume used via PV.
	baseClaim := makeBasePersistentVolumeClaim()
	basePod := makeBasePod()
	testStrategy := testutils.NewCreatePodWithPersistentVolumeStrategy(baseClaim, awsVolumeFactory, basePod)

	// Each node can use the same number of CSI volumes as the in-tree AWS
	// volume plugin, so the results should be comparable with
	// BenchmarkSchedulingInTreePVs.
	driverKey := util.GetCSIAttachLimitKey(testCSIDriver)
	allocatable := map[v1.ResourceName]string{
		v1.ResourceName(driverKey): fmt.Sprintf("%d", util.DefaultMaxEBSVolumes),
	}
	var count int32 = util.DefaultMaxEBSVolumes
	csiAllocatable := map[string]*storagev1beta1.VolumeNodeResources{
		testCSIDriver: {
			Count: &count,
		},
	}
	nodeStrategy := testutils.NewNodeAllocatableStrategy(allocatable, csiAllocatable, []string{csilibplugins.AWSEBSInTreePluginName})
	for _, test := range defaultTests {
		name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
		b.Run(name, func(b *testing.B) {
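			// Enable CSI migration for AWS EBS for the duration of this
			// sub-benchmark; SetFeatureGateDuringTest returns a cleanup func
			// that restores the previous gate value, invoked here via defer.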
			defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, features.CSIMigration, true)()
			defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, features.CSIMigrationAWS, true)()
			nodeStrategies := []testutils.CountToStrategy{{Count: test.nodes, Strategy: nodeStrategy}}
			benchmarkScheduling(test.existingPods, test.minPods, nodeStrategies, testStrategy, b)
		})
	}
}

// BenchmarkSchedulingCSIPVs benchmarks the scheduling rate of pods with CSI
// volumes (used via PV/PVC). The per-node volume limit is exposed through
// CSINode and node.status.allocatable.
func BenchmarkSchedulingCSIPVs(b *testing.B) {
	// The test strategy creates pods with a CSI volume used via PV.
	baseClaim := makeBasePersistentVolumeClaim()
	basePod := makeBasePod()
	testStrategy := testutils.NewCreatePodWithPersistentVolumeStrategy(baseClaim, csiVolumeFactory, basePod)

	// Each node can use the same number of CSI volumes as the in-tree AWS
	// volume plugin, so the results should be comparable with
	// BenchmarkSchedulingInTreePVs.
	driverKey := util.GetCSIAttachLimitKey(testCSIDriver)
	allocatable := map[v1.ResourceName]string{
		v1.ResourceName(driverKey): fmt.Sprintf("%d", util.DefaultMaxEBSVolumes),
	}
	var count int32 = util.DefaultMaxEBSVolumes
	csiAllocatable := map[string]*storagev1beta1.VolumeNodeResources{
		testCSIDriver: {
			Count: &count,
		},
	}
	nodeStrategy := testutils.NewNodeAllocatableStrategy(allocatable, csiAllocatable, []string{})
	for _, test := range defaultTests {
		name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
		b.Run(name, func(b *testing.B) {
			nodeStrategies := []testutils.CountToStrategy{{Count: test.nodes, Strategy: nodeStrategy}}
			benchmarkScheduling(test.existingPods, test.minPods, nodeStrategies, testStrategy, b)
		})
	}
}

// BenchmarkSchedulingPodAffinity benchmarks the scheduling rate of pods with
// PodAffinity rules when the cluster has various quantities of nodes and
// scheduled pods.
func BenchmarkSchedulingPodAffinity(b *testing.B) {
	testBasePod := makeBasePodWithPodAffinity(
		map[string]string{"foo": ""},
		map[string]string{"foo": ""},
	)
	// The test strategy creates pods with affinity for each other.
	testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod)
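
	// All nodes share the same zone label, so the zone-keyed affinity term is
	// satisfiable on every node.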
	nodeStrategy := testutils.NewLabelNodePrepareStrategy(v1.LabelZoneFailureDomain, "zone1")
	for _, test := range defaultTests {
		name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
		b.Run(name, func(b *testing.B) {
			nodeStrategies := []testutils.CountToStrategy{{Count: test.nodes, Strategy: nodeStrategy}}
			benchmarkScheduling(test.existingPods, test.minPods, nodeStrategies, testStrategy, b)
		})
	}
}

// BenchmarkSchedulingPreferredPodAffinity benchmarks the scheduling rate of
// pods with preferred PodAffinity rules when the cluster has various
// quantities of nodes and scheduled pods.
func BenchmarkSchedulingPreferredPodAffinity(b *testing.B) {
	testBasePod := makeBasePodWithPreferredPodAffinity(
		map[string]string{"foo": ""},
		map[string]string{"foo": ""},
	)
	// The test strategy creates pods with affinity for each other.
	testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod)
	for _, test := range defaultTests {
		name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
		b.Run(name, func(b *testing.B) {
			var nodeStrategies []testutils.CountToStrategy
			for i := 0; i < test.nodes; i++ {
				nodeStrategy := testutils.NewLabelNodePrepareStrategy(v1.LabelHostname, fmt.Sprintf("node-%d", i))
				nodeStrategies = append(nodeStrategies, testutils.CountToStrategy{Count: 1, Strategy: nodeStrategy})
			}
			benchmarkScheduling(test.existingPods, test.minPods, nodeStrategies, testStrategy, b)
		})
	}
}

// BenchmarkSchedulingPreferredPodAntiAffinity benchmarks the scheduling rate
// of pods with preferred PodAntiAffinity rules when the cluster has various
// quantities of nodes and scheduled pods.
func BenchmarkSchedulingPreferredPodAntiAffinity(b *testing.B) {
	testBasePod := makeBasePodWithPreferredPodAntiAffinity(
		map[string]string{"foo": ""},
		map[string]string{"foo": ""},
	)
	// The test strategy creates pods with anti-affinity to each other.
	testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod)
	for _, test := range defaultTests {
		name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
		b.Run(name, func(b *testing.B) {
			var nodeStrategies []testutils.CountToStrategy
			for i := 0; i < test.nodes; i++ {
				nodeStrategy := testutils.NewLabelNodePrepareStrategy(v1.LabelHostname, fmt.Sprintf("node-%d", i))
				nodeStrategies = append(nodeStrategies, testutils.CountToStrategy{Count: 1, Strategy: nodeStrategy})
			}
			benchmarkScheduling(test.existingPods, test.minPods, nodeStrategies, testStrategy, b)
		})
	}
}

// BenchmarkSchedulingNodeAffinity benchmarks the scheduling rate of pods with
// NodeAffinity rules when the cluster has various quantities of nodes and
// scheduled pods.
func BenchmarkSchedulingNodeAffinity(b *testing.B) {
	testBasePod := makeBasePodWithNodeAffinity(v1.LabelZoneFailureDomain, []string{"zone1", "zone2"})
	// The test strategy creates pods with node affinity for the zone labels
	// used in the test.
	testStrategy := testutils.NewCustomCreatePodStrategy(testBasePod)
	nodeStrategy := testutils.NewLabelNodePrepareStrategy(v1.LabelZoneFailureDomain, "zone1")
	for _, test := range defaultTests {
		name := fmt.Sprintf("%vNodes/%vPods", test.nodes, test.existingPods)
		b.Run(name, func(b *testing.B) {
			nodeStrategies := []testutils.CountToStrategy{{Count: test.nodes, Strategy: nodeStrategy}}
			benchmarkScheduling(test.existingPods, test.minPods, nodeStrategies, testStrategy, b)
		})
	}
}

// makeBasePodWithPodAntiAffinity creates a Pod object to be used as a template.
// The Pod has a PodAntiAffinity requirement against pods with the given labels.
func makeBasePodWithPodAntiAffinity(podLabels, affinityLabels map[string]string) *v1.Pod {
	basePod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "anti-affinity-pod-",
			Labels:       podLabels,
		},
		Spec: testutils.MakePodSpec(),
	}
	basePod.Spec.Affinity = &v1.Affinity{
		PodAntiAffinity: &v1.PodAntiAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
				{
					LabelSelector: &metav1.LabelSelector{
						MatchLabels: affinityLabels,
					},
					TopologyKey: v1.LabelHostname,
					Namespaces:  []string{testNamespace, setupNamespace},
				},
			},
		},
	}
	return basePod
}

// makeBasePodWithPreferredPodAntiAffinity creates a Pod object to be used as a
// template. The Pod has a preferred PodAntiAffinity with pods with the given
// labels.
func makeBasePodWithPreferredPodAntiAffinity(podLabels, affinityLabels map[string]string) *v1.Pod {
	basePod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "preferred-affinity-pod-",
			Labels:       podLabels,
		},
		Spec: testutils.MakePodSpec(),
	}
	basePod.Spec.Affinity = &v1.Affinity{
		PodAntiAffinity: &v1.PodAntiAffinity{
			PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
				{
					PodAffinityTerm: v1.PodAffinityTerm{
						LabelSelector: &metav1.LabelSelector{
							MatchLabels: affinityLabels,
						},
						TopologyKey: v1.LabelHostname,
						Namespaces:  []string{testNamespace, setupNamespace},
					},
					Weight: 1,
				},
			},
		},
	}
	return basePod
}

// makeBasePodWithPreferredPodAffinity creates a Pod object to be used as a
// template. The Pod has a preferred PodAffinity with pods with the given
// labels.
func makeBasePodWithPreferredPodAffinity(podLabels, affinityLabels map[string]string) *v1.Pod {
	basePod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "preferred-affinity-pod-",
			Labels:       podLabels,
		},
		Spec: testutils.MakePodSpec(),
	}
	basePod.Spec.Affinity = &v1.Affinity{
		PodAffinity: &v1.PodAffinity{
			PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
				{
					PodAffinityTerm: v1.PodAffinityTerm{
						LabelSelector: &metav1.LabelSelector{
							MatchLabels: affinityLabels,
						},
						TopologyKey: v1.LabelHostname,
						Namespaces:  []string{testNamespace, setupNamespace},
					},
					Weight: 1,
				},
			},
		},
	}
	return basePod
}

// makeBasePodWithPodAffinity creates a Pod object to be used as a template.
// The Pod has a PodAffinity requirement against pods with the given labels.
func makeBasePodWithPodAffinity(podLabels, affinityZoneLabels map[string]string) *v1.Pod {
	basePod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "affinity-pod-",
			Labels:       podLabels,
		},
		Spec: testutils.MakePodSpec(),
	}
	basePod.Spec.Affinity = &v1.Affinity{
		PodAffinity: &v1.PodAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
				{
					LabelSelector: &metav1.LabelSelector{
						MatchLabels: affinityZoneLabels,
					},
					TopologyKey: v1.LabelZoneFailureDomain,
					Namespaces:  []string{testNamespace, setupNamespace},
				},
			},
		},
	}
	return basePod
}

// makeBasePodWithNodeAffinity creates a Pod object to be used as a template.
// The Pod has a NodeAffinity requirement matching nodes with the given key and
// values.
func makeBasePodWithNodeAffinity(key string, vals []string) *v1.Pod {
	basePod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "node-affinity-",
		},
		Spec: testutils.MakePodSpec(),
	}
	basePod.Spec.Affinity = &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{
					{
						MatchExpressions: []v1.NodeSelectorRequirement{
							{
								Key:      key,
								Operator: v1.NodeSelectorOpIn,
								Values:   vals,
							},
						},
					},
				},
			},
		},
	}
	return basePod
}

// benchmarkScheduling benchmarks the scheduling rate with a specific number of
// nodes and a specific number of pods already scheduled.
// It schedules numExistingPods pods before the benchmark starts, and at least
// minPods pods during the benchmark.
func benchmarkScheduling(numExistingPods, minPods int,
	nodeStrategies []testutils.CountToStrategy,
	testPodStrategy testutils.TestPodCreateStrategy,
	b *testing.B) {
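	// Make sure the timed phase schedules at least minPods pods, regardless of
	// the iteration count chosen by the testing package.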
	if b.N < minPods {
		b.N = minPods
	}
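
	// mustSetupScheduler (defined elsewhere in this package) brings up the
	// scheduler for the test and returns a teardown func, a pod informer, and
	// a clientset.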
	finalFunc, podInformer, clientset := mustSetupScheduler()
	defer finalFunc()

	nodePreparer := framework.NewIntegrationTestNodePreparer(
		clientset,
		nodeStrategies,
		"scheduler-perf-")
	if err := nodePreparer.PrepareNodes(); err != nil {
		klog.Fatalf("%v", err)
	}
	defer nodePreparer.CleanupNodes()

	config := testutils.NewTestPodCreatorConfig()
	config.AddStrategy(setupNamespace, numExistingPods, testPodStrategy)
	podCreator := testutils.NewTestPodCreator(clientset, config)
	podCreator.CreatePods()
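
	// Block until all existing pods are scheduled, so that they act as
	// pre-existing cluster load rather than as part of the measured run.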
	for {
		scheduled, err := getScheduledPods(podInformer)
		if err != nil {
			klog.Fatalf("%v", err)
		}
		if len(scheduled) >= numExistingPods {
			break
		}
		klog.Infof("got %d existing pods, required: %d", len(scheduled), numExistingPods)
		time.Sleep(1 * time.Second)
	}
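
	// Count a pod as scheduled when its NodeName transitions from empty to
	// non-empty, and signal completion once b.N pods have been placed.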
	scheduled := int32(0)
	completedCh := make(chan struct{})
	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		UpdateFunc: func(old, cur interface{}) {
			curPod := cur.(*v1.Pod)
			oldPod := old.(*v1.Pod)

			if len(oldPod.Spec.NodeName) == 0 && len(curPod.Spec.NodeName) > 0 {
				if atomic.AddInt32(&scheduled, 1) >= int32(b.N) {
					completedCh <- struct{}{}
				}
			}
		},
	})

	// Start the measured phase: create b.N pods in the test namespace.
	b.ResetTimer()
	config = testutils.NewTestPodCreatorConfig()
	config.AddStrategy(testNamespace, b.N, testPodStrategy)
	podCreator = testutils.NewTestPodCreator(clientset, config)
	podCreator.CreatePods()

	<-completedCh

	// Note: without this line, the overhead of the deferred cleanup above
	// would be included in the measurement.
	b.StopTimer()
}

// makeBasePodWithSecret creates a Pod object to be used as a template.
// The pod uses a single Secret volume.
func makeBasePodWithSecret() *v1.Pod {
	basePod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "secret-volume-",
		},
		Spec: testutils.MakePodSpec(),
	}

	volumes := []v1.Volume{
		{
			Name: "secret",
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{
					SecretName: "secret",
				},
			},
		},
	}
	basePod.Spec.Volumes = volumes
	return basePod
}

// makeBasePod creates a Pod object to be used as a template.
func makeBasePod() *v1.Pod {
	basePod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pod-",
		},
		Spec: testutils.MakePodSpec(),
	}
	return basePod
}

func makeBasePersistentVolumeClaim() *v1.PersistentVolumeClaim {
	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			// Name is filled in by NewCreatePodWithPersistentVolumeStrategy.
			Annotations: map[string]string{
				annBindCompleted: "true",
			},
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
				},
			},
		},
	}
}

func awsVolumeFactory(id int) *v1.PersistentVolume {
	return &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: fmt.Sprintf("vol-%d", id),
		},
		Spec: v1.PersistentVolumeSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
			},
			PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimRetain,
			PersistentVolumeSource: v1.PersistentVolumeSource{
				AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
					// VolumeID must be unique for each PV, so every PV is
					// counted as a separate volume in MaxPDVolumeCountChecker
					// predicate.
					VolumeID: fmt.Sprintf("vol-%d", id),
				},
			},
		},
	}
}

func gceVolumeFactory(id int) *v1.PersistentVolume {
	return &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: fmt.Sprintf("vol-%d", id),
		},
		Spec: v1.PersistentVolumeSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
			},
			PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimRetain,
			PersistentVolumeSource: v1.PersistentVolumeSource{
				GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
					FSType: "ext4",
					PDName: fmt.Sprintf("vol-%d-pvc", id),
				},
			},
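			// Pin the volume to zone1 so that it matches the nodes prepared
			// by BenchmarkSchedulingWaitForFirstConsumerPVs.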
			NodeAffinity: &v1.VolumeNodeAffinity{
				Required: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{
						{
							MatchExpressions: []v1.NodeSelectorRequirement{
								{
									Key:      v1.LabelZoneFailureDomain,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"zone1"},
								},
							},
						},
					},
				},
			},
		},
	}
}

func csiVolumeFactory(id int) *v1.PersistentVolume {
	return &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: fmt.Sprintf("vol-%d", id),
		},
		Spec: v1.PersistentVolumeSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
			},
			PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimRetain,
			PersistentVolumeSource: v1.PersistentVolumeSource{
				CSI: &v1.CSIPersistentVolumeSource{
					// Handle must be unique for each PV, so every PV is
					// counted as a separate volume in CSIMaxVolumeLimitChecker
					// predicate.
					VolumeHandle: fmt.Sprintf("vol-%d", id),
					Driver:       testCSIDriver,
				},
			},
		},
	}
}