density.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scalability

import (
	"context"
	"fmt"
	"math"
	"os"
	"sort"
	"strconv"
	"sync"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utiluuid "k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/watch"
	clientset "k8s.io/client-go/kubernetes"
	scaleclient "k8s.io/client-go/scale"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/kubernetes/pkg/apis/batch"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	"k8s.io/kubernetes/test/e2e/framework/timer"
	testutils "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

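// Thresholds and polling settings shared by the density tests defined below.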
const (
	PodStartupLatencyThreshold = 5 * time.Second
	MinSaturationThreshold     = 2 * time.Minute
	MinPodsPerSecondThroughput = 8
	DensityPollInterval        = 10 * time.Second
	MinPodStartupMeasurements  = 500
)

// Maximum container failures this test tolerates before failing.
var MaxContainerFailures = 0

// Maximum no. of missing measurements related to pod-startup that the test tolerates.
var MaxMissingPodStartupMeasurements = 0

// Number of nodes in the cluster (computed inside BeforeEach).
var nodeCount = 0

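// DensityTestConfig describes a single density test run: the collections to
// create, the clients to use, and the secrets, configmaps and daemonsets that
// accompany the saturation pods.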
type DensityTestConfig struct {
	Configs      []testutils.RunObjectConfig
	ClientSets   []clientset.Interface
	ScaleClients []scaleclient.ScalesGetter
	PollInterval time.Duration
	PodCount     int
	// What kind of resource we want to create
	kind             schema.GroupKind
	SecretConfigs    []*testutils.SecretConfig
	ConfigMapConfigs []*testutils.ConfigMapConfig
	DaemonConfigs    []*testutils.DaemonConfig
}

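// saturationTime records how long it took to saturate the cluster with pods
// and the resulting throughput; it is printed as JSON in the test logs.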
type saturationTime struct {
	TimeToSaturate time.Duration `json:"timeToSaturate"`
	NumberOfNodes  int           `json:"numberOfNodes"`
	NumberOfPods   int           `json:"numberOfPods"`
	Throughput     float32       `json:"throughput"`
}

func (dtc *DensityTestConfig) runSecretConfigs(testPhase *timer.Phase) {
	defer testPhase.End()
	for _, sc := range dtc.SecretConfigs {
		sc.Run()
	}
}

func (dtc *DensityTestConfig) runConfigMapConfigs(testPhase *timer.Phase) {
	defer testPhase.End()
	for _, cmc := range dtc.ConfigMapConfigs {
		cmc.Run()
	}
}

func (dtc *DensityTestConfig) runDaemonConfigs(testPhase *timer.Phase) {
	defer testPhase.End()
	for _, dc := range dtc.DaemonConfigs {
		dc.Run()
	}
}

func (dtc *DensityTestConfig) deleteSecrets(testPhase *timer.Phase) {
	defer testPhase.End()
	for i := range dtc.SecretConfigs {
		dtc.SecretConfigs[i].Stop()
	}
}

func (dtc *DensityTestConfig) deleteConfigMaps(testPhase *timer.Phase) {
	defer testPhase.End()
	for i := range dtc.ConfigMapConfigs {
		dtc.ConfigMapConfigs[i].Stop()
	}
}

func (dtc *DensityTestConfig) deleteDaemonSets(numberOfClients int, testPhase *timer.Phase) {
	defer testPhase.End()
	for i := range dtc.DaemonConfigs {
		framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(
			dtc.ClientSets[i%numberOfClients],
			extensions.Kind("DaemonSet"),
			dtc.DaemonConfigs[i].Namespace,
			dtc.DaemonConfigs[i].Name,
		))
	}
}

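// density30AddonResourceVerifier returns the resource constraints expected from
// cluster addons and control-plane components during the 30-pods-per-node
// density test, scaled with the number of nodes and the provider.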
func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
	var apiserverMem uint64
	var controllerMem uint64
	var schedulerMem uint64
	apiserverCPU := math.MaxFloat32
	apiserverMem = math.MaxUint64
	controllerCPU := math.MaxFloat32
	controllerMem = math.MaxUint64
	schedulerCPU := math.MaxFloat32
	schedulerMem = math.MaxUint64
	e2elog.Logf("Setting resource constraints for provider: %s", framework.TestContext.Provider)
	if framework.ProviderIs("kubemark") {
		if numNodes <= 5 {
			apiserverCPU = 0.35
			apiserverMem = 150 * (1024 * 1024)
			controllerCPU = 0.15
			controllerMem = 100 * (1024 * 1024)
			schedulerCPU = 0.05
			schedulerMem = 50 * (1024 * 1024)
		} else if numNodes <= 100 {
			apiserverCPU = 1.5
			apiserverMem = 1500 * (1024 * 1024)
			controllerCPU = 0.5
			controllerMem = 500 * (1024 * 1024)
			schedulerCPU = 0.4
			schedulerMem = 180 * (1024 * 1024)
		} else if numNodes <= 500 {
			apiserverCPU = 3.5
			apiserverMem = 3400 * (1024 * 1024)
			controllerCPU = 1.3
			controllerMem = 1100 * (1024 * 1024)
			schedulerCPU = 1.5
			schedulerMem = 500 * (1024 * 1024)
		} else if numNodes <= 1000 {
			apiserverCPU = 5.5
			apiserverMem = 4000 * (1024 * 1024)
			controllerCPU = 3
			controllerMem = 2000 * (1024 * 1024)
			schedulerCPU = 1.5
			schedulerMem = 750 * (1024 * 1024)
		}
	} else {
		if numNodes <= 100 {
			apiserverCPU = 2.2
			apiserverMem = 1700 * (1024 * 1024)
			controllerCPU = 0.8
			controllerMem = 530 * (1024 * 1024)
			schedulerCPU = 0.4
			schedulerMem = 180 * (1024 * 1024)
		}
	}
	constraints := make(map[string]framework.ResourceConstraint)
	constraints["fluentd-elasticsearch"] = framework.ResourceConstraint{
		CPUConstraint:    0.2,
		MemoryConstraint: 250 * (1024 * 1024),
	}
	constraints["elasticsearch-logging"] = framework.ResourceConstraint{
		CPUConstraint: 2,
		// TODO: bring it down to 750MB again, when we lower Kubelet verbosity level. I.e. revert #19164
		MemoryConstraint: 5000 * (1024 * 1024),
	}
	constraints["heapster"] = framework.ResourceConstraint{
		CPUConstraint:    2,
		MemoryConstraint: 1800 * (1024 * 1024),
	}
	constraints["kibana-logging"] = framework.ResourceConstraint{
		CPUConstraint:    0.2,
		MemoryConstraint: 100 * (1024 * 1024),
	}
	constraints["kube-proxy"] = framework.ResourceConstraint{
		CPUConstraint:    0.15,
		MemoryConstraint: 100 * (1024 * 1024),
	}
	constraints["l7-lb-controller"] = framework.ResourceConstraint{
		CPUConstraint:    0.2 + 0.00015*float64(numNodes),
		MemoryConstraint: (75 + uint64(math.Ceil(0.8*float64(numNodes)))) * (1024 * 1024),
	}
	constraints["influxdb"] = framework.ResourceConstraint{
		CPUConstraint:    2,
		MemoryConstraint: 500 * (1024 * 1024),
	}
	constraints["kube-apiserver"] = framework.ResourceConstraint{
		CPUConstraint:    apiserverCPU,
		MemoryConstraint: apiserverMem,
	}
	constraints["kube-controller-manager"] = framework.ResourceConstraint{
		CPUConstraint:    controllerCPU,
		MemoryConstraint: controllerMem,
	}
	constraints["kube-scheduler"] = framework.ResourceConstraint{
		CPUConstraint:    schedulerCPU,
		MemoryConstraint: schedulerMem,
	}
	constraints["coredns"] = framework.ResourceConstraint{
		CPUConstraint:    framework.NoCPUConstraint,
		MemoryConstraint: 170 * (1024 * 1024),
	}
	constraints["kubedns"] = framework.ResourceConstraint{
		CPUConstraint:    framework.NoCPUConstraint,
		MemoryConstraint: 170 * (1024 * 1024),
	}
	return constraints
}

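// computeAverage returns the arithmetic mean of the sample.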
func computeAverage(sample []float64) float64 {
	sum := 0.0
	for _, value := range sample {
		sum += value
	}
	return sum / float64(len(sample))
}

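// computeQuantile returns the value at the given quantile (0.0-1.0) of a
// sample that must already be sorted in ascending order.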
func computeQuantile(sample []float64, quantile float64) float64 {
	Expect(sort.Float64sAreSorted(sample)).To(Equal(true))
	Expect(quantile >= 0.0 && quantile <= 1.0).To(Equal(true))
	index := int(quantile*float64(len(sample))) - 1
	if index < 0 {
		return math.NaN()
	}
	return sample[index]
}

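// logPodStartupStatus periodically logs the startup status of pods matching
// observedLabels and appends the per-interval scheduling throughput to
// scheduleThroughputs, until stopCh is closed.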
func logPodStartupStatus(
	c clientset.Interface,
	expectedPods int,
	observedLabels map[string]string,
	period time.Duration,
	scheduleThroughputs *[]float64,
	stopCh chan struct{}) {
	label := labels.SelectorFromSet(labels.Set(observedLabels))
	podStore, err := testutils.NewPodStore(c, metav1.NamespaceAll, label, fields.Everything())
	framework.ExpectNoError(err)
	defer podStore.Stop()

	ticker := time.NewTicker(period)
	startupStatus := testutils.ComputeRCStartupStatus(podStore.List(), expectedPods)
	lastScheduledCount := startupStatus.Scheduled
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
		case <-stopCh:
			return
		}
		// Log status of the pods.
		startupStatus := testutils.ComputeRCStartupStatus(podStore.List(), expectedPods)
		e2elog.Logf(startupStatus.String("Density"))
		// Compute scheduling throughput for the latest time period.
		throughput := float64(startupStatus.Scheduled-lastScheduledCount) / float64(period/time.Second)
		*scheduleThroughputs = append(*scheduleThroughputs, throughput)
		lastScheduledCount = startupStatus.Scheduled
	}
}

// runDensityTest will perform a density test and return the time it took for
// all pods to start
func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer, scheduleThroughputs *[]float64) time.Duration {
	defer GinkgoRecover()

	// Create all secrets, configmaps and daemons.
	dtc.runSecretConfigs(testPhaseDurations.StartPhase(250, "secrets creation"))
	dtc.runConfigMapConfigs(testPhaseDurations.StartPhase(260, "configmaps creation"))
	dtc.runDaemonConfigs(testPhaseDurations.StartPhase(270, "daemonsets creation"))

	replicationCtrlStartupPhase := testPhaseDurations.StartPhase(300, "saturation pods creation")
	defer replicationCtrlStartupPhase.End()

	// Start scheduler CPU profile-gatherer before we begin cluster saturation.
	profileGatheringDelay := time.Duration(1+nodeCount/100) * time.Minute
	schedulerProfilingStopCh := framework.StartCPUProfileGatherer("kube-scheduler", "density", profileGatheringDelay)

	// Start all replication controllers.
	startTime := time.Now()
	wg := sync.WaitGroup{}
	wg.Add(len(dtc.Configs))
	for i := range dtc.Configs {
		config := dtc.Configs[i]
		go func() {
			defer GinkgoRecover()
			// Call wg.Done() in defer to avoid blocking whole test
			// in case of error from RunRC.
			defer wg.Done()
			framework.ExpectNoError(config.Run())
		}()
	}
	logStopCh := make(chan struct{})
	go logPodStartupStatus(dtc.ClientSets[0], dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, scheduleThroughputs, logStopCh)
	wg.Wait()
	startupTime := time.Since(startTime)
	close(logStopCh)
	close(schedulerProfilingStopCh)
	e2elog.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
	e2elog.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
	replicationCtrlStartupPhase.End()

	// Grabbing scheduler memory profile after cluster saturation finished.
	wg.Add(1)
	framework.GatherMemoryProfile("kube-scheduler", "density", &wg)
	wg.Wait()

	printPodAllocationPhase := testPhaseDurations.StartPhase(400, "printing pod allocation")
	defer printPodAllocationPhase.End()
	// Print some data about Pod to Node allocation
	By("Printing Pod to Node allocation data")
	podList, err := dtc.ClientSets[0].CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
	framework.ExpectNoError(err)
	pausePodAllocation := make(map[string]int)
	systemPodAllocation := make(map[string][]string)
	for _, pod := range podList.Items {
		if pod.Namespace == metav1.NamespaceSystem {
			systemPodAllocation[pod.Spec.NodeName] = append(systemPodAllocation[pod.Spec.NodeName], pod.Name)
		} else {
			pausePodAllocation[pod.Spec.NodeName]++
		}
	}
	nodeNames := make([]string, 0)
	for k := range pausePodAllocation {
		nodeNames = append(nodeNames, k)
	}
	sort.Strings(nodeNames)
	for _, node := range nodeNames {
		e2elog.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
	}
	defer printPodAllocationPhase.End()
	return startupTime
}

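// cleanupDensityTest deletes the collections created by runDensityTest
// together with their secrets, configmaps and daemonsets.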
func cleanupDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer) {
	defer GinkgoRecover()
	podCleanupPhase := testPhaseDurations.StartPhase(900, "latency pods deletion")
	defer podCleanupPhase.End()
	By("Deleting created Collections")
	numberOfClients := len(dtc.ClientSets)
	// We explicitly delete all pods to have API calls necessary for deletion accounted in metrics.
	for i := range dtc.Configs {
		name := dtc.Configs[i].GetName()
		namespace := dtc.Configs[i].GetNamespace()
		kind := dtc.Configs[i].GetKind()
		By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind))
		err := framework.DeleteResourceAndWaitForGC(dtc.ClientSets[i%numberOfClients], kind, namespace, name)
		framework.ExpectNoError(err)
	}
	podCleanupPhase.End()

	dtc.deleteSecrets(testPhaseDurations.StartPhase(910, "secrets deletion"))
	dtc.deleteConfigMaps(testPhaseDurations.StartPhase(920, "configmaps deletion"))
	dtc.deleteDaemonSets(numberOfClients, testPhaseDurations.StartPhase(930, "daemonsets deletion"))
}

// This test suite can take a long time to run, and can affect or be affected by other tests.
// So by default it is added to the ginkgo.skip list (see driver.go).
// To run this suite you must explicitly ask for it by setting the
// -t/--test flag or ginkgo.focus flag.
// IMPORTANT: This test is designed to work on large (>= 100 Nodes) clusters. For smaller ones
// results will not be representative for control-plane performance as we'll start hitting
// limits on Docker's concurrent container startup.
var _ = SIGDescribe("Density", func() {
	var c clientset.Interface
	var additionalPodsPrefix string
	var ns string
	var uuid string
	var e2eStartupTime time.Duration
	var totalPods int
	var nodeCpuCapacity int64
	var nodeMemCapacity int64
	var nodes *v1.NodeList
	var scheduleThroughputs []float64

	testCaseBaseName := "density"
	missingMeasurements := 0
	var testPhaseDurations *timer.TestPhaseTimer
	var profileGathererStopCh chan struct{}
	var etcdMetricsCollector *framework.EtcdMetricsCollector

	// Gathers data prior to framework namespace teardown
	AfterEach(func() {
		// Stop apiserver CPU profile gatherer and gather memory allocations profile.
		close(profileGathererStopCh)
		wg := sync.WaitGroup{}
		wg.Add(1)
		framework.GatherMemoryProfile("kube-apiserver", "density", &wg)
		wg.Wait()

		saturationThreshold := time.Duration((totalPods / MinPodsPerSecondThroughput)) * time.Second
		if saturationThreshold < MinSaturationThreshold {
			saturationThreshold = MinSaturationThreshold
		}
		Expect(e2eStartupTime).NotTo(BeNumerically(">", saturationThreshold))
		saturationData := saturationTime{
			TimeToSaturate: e2eStartupTime,
			NumberOfNodes:  nodeCount,
			NumberOfPods:   totalPods,
			Throughput:     float32(totalPods) / float32(e2eStartupTime/time.Second),
		}
		e2elog.Logf("Cluster saturation time: %s", framework.PrettyPrintJSON(saturationData))

		summaries := make([]framework.TestDataSummary, 0, 2)
		// Verify latency metrics.
		highLatencyRequests, metrics, err := framework.HighLatencyRequests(c, nodeCount)
		framework.ExpectNoError(err)
		if err == nil {
			summaries = append(summaries, metrics)
		}

		// Summarize scheduler metrics.
		latency, err := framework.VerifySchedulerLatency(c)
		framework.ExpectNoError(err)
		if err == nil {
			// Compute avg and quantiles of throughput (excluding last element, that's usually an outlier).
			sampleSize := len(scheduleThroughputs)
			if sampleSize > 1 {
				scheduleThroughputs = scheduleThroughputs[:sampleSize-1]
				sort.Float64s(scheduleThroughputs)
				latency.ThroughputAverage = computeAverage(scheduleThroughputs)
				latency.ThroughputPerc50 = computeQuantile(scheduleThroughputs, 0.5)
				latency.ThroughputPerc90 = computeQuantile(scheduleThroughputs, 0.9)
				latency.ThroughputPerc99 = computeQuantile(scheduleThroughputs, 0.99)
			}
			summaries = append(summaries, latency)
		}

		// Summarize etcd metrics.
		err = etcdMetricsCollector.StopAndSummarize()
		framework.ExpectNoError(err)
		if err == nil {
			summaries = append(summaries, etcdMetricsCollector.GetMetrics())
		}

		summaries = append(summaries, testPhaseDurations)
		framework.PrintSummaries(summaries, testCaseBaseName)

		// Fail if there were some high-latency requests.
		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
		// Fail if more than the allowed threshold of measurements were missing in the latencyTest.
		Expect(missingMeasurements <= MaxMissingPodStartupMeasurements).To(Equal(true))
	})

	options := framework.Options{
		ClientQPS:   50.0,
		ClientBurst: 100,
	}

	// Explicitly put here, to delete namespace at the end of the test
	// (after measuring latency metrics, etc.).
	f := framework.NewFramework(testCaseBaseName, options, nil)
	f.NamespaceDeletionTimeout = time.Hour

	BeforeEach(func() {
		c = f.ClientSet
		ns = f.Namespace.Name
		testPhaseDurations = timer.NewTestPhaseTimer()

		// This is used to mimic what new service account token volumes will
		// eventually look like. We can remove this once the controller manager
		// publishes the root CA certificate to each namespace.
		c.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Name: "kube-root-ca-crt",
			},
			Data: map[string]string{
				"ca.crt": "trust me, i'm a ca.crt",
			},
		})

		_, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
		nodeCount = len(nodes.Items)
		Expect(nodeCount).NotTo(BeZero())

		// Compute node capacity, leaving some slack for addon pods.
		nodeCpuCapacity = nodes.Items[0].Status.Allocatable.Cpu().MilliValue() - 100
		nodeMemCapacity = nodes.Items[0].Status.Allocatable.Memory().Value() - 100*1024*1024

		// Terminating a namespace (deleting the remaining objects from it - which
		// generally means events) can affect the current run. Thus we wait for all
		// terminating namespace to be finally deleted before starting this test.
		err := framework.CheckTestingNSDeletedExcept(c, ns)
		framework.ExpectNoError(err)

		uuid = string(utiluuid.NewUUID())

		framework.ExpectNoError(framework.ResetSchedulerMetrics(c))
		framework.ExpectNoError(framework.ResetMetrics(c))
		framework.ExpectNoError(os.Mkdir(fmt.Sprintf(framework.TestContext.OutputDir+"/%s", uuid), 0777))

		e2elog.Logf("Listing nodes for easy debugging:\n")
		for _, node := range nodes.Items {
			var internalIP, externalIP string
			for _, address := range node.Status.Addresses {
				if address.Type == v1.NodeInternalIP {
					internalIP = address.Address
				}
				if address.Type == v1.NodeExternalIP {
					externalIP = address.Address
				}
			}
			e2elog.Logf("Name: %v, clusterIP: %v, externalIP: %v", node.ObjectMeta.Name, internalIP, externalIP)
		}

		// Start apiserver CPU profile gatherer with frequency based on cluster size.
		profileGatheringDelay := time.Duration(5+nodeCount/100) * time.Minute
		profileGathererStopCh = framework.StartCPUProfileGatherer("kube-apiserver", "density", profileGatheringDelay)

		// Start etcd metrics collection.
		etcdMetricsCollector = framework.NewEtcdMetricsCollector()
		etcdMetricsCollector.StartCollecting(time.Minute)
	})

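	// Density describes the parameters of a single density test case.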
	type Density struct {
		// Controls if e2e latency tests should be run (they are slow)
		runLatencyTest bool
		podsPerNode    int
		// Controls how often the apiserver is polled for pods
		interval time.Duration
		// What kind of resource we should be creating. Default: ReplicationController
		kind                          schema.GroupKind
		secretsPerPod                 int
		configMapsPerPod              int
		svcacctTokenProjectionsPerPod int
		daemonsPerNode                int
		quotas                        bool
	}

	densityTests := []Density{
		// TODO: Expose runLatencyTest as ginkgo flag.
		{podsPerNode: 3, runLatencyTest: false, kind: api.Kind("ReplicationController")},
		{podsPerNode: 30, runLatencyTest: true, kind: api.Kind("ReplicationController")},
		{podsPerNode: 50, runLatencyTest: false, kind: api.Kind("ReplicationController")},
		{podsPerNode: 95, runLatencyTest: true, kind: api.Kind("ReplicationController")},
		{podsPerNode: 100, runLatencyTest: false, kind: api.Kind("ReplicationController")},
		// Tests for other resource types:
		{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment")},
		{podsPerNode: 30, runLatencyTest: true, kind: batch.Kind("Job")},
		// Test scheduling when daemons are present
		{podsPerNode: 30, runLatencyTest: true, kind: api.Kind("ReplicationController"), daemonsPerNode: 2},
		// Test with secrets
		{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment"), secretsPerPod: 2},
		// Test with configmaps
		{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment"), configMapsPerPod: 2},
		// Test with service account projected volumes
		{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment"), svcacctTokenProjectionsPerPod: 2},
		// Test with quotas
		{podsPerNode: 30, runLatencyTest: true, kind: api.Kind("ReplicationController"), quotas: true},
	}

	isCanonical := func(test *Density) bool {
		return test.kind == api.Kind("ReplicationController") && test.daemonsPerNode == 0 && test.secretsPerPod == 0 && test.configMapsPerPod == 0 && !test.quotas
	}

	for _, testArg := range densityTests {
		feature := "ManualPerformance"
		switch testArg.podsPerNode {
		case 30:
			if isCanonical(&testArg) {
				feature = "Performance"
			}
		case 95:
			feature = "HighDensityPerformance"
		}

		name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node using %v with %v secrets, %v configmaps, %v token projections, and %v daemons",
			feature,
			testArg.podsPerNode,
			testArg.kind,
			testArg.secretsPerPod,
			testArg.configMapsPerPod,
			testArg.svcacctTokenProjectionsPerPod,
			testArg.daemonsPerNode,
		)
		if testArg.quotas {
			name += " with quotas"
		}
		itArg := testArg
		It(name, func() {
			nodePrepPhase := testPhaseDurations.StartPhase(100, "node preparation")
			defer nodePrepPhase.End()
			nodePreparer := framework.NewE2ETestNodePreparer(
				f.ClientSet,
				[]testutils.CountToStrategy{{Count: nodeCount, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
			)
			framework.ExpectNoError(nodePreparer.PrepareNodes())
			defer nodePreparer.CleanupNodes()

			podsPerNode := itArg.podsPerNode
			if podsPerNode == 30 {
				f.AddonResourceConstraints = func() map[string]framework.ResourceConstraint { return density30AddonResourceVerifier(nodeCount) }()
			}
			totalPods = (podsPerNode - itArg.daemonsPerNode) * nodeCount
			fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
			framework.ExpectNoError(err)
			defer fileHndl.Close()
			nodePrepPhase.End()

			// nodeCountPerNamespace and CreateNamespaces are defined in load.go
			numberOfCollections := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
			namespaces, err := CreateNamespaces(f, numberOfCollections, fmt.Sprintf("density-%v", testArg.podsPerNode), testPhaseDurations.StartPhase(200, "namespace creation"))
			framework.ExpectNoError(err)
			if itArg.quotas {
				framework.ExpectNoError(CreateQuotas(f, namespaces, totalPods+nodeCount, testPhaseDurations.StartPhase(210, "quota creation")))
			}

			configs := make([]testutils.RunObjectConfig, numberOfCollections)
			secretConfigs := make([]*testutils.SecretConfig, 0, numberOfCollections*itArg.secretsPerPod)
			configMapConfigs := make([]*testutils.ConfigMapConfig, 0, numberOfCollections*itArg.configMapsPerPod)

			// Since all RCs are created at the same time, timeout for each config
			// has to assume that it will be run at the very end.
			podThroughput := 20
			timeout := time.Duration(totalPods/podThroughput) * time.Second
			if timeout < UnreadyNodeToleration {
				timeout = UnreadyNodeToleration
			}
			timeout += 3 * time.Minute

			// createClients is defined in load.go
			clients, scalesClients, err := createClients(numberOfCollections)
			framework.ExpectNoError(err)
			for i := 0; i < numberOfCollections; i++ {
				nsName := namespaces[i].Name
				secretNames := []string{}
				for j := 0; j < itArg.secretsPerPod; j++ {
					secretName := fmt.Sprintf("density-secret-%v-%v", i, j)
					secretConfigs = append(secretConfigs, &testutils.SecretConfig{
						Content:   map[string]string{"foo": "bar"},
						Client:    clients[i],
						Name:      secretName,
						Namespace: nsName,
						LogFunc:   e2elog.Logf,
					})
					secretNames = append(secretNames, secretName)
				}
				configMapNames := []string{}
				for j := 0; j < itArg.configMapsPerPod; j++ {
					configMapName := fmt.Sprintf("density-configmap-%v-%v", i, j)
					configMapConfigs = append(configMapConfigs, &testutils.ConfigMapConfig{
						Content:   map[string]string{"foo": "bar"},
						Client:    clients[i],
						Name:      configMapName,
						Namespace: nsName,
						LogFunc:   e2elog.Logf,
					})
					configMapNames = append(configMapNames, configMapName)
				}
				name := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
				baseConfig := &testutils.RCConfig{
					Client:                         clients[i],
					ScalesGetter:                   scalesClients[i],
					Image:                          imageutils.GetPauseImageName(),
					Name:                           name,
					Namespace:                      nsName,
					Labels:                         map[string]string{"type": "densityPod"},
					PollInterval:                   DensityPollInterval,
					Timeout:                        timeout,
					PodStatusFile:                  fileHndl,
					Replicas:                       (totalPods + numberOfCollections - 1) / numberOfCollections,
					CpuRequest:                     nodeCpuCapacity / 100,
					MemRequest:                     nodeMemCapacity / 100,
					MaxContainerFailures:           &MaxContainerFailures,
					Silent:                         true,
					LogFunc:                        e2elog.Logf,
					SecretNames:                    secretNames,
					ConfigMapNames:                 configMapNames,
					ServiceAccountTokenProjections: itArg.svcacctTokenProjectionsPerPod,
					Tolerations: []v1.Toleration{
						{
							Key:               "node.kubernetes.io/not-ready",
							Operator:          v1.TolerationOpExists,
							Effect:            v1.TaintEffectNoExecute,
							TolerationSeconds: func(i int64) *int64 { return &i }(int64(UnreadyNodeToleration / time.Second)),
						}, {
							Key:               "node.kubernetes.io/unreachable",
							Operator:          v1.TolerationOpExists,
							Effect:            v1.TaintEffectNoExecute,
							TolerationSeconds: func(i int64) *int64 { return &i }(int64(UnreadyNodeToleration / time.Second)),
						},
					},
				}
				switch itArg.kind {
				case api.Kind("ReplicationController"):
					configs[i] = baseConfig
				case extensions.Kind("ReplicaSet"):
					configs[i] = &testutils.ReplicaSetConfig{RCConfig: *baseConfig}
				case extensions.Kind("Deployment"):
					configs[i] = &testutils.DeploymentConfig{RCConfig: *baseConfig}
				case batch.Kind("Job"):
					configs[i] = &testutils.JobConfig{RCConfig: *baseConfig}
				default:
					framework.Failf("Unsupported kind: %v", itArg.kind)
				}
			}

			// Single client is running out of http2 connections in delete phase, hence we need more.
			clients, scalesClients, err = createClients(2)
			framework.ExpectNoError(err)

			dConfig := DensityTestConfig{
				ClientSets:       clients,
				ScaleClients:     scalesClients,
				Configs:          configs,
				PodCount:         totalPods,
				PollInterval:     DensityPollInterval,
				kind:             itArg.kind,
				SecretConfigs:    secretConfigs,
				ConfigMapConfigs: configMapConfigs,
			}

			for i := 0; i < itArg.daemonsPerNode; i++ {
				dConfig.DaemonConfigs = append(dConfig.DaemonConfigs,
					&testutils.DaemonConfig{
						Client:    f.ClientSet,
						Name:      fmt.Sprintf("density-daemon-%v", i),
						Namespace: f.Namespace.Name,
						LogFunc:   e2elog.Logf,
					})
			}
			e2eStartupTime = runDensityTest(dConfig, testPhaseDurations, &scheduleThroughputs)
			defer cleanupDensityTest(dConfig, testPhaseDurations)

			if itArg.runLatencyTest {
				// Pick latencyPodsIterations so that:
				// latencyPodsIterations * nodeCount >= MinPodStartupMeasurements.
				latencyPodsIterations := (MinPodStartupMeasurements + nodeCount - 1) / nodeCount
				By(fmt.Sprintf("Scheduling additional %d Pods to measure startup latencies", latencyPodsIterations*nodeCount))

				createTimes := make(map[string]metav1.Time, 0)
				nodeNames := make(map[string]string, 0)
				scheduleTimes := make(map[string]metav1.Time, 0)
				runTimes := make(map[string]metav1.Time, 0)
				watchTimes := make(map[string]metav1.Time, 0)

				var mutex sync.Mutex
				checkPod := func(p *v1.Pod) {
					mutex.Lock()
					defer mutex.Unlock()
					defer GinkgoRecover()

					if p.Status.Phase == v1.PodRunning {
						if _, found := watchTimes[p.Name]; !found {
							watchTimes[p.Name] = metav1.Now()
							createTimes[p.Name] = p.CreationTimestamp
							nodeNames[p.Name] = p.Spec.NodeName
							var startTime metav1.Time
							for _, cs := range p.Status.ContainerStatuses {
								if cs.State.Running != nil {
									if startTime.Before(&cs.State.Running.StartedAt) {
										startTime = cs.State.Running.StartedAt
									}
								}
							}
							if startTime != metav1.NewTime(time.Time{}) {
								runTimes[p.Name] = startTime
							} else {
								framework.Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
							}
						}
					}
				}

				additionalPodsPrefix = "density-latency-pod"
				stopCh := make(chan struct{})

				latencyPodStores := make([]cache.Store, len(namespaces))
				for i := 0; i < len(namespaces); i++ {
					nsName := namespaces[i].Name
					latencyPodsStore, controller := cache.NewInformer(
						&cache.ListWatch{
							ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
								options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
								obj, err := c.CoreV1().Pods(nsName).List(options)
								return runtime.Object(obj), err
							},
							WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
								options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
								return c.CoreV1().Pods(nsName).Watch(options)
							},
						},
						&v1.Pod{},
						0,
						cache.ResourceEventHandlerFuncs{
							AddFunc: func(obj interface{}) {
								p, ok := obj.(*v1.Pod)
								if !ok {
									e2elog.Logf("Failed to cast observed object to *v1.Pod.")
								}
								Expect(ok).To(Equal(true))
								go checkPod(p)
							},
							UpdateFunc: func(oldObj, newObj interface{}) {
								p, ok := newObj.(*v1.Pod)
								if !ok {
									e2elog.Logf("Failed to cast observed object to *v1.Pod.")
								}
								Expect(ok).To(Equal(true))
								go checkPod(p)
							},
						},
					)
					latencyPodStores[i] = latencyPodsStore

					go controller.Run(stopCh)
				}

				for latencyPodsIteration := 0; latencyPodsIteration < latencyPodsIterations; latencyPodsIteration++ {
					podIndexOffset := latencyPodsIteration * nodeCount
					e2elog.Logf("Creating %d latency pods in range [%d, %d]", nodeCount, podIndexOffset+1, podIndexOffset+nodeCount)

					watchTimesLen := len(watchTimes)

					// Create some additional pods with throughput ~5 pods/sec.
					latencyPodStartupPhase := testPhaseDurations.StartPhase(800+latencyPodsIteration*10, "latency pods creation")
					defer latencyPodStartupPhase.End()
					var wg sync.WaitGroup
					wg.Add(nodeCount)

					// Explicitly set requests here.
					// Thanks to it we trigger increasing priority function by scheduling
					// a pod to a node, which in turn will result in spreading latency pods
					// more evenly between nodes.
					cpuRequest := *resource.NewMilliQuantity(nodeCpuCapacity/5, resource.DecimalSI)
					memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)
					if podsPerNode > 30 {
						// This is to make them schedulable on high-density tests
						// (e.g. 100 pods/node kubemark).
						cpuRequest = *resource.NewMilliQuantity(0, resource.DecimalSI)
						memRequest = *resource.NewQuantity(0, resource.DecimalSI)
					}

					rcNameToNsMap := map[string]string{}
					for i := 1; i <= nodeCount; i++ {
						name := additionalPodsPrefix + "-" + strconv.Itoa(podIndexOffset+i)
						nsName := namespaces[i%len(namespaces)].Name
						rcNameToNsMap[name] = nsName
						go createRunningPodFromRC(&wg, c, name, nsName, imageutils.GetPauseImageName(), additionalPodsPrefix, cpuRequest, memRequest)
						time.Sleep(200 * time.Millisecond)
					}
					wg.Wait()
					latencyPodStartupPhase.End()

					latencyMeasurementPhase := testPhaseDurations.StartPhase(801+latencyPodsIteration*10, "pod startup latencies measurement")
					defer latencyMeasurementPhase.End()
					By("Waiting for all Pods to be observed by the watch...")
					waitTimeout := 10 * time.Minute
					for start := time.Now(); len(watchTimes) < watchTimesLen+nodeCount; time.Sleep(10 * time.Second) {
						if time.Since(start) > waitTimeout {
							framework.Failf("Timeout reached waiting for all Pods being observed by the watch.")
						}
					}

					nodeToLatencyPods := make(map[string]int)
					for i := range latencyPodStores {
						for _, item := range latencyPodStores[i].List() {
							pod := item.(*v1.Pod)
							nodeToLatencyPods[pod.Spec.NodeName]++
						}
						for node, count := range nodeToLatencyPods {
							if count > 1 {
								e2elog.Logf("%d latency pods scheduled on %s", count, node)
							}
						}
					}
					latencyMeasurementPhase.End()

					By("Removing additional replication controllers")
					podDeletionPhase := testPhaseDurations.StartPhase(802+latencyPodsIteration*10, "latency pods deletion")
					defer podDeletionPhase.End()
					deleteRC := func(i int) {
						defer GinkgoRecover()
						name := additionalPodsPrefix + "-" + strconv.Itoa(podIndexOffset+i+1)
						framework.ExpectNoError(framework.DeleteRCAndWaitForGC(c, rcNameToNsMap[name], name))
					}
					workqueue.ParallelizeUntil(context.TODO(), 25, nodeCount, deleteRC)
					podDeletionPhase.End()
				}
				close(stopCh)

				for i := 0; i < len(namespaces); i++ {
					nsName := namespaces[i].Name
					selector := fields.Set{
						"involvedObject.kind":      "Pod",
						"involvedObject.namespace": nsName,
						"source":                   v1.DefaultSchedulerName,
					}.AsSelector().String()
					options := metav1.ListOptions{FieldSelector: selector}
					schedEvents, err := c.CoreV1().Events(nsName).List(options)
					framework.ExpectNoError(err)
					for k := range createTimes {
						for _, event := range schedEvents.Items {
							if event.InvolvedObject.Name == k {
								scheduleTimes[k] = event.FirstTimestamp
								break
							}
						}
					}
				}

				scheduleLag := make([]framework.PodLatencyData, 0)
				startupLag := make([]framework.PodLatencyData, 0)
				watchLag := make([]framework.PodLatencyData, 0)
				schedToWatchLag := make([]framework.PodLatencyData, 0)
				e2eLag := make([]framework.PodLatencyData, 0)

				for name, create := range createTimes {
					sched, ok := scheduleTimes[name]
					if !ok {
						e2elog.Logf("Failed to find schedule time for %v", name)
						missingMeasurements++
					}
					run, ok := runTimes[name]
					if !ok {
						e2elog.Logf("Failed to find run time for %v", name)
						missingMeasurements++
					}
					watch, ok := watchTimes[name]
					if !ok {
						e2elog.Logf("Failed to find watch time for %v", name)
						missingMeasurements++
					}
					node, ok := nodeNames[name]
					if !ok {
						e2elog.Logf("Failed to find node for %v", name)
						missingMeasurements++
					}

					scheduleLag = append(scheduleLag, framework.PodLatencyData{Name: name, Node: node, Latency: sched.Time.Sub(create.Time)})
					startupLag = append(startupLag, framework.PodLatencyData{Name: name, Node: node, Latency: run.Time.Sub(sched.Time)})
					watchLag = append(watchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(run.Time)})
					schedToWatchLag = append(schedToWatchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(sched.Time)})
					e2eLag = append(e2eLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(create.Time)})
				}

				sort.Sort(framework.LatencySlice(scheduleLag))
				sort.Sort(framework.LatencySlice(startupLag))
				sort.Sort(framework.LatencySlice(watchLag))
				sort.Sort(framework.LatencySlice(schedToWatchLag))
				sort.Sort(framework.LatencySlice(e2eLag))

				framework.PrintLatencies(scheduleLag, "worst create-to-schedule latencies")
				framework.PrintLatencies(startupLag, "worst schedule-to-run latencies")
				framework.PrintLatencies(watchLag, "worst run-to-watch latencies")
				framework.PrintLatencies(schedToWatchLag, "worst schedule-to-watch latencies")
				framework.PrintLatencies(e2eLag, "worst e2e latencies")

				// Capture latency metrics related to pod-startup.
				podStartupLatency := &framework.PodStartupLatency{
					CreateToScheduleLatency: framework.ExtractLatencyMetrics(scheduleLag),
					ScheduleToRunLatency:    framework.ExtractLatencyMetrics(startupLag),
					RunToWatchLatency:       framework.ExtractLatencyMetrics(watchLag),
					ScheduleToWatchLatency:  framework.ExtractLatencyMetrics(schedToWatchLag),
					E2ELatency:              framework.ExtractLatencyMetrics(e2eLag),
				}
				f.TestSummaries = append(f.TestSummaries, podStartupLatency)

				// Test whether e2e pod startup time is acceptable.
				podStartupLatencyThreshold := framework.LatencyMetric{
					Perc50: PodStartupLatencyThreshold,
					Perc90: PodStartupLatencyThreshold,
					Perc99: PodStartupLatencyThreshold,
				}
				framework.ExpectNoError(framework.VerifyLatencyWithinThreshold(podStartupLatencyThreshold, podStartupLatency.E2ELatency, "pod startup"))

				framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
			}
		})
	}
})

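// createRunningPodFromRC creates a single-replica ReplicationController with the
// given image and resource requests, then waits until its pod is running.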
func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
	defer GinkgoRecover()
	defer wg.Done()
	labels := map[string]string{
		"type": podType,
		"name": name,
	}
	rc := &v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: func(i int) *int32 { x := int32(i); return &x }(1),
			Selector: labels,
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  name,
							Image: image,
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceCPU:    cpuRequest,
									v1.ResourceMemory: memRequest,
								},
							},
						},
					},
					DNSPolicy: v1.DNSDefault,
				},
			},
		},
	}
	framework.ExpectNoError(testutils.CreateRCWithRetries(c, ns, rc))
	framework.ExpectNoError(framework.WaitForControlledPodsRunning(c, ns, name, api.Kind("ReplicationController")))
	e2elog.Logf("Found pod '%s' running", name)
}