legacy_horizontal_test.go
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package podautoscaler

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"math"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	restclient "k8s.io/client-go/rest"
	scalefake "k8s.io/client-go/scale/fake"
	core "k8s.io/client-go/testing"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"

	heapster "k8s.io/heapster/metrics/api/v1/types"
	metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"

	"github.com/stretchr/testify/assert"

	_ "k8s.io/kubernetes/pkg/apis/apps/install"
	_ "k8s.io/kubernetes/pkg/apis/autoscaling/install"
	_ "k8s.io/kubernetes/pkg/apis/core/install"
)
func (w fakeResponseWrapper) DoRaw(context.Context) ([]byte, error) {
	return w.raw, nil
}

func (w fakeResponseWrapper) Stream(context.Context) (io.ReadCloser, error) {
	return nil, nil
}

func newFakeResponseWrapper(raw []byte) fakeResponseWrapper {
	return fakeResponseWrapper{raw: raw}
}

type fakeResponseWrapper struct {
	raw []byte
}

type legacyTestCase struct {
	sync.Mutex
	minReplicas     int32
	maxReplicas     int32
	initialReplicas int32
	desiredReplicas int32

	// CPU target utilization as a percentage of the requested resources.
	CPUTarget            int32
	CPUCurrent           int32
	verifyCPUCurrent     bool
	reportedLevels       []uint64
	reportedCPURequests  []resource.Quantity
	reportedPodReadiness []v1.ConditionStatus
	scaleUpdated         bool
	statusUpdated        bool
	eventCreated         bool
	verifyEvents         bool
	useMetricsAPI        bool
	metricsTarget        []autoscalingv2.MetricSpec
	// Channel with names of HPA objects which we have reconciled.
	processed chan string

	// Target resource information.
	resource *fakeResource

	// Last scale time
	lastScaleTime *metav1.Time

	recommendations []timestampedRecommendation
	finished        bool
}

// Needs to be called under a lock.
func (tc *legacyTestCase) computeCPUCurrent() {
	if len(tc.reportedLevels) != len(tc.reportedCPURequests) || len(tc.reportedLevels) == 0 {
		return
	}
	reported := 0
	for _, r := range tc.reportedLevels {
		reported += int(r)
	}
	requested := 0
	for _, req := range tc.reportedCPURequests {
		requested += int(req.MilliValue())
	}
	tc.CPUCurrent = int32(100 * reported / requested)
}
func (tc *legacyTestCase) prepareTestClient(t *testing.T) (*fake.Clientset, *scalefake.FakeScaleClient) {
	namespace := "test-namespace"
	hpaName := "test-hpa"
	podNamePrefix := "test-pod"
	labelSet := map[string]string{"name": podNamePrefix}
	selector := labels.SelectorFromSet(labelSet).String()

	tc.Lock()

	tc.scaleUpdated = false
	tc.statusUpdated = false
	tc.eventCreated = false
	tc.processed = make(chan string, 100)
	if tc.CPUCurrent == 0 {
		tc.computeCPUCurrent()
	}

	if tc.resource == nil {
		tc.resource = &fakeResource{
			name:       "test-rc",
			apiVersion: "v1",
			kind:       "ReplicationController",
		}
	}
	tc.Unlock()

	fakeClient := &fake.Clientset{}
	fakeClient.AddReactor("list", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &autoscalingv2.HorizontalPodAutoscalerList{
			Items: []autoscalingv2.HorizontalPodAutoscaler{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      hpaName,
						Namespace: namespace,
						SelfLink:  "experimental/v1/namespaces/" + namespace + "/horizontalpodautoscalers/" + hpaName,
					},
					Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
						ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
							Kind:       tc.resource.kind,
							Name:       tc.resource.name,
							APIVersion: tc.resource.apiVersion,
						},
						MinReplicas: &tc.minReplicas,
						MaxReplicas: tc.maxReplicas,
					},
					Status: autoscalingv2.HorizontalPodAutoscalerStatus{
						CurrentReplicas: tc.initialReplicas,
						DesiredReplicas: tc.initialReplicas,
					},
				},
			},
		}

		if tc.CPUTarget > 0 {
			obj.Items[0].Spec.Metrics = []autoscalingv2.MetricSpec{
				{
					Type: autoscalingv2.ResourceMetricSourceType,
					Resource: &autoscalingv2.ResourceMetricSource{
						Name:                     v1.ResourceCPU,
						TargetAverageUtilization: &tc.CPUTarget,
					},
				},
			}
		}
		if len(tc.metricsTarget) > 0 {
			obj.Items[0].Spec.Metrics = append(obj.Items[0].Spec.Metrics, tc.metricsTarget...)
		}

		if len(obj.Items[0].Spec.Metrics) == 0 {
			// manually add in the defaulting logic
			obj.Items[0].Spec.Metrics = []autoscalingv2.MetricSpec{
				{
					Type: autoscalingv2.ResourceMetricSourceType,
					Resource: &autoscalingv2.ResourceMetricSource{
						Name: v1.ResourceCPU,
					},
				},
			}
		}

		// and... convert to autoscaling v1 to return the right type
		objv1, err := unsafeConvertToVersionVia(obj, autoscalingv1.SchemeGroupVersion)
		if err != nil {
			return true, nil, err
		}

		return true, objv1, nil
	})
	fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &v1.PodList{}
		for i := 0; i < len(tc.reportedCPURequests); i++ {
			podReadiness := v1.ConditionTrue
			if tc.reportedPodReadiness != nil {
				podReadiness = tc.reportedPodReadiness[i]
			}
			podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
			pod := v1.Pod{
				Status: v1.PodStatus{
					StartTime: &metav1.Time{Time: time.Now().Add(-3 * time.Minute)},
					Phase:     v1.PodRunning,
					Conditions: []v1.PodCondition{
						{
							Type:   v1.PodReady,
							Status: podReadiness,
						},
					},
				},
				ObjectMeta: metav1.ObjectMeta{
					Name:      podName,
					Namespace: namespace,
					Labels: map[string]string{
						"name": podNamePrefix,
					},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceCPU: tc.reportedCPURequests[i],
								},
							},
						},
					},
				},
			}
			obj.Items = append(obj.Items, pod)
		}
		return true, obj, nil
	})
	fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
		tc.Lock()
		defer tc.Unlock()

		var heapsterRawMemResponse []byte

		if tc.useMetricsAPI {
			metrics := metricsapi.PodMetricsList{}
			for i, cpu := range tc.reportedLevels {
				podMetric := metricsapi.PodMetrics{
					ObjectMeta: metav1.ObjectMeta{
						Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
						Namespace: namespace,
					},
					Timestamp: metav1.Time{Time: time.Now()},
					Containers: []metricsapi.ContainerMetrics{
						{
							Name: "container",
							Usage: v1.ResourceList{
								v1.ResourceCPU: *resource.NewMilliQuantity(
									int64(cpu),
									resource.DecimalSI),
								v1.ResourceMemory: *resource.NewQuantity(
									int64(1024*1024),
									resource.BinarySI),
							},
						},
					},
				}
				metrics.Items = append(metrics.Items, podMetric)
			}
			heapsterRawMemResponse, _ = json.Marshal(&metrics)
		} else {
			// only return the pods that we actually asked for
			proxyAction := action.(core.ProxyGetAction)
			pathParts := strings.Split(proxyAction.GetPath(), "/")
			// pathParts should look like [ api, v1, model, namespaces, $NS, pod-list, $PODS, metrics, $METRIC... ]
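			// For illustration only (a hypothetical request; the exact metric name depends on the
			// heapster client in use):
			//   "/api/v1/model/namespaces/test-namespace/pod-list/test-pod-0,test-pod-1/metrics/cpu-usage"
			// Because the path starts with "/", Split produces an empty first element, which is why
			// the pod list ends up at pathParts[7] below.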
			if len(pathParts) < 9 {
				return true, nil, fmt.Errorf("invalid heapster path %q", proxyAction.GetPath())
			}

			podNames := strings.Split(pathParts[7], ",")
			podPresent := make([]bool, len(tc.reportedLevels))
			for _, name := range podNames {
				if len(name) <= len(podNamePrefix)+1 {
					return true, nil, fmt.Errorf("unknown pod %q", name)
				}
				num, err := strconv.Atoi(name[len(podNamePrefix)+1:])
				if err != nil {
					return true, nil, fmt.Errorf("unknown pod %q", name)
				}
				podPresent[num] = true
			}

			timestamp := time.Now()
			metrics := heapster.MetricResultList{}
			for i, level := range tc.reportedLevels {
				if !podPresent[i] {
					continue
				}

				metric := heapster.MetricResult{
					Metrics:         []heapster.MetricPoint{{Timestamp: timestamp, Value: level, FloatValue: nil}},
					LatestTimestamp: timestamp,
				}
				metrics.Items = append(metrics.Items, metric)
			}
			heapsterRawMemResponse, _ = json.Marshal(&metrics)
		}

		return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
	})
	fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		obj := func() *autoscalingv1.HorizontalPodAutoscaler {
			tc.Lock()
			defer tc.Unlock()

			obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.HorizontalPodAutoscaler)
			assert.Equal(t, namespace, obj.Namespace, "the HPA namespace should be as expected")
			assert.Equal(t, hpaName, obj.Name, "the HPA name should be as expected")
			assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected")
			if tc.verifyCPUCurrent {
				if assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil") {
					assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be as expected")
				}
			}
			tc.statusUpdated = true

			return obj
		}()
		// Every time we reconcile HPA object we are updating status.
		tc.processed <- obj.Name
		return true, obj, nil
	})
	fakeScaleClient := &scalefake.FakeScaleClient{}
	fakeScaleClient.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &autoscalingv1.Scale{
			ObjectMeta: metav1.ObjectMeta{
				Name:      tc.resource.name,
				Namespace: namespace,
			},
			Spec: autoscalingv1.ScaleSpec{
				Replicas: tc.initialReplicas,
			},
			Status: autoscalingv1.ScaleStatus{
				Replicas: tc.initialReplicas,
				Selector: selector,
			},
		}
		return true, obj, nil
	})

	fakeScaleClient.AddReactor("get", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &autoscalingv1.Scale{
			ObjectMeta: metav1.ObjectMeta{
				Name:      tc.resource.name,
				Namespace: namespace,
			},
			Spec: autoscalingv1.ScaleSpec{
				Replicas: tc.initialReplicas,
			},
			Status: autoscalingv1.ScaleStatus{
				Replicas: tc.initialReplicas,
				Selector: selector,
			},
		}
		return true, obj, nil
	})

	fakeScaleClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &autoscalingv1.Scale{
			ObjectMeta: metav1.ObjectMeta{
				Name:      tc.resource.name,
				Namespace: namespace,
			},
			Spec: autoscalingv1.ScaleSpec{
				Replicas: tc.initialReplicas,
			},
			Status: autoscalingv1.ScaleStatus{
				Replicas: tc.initialReplicas,
				Selector: selector,
			},
		}
		return true, obj, nil
	})

	fakeScaleClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the RC should be as expected")
		tc.scaleUpdated = true
		return true, obj, nil
	})

	fakeScaleClient.AddReactor("update", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the deployment should be as expected")
		tc.scaleUpdated = true
		return true, obj, nil
	})

	fakeScaleClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the replicaset should be as expected")
		tc.scaleUpdated = true
		return true, obj, nil
	})

	fakeWatch := watch.NewFake()
	fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))

	return fakeClient, fakeScaleClient
}
func (tc *legacyTestCase) verifyResults(t *testing.T) {
	tc.Lock()
	defer tc.Unlock()

	assert.Equal(t, tc.initialReplicas != tc.desiredReplicas, tc.scaleUpdated, "the scale should only be updated if we expected a change in replicas")
	assert.True(t, tc.statusUpdated, "the status should have been updated")
	if tc.verifyEvents {
		assert.Equal(t, tc.initialReplicas != tc.desiredReplicas, tc.eventCreated, "an event should have been created only if we expected a change in replicas")
	}
}
func (tc *legacyTestCase) runTest(t *testing.T) {
	testClient, testScaleClient := tc.prepareTestClient(t)
	metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)

	eventClient := &fake.Clientset{}
	eventClient.AddReactor("*", "events", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()
		if tc.finished {
			return true, &v1.Event{}, nil
		}
		create, ok := action.(core.CreateAction)
		if !ok {
			return false, nil, nil
		}
		obj := create.GetObject().(*v1.Event)
		if tc.verifyEvents {
			switch obj.Reason {
			case "SuccessfulRescale":
				assert.Equal(t, fmt.Sprintf("New size: %d; reason: cpu resource utilization (percentage of request) above target", tc.desiredReplicas), obj.Message)
			case "DesiredReplicasComputed":
				assert.Equal(t, fmt.Sprintf(
					"Computed the desired num of replicas: %d (avgCPUutil: %d, current replicas: %d)",
					tc.desiredReplicas,
					(int64(tc.reportedLevels[0])*100)/tc.reportedCPURequests[0].MilliValue(), tc.initialReplicas), obj.Message)
			default:
				assert.False(t, true, fmt.Sprintf("Unexpected event: %s / %s", obj.Reason, obj.Message))
			}
		}
		tc.eventCreated = true
		return true, obj, nil
	})

	informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
	defaultDownscaleStabilisationWindow := 5 * time.Minute

	hpaController := NewHorizontalController(
		eventClient.CoreV1(),
		testScaleClient,
		testClient.AutoscalingV1(),
		testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme),
		metricsClient,
		informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
		informerFactory.Core().V1().Pods(),
		controller.NoResyncPeriodFunc(),
		defaultDownscaleStabilisationWindow,
		defaultTestingTolerance,
		defaultTestingCpuInitializationPeriod,
		defaultTestingDelayOfInitialReadinessStatus,
	)
	hpaController.hpaListerSynced = alwaysReady
	if tc.recommendations != nil {
		hpaController.recommendations["test-namespace/test-hpa"] = tc.recommendations
	}

	stop := make(chan struct{})
	defer close(stop)
	informerFactory.Start(stop)
	go hpaController.Run(stop)

	// Wait for HPA to be processed.
	<-tc.processed
	tc.Lock()

	tc.finished = true
	if tc.verifyEvents {
		tc.Unlock()
		// We need to wait for events to be broadcasted (sleep for longer than record.sleepDuration).
		time.Sleep(2 * time.Second)
	} else {
		tc.Unlock()
	}

	tc.verifyResults(t)
}
func TestLegacyScaleUp(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     3,
		desiredReplicas:     5,
		CPUTarget:           30,
		verifyCPUCurrent:    true,
		reportedLevels:      []uint64{300, 500, 700},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}

func TestLegacyScaleUpUnreadyLessScale(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:          2,
		maxReplicas:          6,
		initialReplicas:      3,
		desiredReplicas:      4,
		CPUTarget:            30,
		verifyCPUCurrent:     false,
		reportedLevels:       []uint64{300, 500, 700},
		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
		useMetricsAPI:        true,
	}
	tc.runTest(t)
}

func TestLegacyScaleUpUnreadyNoScale(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:          2,
		maxReplicas:          6,
		initialReplicas:      3,
		desiredReplicas:      3,
		CPUTarget:            30,
		CPUCurrent:           40,
		verifyCPUCurrent:     true,
		reportedLevels:       []uint64{400, 500, 700},
		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		useMetricsAPI:        true,
	}
	tc.runTest(t)
}

func TestLegacyScaleUpDeployment(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     3,
		desiredReplicas:     5,
		CPUTarget:           30,
		verifyCPUCurrent:    true,
		reportedLevels:      []uint64{300, 500, 700},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:       true,
		resource: &fakeResource{
			name:       "test-dep",
			apiVersion: "apps/v1",
			kind:       "Deployment",
		},
	}
	tc.runTest(t)
}

func TestLegacyScaleUpReplicaSet(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     3,
		desiredReplicas:     5,
		CPUTarget:           30,
		verifyCPUCurrent:    true,
		reportedLevels:      []uint64{300, 500, 700},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:       true,
		resource: &fakeResource{
			name:       "test-replicaset",
			apiVersion: "apps/v1",
			kind:       "ReplicaSet",
		},
	}
	tc.runTest(t)
}

func TestLegacyScaleUpCM(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:     2,
		maxReplicas:     6,
		initialReplicas: 3,
		desiredReplicas: 4,
		CPUTarget:       0,
		metricsTarget: []autoscalingv2.MetricSpec{
			{
				Type: autoscalingv2.PodsMetricSourceType,
				Pods: &autoscalingv2.PodsMetricSource{
					MetricName:         "qps",
					TargetAverageValue: resource.MustParse("15.0"),
				},
			},
		},
		reportedLevels:      []uint64{20, 10, 30},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
	}
	tc.runTest(t)
}

func TestLegacyScaleUpCMUnreadyNoLessScale(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:     2,
		maxReplicas:     6,
		initialReplicas: 3,
		desiredReplicas: 6,
		CPUTarget:       0,
		metricsTarget: []autoscalingv2.MetricSpec{
			{
				Type: autoscalingv2.PodsMetricSourceType,
				Pods: &autoscalingv2.PodsMetricSource{
					MetricName:         "qps",
					TargetAverageValue: resource.MustParse("15.0"),
				},
			},
		},
		reportedLevels:       []uint64{50, 10, 30},
		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
	}
	tc.runTest(t)
}

func TestLegacyScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:     2,
		maxReplicas:     6,
		initialReplicas: 3,
		desiredReplicas: 6,
		CPUTarget:       0,
		metricsTarget: []autoscalingv2.MetricSpec{
			{
				Type: autoscalingv2.PodsMetricSourceType,
				Pods: &autoscalingv2.PodsMetricSource{
					MetricName:         "qps",
					TargetAverageValue: resource.MustParse("15.0"),
				},
			},
		},
		reportedLevels:       []uint64{50, 15, 30},
		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
	}
	tc.runTest(t)
}

func TestLegacyScaleDown(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     5,
		desiredReplicas:     3,
		CPUTarget:           50,
		verifyCPUCurrent:    true,
		reportedLevels:      []uint64{100, 300, 500, 250, 250},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:       true,
		recommendations:     []timestampedRecommendation{},
	}
	tc.runTest(t)
}

func TestLegacyScaleDownCM(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:     2,
		maxReplicas:     6,
		initialReplicas: 5,
		desiredReplicas: 3,
		CPUTarget:       0,
		metricsTarget: []autoscalingv2.MetricSpec{
			{
				Type: autoscalingv2.PodsMetricSourceType,
				Pods: &autoscalingv2.PodsMetricSource{
					MetricName:         "qps",
					TargetAverageValue: resource.MustParse("20.0"),
				},
			},
		},
		reportedLevels:      []uint64{12, 12, 12, 12, 12},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		recommendations:     []timestampedRecommendation{},
	}
	tc.runTest(t)
}

func TestLegacyScaleDownIgnoresUnreadyPods(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:          2,
		maxReplicas:          6,
		initialReplicas:      5,
		desiredReplicas:      2,
		CPUTarget:            50,
		CPUCurrent:           30,
		verifyCPUCurrent:     true,
		reportedLevels:       []uint64{100, 300, 500, 250, 250},
		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:        true,
		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		recommendations:      []timestampedRecommendation{},
	}
	tc.runTest(t)
}

func LegacyTestTolerance(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         1,
		maxReplicas:         5,
		initialReplicas:     3,
		desiredReplicas:     3,
		CPUTarget:           100,
		reportedLevels:      []uint64{1010, 1030, 1020},
		reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}

func LegacyTestToleranceCM(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:     1,
		maxReplicas:     5,
		initialReplicas: 3,
		desiredReplicas: 3,
		metricsTarget: []autoscalingv2.MetricSpec{
			{
				Type: autoscalingv2.PodsMetricSourceType,
				Pods: &autoscalingv2.PodsMetricSource{
					MetricName:         "qps",
					TargetAverageValue: resource.MustParse("20.0"),
				},
			},
		},
		reportedLevels:      []uint64{20, 21, 21},
		reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
	}
	tc.runTest(t)
}

func LegacyTestMinReplicas(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         5,
		initialReplicas:     3,
		desiredReplicas:     2,
		CPUTarget:           90,
		reportedLevels:      []uint64{10, 95, 10},
		reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}

func LegacyTestZeroReplicas(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         3,
		maxReplicas:         5,
		initialReplicas:     0,
		desiredReplicas:     0,
		CPUTarget:           90,
		reportedLevels:      []uint64{},
		reportedCPURequests: []resource.Quantity{},
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}

func LegacyTestTooFewReplicas(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         3,
		maxReplicas:         5,
		initialReplicas:     2,
		desiredReplicas:     3,
		CPUTarget:           90,
		reportedLevels:      []uint64{},
		reportedCPURequests: []resource.Quantity{},
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}

func LegacyTestTooManyReplicas(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         3,
		maxReplicas:         5,
		initialReplicas:     10,
		desiredReplicas:     5,
		CPUTarget:           90,
		reportedLevels:      []uint64{},
		reportedCPURequests: []resource.Quantity{},
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}

func LegacyTestMaxReplicas(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         5,
		initialReplicas:     3,
		desiredReplicas:     5,
		CPUTarget:           90,
		reportedLevels:      []uint64{8000, 9500, 1000},
		reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}

func TestLegacySuperfluousMetrics(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     4,
		desiredReplicas:     6,
		CPUTarget:           100,
		reportedLevels:      []uint64{4000, 9500, 3000, 7000, 3200, 2000},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}

func LegacyTestMissingMetrics(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     4,
		desiredReplicas:     3,
		CPUTarget:           100,
		reportedLevels:      []uint64{400, 95},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}

func LegacyTestEmptyMetrics(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     4,
		desiredReplicas:     4,
		CPUTarget:           100,
		reportedLevels:      []uint64{},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}

func LegacyTestEmptyCPURequest(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:     1,
		maxReplicas:     5,
		initialReplicas: 1,
		desiredReplicas: 1,
		CPUTarget:       100,
		reportedLevels:  []uint64{200},
		useMetricsAPI:   true,
	}
	tc.runTest(t)
}

func LegacyTestEventCreated(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         1,
		maxReplicas:         5,
		initialReplicas:     1,
		desiredReplicas:     2,
		CPUTarget:           50,
		reportedLevels:      []uint64{200},
		reportedCPURequests: []resource.Quantity{resource.MustParse("0.2")},
		verifyEvents:        true,
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}

func LegacyTestEventNotCreated(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         1,
		maxReplicas:         5,
		initialReplicas:     2,
		desiredReplicas:     2,
		CPUTarget:           50,
		reportedLevels:      []uint64{200, 200},
		reportedCPURequests: []resource.Quantity{resource.MustParse("0.4"), resource.MustParse("0.4")},
		verifyEvents:        true,
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}

func LegacyTestMissingReports(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         1,
		maxReplicas:         5,
		initialReplicas:     4,
		desiredReplicas:     2,
		CPUTarget:           50,
		reportedLevels:      []uint64{200},
		reportedCPURequests: []resource.Quantity{resource.MustParse("0.2")},
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}

func LegacyTestUpscaleCap(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         1,
		maxReplicas:         100,
		initialReplicas:     3,
		desiredReplicas:     6,
		CPUTarget:           10,
		reportedLevels:      []uint64{100, 200, 300},
		reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}
// LegacyTestComputedToleranceAlgImplementation is a regression test which
// back-calculates a minimal percentage for downscaling based on a small percentage
// increase in pod utilization which is calibrated against the tolerance value.
func LegacyTestComputedToleranceAlgImplementation(t *testing.T) {
	startPods := int32(10)
	// 150 mCPU per pod.
	totalUsedCPUOfAllPods := uint64(startPods * 150)
	// Each pod starts out asking for 2X what is really needed.
	// This means we will have a 50% ratio of used/requested
	totalRequestedCPUOfAllPods := int32(2 * totalUsedCPUOfAllPods)
	requestedToUsed := float64(totalRequestedCPUOfAllPods / int32(totalUsedCPUOfAllPods))
	// Spread the amount we ask over 10 pods. We can add some jitter later in reportedLevels.
	perPodRequested := totalRequestedCPUOfAllPods / startPods

	// Force a minimal scaling event by satisfying (tolerance < 1 - resourcesUsedRatio).
	target := math.Abs(1/(requestedToUsed*(1-defaultTestingTolerance))) + .01
	finalCPUPercentTarget := int32(target * 100)
	resourcesUsedRatio := float64(totalUsedCPUOfAllPods) / float64(float64(totalRequestedCPUOfAllPods)*target)

	// i.e. .60 * 20 -> scaled down expectation.
	finalPods := int32(math.Ceil(resourcesUsedRatio * float64(startPods)))
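
	// A minimal worked example of the back-calculation above, assuming defaultTestingTolerance
	// is 0.1 (its value elsewhere in this package's tests; treat the exact number as an assumption):
	//   totalUsedCPUOfAllPods      = 10 * 150             = 1500 mCPU
	//   totalRequestedCPUOfAllPods = 2 * 1500              = 3000 mCPU (so requestedToUsed = 2)
	//   target                     = 1/(2*0.9) + 0.01      ≈ 0.5656    (finalCPUPercentTarget = 56)
	//   resourcesUsedRatio         = 1500 / (3000*0.5656)  ≈ 0.884
	//   finalPods                  = ceil(0.884 * 10)      = 9, i.e. a downscale from 10 to 9 replicas.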
	// To breach tolerance we will create a utilization ratio difference of tolerance to usageRatioToleranceValue.
	tc := legacyTestCase{
		minReplicas:     0,
		maxReplicas:     1000,
		initialReplicas: startPods,
		desiredReplicas: finalPods,
		CPUTarget:       finalCPUPercentTarget,
		reportedLevels: []uint64{
			totalUsedCPUOfAllPods / 10,
			totalUsedCPUOfAllPods / 10,
			totalUsedCPUOfAllPods / 10,
			totalUsedCPUOfAllPods / 10,
			totalUsedCPUOfAllPods / 10,
			totalUsedCPUOfAllPods / 10,
			totalUsedCPUOfAllPods / 10,
			totalUsedCPUOfAllPods / 10,
			totalUsedCPUOfAllPods / 10,
			totalUsedCPUOfAllPods / 10,
		},
		reportedCPURequests: []resource.Quantity{
			resource.MustParse(fmt.Sprint(perPodRequested+100) + "m"),
			resource.MustParse(fmt.Sprint(perPodRequested-100) + "m"),
			resource.MustParse(fmt.Sprint(perPodRequested+10) + "m"),
			resource.MustParse(fmt.Sprint(perPodRequested-10) + "m"),
			resource.MustParse(fmt.Sprint(perPodRequested+2) + "m"),
			resource.MustParse(fmt.Sprint(perPodRequested-2) + "m"),
			resource.MustParse(fmt.Sprint(perPodRequested+1) + "m"),
			resource.MustParse(fmt.Sprint(perPodRequested-1) + "m"),
			resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
			resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
		},
		useMetricsAPI: true,
	}

	tc.runTest(t)
	// Reuse the data structure above, now testing "unscaling":
	// verify that no scaling happens when we are within a very close margin of the tolerance.
	target = math.Abs(1/(requestedToUsed*(1-defaultTestingTolerance))) + .004
	finalCPUPercentTarget = int32(target * 100)
	tc.CPUTarget = finalCPUPercentTarget
	tc.initialReplicas = startPods
	tc.desiredReplicas = startPods
	tc.runTest(t)
}
func TestLegacyScaleUpRCImmediately(t *testing.T) {
	time := metav1.Time{Time: time.Now()}
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     1,
		desiredReplicas:     2,
		verifyCPUCurrent:    false,
		reportedLevels:      []uint64{0, 0, 0, 0},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:       true,
		lastScaleTime:       &time,
	}
	tc.runTest(t)
}

func TestLegacyScaleDownRCImmediately(t *testing.T) {
	time := metav1.Time{Time: time.Now()}
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         5,
		initialReplicas:     6,
		desiredReplicas:     5,
		CPUTarget:           50,
		reportedLevels:      []uint64{8000, 9500, 1000},
		reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
		useMetricsAPI:       true,
		lastScaleTime:       &time,
	}
	tc.runTest(t)
}

// TODO: add more tests