horizontal.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package podautoscaler

import (
	"fmt"
	"math"
	"time"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
	v1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/errors"
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	autoscalinginformers "k8s.io/client-go/informers/autoscaling/v1"
	coreinformers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	autoscalingclient "k8s.io/client-go/kubernetes/typed/autoscaling/v1"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	autoscalinglisters "k8s.io/client-go/listers/autoscaling/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	scaleclient "k8s.io/client-go/scale"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/controller"
	metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
)

var (
	scaleUpLimitFactor  = 2.0
	scaleUpLimitMinimum = 4.0
)

type timestampedRecommendation struct {
	recommendation int32
	timestamp      time.Time
}

// HorizontalController is responsible for synchronizing HPA objects stored
// in the system with the actual deployments/replication controllers they
// control.
type HorizontalController struct {
	scaleNamespacer scaleclient.ScalesGetter
	hpaNamespacer   autoscalingclient.HorizontalPodAutoscalersGetter
	mapper          apimeta.RESTMapper

	replicaCalc   *ReplicaCalculator
	eventRecorder record.EventRecorder

	downscaleStabilisationWindow time.Duration

	// hpaLister is able to list/get HPAs from the shared cache from the informer passed in to
	// NewHorizontalController.
	hpaLister       autoscalinglisters.HorizontalPodAutoscalerLister
	hpaListerSynced cache.InformerSynced

	// podLister is able to list/get Pods from the shared cache from the informer passed in to
	// NewHorizontalController.
	podLister       corelisters.PodLister
	podListerSynced cache.InformerSynced

	// Controllers that need to be synced
	queue workqueue.RateLimitingInterface

	// Latest unstabilized recommendations for each autoscaler.
	recommendations map[string][]timestampedRecommendation
}

// NewHorizontalController creates a new HorizontalController.
func NewHorizontalController(
	evtNamespacer v1core.EventsGetter,
	scaleNamespacer scaleclient.ScalesGetter,
	hpaNamespacer autoscalingclient.HorizontalPodAutoscalersGetter,
	mapper apimeta.RESTMapper,
	metricsClient metricsclient.MetricsClient,
	hpaInformer autoscalinginformers.HorizontalPodAutoscalerInformer,
	podInformer coreinformers.PodInformer,
	resyncPeriod time.Duration,
	downscaleStabilisationWindow time.Duration,
	tolerance float64,
	cpuInitializationPeriod,
	delayOfInitialReadinessStatus time.Duration,
) *HorizontalController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartLogging(klog.Infof)
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")})
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "horizontal-pod-autoscaler"})

	hpaController := &HorizontalController{
		eventRecorder:                recorder,
		scaleNamespacer:              scaleNamespacer,
		hpaNamespacer:                hpaNamespacer,
		downscaleStabilisationWindow: downscaleStabilisationWindow,
		queue:                        workqueue.NewNamedRateLimitingQueue(NewDefaultHPARateLimiter(resyncPeriod), "horizontalpodautoscaler"),
		mapper:                       mapper,
		recommendations:              map[string][]timestampedRecommendation{},
	}

	hpaInformer.Informer().AddEventHandlerWithResyncPeriod(
		cache.ResourceEventHandlerFuncs{
			AddFunc:    hpaController.enqueueHPA,
			UpdateFunc: hpaController.updateHPA,
			DeleteFunc: hpaController.deleteHPA,
		},
		resyncPeriod,
	)
	hpaController.hpaLister = hpaInformer.Lister()
	hpaController.hpaListerSynced = hpaInformer.Informer().HasSynced

	hpaController.podLister = podInformer.Lister()
	hpaController.podListerSynced = podInformer.Informer().HasSynced

	replicaCalc := NewReplicaCalculator(
		metricsClient,
		hpaController.podLister,
		tolerance,
		cpuInitializationPeriod,
		delayOfInitialReadinessStatus,
	)
	hpaController.replicaCalc = replicaCalc

	return hpaController
}

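// Illustrative sketch (not part of the original file): how a controller-manager
// style caller might construct and start this controller. The clientset,
// informerFactory, scaleClient, restMapper, metricsClient, and stopCh names are
// hypothetical; the durations mirror common kube-controller-manager defaults:
//
//	hpaController := NewHorizontalController(
//		clientset.CoreV1(),        // event sink
//		scaleClient,               // scale subresource client
//		clientset.AutoscalingV1(), // HPA client
//		restMapper,
//		metricsClient,
//		informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
//		informerFactory.Core().V1().Pods(),
//		15*time.Second, // resyncPeriod
//		5*time.Minute,  // downscaleStabilisationWindow
//		0.1,            // tolerance
//		5*time.Minute,  // cpuInitializationPeriod
//		30*time.Second, // delayOfInitialReadinessStatus
//	)
//	go hpaController.Run(stopCh)
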
// Run begins watching and syncing.
func (a *HorizontalController) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer a.queue.ShutDown()

	klog.Infof("Starting HPA controller")
	defer klog.Infof("Shutting down HPA controller")

	if !controller.WaitForCacheSync("HPA", stopCh, a.hpaListerSynced, a.podListerSynced) {
		return
	}

	// start a single worker (we may wish to start more in the future)
	go wait.Until(a.worker, time.Second, stopCh)

	<-stopCh
}

// obj could be a *v1.HorizontalPodAutoscaler, or a DeletionFinalStateUnknown marker item.
func (a *HorizontalController) updateHPA(old, cur interface{}) {
	a.enqueueHPA(cur)
}

// obj could be a *v1.HorizontalPodAutoscaler, or a DeletionFinalStateUnknown marker item.
func (a *HorizontalController) enqueueHPA(obj interface{}) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
		return
	}

	// Requests are always added to the queue with a resyncPeriod delay. If there's already
	// a request for the HPA in the queue then the new request is dropped. Requests spend the
	// resync interval in the queue, so HPAs are processed every resync interval.
	a.queue.AddRateLimited(key)
}

func (a *HorizontalController) deleteHPA(obj interface{}) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
		return
	}

	// TODO: could we leak if we fail to get the key?
	a.queue.Forget(key)
}

func (a *HorizontalController) worker() {
	for a.processNextWorkItem() {
	}
	klog.Infof("horizontal pod autoscaler controller worker shutting down")
}

func (a *HorizontalController) processNextWorkItem() bool {
	key, quit := a.queue.Get()
	if quit {
		return false
	}
	defer a.queue.Done(key)

	deleted, err := a.reconcileKey(key.(string))
	if err != nil {
		utilruntime.HandleError(err)
	}
	// Re-add the just-processed HPA to the queue with a resyncPeriod delay.
	// Requests are always added to the queue with a resyncPeriod delay. If there's already
	// a request for the HPA in the queue then the new request is dropped. Requests spend
	// resyncPeriod in the queue, so HPAs are processed every resyncPeriod.
	// The request is added here just in case the last resync didn't insert one into the queue.
	// This happens quite often because there is a race condition between adding requests after
	// resyncPeriod and removing them from the queue. A request can be added by resync before the
	// previous request is removed from the queue. If we didn't add a request here, then in that
	// case one request would be dropped and the HPA would be processed after 2 x resyncPeriod.
	if !deleted {
		a.queue.AddRateLimited(key)
	}

	return true
}

// computeReplicasForMetrics computes the desired number of replicas for the metric specifications listed in the HPA,
// returning the maximum of the computed replica counts, a description of the associated metric, and the statuses of
// all metrics computed.
func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.HorizontalPodAutoscaler, scale *autoscalingv1.Scale,
	metricSpecs []autoscalingv2.MetricSpec) (replicas int32, metric string, statuses []autoscalingv2.MetricStatus, timestamp time.Time, err error) {

	specReplicas := scale.Spec.Replicas
	statusReplicas := scale.Status.Replicas
	statuses = make([]autoscalingv2.MetricStatus, len(metricSpecs))

	if scale.Status.Selector == "" {
		errMsg := "selector is required"
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "the HPA target's scale is missing a selector")
		return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
	}

	selector, err := labels.Parse(scale.Status.Selector)
	if err != nil {
		errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err)
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", errMsg)
		return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
	}

	invalidMetricsCount := 0
	var invalidMetricError error

	for i, metricSpec := range metricSpecs {
		replicaCountProposal, metricNameProposal, timestampProposal, err := a.computeReplicasForMetric(hpa, metricSpec, specReplicas, statusReplicas, selector, &statuses[i])
		if err != nil {
			invalidMetricsCount++
			invalidMetricError = err
		}
		if err == nil && (replicas == 0 || replicaCountProposal > replicas) {
			timestamp = timestampProposal
			replicas = replicaCountProposal
			metric = metricNameProposal
		}
	}

	// If all metrics are invalid, or some are invalid and we would scale down,
	// return an error and set the condition of the HPA based on the first invalid metric.
	// Otherwise set the condition as scaling active, as we're going to scale.
	if invalidMetricsCount >= len(metricSpecs) || (invalidMetricsCount > 0 && replicas < specReplicas) {
		return 0, "", statuses, time.Time{}, fmt.Errorf("invalid metrics (%v invalid out of %v), last error was: %v", invalidMetricsCount, len(metricSpecs), invalidMetricError)
	}
	setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to successfully calculate a replica count from %v", metric)
	return replicas, metric, statuses, timestamp, nil
}

// computeReplicasForMetric computes the desired number of replicas for a specific HPA and a single metric specification.
func (a *HorizontalController) computeReplicasForMetric(hpa *autoscalingv2.HorizontalPodAutoscaler, spec autoscalingv2.MetricSpec,
	specReplicas, statusReplicas int32, selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, metricNameProposal string,
	timestampProposal time.Time, err error) {

	switch spec.Type {
	case autoscalingv2.ObjectMetricSourceType:
		metricSelector, err := metav1.LabelSelectorAsSelector(spec.Object.Metric.Selector)
		if err != nil {
			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", err.Error())
			setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %v", err)
			return 0, "", time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
		}
		replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForObjectMetric(specReplicas, statusReplicas, spec, hpa, selector, status, metricSelector)
		if err != nil {
			return 0, "", time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
		}
	case autoscalingv2.PodsMetricSourceType:
		metricSelector, err := metav1.LabelSelectorAsSelector(spec.Pods.Metric.Selector)
		if err != nil {
			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetPodsMetric", err.Error())
			setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetPodsMetric", "the HPA was unable to compute the replica count: %v", err)
			return 0, "", time.Time{}, fmt.Errorf("failed to get pods metric value: %v", err)
		}
		replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForPodsMetric(specReplicas, spec, hpa, selector, status, metricSelector)
		if err != nil {
			return 0, "", time.Time{}, fmt.Errorf("failed to get pods metric value: %v", err)
		}
	case autoscalingv2.ResourceMetricSourceType:
		replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForResourceMetric(specReplicas, spec, hpa, selector, status)
		if err != nil {
			return 0, "", time.Time{}, err
		}
	case autoscalingv2.ExternalMetricSourceType:
		replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForExternalMetric(specReplicas, statusReplicas, spec, hpa, selector, status)
		if err != nil {
			return 0, "", time.Time{}, err
		}
	default:
		errMsg := fmt.Sprintf("unknown metric source type %q", string(spec.Type))
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidMetricSourceType", errMsg)
		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidMetricSourceType", "the HPA was unable to compute the replica count: %v", fmt.Errorf(errMsg))
		return 0, "", time.Time{}, fmt.Errorf(errMsg)
	}
	return replicaCountProposal, metricNameProposal, timestampProposal, nil
}

func (a *HorizontalController) reconcileKey(key string) (deleted bool, err error) {
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return true, err
	}

	hpa, err := a.hpaLister.HorizontalPodAutoscalers(namespace).Get(name)
	if errors.IsNotFound(err) {
		klog.Infof("Horizontal Pod Autoscaler %s has been deleted in %s", name, namespace)
		delete(a.recommendations, key)
		return true, nil
	}

	return false, a.reconcileAutoscaler(hpa, key)
}

// computeStatusForObjectMetric computes the desired number of replicas for the specified metric of type ObjectMetricSourceType.
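// A Value target compares the metric's raw value directly against the target, while an
// AverageValue target divides the raw value by the current pod count before comparing;
// the two branches below call GetObjectMetricReplicas and GetObjectPerPodMetricReplicas
// accordingly.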
func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, statusReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (int32, time.Time, string, error) {
	if metricSpec.Object.Target.Type == autoscalingv2.ValueMetricType {
		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetObjectMetricReplicas(specReplicas, metricSpec.Object.Target.Value.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, selector, metricSelector)
		if err != nil {
			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", err.Error())
			setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %v", err)
			return 0, timestampProposal, "", err
		}
		*status = autoscalingv2.MetricStatus{
			Type: autoscalingv2.ObjectMetricSourceType,
			Object: &autoscalingv2.ObjectMetricStatus{
				DescribedObject: metricSpec.Object.DescribedObject,
				Metric: autoscalingv2.MetricIdentifier{
					Name:     metricSpec.Object.Metric.Name,
					Selector: metricSpec.Object.Metric.Selector,
				},
				Current: autoscalingv2.MetricValueStatus{
					Value: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
				},
			},
		}
		return replicaCountProposal, timestampProposal, fmt.Sprintf("%s metric %s", metricSpec.Object.DescribedObject.Kind, metricSpec.Object.Metric.Name), nil
	} else if metricSpec.Object.Target.Type == autoscalingv2.AverageValueMetricType {
		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetObjectPerPodMetricReplicas(statusReplicas, metricSpec.Object.Target.AverageValue.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, metricSelector)
		if err != nil {
			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", err.Error())
			setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %v", err)
			return 0, time.Time{}, "", fmt.Errorf("failed to get %s object metric: %v", metricSpec.Object.Metric.Name, err)
		}
		*status = autoscalingv2.MetricStatus{
			Type: autoscalingv2.ObjectMetricSourceType,
			Object: &autoscalingv2.ObjectMetricStatus{
				Metric: autoscalingv2.MetricIdentifier{
					Name:     metricSpec.Object.Metric.Name,
					Selector: metricSpec.Object.Metric.Selector,
				},
				Current: autoscalingv2.MetricValueStatus{
					AverageValue: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
				},
			},
		}
		return replicaCountProposal, timestampProposal, fmt.Sprintf("object metric %s(%+v)", metricSpec.Object.Metric.Name, metricSpec.Object.Metric.Selector), nil
	}
	errMsg := "invalid object metric source: neither a value target nor an average value target was set"
	a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", errMsg)
	setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %s", errMsg)
	return 0, time.Time{}, "", fmt.Errorf(errMsg)
}

// computeStatusForPodsMetric computes the desired number of replicas for the specified metric of type PodsMetricSourceType.
func (a *HorizontalController) computeStatusForPodsMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (int32, time.Time, string, error) {
	replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetMetricReplicas(currentReplicas, metricSpec.Pods.Target.AverageValue.MilliValue(), metricSpec.Pods.Metric.Name, hpa.Namespace, selector, metricSelector)
	if err != nil {
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetPodsMetric", err.Error())
		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetPodsMetric", "the HPA was unable to compute the replica count: %v", err)
		return 0, timestampProposal, "", err
	}
	*status = autoscalingv2.MetricStatus{
		Type: autoscalingv2.PodsMetricSourceType,
		Pods: &autoscalingv2.PodsMetricStatus{
			Metric: autoscalingv2.MetricIdentifier{
				Name:     metricSpec.Pods.Metric.Name,
				Selector: metricSpec.Pods.Metric.Selector,
			},
			Current: autoscalingv2.MetricValueStatus{
				AverageValue: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
			},
		},
	}

	return replicaCountProposal, timestampProposal, fmt.Sprintf("pods metric %s", metricSpec.Pods.Metric.Name), nil
}

// computeStatusForResourceMetric computes the desired number of replicas for the specified metric of type ResourceMetricSourceType.
func (a *HorizontalController) computeStatusForResourceMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus) (int32, time.Time, string, error) {
	if metricSpec.Resource.Target.AverageValue != nil {
		var rawProposal int64
		replicaCountProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetRawResourceReplicas(currentReplicas, metricSpec.Resource.Target.AverageValue.MilliValue(), metricSpec.Resource.Name, hpa.Namespace, selector)
		if err != nil {
			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetResourceMetric", err.Error())
			setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetResourceMetric", "the HPA was unable to compute the replica count: %v", err)
			return 0, time.Time{}, "", fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err)
		}
		metricNameProposal := fmt.Sprintf("%s resource", metricSpec.Resource.Name)
		*status = autoscalingv2.MetricStatus{
			Type: autoscalingv2.ResourceMetricSourceType,
			Resource: &autoscalingv2.ResourceMetricStatus{
				Name: metricSpec.Resource.Name,
				Current: autoscalingv2.MetricValueStatus{
					AverageValue: resource.NewMilliQuantity(rawProposal, resource.DecimalSI),
				},
			},
		}
		return replicaCountProposal, timestampProposal, metricNameProposal, nil
	}

	if metricSpec.Resource.Target.AverageUtilization == nil {
		errMsg := "invalid resource metric source: neither a utilization target nor a value target was set"
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetResourceMetric", errMsg)
		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetResourceMetric", "the HPA was unable to compute the replica count: %s", errMsg)
		return 0, time.Time{}, "", fmt.Errorf(errMsg)
	}

	targetUtilization := *metricSpec.Resource.Target.AverageUtilization
	var percentageProposal int32
	var rawProposal int64
	replicaCountProposal, percentageProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetResourceReplicas(currentReplicas, targetUtilization, metricSpec.Resource.Name, hpa.Namespace, selector)
	if err != nil {
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetResourceMetric", err.Error())
		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetResourceMetric", "the HPA was unable to compute the replica count: %v", err)
		return 0, time.Time{}, "", fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err)
	}
	metricNameProposal := fmt.Sprintf("%s resource utilization (percentage of request)", metricSpec.Resource.Name)
	*status = autoscalingv2.MetricStatus{
		Type: autoscalingv2.ResourceMetricSourceType,
		Resource: &autoscalingv2.ResourceMetricStatus{
			Name: metricSpec.Resource.Name,
			Current: autoscalingv2.MetricValueStatus{
				AverageUtilization: &percentageProposal,
				AverageValue:       resource.NewMilliQuantity(rawProposal, resource.DecimalSI),
			},
		},
	}
	return replicaCountProposal, timestampProposal, metricNameProposal, nil
}

// computeStatusForExternalMetric computes the desired number of replicas for the specified metric of type ExternalMetricSourceType.
func (a *HorizontalController) computeStatusForExternalMetric(specReplicas, statusReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus) (int32, time.Time, string, error) {
	if metricSpec.External.Target.AverageValue != nil {
		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetExternalPerPodMetricReplicas(statusReplicas, metricSpec.External.Target.AverageValue.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector)
		if err != nil {
			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", err.Error())
			setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)
			return 0, time.Time{}, "", fmt.Errorf("failed to get %s external metric: %v", metricSpec.External.Metric.Name, err)
		}
		*status = autoscalingv2.MetricStatus{
			Type: autoscalingv2.ExternalMetricSourceType,
			External: &autoscalingv2.ExternalMetricStatus{
				Metric: autoscalingv2.MetricIdentifier{
					Name:     metricSpec.External.Metric.Name,
					Selector: metricSpec.External.Metric.Selector,
				},
				Current: autoscalingv2.MetricValueStatus{
					AverageValue: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
				},
			},
		}
		return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), nil
	}
	if metricSpec.External.Target.Value != nil {
		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetExternalMetricReplicas(specReplicas, metricSpec.External.Target.Value.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector, selector)
		if err != nil {
			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", err.Error())
			setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)
			return 0, time.Time{}, "", fmt.Errorf("failed to get external metric %s: %v", metricSpec.External.Metric.Name, err)
		}
		*status = autoscalingv2.MetricStatus{
			Type: autoscalingv2.ExternalMetricSourceType,
			External: &autoscalingv2.ExternalMetricStatus{
				Metric: autoscalingv2.MetricIdentifier{
					Name:     metricSpec.External.Metric.Name,
					Selector: metricSpec.External.Metric.Selector,
				},
				Current: autoscalingv2.MetricValueStatus{
					Value: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
				},
			},
		}
		return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), nil
	}
	errMsg := "invalid external metric source: neither a value target nor an average value target was set"
	a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", errMsg)
	setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %s", errMsg)
	return 0, time.Time{}, "", fmt.Errorf(errMsg)
}

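// recordInitialRecommendation seeds the stabilization history for an HPA with its
// current replica count, giving the downscale stabilization window a baseline
// before the first recommendation is computed.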
func (a *HorizontalController) recordInitialRecommendation(currentReplicas int32, key string) {
	if a.recommendations[key] == nil {
		a.recommendations[key] = []timestampedRecommendation{{currentReplicas, time.Now()}}
	}
}

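// reconcileAutoscaler is the sync routine for a single HPA: it resolves the target's
// scale subresource, computes a desired replica count from the configured metrics
// (subject to min/max bounds and stabilization), updates the scale if needed, and
// records the resulting status and conditions.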
func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.HorizontalPodAutoscaler, key string) error {
	// make a copy so that we never mutate the shared informer cache (conversion can mutate the object)
	hpav1 := hpav1Shared.DeepCopy()
	// then, convert to autoscaling/v2, which makes our lives easier when calculating metrics
	hpaRaw, err := unsafeConvertToVersionVia(hpav1, autoscalingv2.SchemeGroupVersion)
	if err != nil {
		a.eventRecorder.Event(hpav1, v1.EventTypeWarning, "FailedConvertHPA", err.Error())
		return fmt.Errorf("failed to convert the given HPA to %s: %v", autoscalingv2.SchemeGroupVersion.String(), err)
	}
	hpa := hpaRaw.(*autoscalingv2.HorizontalPodAutoscaler)
	hpaStatusOriginal := hpa.Status.DeepCopy()

	reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name)

	targetGV, err := schema.ParseGroupVersion(hpa.Spec.ScaleTargetRef.APIVersion)
	if err != nil {
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
		a.updateStatusIfNeeded(hpaStatusOriginal, hpa)
		return fmt.Errorf("invalid API version in scale target reference: %v", err)
	}

	targetGK := schema.GroupKind{
		Group: targetGV.Group,
		Kind:  hpa.Spec.ScaleTargetRef.Kind,
	}

	mappings, err := a.mapper.RESTMappings(targetGK)
	if err != nil {
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
		a.updateStatusIfNeeded(hpaStatusOriginal, hpa)
		return fmt.Errorf("unable to determine resource for scale target reference: %v", err)
	}

	scale, targetGR, err := a.scaleForResourceMappings(hpa.Namespace, hpa.Spec.ScaleTargetRef.Name, mappings)
	if err != nil {
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
		a.updateStatusIfNeeded(hpaStatusOriginal, hpa)
		return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
	}
	setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededGetScale", "the HPA controller was able to get the target's current scale")
	currentReplicas := scale.Spec.Replicas
	a.recordInitialRecommendation(currentReplicas, key)

	var (
		metricStatuses        []autoscalingv2.MetricStatus
		metricDesiredReplicas int32
		metricName            string
	)

	desiredReplicas := int32(0)
	rescaleReason := ""

	rescale := true

	if scale.Spec.Replicas == 0 {
		// Autoscaling is disabled for this resource
		desiredReplicas = 0
		rescale = false
		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "ScalingDisabled", "scaling is disabled since the replica count of the target is zero")
	} else if currentReplicas > hpa.Spec.MaxReplicas {
		rescaleReason = "Current number of replicas above Spec.MaxReplicas"
		desiredReplicas = hpa.Spec.MaxReplicas
	} else if hpa.Spec.MinReplicas != nil && currentReplicas < *hpa.Spec.MinReplicas {
		rescaleReason = "Current number of replicas below Spec.MinReplicas"
		desiredReplicas = *hpa.Spec.MinReplicas
	} else if currentReplicas == 0 {
		rescaleReason = "Current number of replicas must be greater than 0"
		desiredReplicas = 1
	} else {
		var metricTimestamp time.Time
		metricDesiredReplicas, metricName, metricStatuses, metricTimestamp, err = a.computeReplicasForMetrics(hpa, scale, hpa.Spec.Metrics)
		if err != nil {
			a.setCurrentReplicasInStatus(hpa, currentReplicas)
			if err := a.updateStatusIfNeeded(hpaStatusOriginal, hpa); err != nil {
				utilruntime.HandleError(err)
			}
			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedComputeMetricsReplicas", err.Error())
			return fmt.Errorf("failed to compute desired number of replicas based on listed metrics for %s: %v", reference, err)
		}

		klog.V(4).Infof("proposing %v desired replicas (based on %s from %s) for %s", metricDesiredReplicas, metricName, metricTimestamp, reference)

		rescaleMetric := ""
		if metricDesiredReplicas > desiredReplicas {
			desiredReplicas = metricDesiredReplicas
			rescaleMetric = metricName
		}
		if desiredReplicas > currentReplicas {
			rescaleReason = fmt.Sprintf("%s above target", rescaleMetric)
		}
		if desiredReplicas < currentReplicas {
			rescaleReason = "All metrics below target"
		}
		desiredReplicas = a.normalizeDesiredReplicas(hpa, key, currentReplicas, desiredReplicas)
		rescale = desiredReplicas != currentReplicas
	}

	if rescale {
		scale.Spec.Replicas = desiredReplicas
		_, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(targetGR, scale)
		if err != nil {
			a.eventRecorder.Eventf(hpa, v1.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error())
			setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedUpdateScale", "the HPA controller was unable to update the target scale: %v", err)
			a.setCurrentReplicasInStatus(hpa, currentReplicas)
			if err := a.updateStatusIfNeeded(hpaStatusOriginal, hpa); err != nil {
				utilruntime.HandleError(err)
			}
			return fmt.Errorf("failed to rescale %s: %v", reference, err)
		}
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededRescale", "the HPA controller was able to update the target scale to %d", desiredReplicas)
		a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "SuccessfulRescale", "New size: %d; reason: %s", desiredReplicas, rescaleReason)
		klog.Infof("Successful rescale of %s, old size: %d, new size: %d, reason: %s",
			hpa.Name, currentReplicas, desiredReplicas, rescaleReason)
	} else {
		klog.V(4).Infof("decided not to scale %s to %v (last scale time was %s)", reference, desiredReplicas, hpa.Status.LastScaleTime)
		desiredReplicas = currentReplicas
	}

	a.setStatus(hpa, currentReplicas, desiredReplicas, metricStatuses, rescale)
	return a.updateStatusIfNeeded(hpaStatusOriginal, hpa)
}

// stabilizeRecommendation:
// - replaces old recommendation with the newest recommendation,
// - returns max of recommendations that are not older than downscaleStabilisationWindow.
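// For example, with a 5-minute window, a history of [6 (recorded 7 min ago), 9 (2 min ago)]
// and a new recommendation of 5: the stale 7-minute-old sample is overwritten with (5, now)
// and the function returns max(5, 9) = 9, so the scale-down is held back.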
func (a *HorizontalController) stabilizeRecommendation(key string, prenormalizedDesiredReplicas int32) int32 {
	maxRecommendation := prenormalizedDesiredReplicas
	foundOldSample := false
	oldSampleIndex := 0
	cutoff := time.Now().Add(-a.downscaleStabilisationWindow)

	for i, rec := range a.recommendations[key] {
		if rec.timestamp.Before(cutoff) {
			foundOldSample = true
			oldSampleIndex = i
		} else if rec.recommendation > maxRecommendation {
			maxRecommendation = rec.recommendation
		}
	}
	if foundOldSample {
		a.recommendations[key][oldSampleIndex] = timestampedRecommendation{prenormalizedDesiredReplicas, time.Now()}
	} else {
		a.recommendations[key] = append(a.recommendations[key], timestampedRecommendation{prenormalizedDesiredReplicas, time.Now()})
	}
	return maxRecommendation
}

// normalizeDesiredReplicas takes the metrics' desired replica count and normalizes it based on the appropriate
// conditions (e.g. keeping it within [minReplicas, maxReplicas], limiting the scale-up rate, and applying the
// downscale stabilization window).
func (a *HorizontalController) normalizeDesiredReplicas(hpa *autoscalingv2.HorizontalPodAutoscaler, key string, currentReplicas int32, prenormalizedDesiredReplicas int32) int32 {
	stabilizedRecommendation := a.stabilizeRecommendation(key, prenormalizedDesiredReplicas)
	if stabilizedRecommendation != prenormalizedDesiredReplicas {
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ScaleDownStabilized", "recent recommendations were higher than current one, applying the highest recent recommendation")
	} else {
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
	}

	var minReplicas int32
	if hpa.Spec.MinReplicas != nil {
		minReplicas = *hpa.Spec.MinReplicas
	} else {
		minReplicas = 0
	}

	desiredReplicas, condition, reason := convertDesiredReplicasWithRules(currentReplicas, stabilizedRecommendation, minReplicas, hpa.Spec.MaxReplicas)

	if desiredReplicas == stabilizedRecommendation {
		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, condition, reason)
	} else {
		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, condition, reason)
	}

	return desiredReplicas
}

// convertDesiredReplicasWithRules performs the actual normalization, without depending on `HorizontalController` or `HorizontalPodAutoscaler`.
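// For example, with currentReplicas=2 (so the scale-up limit is max(2*2, 4) = 4),
// hpaMinReplicas=1, hpaMaxReplicas=10 and desiredReplicas=8, it returns
// (4, "ScaleUpLimit", ...): the step is capped by the scale-up limit even though
// maxReplicas would allow more.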
func convertDesiredReplicasWithRules(currentReplicas, desiredReplicas, hpaMinReplicas, hpaMaxReplicas int32) (int32, string, string) {
	var minimumAllowedReplicas int32
	var maximumAllowedReplicas int32

	var possibleLimitingCondition string
	var possibleLimitingReason string

	if hpaMinReplicas == 0 {
		minimumAllowedReplicas = 1
		possibleLimitingReason = "the desired replica count is zero"
	} else {
		minimumAllowedReplicas = hpaMinReplicas
		possibleLimitingReason = "the desired replica count is less than the minimum replica count"
	}

	// Do not upscale too much to prevent incorrect rapid increase of the number of master replicas caused by
	// bogus CPU usage report from heapster/kubelet (like in issue #32304).
	scaleUpLimit := calculateScaleUpLimit(currentReplicas)

	if hpaMaxReplicas > scaleUpLimit {
		maximumAllowedReplicas = scaleUpLimit
		possibleLimitingCondition = "ScaleUpLimit"
		possibleLimitingReason = "the desired replica count is increasing faster than the maximum scale rate"
	} else {
		maximumAllowedReplicas = hpaMaxReplicas
		possibleLimitingCondition = "TooManyReplicas"
		possibleLimitingReason = "the desired replica count is more than the maximum replica count"
	}

	if desiredReplicas < minimumAllowedReplicas {
		possibleLimitingCondition = "TooFewReplicas"
		return minimumAllowedReplicas, possibleLimitingCondition, possibleLimitingReason
	} else if desiredReplicas > maximumAllowedReplicas {
		return maximumAllowedReplicas, possibleLimitingCondition, possibleLimitingReason
	}

	return desiredReplicas, "DesiredWithinRange", "the desired count is within the acceptable range"
}

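// calculateScaleUpLimit returns the most replicas a single scale-up step may reach:
// double the current count, but never fewer than 4, so that small targets can still
// grow quickly (e.g. 10 replicas may step to at most 20, and 1 replica to at most 4).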
func calculateScaleUpLimit(currentReplicas int32) int32 {
	return int32(math.Max(scaleUpLimitFactor*float64(currentReplicas), scaleUpLimitMinimum))
}

// scaleForResourceMappings attempts to fetch the scale for the
// resource with the given name and namespace, trying each RESTMapping
// in turn until a working one is found. If none work, the first error
// is returned. It returns both the scale, as well as the group-resource from
// the working mapping.
func (a *HorizontalController) scaleForResourceMappings(namespace, name string, mappings []*apimeta.RESTMapping) (*autoscalingv1.Scale, schema.GroupResource, error) {
	var firstErr error
	for i, mapping := range mappings {
		targetGR := mapping.Resource.GroupResource()
		scale, err := a.scaleNamespacer.Scales(namespace).Get(targetGR, name)
		if err == nil {
			return scale, targetGR, nil
		}

		// if this is the first error, remember it,
		// then go on and try other mappings until we find a good one
		if i == 0 {
			firstErr = err
		}
	}

	// make sure we handle an empty set of mappings
	if firstErr == nil {
		firstErr = fmt.Errorf("unrecognized resource")
	}

	return nil, schema.GroupResource{}, firstErr
}

// setCurrentReplicasInStatus sets the current replica count in the status of the HPA.
func (a *HorizontalController) setCurrentReplicasInStatus(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas int32) {
	a.setStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentMetrics, false)
}

// setStatus recreates the status of the given HPA, updating the current and
// desired replicas, as well as the metric statuses
func (a *HorizontalController) setStatus(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, metricStatuses []autoscalingv2.MetricStatus, rescale bool) {
	hpa.Status = autoscalingv2.HorizontalPodAutoscalerStatus{
		CurrentReplicas: currentReplicas,
		DesiredReplicas: desiredReplicas,
		LastScaleTime:   hpa.Status.LastScaleTime,
		CurrentMetrics:  metricStatuses,
		Conditions:      hpa.Status.Conditions,
	}

	if rescale {
		now := metav1.NewTime(time.Now())
		hpa.Status.LastScaleTime = &now
	}
}

// updateStatusIfNeeded calls updateStatus only if the status of the new HPA is not the same as the old status
func (a *HorizontalController) updateStatusIfNeeded(oldStatus *autoscalingv2.HorizontalPodAutoscalerStatus, newHPA *autoscalingv2.HorizontalPodAutoscaler) error {
	// skip a write if we wouldn't need to update
	if apiequality.Semantic.DeepEqual(oldStatus, &newHPA.Status) {
		return nil
	}
	return a.updateStatus(newHPA)
}

// updateStatus actually does the update request for the status of the given HPA
func (a *HorizontalController) updateStatus(hpa *autoscalingv2.HorizontalPodAutoscaler) error {
	// convert back to autoscalingv1
	hpaRaw, err := unsafeConvertToVersionVia(hpa, autoscalingv1.SchemeGroupVersion)
	if err != nil {
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedConvertHPA", err.Error())
		return fmt.Errorf("failed to convert the given HPA to %s: %v", autoscalingv1.SchemeGroupVersion.String(), err)
	}
	hpav1 := hpaRaw.(*autoscalingv1.HorizontalPodAutoscaler)

	_, err = a.hpaNamespacer.HorizontalPodAutoscalers(hpav1.Namespace).UpdateStatus(hpav1)
	if err != nil {
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedUpdateStatus", err.Error())
		return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
	}
	klog.V(2).Infof("Successfully updated status for %s", hpa.Name)
	return nil
}

// unsafeConvertToVersionVia is like Scheme.UnsafeConvertToVersion, but it does so via an internal version first.
// We use it since working with the v2 version is convenient here, but we want to use the v1 client (and
// can't just use the internal version). Note that conversion mutates the object, so you need to deepcopy
// *before* you call this if the input object came out of a shared cache.
func unsafeConvertToVersionVia(obj runtime.Object, externalVersion schema.GroupVersion) (runtime.Object, error) {
	objInt, err := legacyscheme.Scheme.UnsafeConvertToVersion(obj, schema.GroupVersion{Group: externalVersion.Group, Version: runtime.APIVersionInternal})
	if err != nil {
		return nil, fmt.Errorf("failed to convert the given object to the internal version: %v", err)
	}

	objExt, err := legacyscheme.Scheme.UnsafeConvertToVersion(objInt, externalVersion)
	if err != nil {
		return nil, fmt.Errorf("failed to convert the given object back to the external version: %v", err)
	}

	return objExt, err
}

// setCondition sets the specific condition type on the given HPA to the specified value with the given reason
// and message. The message and args are treated like a format string. The condition will be added if it is
// not present.
func setCondition(hpa *autoscalingv2.HorizontalPodAutoscaler, conditionType autoscalingv2.HorizontalPodAutoscalerConditionType, status v1.ConditionStatus, reason, message string, args ...interface{}) {
	hpa.Status.Conditions = setConditionInList(hpa.Status.Conditions, conditionType, status, reason, message, args...)
}

// setConditionInList sets the specific condition type on the given HPA to the specified value with the given
// reason and message. The message and args are treated like a format string. The condition will be added if
// it is not present. The new list will be returned.
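// For example, setting AbleToScale to ConditionTrue on an empty list appends a new condition and
// stamps its LastTransitionTime; setting it again later with the same status only refreshes the
// Reason and Message, leaving LastTransitionTime untouched.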
func setConditionInList(inputList []autoscalingv2.HorizontalPodAutoscalerCondition, conditionType autoscalingv2.HorizontalPodAutoscalerConditionType, status v1.ConditionStatus, reason, message string, args ...interface{}) []autoscalingv2.HorizontalPodAutoscalerCondition {
	resList := inputList
	var existingCond *autoscalingv2.HorizontalPodAutoscalerCondition
	for i, condition := range resList {
		if condition.Type == conditionType {
			// can't take a pointer to an iteration variable
			existingCond = &resList[i]
			break
		}
	}

	if existingCond == nil {
		resList = append(resList, autoscalingv2.HorizontalPodAutoscalerCondition{
			Type: conditionType,
		})
		existingCond = &resList[len(resList)-1]
	}

	if existingCond.Status != status {
		existingCond.LastTransitionTime = metav1.Now()
	}

	existingCond.Status = status
	existingCond.Reason = reason
	existingCond.Message = fmt.Sprintf(message, args...)

	return resList
}