resource_limits.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"

	"k8s.io/klog"
)

// ResourceLimitsPriorityMap is a priority function that increases the score of the input node by 1 if the
// node satisfies the input pod's resource limits. In detail, it works as follows: if the node does not
// publish its allocatable resources (both cpu and memory), the node score is not affected. If the pod does
// not specify either a cpu or a memory limit, the node score is not affected. If one or both of the pod's
// cpu and memory limits are satisfied by the node's allocatable resources, the node is assigned a score of 1.
// The rationale for choosing the lowest possible score of 1 is that it is intended mainly to break ties
// between nodes that received the same score from the least-requested or most-requested priority functions.
func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
	node := nodeInfo.Node()
	if node == nil {
		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
	}

	allocatableResources := nodeInfo.AllocatableResource()

	// compute pod limits
	var podLimits *schedulernodeinfo.Resource
	if priorityMeta, ok := meta.(*priorityMetadata); ok {
		// We were able to parse metadata, use podLimits from there.
		podLimits = priorityMeta.podLimits
	} else {
		// We couldn't parse metadata - fall back to computing it.
		podLimits = getResourceLimits(pod)
	}

	cpuScore := computeScore(podLimits.MilliCPU, allocatableResources.MilliCPU)
	memScore := computeScore(podLimits.Memory, allocatableResources.Memory)

	score := int(0)
	if cpuScore == 1 || memScore == 1 {
		score = 1
	}

	if klog.V(10) {
		// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
		// not logged. There is visible performance gain from it.
		klog.Infof(
			"%v -> %v: Resource Limits Priority, allocatable %d millicores %d memory bytes, pod limits %d millicores %d memory bytes, score %d",
			pod.Name, node.Name,
			allocatableResources.MilliCPU, allocatableResources.Memory,
			podLimits.MilliCPU, podLimits.Memory,
			score,
		)
	}

	return schedulerapi.HostPriority{
		Host:  node.Name,
		Score: float64(score),
	}, nil
}
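
// Illustrative usage sketch (an assumption, not part of the upstream file): exercising the map function
// from a test in this package with nil metadata, so the getResourceLimits fallback path is taken. It
// assumes the metav1 (k8s.io/apimachinery/pkg/apis/meta/v1) and resource (k8s.io/apimachinery/pkg/api/resource)
// packages, and a pod whose containers set cpu and/or memory limits.
//
//	node := &v1.Node{
//		ObjectMeta: metav1.ObjectMeta{Name: "node-1"},
//		Status: v1.NodeStatus{
//			Allocatable: v1.ResourceList{
//				v1.ResourceCPU:    resource.MustParse("4"),
//				v1.ResourceMemory: resource.MustParse("8Gi"),
//			},
//		},
//	}
//	nodeInfo := schedulernodeinfo.NewNodeInfo()
//	if err := nodeInfo.SetNode(node); err != nil {
//		t.Fatal(err)
//	}
//	hostPriority, err := ResourceLimitsPriorityMap(pod, nil, nodeInfo)
//	// hostPriority.Score is 1 if the pod's cpu or memory limit fits within the
//	// node's allocatable resources, and 0 otherwise.
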
// computeScore returns 1 if the limit value is less than or equal to the allocatable
// value, otherwise it returns 0.
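// For example (illustrative): computeScore(500, 4000) returns 1 because the limit fits, while
// computeScore(0, 4000) and computeScore(500, 0) both return 0 because an unset limit or an
// unpublished allocatable value yields no score.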
func computeScore(limit, allocatable int64) int64 {
	if limit != 0 && allocatable != 0 && limit <= allocatable {
		return 1
	}
	return 0
}

// getResourceLimits computes resource limits for the input pod.
// The reason for creating this function is to be consistent with other
// priority functions, because most, if not all, of them work with
// schedulernodeinfo.Resource.
func getResourceLimits(pod *v1.Pod) *schedulernodeinfo.Resource {
	result := &schedulernodeinfo.Resource{}
	for _, container := range pod.Spec.Containers {
		result.Add(container.Resources.Limits)
	}

	// take max_resource(sum_pod, any_init_container)
	for _, container := range pod.Spec.InitContainers {
		result.SetMaxResource(container.Resources.Limits)
	}

	return result
}
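
// Illustrative sketch (an assumption, not part of the upstream file): calling getResourceLimits from a test
// in this package, to show the max(sum of containers, any init container) rule. It assumes the
// k8s.io/apimachinery/pkg/api/resource package for parsing quantities.
//
//	pod := &v1.Pod{
//		Spec: v1.PodSpec{
//			Containers: []v1.Container{
//				{Resources: v1.ResourceRequirements{Limits: v1.ResourceList{
//					v1.ResourceCPU:    resource.MustParse("500m"),
//					v1.ResourceMemory: resource.MustParse("512Mi"),
//				}}},
//				{Resources: v1.ResourceRequirements{Limits: v1.ResourceList{
//					v1.ResourceCPU: resource.MustParse("250m"),
//				}}},
//			},
//			InitContainers: []v1.Container{
//				{Resources: v1.ResourceRequirements{Limits: v1.ResourceList{
//					v1.ResourceCPU: resource.MustParse("1"),
//				}}},
//			},
//		},
//	}
//	limits := getResourceLimits(pod)
//	// The regular containers sum to 750m cpu and 512Mi memory; the init container needs 1 cpu,
//	// so the result is the maximum of the two: limits.MilliCPU == 1000 and limits.Memory == 512Mi.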