Achilleas Tzenetopoulos 5 years ago
parent
commit
b0ab0b336c

+ 3 - 3
kubernetes-v1.15.4/pkg/scheduler/algorithm/priorities/custom_resource_allocation.go

@@ -29,15 +29,15 @@ import (
 )
 
 var (
-	//customResourcePriority = &CustomAllocationPriority{"CustomResourceAllocation", customResourceScorer}
-	customResourcePriority = &CustomAllocationPriority{"CustomRequestedPriority", customResourceScorer}
+	customResourcePriority = &CustomAllocationPriority{"CustomResourceAllocation", customResourceScorer}
+	//customResourcePriority = &CustomAllocationPriority{"CustomRequestedPriority", customResourceScorer}
 	// LeastRequestedPriorityMap is a priority function that favors nodes with fewer requested resources.
 	// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and
 	// prioritizes based on the minimum of the average of the fraction of requested to capacity.
 	//
 	// Details:
 	// (cpu((capacity-sum(requested))*10/capacity) + memory((capacity-sum(requested))*10/capacity))/2
-	CustomRequestedPriorityMap = customResourcePriority.CustomPriorityMap
+	CustomRequestedPriorityMap = customResourcePriority.PriorityMap
 )
 
 type Config struct {
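
Note on the hunk above: the prioritizer keeps the "CustomResourceAllocation" name and CustomRequestedPriorityMap now points at the shared PriorityMap method instead of the removed CustomPriorityMap. A minimal sketch of that wiring, using simplified stand-in types and a hypothetical scorer signature rather than the real scheduler packages:

package main

import "fmt"

// Simplified stand-ins for schedulerapi.HostPriority and the allocation priority type.
type HostPriority struct {
	Host  string
	Score float64 // Score is a float64 across the scheduler after this commit
}

type CustomAllocationPriority struct {
	Name   string
	scorer func(node string) (float64, error) // hypothetical scorer signature, not the real customResourceScorer
}

// PriorityMap mirrors the renamed method: one HostPriority per node.
func (p *CustomAllocationPriority) PriorityMap(node string) (HostPriority, error) {
	score, err := p.scorer(node)
	if err != nil {
		return HostPriority{}, err
	}
	return HostPriority{Host: node, Score: score}, nil
}

func main() {
	prio := &CustomAllocationPriority{
		Name:   "CustomResourceAllocation",
		scorer: func(node string) (float64, error) { return 7.5, nil },
	}
	// CustomRequestedPriorityMap in the patch is just this method value.
	customRequestedPriorityMap := prio.PriorityMap
	hp, _ := customRequestedPriorityMap("node-1")
	fmt.Printf("%s => %.1f\n", hp.Host, hp.Score)
}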

+ 2 - 2
kubernetes-v1.15.4/pkg/scheduler/algorithm/priorities/image_locality.go

@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"strings"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	"k8s.io/kubernetes/pkg/util/parsers"
@@ -55,7 +55,7 @@ func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *scheduler
 
 	return schedulerapi.HostPriority{
 		Host:  node.Name,
-		Score: score,
+		Score: float64(score),
 	}, nil
 }
 

+ 2 - 2
kubernetes-v1.15.4/pkg/scheduler/algorithm/priorities/interpod_affinity.go

@@ -21,7 +21,7 @@ import (
 	"sync"
 	"sync/atomic"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/util/workqueue"
@@ -237,7 +237,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
 		if maxMinDiff > 0 && pm.counts[node.Name] != nil {
 			fScore = float64(schedulerapi.MaxPriority) * (float64(*pm.counts[node.Name]-minCount) / float64(maxCount-minCount))
 		}
-		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
+		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: float64(fScore)})
 		if klog.V(10) {
 			klog.Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore))
 		}

+ 2 - 2
kubernetes-v1.15.4/pkg/scheduler/algorithm/priorities/node_affinity.go

@@ -19,7 +19,7 @@ package priorities
 import (
 	"fmt"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
@@ -69,7 +69,7 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s
 
 	return schedulerapi.HostPriority{
 		Host:  node.Name,
-		Score: int(count),
+		Score: float64(count),
 	}, nil
 }
 

+ 2 - 2
kubernetes-v1.15.4/pkg/scheduler/algorithm/priorities/node_label.go

@@ -19,7 +19,7 @@ package priorities
 import (
 	"fmt"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
@@ -56,6 +56,6 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta i
 	}
 	return schedulerapi.HostPriority{
 		Host:  node.Name,
-		Score: score,
+		Score: float64(score),
 	}, nil
 }

+ 5 - 5
kubernetes-v1.15.4/pkg/scheduler/algorithm/priorities/reduce.go

@@ -17,7 +17,7 @@ limitations under the License.
 package priorities
 
 import (
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
@@ -32,7 +32,7 @@ func NormalizeReduce(maxPriority int, reverse bool) PriorityReduceFunction {
 		_ map[string]*schedulernodeinfo.NodeInfo,
 		result schedulerapi.HostPriorityList) error {
 
-		var maxCount int
+		var maxCount float64
 		for i := range result {
 			if result[i].Score > maxCount {
 				maxCount = result[i].Score
@@ -42,7 +42,7 @@ func NormalizeReduce(maxPriority int, reverse bool) PriorityReduceFunction {
 		if maxCount == 0 {
 			if reverse {
 				for i := range result {
-					result[i].Score = maxPriority
+					result[i].Score = float64(maxPriority)
 				}
 			}
 			return nil
@@ -51,9 +51,9 @@ func NormalizeReduce(maxPriority int, reverse bool) PriorityReduceFunction {
 		for i := range result {
 			score := result[i].Score
 
-			score = maxPriority * score / maxCount
+			score = float64(maxPriority) * score / maxCount
 			if reverse {
-				score = maxPriority - score
+				score = float64(maxPriority) - score
 			}
 
 			result[i].Score = score
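
The hunk above moves NormalizeReduce to float64 arithmetic end to end. A self-contained sketch of the same normalization logic, with a local stand-in for schedulerapi.HostPriority rather than the real type:

package main

import "fmt"

type HostPriority struct {
	Host  string
	Score float64
}

// normalizeReduce scales every score into [0, maxPriority]; with reverse=true
// it flips the scale so the lowest raw score gets the highest priority.
func normalizeReduce(maxPriority float64, reverse bool, result []HostPriority) {
	var maxCount float64
	for i := range result {
		if result[i].Score > maxCount {
			maxCount = result[i].Score
		}
	}
	if maxCount == 0 {
		if reverse {
			for i := range result {
				result[i].Score = maxPriority
			}
		}
		return
	}
	for i := range result {
		score := maxPriority * result[i].Score / maxCount
		if reverse {
			score = maxPriority - score
		}
		result[i].Score = score
	}
}

func main() {
	nodes := []HostPriority{{"a", 2}, {"b", 8}, {"c", 0}}
	normalizeReduce(10, false, nodes)
	fmt.Println(nodes) // [{a 2.5} {b 10} {c 0}]
}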

+ 5 - 5
kubernetes-v1.15.4/pkg/scheduler/algorithm/priorities/resource_allocation.go

@@ -92,17 +92,17 @@ func (r *ResourceAllocationPriority) PriorityMap(
 
 	return schedulerapi.HostPriority{
 		Host:  node.Name,
-		Score: int(score),
+		Score: float64(score),
 	}, nil
 }
 
-func (r *CustomAllocationPriority) CustomPriorityMap(
+func (r *CustomAllocationPriority) PriorityMap(
 	pod *v1.Pod,
 	meta interface{},
-	nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.CustomHostPriority, error) {
+	nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
 	node := nodeInfo.Node()
 	if node == nil {
-		return schedulerapi.CustomHostPriority{}, fmt.Errorf("node not found")
+		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
 	}
 	//allocatable := nodeInfo.AllocatableResource()
 
@@ -148,7 +148,7 @@ func (r *CustomAllocationPriority) CustomPriorityMap(
 	// }
 
 	// TODO create a custom HostPriority
-	return schedulerapi.CustomHostPriority{
+	return schedulerapi.HostPriority{
 		Host:  node.Name,
 		Score: float64(score),
 	}, nil

+ 2 - 2
kubernetes-v1.15.4/pkg/scheduler/algorithm/priorities/resource_limits.go

@@ -19,7 +19,7 @@ package priorities
 import (
 	"fmt"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 
@@ -73,7 +73,7 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule
 
 	return schedulerapi.HostPriority{
 		Host:  node.Name,
-		Score: score,
+		Score: float64(score),
 	}, nil
 }
 

+ 13 - 13
kubernetes-v1.15.4/pkg/scheduler/algorithm/priorities/selector_spreading.go

@@ -19,7 +19,7 @@ package priorities
 import (
 	"fmt"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
@@ -80,7 +80,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
 	if len(selectors) == 0 {
 		return schedulerapi.HostPriority{
 			Host:  node.Name,
-			Score: int(0),
+			Score: float64(0),
 		}, nil
 	}
 
@@ -88,7 +88,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
 
 	return schedulerapi.HostPriority{
 		Host:  node.Name,
-		Score: count,
+		Score: float64(count),
 	}, nil
 }
 
@@ -102,14 +102,14 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
 	maxCountByNodeName := int(0)
 
 	for i := range result {
-		if result[i].Score > maxCountByNodeName {
-			maxCountByNodeName = result[i].Score
+		if int(result[i].Score) > maxCountByNodeName {
+			maxCountByNodeName = int(result[i].Score)
 		}
 		zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
 		if zoneID == "" {
 			continue
 		}
-		countsByZone[zoneID] += result[i].Score
+		countsByZone[zoneID] += int(result[i].Score)
 	}
 
 	for zoneID := range countsByZone {
@@ -128,7 +128,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
 		// initializing to the default/max node score of maxPriority
 		fScore := MaxPriorityFloat64
 		if maxCountByNodeName > 0 {
-			fScore = MaxPriorityFloat64 * (float64(maxCountByNodeName-result[i].Score) / maxCountByNodeNameFloat64)
+			fScore = MaxPriorityFloat64 * (float64(maxCountByNodeName-int(result[i].Score)) / maxCountByNodeNameFloat64)
 		}
 		// If there is zone information present, incorporate it
 		if haveZones {
@@ -141,7 +141,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
 				fScore = (fScore * (1.0 - zoneWeighting)) + (zoneWeighting * zoneScore)
 			}
 		}
-		result[i].Score = int(fScore)
+		result[i].Score = fScore
 		if klog.V(10) {
 			klog.Infof(
 				"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Host, int(fScore),
@@ -232,7 +232,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta
 
 	return schedulerapi.HostPriority{
 		Host:  node.Name,
-		Score: score,
+		Score: float64(score),
 	}, nil
 }
 
@@ -246,13 +246,13 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, m
 	maxPriorityFloat64 := float64(schedulerapi.MaxPriority)
 
 	for _, hostPriority := range result {
-		numServicePods += hostPriority.Score
+		numServicePods += int(hostPriority.Score)
 		if !labels.Set(nodeNameToInfo[hostPriority.Host].Node().Labels).Has(s.label) {
 			continue
 		}
 		label = labels.Set(nodeNameToInfo[hostPriority.Host].Node().Labels).Get(s.label)
 		labelNodesStatus[hostPriority.Host] = label
-		podCounts[label] += hostPriority.Score
+		podCounts[label] += int(hostPriority.Score)
 	}
 
 	//score int - scale of 0-maxPriority
@@ -261,7 +261,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, m
 		label, ok := labelNodesStatus[hostPriority.Host]
 		if !ok {
 			result[i].Host = hostPriority.Host
-			result[i].Score = int(0)
+			result[i].Score = 0
 			continue
 		}
 		// initializing to the default/max node score of maxPriority
@@ -270,7 +270,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, m
 			fScore = maxPriorityFloat64 * (float64(numServicePods-podCounts[label]) / float64(numServicePods))
 		}
 		result[i].Host = hostPriority.Host
-		result[i].Score = int(fScore)
+		result[i].Score = fScore
 	}
 
 	return nil
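
The spreading reduce functions above keep integer bookkeeping for pod counts but now read and write the float64 Score field, converting with int(...) on the way in and storing the final fScore without truncation. A standalone sketch of that int/float boundary (stand-in types; zone weighting omitted):

package main

import "fmt"

type HostPriority struct {
	Host  string
	Score float64
}

const maxPriorityFloat64 = 10.0

// spreadReduce mirrors the boundary: counts stay integers,
// the final normalized score is written back as a float64.
func spreadReduce(result []HostPriority) {
	maxCount := 0
	for i := range result {
		if int(result[i].Score) > maxCount {
			maxCount = int(result[i].Score)
		}
	}
	for i := range result {
		fScore := maxPriorityFloat64
		if maxCount > 0 {
			fScore = maxPriorityFloat64 * float64(maxCount-int(result[i].Score)) / float64(maxCount)
		}
		result[i].Score = fScore // no int(fScore) truncation anymore
	}
}

func main() {
	result := []HostPriority{{"a", 3}, {"b", 1}, {"c", 0}}
	spreadReduce(result)
	fmt.Println(result) // fewer matching pods on a node => higher score
}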

+ 2 - 2
kubernetes-v1.15.4/pkg/scheduler/algorithm/priorities/taint_toleration.go

@@ -19,7 +19,7 @@ package priorities
 import (
 	"fmt"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
@@ -68,7 +68,7 @@ func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *
 
 	return schedulerapi.HostPriority{
 		Host:  node.Name,
-		Score: countIntolerableTaintsPreferNoSchedule(node.Spec.Taints, tolerationsPreferNoSchedule),
+		Score: float64(countIntolerableTaintsPreferNoSchedule(node.Spec.Taints, tolerationsPreferNoSchedule)),
 	}, nil
 }
 

+ 13 - 11
kubernetes-v1.15.4/pkg/scheduler/algorithm/priorities/types.go

@@ -26,14 +26,16 @@ import (
 // TODO: Figure out the exact API of this method.
 // TODO: Change interface{} to a specific type.
 type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error)
-type CustomPriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.CustomHostPriority, error)
+
+//type CustomPriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.CustomHostPriority, error)
 
 // PriorityReduceFunction is a function that aggregated per-node results and computes
 // final scores for all nodes.
 // TODO: Figure out the exact API of this method.
 // TODO: Change interface{} to a specific type.
 type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error
-type CustomPriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.CustomHostPriorityList) error
+
+//type CustomPriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.CustomHostPriorityList) error
 
 // PriorityMetadataProducer is a function that computes metadata for a given pod. This
 // is now used for only for priority functions. For predicates please use PredicateMetadataProducer.
@@ -44,20 +46,20 @@ type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*sched
 // Use Map-Reduce pattern for priority functions.
 type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error)
 
-type CustomPriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.CustomHostPriorityList, error)
+//type CustomPriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.CustomHostPriorityList, error)
 
 // PriorityConfig is a config used for a priority function.
 type PriorityConfig struct {
-	Name         string
-	Map          PriorityMapFunction
-	CustomMap    CustomPriorityMapFunction
-	Reduce       PriorityReduceFunction
-	CustomReduce CustomPriorityReduceFunction
+	Name string
+	Map  PriorityMapFunction
+	//CustomMap    CustomPriorityMapFunction
+	Reduce PriorityReduceFunction
+	//CustomReduce CustomPriorityReduceFunction
 	// TODO: Remove it after migrating all functions to
 	// Map-Reduce pattern.
-	Function       PriorityFunction
-	CustomFunction CustomPriorityFunction
-	Weight         int
+	Function PriorityFunction
+	//CustomFunction CustomPriorityFunction
+	Weight int
 }
 
 // EmptyPriorityMetadataProducer returns a no-op PriorityMetadataProducer type.
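
With the Custom* function types commented out, PriorityConfig shrinks back to a single Map/Reduce/Function/Weight set. A toy illustration of the trimmed config, using simplified local signatures in place of the scheduler's (the legacy Function field is left out of this sketch):

package main

import "fmt"

type HostPriority struct {
	Host  string
	Score float64
}

// Simplified signatures standing in for PriorityMapFunction / PriorityReduceFunction.
type PriorityMapFunction func(node string) (HostPriority, error)
type PriorityReduceFunction func(result []HostPriority) error

// PriorityConfig after the patch: one Map, one optional Reduce, one Weight.
type PriorityConfig struct {
	Name   string
	Map    PriorityMapFunction
	Reduce PriorityReduceFunction
	Weight int
}

func main() {
	cfg := PriorityConfig{
		Name: "CustomRequestedPriority",
		Map: func(node string) (HostPriority, error) {
			return HostPriority{Host: node, Score: 4.2}, nil
		},
		Reduce: nil, // reduce is optional
		Weight: 1000000,
	}
	hp, _ := cfg.Map("node-1")
	fmt.Println(cfg.Name, hp.Host, hp.Score*float64(cfg.Weight))
}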

+ 1 - 1
kubernetes-v1.15.4/pkg/scheduler/algorithm/scheduler_interface.go

@@ -40,7 +40,7 @@ type SchedulerExtender interface {
 	// are used to compute the weighted score for an extender. The weighted scores are added to
 	// the scores computed  by Kubernetes scheduler. The total scores are used to do the host selection.
 	Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error)
-	CustomPrioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *schedulerapi.CustomHostPriorityList, weight int, err error)
+	//CustomPrioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *schedulerapi.CustomHostPriorityList, weight int, err error)
 	// Bind delegates the action of binding a pod to a node to the extender.
 	Bind(binding *v1.Binding) error
 

+ 1 - 1
kubernetes-v1.15.4/pkg/scheduler/algorithmprovider/defaults/register_priorities.go

@@ -80,7 +80,7 @@ func init() {
 	factory.RegisterPriorityFunction2(priorities.LeastRequestedPriority, priorities.LeastRequestedPriorityMap, nil, 1)
 
 	// Prioritize nodes by custom function from custom metrics
-	factory.CustomRegisterPriorityFunction2(priorities.CustomRequestedPriority, priorities.CustomRequestedPriorityMap, nil, 1000000)
+	factory.RegisterPriorityFunction2(priorities.CustomRequestedPriority, priorities.CustomRequestedPriorityMap, nil, 1000000)
 
 	// Prioritizes nodes to help achieve balanced resource usage
 	factory.RegisterPriorityFunction2(priorities.BalancedResourceAllocation, priorities.BalancedResourceAllocationMap, nil, 1)
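
The custom priority above is now registered through the stock RegisterPriorityFunction2 hook, but with weight 1000000 while the other priorities keep weight 1; since map/reduce scores are normalized to the 0..MaxPriority (10) range, that weight effectively lets the custom score dominate the weighted sum. A toy arithmetic check with hypothetical per-node scores, no scheduler code:

package main

import "fmt"

func main() {
	// Hypothetical per-node scores on the scheduler's 0..10 scale.
	customScore, customWeight := 6.3, 1000000.0
	leastRequested, balanced := 8.0, 5.0 // registered with weight 1 each

	total := customScore*customWeight + leastRequested*1 + balanced*1
	fmt.Printf("total = %.1f (custom term %.1f, others %.1f)\n",
		total, customScore*customWeight, leastRequested+balanced)
	// The custom term dwarfs the others, so node ordering tracks the custom scorer.
}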

+ 24 - 23
kubernetes-v1.15.4/pkg/scheduler/api/types.go

@@ -332,16 +332,17 @@ type HostPriority struct {
 	// Name of the host
 	Host string
 	// Score associated with the host
-	Score int
-}
-
-type CustomHostPriority struct {
-	// Name of the host
-	Host string
-	// Score associated with the host (float)
+	//Score int
 	Score float64
 }
 
+// type CustomHostPriority struct {
+// 	// Name of the host
+// 	Host string
+// 	// Score associated with the host (float)
+// 	Score float64
+// }
+
 // HostPriorityList declares a []HostPriority type.
 type HostPriorityList []HostPriority
 
@@ -361,19 +362,19 @@ func (h HostPriorityList) Swap(i, j int) {
 }
 
 // CustomHostPriorityList declares a []CustomHostPriority type.
-type CustomHostPriorityList []CustomHostPriority
-
-func (h CustomHostPriorityList) Len() int {
-	return len(h)
-}
-
-func (h CustomHostPriorityList) Less(i, j int) bool {
-	if h[i].Score == h[j].Score {
-		return h[i].Host < h[j].Host
-	}
-	return h[i].Score < h[j].Score
-}
-
-func (h CustomHostPriorityList) Swap(i, j int) {
-	h[i], h[j] = h[j], h[i]
-}
+// type CustomHostPriorityList []CustomHostPriority
+
+// func (h CustomHostPriorityList) Len() int {
+// 	return len(h)
+// }
+
+// func (h CustomHostPriorityList) Less(i, j int) bool {
+// 	if h[i].Score == h[j].Score {
+// 		return h[i].Host < h[j].Host
+// 	}
+// 	return h[i].Score < h[j].Score
+// }
+
+// func (h CustomHostPriorityList) Swap(i, j int) {
+// 	h[i], h[j] = h[j], h[i]
+// }
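
HostPriority.Score becomes float64 and the parallel CustomHostPriority/CustomHostPriorityList types are retired; HostPriorityList keeps the same sort semantics, with ties broken by host name. A standalone sketch mirroring that ordering with local stand-in types:

package main

import (
	"fmt"
	"sort"
)

type HostPriority struct {
	Host  string
	Score float64 // was int before this commit
}

type HostPriorityList []HostPriority

func (h HostPriorityList) Len() int      { return len(h) }
func (h HostPriorityList) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h HostPriorityList) Less(i, j int) bool {
	if h[i].Score == h[j].Score {
		return h[i].Host < h[j].Host // tie-break on host name
	}
	return h[i].Score < h[j].Score
}

func main() {
	l := HostPriorityList{{"b", 7.5}, {"a", 7.5}, {"c", 9.1}}
	sort.Sort(l)
	fmt.Println(l) // [{a 7.5} {b 7.5} {c 9.1}]
}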

+ 40 - 40
kubernetes-v1.15.4/pkg/scheduler/core/extender.go

@@ -361,46 +361,46 @@ func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.
 // ---------START OF CUSTOMIZATION------------------------------------------------------------------
 //------------------------------------------------------------------------------------------------
 //------------------------------------------------------------------------------------------------
-func (h *HTTPExtender) CustomPrioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.CustomHostPriorityList, int, error) {
-	var (
-		result    schedulerapi.CustomHostPriorityList
-		nodeList  *v1.NodeList
-		nodeNames *[]string
-		args      *schedulerapi.ExtenderArgs
-	)
-
-	if h.prioritizeVerb == "" {
-		result := schedulerapi.CustomHostPriorityList{}
-		for _, node := range nodes {
-			result = append(result, schedulerapi.CustomHostPriority{Host: node.Name, Score: 0})
-		}
-		return &result, 0, nil
-	}
-
-	if h.nodeCacheCapable {
-		nodeNameSlice := make([]string, 0, len(nodes))
-		for _, node := range nodes {
-			nodeNameSlice = append(nodeNameSlice, node.Name)
-		}
-		nodeNames = &nodeNameSlice
-	} else {
-		nodeList = &v1.NodeList{}
-		for _, node := range nodes {
-			nodeList.Items = append(nodeList.Items, *node)
-		}
-	}
-
-	args = &schedulerapi.ExtenderArgs{
-		Pod:       pod,
-		Nodes:     nodeList,
-		NodeNames: nodeNames,
-	}
-
-	if err := h.send(h.prioritizeVerb, args, &result); err != nil {
-		return nil, 0, err
-	}
-	return &result, h.weight, nil
-}
+// func (h *HTTPExtender) CustomPrioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.CustomHostPriorityList, int, error) {
+// 	var (
+// 		result    schedulerapi.CustomHostPriorityList
+// 		nodeList  *v1.NodeList
+// 		nodeNames *[]string
+// 		args      *schedulerapi.ExtenderArgs
+// 	)
+
+// 	if h.prioritizeVerb == "" {
+// 		result := schedulerapi.CustomHostPriorityList{}
+// 		for _, node := range nodes {
+// 			result = append(result, schedulerapi.CustomHostPriority{Host: node.Name, Score: 0})
+// 		}
+// 		return &result, 0, nil
+// 	}
+
+// 	if h.nodeCacheCapable {
+// 		nodeNameSlice := make([]string, 0, len(nodes))
+// 		for _, node := range nodes {
+// 			nodeNameSlice = append(nodeNameSlice, node.Name)
+// 		}
+// 		nodeNames = &nodeNameSlice
+// 	} else {
+// 		nodeList = &v1.NodeList{}
+// 		for _, node := range nodes {
+// 			nodeList.Items = append(nodeList.Items, *node)
+// 		}
+// 	}
+
+// 	args = &schedulerapi.ExtenderArgs{
+// 		Pod:       pod,
+// 		Nodes:     nodeList,
+// 		NodeNames: nodeNames,
+// 	}
+
+// 	if err := h.send(h.prioritizeVerb, args, &result); err != nil {
+// 		return nil, 0, err
+// 	}
+// 	return &result, h.weight, nil
+// }
 
 //------------------------------------------------------------------------------------------------
 //------------------------------------------------------------------------------------------------

+ 167 - 144
kubernetes-v1.15.4/pkg/scheduler/core/generic_scheduler.go

@@ -234,8 +234,8 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister
 	}
 
 	metaPrioritiesInterface := g.priorityMetaProducer(pod, g.nodeInfoSnapshot.NodeInfoMap)
-	priorityList, err := PrioritizeNodes(pod, g.nodeInfoSnapshot.NodeInfoMap, metaPrioritiesInterface, g.prioritizers, filteredNodes, g.extenders)
-	//customPriotrityList, err := CustomPrioritizeNodes(pod, g.nodeInfoSnapshot.NodeInfoMap, metaPrioritiesInterface, g.prioritizers, filteredNodes, g.extenders)
+	//priorityList, err := PrioritizeNodes(pod, g.nodeInfoSnapshot.NodeInfoMap, metaPrioritiesInterface, g.prioritizers, filteredNodes, g.extenders)
+	priorityList, err := CustomPrioritizeNodes(pod, g.nodeInfoSnapshot.NodeInfoMap, metaPrioritiesInterface, g.prioritizers, filteredNodes, g.extenders)
 	if err != nil {
 		return result, err
 	}
@@ -296,6 +296,29 @@ func (g *genericScheduler) selectHost(priorityList schedulerapi.HostPriorityList
 	return priorityList[maxScores[ix]].Host, nil
 }
 
+//------------------------------------------------------------------------------------------------
+//------------------------------------------------------------------------------------------------
+// ---------START OF CUSTOMIZATION----------------------------------------------------------------
+//------------------------------------------------------------------------------------------------
+//------------------------------------------------------------------------------------------------
+// func (g *genericScheduler) customSelectHost(priorityList schedulerapi.CustomHostPriorityList) (string, error) {
+// 	if len(priorityList) == 0 {
+// 		return "", fmt.Errorf("empty priorityList")
+// 	}
+
+// 	maxScores := findMaxScores(priorityList)
+// 	ix := int(g.lastNodeIndex % uint64(len(maxScores)))
+// 	g.lastNodeIndex++
+
+// 	return priorityList[maxScores[ix]].Host, nil
+// }
+
+//------------------------------------------------------------------------------------------------
+//------------------------------------------------------------------------------------------------
+// ---------END OF CUSTOMIZATION----------------------------------------------------------------
+//------------------------------------------------------------------------------------------------
+//------------------------------------------------------------------------------------------------
+
 // preempt finds nodes with pods that can be preempted to make room for "pod" to
 // schedule. It chooses one of the nodes and preempts the pods on the node and
 // returns 1) the node, 2) the list of preempted pods if such a node is found,
@@ -810,148 +833,148 @@ func PrioritizeNodes(
 //------------------------------------------------------------------
 //-------------------START-CUSTOM-BY-IWITA---------------------------------------------------------
 //------------------------------------------------------------------
-func CustomPrioritizeNodes(
-	pod *v1.Pod,
-	nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
-	meta interface{},
-	priorityConfigs []priorities.PriorityConfig,
-	nodes []*v1.Node,
-	extenders []algorithm.SchedulerExtender,
-) (schedulerapi.CustomHostPriorityList, error) {
-	// If no priority configs are provided, then the EqualPriority function is applied
-	// This is required to generate the priority list in the required format
-	// if len(priorityConfigs) == 0 && len(extenders) == 0 {
-	// 	result := make(schedulerapi.CustomHostPriorityList, 0, len(nodes))
-	// 	for i := range nodes {
-	// 		// initializes nodes with Score = 1
-	// 		hostPriority, err := EqualPriorityMap(pod, meta, nodeNameToInfo[nodes[i].Name])
-	// 		if err != nil {
-	// 			return nil, err
-	// 		}
-	// 		result = append(result, hostPriority)
-	// 	}
-	// 	return result, nil
-	// }
-
-	var (
-		mu   = sync.Mutex{}
-		wg   = sync.WaitGroup{}
-		errs []error
-	)
-	appendError := func(err error) {
-		mu.Lock()
-		defer mu.Unlock()
-		errs = append(errs, err)
-	}
-
-	results := make([]schedulerapi.CustomHostPriorityList, len(priorityConfigs), len(priorityConfigs))
-
-	// DEPRECATED: we can remove this when all priorityConfigs implement the
-	// Map-Reduce pattern.
-	for i := range priorityConfigs {
-		if priorityConfigs[i].CustomFunction != nil {
-			wg.Add(1)
-			go func(index int) {
-				defer wg.Done()
-				var err error
-				results[index], err = priorityConfigs[index].CustomFunction(pod, nodeNameToInfo, nodes)
-				if err != nil {
-					appendError(err)
-				}
-			}(i)
-		} else {
-			results[i] = make(schedulerapi.CustomHostPriorityList, len(nodes))
-		}
-	}
-
-	workqueue.ParallelizeUntil(context.TODO(), 16, len(nodes), func(index int) {
-		nodeInfo := nodeNameToInfo[nodes[index].Name]
-		for i := range priorityConfigs {
-			if priorityConfigs[i].Function != nil {
-				continue
-			}
-
-			var err error
-			results[i][index], err = priorityConfigs[i].CustomMap(pod, meta, nodeInfo)
-			if err != nil {
-				appendError(err)
-				results[i][index].Host = nodes[index].Name
-			}
-		}
-	})
-
-	for i := range priorityConfigs {
-		if priorityConfigs[i].Reduce == nil {
-			continue
-		}
-		wg.Add(1)
-		go func(index int) {
-			defer wg.Done()
-			if err := priorityConfigs[index].CustomReduce(pod, meta, nodeNameToInfo, results[index]); err != nil {
-				appendError(err)
-			}
-			if klog.V(10) {
-				for _, hostPriority := range results[index] {
-					klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, priorityConfigs[index].Name, hostPriority.Score)
-				}
-			}
-		}(i)
-	}
-	// Wait for all computations to be finished.
-	wg.Wait()
-	if len(errs) != 0 {
-		return schedulerapi.CustomHostPriorityList{}, errors.NewAggregate(errs)
-	}
-
-	// Summarize all scores.
-	result := make(schedulerapi.CustomHostPriorityList, 0, len(nodes))
-
-	for i := range nodes {
-		result = append(result, schedulerapi.CustomHostPriority{Host: nodes[i].Name, Score: 0})
-		for j := range priorityConfigs {
-			result[i].Score += results[j][i].Score * float64(priorityConfigs[j].Weight)
-		}
-	}
-
-	if len(extenders) != 0 && nodes != nil {
-		combinedScores := make(map[string]float64, len(nodeNameToInfo))
-		for i := range extenders {
-			if !extenders[i].IsInterested(pod) {
-				continue
-			}
-			wg.Add(1)
-			go func(extIndex int) {
-				defer wg.Done()
-				prioritizedList, weight, err := extenders[extIndex].CustomPrioritize(pod, nodes)
-				if err != nil {
-					// Prioritization errors from extender can be ignored, let k8s/other extenders determine the priorities
-					return
-				}
-				mu.Lock()
-				for i := range *prioritizedList {
-					host, score := (*prioritizedList)[i].Host, (*prioritizedList)[i].Score
-					if klog.V(10) {
-						klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, extenders[extIndex].Name(), score)
-					}
-					combinedScores[host] += score * float64(weight)
-				}
-				mu.Unlock()
-			}(i)
-		}
-		// wait for all go routines to finish
-		wg.Wait()
-		for i := range result {
-			result[i].Score += combinedScores[result[i].Host]
-		}
-	}
-
-	if klog.V(10) {
-		for i := range result {
-			klog.Infof("Host %s => Score %d", result[i].Host, result[i].Score)
-		}
-	}
-	return result, nil
-}
+// func CustomPrioritizeNodes(
+// 	pod *v1.Pod,
+// 	nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
+// 	meta interface{},
+// 	priorityConfigs []priorities.PriorityConfig,
+// 	nodes []*v1.Node,
+// 	extenders []algorithm.SchedulerExtender,
+// ) (schedulerapi.CustomHostPriorityList, error) {
+// 	// If no priority configs are provided, then the EqualPriority function is applied
+// 	// This is required to generate the priority list in the required format
+// 	// if len(priorityConfigs) == 0 && len(extenders) == 0 {
+// 	// 	result := make(schedulerapi.CustomHostPriorityList, 0, len(nodes))
+// 	// 	for i := range nodes {
+// 	// 		// initializes nodes with Score = 1
+// 	// 		hostPriority, err := EqualPriorityMap(pod, meta, nodeNameToInfo[nodes[i].Name])
+// 	// 		if err != nil {
+// 	// 			return nil, err
+// 	// 		}
+// 	// 		result = append(result, hostPriority)
+// 	// 	}
+// 	// 	return result, nil
+// 	// }
+
+// 	var (
+// 		mu   = sync.Mutex{}
+// 		wg   = sync.WaitGroup{}
+// 		errs []error
+// 	)
+// 	appendError := func(err error) {
+// 		mu.Lock()
+// 		defer mu.Unlock()
+// 		errs = append(errs, err)
+// 	}
+
+// 	results := make([]schedulerapi.CustomHostPriorityList, len(priorityConfigs), len(priorityConfigs))
+
+// 	// DEPRECATED: we can remove this when all priorityConfigs implement the
+// 	// Map-Reduce pattern.
+// 	for i := range priorityConfigs {
+// 		if priorityConfigs[i].CustomFunction != nil {
+// 			wg.Add(1)
+// 			go func(index int) {
+// 				defer wg.Done()
+// 				var err error
+// 				results[index], err = priorityConfigs[index].CustomFunction(pod, nodeNameToInfo, nodes)
+// 				if err != nil {
+// 					appendError(err)
+// 				}
+// 			}(i)
+// 		} else {
+// 			results[i] = make(schedulerapi.CustomHostPriorityList, len(nodes))
+// 		}
+// 	}
+
+// 	workqueue.ParallelizeUntil(context.TODO(), 16, len(nodes), func(index int) {
+// 		nodeInfo := nodeNameToInfo[nodes[index].Name]
+// 		for i := range priorityConfigs {
+// 			if priorityConfigs[i].Function != nil {
+// 				continue
+// 			}
+
+// 			var err error
+// 			results[i][index], err = priorityConfigs[i].CustomMap(pod, meta, nodeInfo)
+// 			if err != nil {
+// 				appendError(err)
+// 				results[i][index].Host = nodes[index].Name
+// 			}
+// 		}
+// 	})
+
+// 	for i := range priorityConfigs {
+// 		if priorityConfigs[i].Reduce == nil {
+// 			continue
+// 		}
+// 		wg.Add(1)
+// 		go func(index int) {
+// 			defer wg.Done()
+// 			if err := priorityConfigs[index].CustomReduce(pod, meta, nodeNameToInfo, results[index]); err != nil {
+// 				appendError(err)
+// 			}
+// 			if klog.V(10) {
+// 				for _, hostPriority := range results[index] {
+// 					klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, priorityConfigs[index].Name, hostPriority.Score)
+// 				}
+// 			}
+// 		}(i)
+// 	}
+// 	// Wait for all computations to be finished.
+// 	wg.Wait()
+// 	if len(errs) != 0 {
+// 		return schedulerapi.CustomHostPriorityList{}, errors.NewAggregate(errs)
+// 	}
+
+// 	// Summarize all scores.
+// 	result := make(schedulerapi.CustomHostPriorityList, 0, len(nodes))
+
+// 	for i := range nodes {
+// 		result = append(result, schedulerapi.CustomHostPriority{Host: nodes[i].Name, Score: 0})
+// 		for j := range priorityConfigs {
+// 			result[i].Score += results[j][i].Score * float64(priorityConfigs[j].Weight)
+// 		}
+// 	}
+
+// 	if len(extenders) != 0 && nodes != nil {
+// 		combinedScores := make(map[string]float64, len(nodeNameToInfo))
+// 		for i := range extenders {
+// 			if !extenders[i].IsInterested(pod) {
+// 				continue
+// 			}
+// 			wg.Add(1)
+// 			go func(extIndex int) {
+// 				defer wg.Done()
+// 				prioritizedList, weight, err := extenders[extIndex].CustomPrioritize(pod, nodes)
+// 				if err != nil {
+// 					// Prioritization errors from extender can be ignored, let k8s/other extenders determine the priorities
+// 					return
+// 				}
+// 				mu.Lock()
+// 				for i := range *prioritizedList {
+// 					host, score := (*prioritizedList)[i].Host, (*prioritizedList)[i].Score
+// 					if klog.V(10) {
+// 						klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, extenders[extIndex].Name(), score)
+// 					}
+// 					combinedScores[host] += score * float64(weight)
+// 				}
+// 				mu.Unlock()
+// 			}(i)
+// 		}
+// 		// wait for all go routines to finish
+// 		wg.Wait()
+// 		for i := range result {
+// 			result[i].Score += combinedScores[result[i].Host]
+// 		}
+// 	}
+
+// 	if klog.V(10) {
+// 		for i := range result {
+// 			klog.Infof("Host %s => Score %d", result[i].Host, result[i].Score)
+// 		}
+// 	}
+// 	return result, nil
+// }
 
 //------------------------------------------------------------------
 // --------------END-CUSTOM-BY-IWITA--------------------------------
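
Both PrioritizeNodes and the now commented-out CustomPrioritizeNodes boil down to the same aggregation: run each priority's map function per node, then sum score * weight per host. A compact, self-contained sketch of just that step (stand-in types; the real code adds parallel workqueues, priority metadata, reduce functions, and extenders):

package main

import "fmt"

type HostPriority struct {
	Host  string
	Score float64
}

type PriorityConfig struct {
	Name   string
	Map    func(node string) (HostPriority, error)
	Weight int
}

// prioritize runs every Map per node and sums the weighted float64 scores.
func prioritize(configs []PriorityConfig, nodes []string) ([]HostPriority, error) {
	results := make([][]HostPriority, len(configs))
	for i, cfg := range configs {
		results[i] = make([]HostPriority, len(nodes))
		for j, node := range nodes {
			hp, err := cfg.Map(node)
			if err != nil {
				return nil, err
			}
			results[i][j] = hp
		}
	}
	final := make([]HostPriority, 0, len(nodes))
	for j, node := range nodes {
		total := HostPriority{Host: node}
		for i, cfg := range configs {
			total.Score += results[i][j].Score * float64(cfg.Weight)
		}
		final = append(final, total)
	}
	return final, nil
}

func main() {
	configs := []PriorityConfig{
		{Name: "CustomRequestedPriority", Weight: 1000000,
			Map: func(n string) (HostPriority, error) { return HostPriority{n, float64(len(n))}, nil }},
		{Name: "LeastRequestedPriority", Weight: 1,
			Map: func(n string) (HostPriority, error) { return HostPriority{n, 5}, nil }},
	}
	out, _ := prioritize(configs, []string{"node-a", "node-bb"})
	fmt.Println(out)
}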

+ 1 - 1
kubernetes-v1.15.4/pkg/scheduler/factory/plugins.go

@@ -333,7 +333,7 @@ func CustomRegisterPriorityConfigFactory(name string, pcf CustomPriorityConfigFa
 
 //------------------------------------------------------------------------------------------------
 //------------------------------------------------------------------------------------------------
-// ---------START OF CUSTOMIZATION----------------------------------------------------------------
+// ---------END OF CUSTOMIZATION----------------------------------------------------------------
 //------------------------------------------------------------------------------------------------
 //------------------------------------------------------------------------------------------------