migrate scheduler api types to sized integers

Ahmad Diaa 2019-09-03 17:59:24 +02:00
parent b3c4bdea49
commit 801cc549be
22 changed files with 75 additions and 73 deletions
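For context: Go's built-in int is platform-sized (32 bits on 32-bit targets, 64 bits on 64-bit targets), so score and weight arithmetic on plain int has a platform-dependent overflow point. A minimal sketch, not part of this commit, of what the sized types buy:

package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	// strconv.IntSize reports the width of the built-in int type:
	// 32 on 32-bit platforms, 64 on 64-bit platforms.
	fmt.Println("int is", strconv.IntSize, "bits")

	// A weight that fits in 32 bits can still wrap once multiplied
	// by a priority; int64 makes the available range explicit.
	var w32 int32 = math.MaxInt32 / 10
	fmt.Println(w32 * 11) // overflows int32 and wraps negative

	w64 := int64(w32)
	fmt.Println(w64 * 11) // fits comfortably in int64
}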


@@ -176,7 +176,7 @@ func CalculateEvenPodsSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*sch
 		// debugging purpose: print the value for each node
 		// score must be pointer here, otherwise it's always 0
 		if klog.V(10) {
-			defer func(score *int, nodeName string) {
+			defer func(score *int64, nodeName string) {
 				klog.Infof("%v -> %v: EvenPodsSpreadPriority, Score: (%d)", pod.Name, nodeName, *score)
 			}(&result[i].Score, node.Name)
 		}
@@ -190,7 +190,7 @@ func CalculateEvenPodsSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*sch
 			continue
 		}
 		fScore := float64(schedulerapi.MaxPriority) * (float64(total-t.nodeNameToPodCounts[node.Name]) / float64(maxMinDiff))
-		result[i].Score = int(fScore)
+		result[i].Score = int64(fScore)
 	}
 	return result, nil


@@ -55,7 +55,7 @@ func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *scheduler
 	return schedulerapi.HostPriority{
 		Host: node.Name,
-		Score: score,
+		Score: int64(score),
 	}, nil
 }


@@ -236,7 +236,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
 		if maxMinDiff > 0 && pm.counts[node.Name] != nil {
 			fScore = float64(schedulerapi.MaxPriority) * (float64(*pm.counts[node.Name]-minCount) / float64(maxCount-minCount))
 		}
-		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
+		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int64(fScore)})
 		if klog.V(10) {
 			klog.Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore))
 		}


@@ -69,7 +69,7 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s
 	return schedulerapi.HostPriority{
 		Host: node.Name,
-		Score: int(count),
+		Score: int64(count),
 	}, nil
 }


@@ -56,6 +56,6 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta i
 	}
 	return schedulerapi.HostPriority{
 		Host: node.Name,
-		Score: score,
+		Score: int64(score),
 	}, nil
 }


@@ -25,14 +25,14 @@ import (
 // NormalizeReduce generates a PriorityReduceFunction that can normalize the result
 // scores to [0, maxPriority]. If reverse is set to true, it reverses the scores by
 // subtracting it from maxPriority.
-func NormalizeReduce(maxPriority int, reverse bool) PriorityReduceFunction {
+func NormalizeReduce(maxPriority int64, reverse bool) PriorityReduceFunction {
 	return func(
 		_ *v1.Pod,
 		_ interface{},
 		_ map[string]*schedulernodeinfo.NodeInfo,
 		result schedulerapi.HostPriorityList) error {
-		var maxCount int
+		var maxCount int64
 		for i := range result {
 			if result[i].Score > maxCount {
 				maxCount = result[i].Score
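As a reference for the normalization described in the comment above, a simplified standalone sketch over a plain []int64 (the real function operates on schedulerapi.HostPriorityList; the all-zero case is handled here on the assumption that reversed zero scores map to maxPriority):

// normalize scales scores into [0, maxPriority]; with reverse set,
// each scaled score is subtracted from maxPriority.
func normalize(scores []int64, maxPriority int64, reverse bool) {
	var maxCount int64
	for _, s := range scores {
		if s > maxCount {
			maxCount = s
		}
	}
	if maxCount == 0 {
		if reverse {
			for i := range scores {
				scores[i] = maxPriority
			}
		}
		return
	}
	for i := range scores {
		scores[i] = maxPriority * scores[i] / maxCount
		if reverse {
			scores[i] = maxPriority - scores[i]
		}
	}
}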


@@ -92,7 +92,7 @@ func (r *ResourceAllocationPriority) PriorityMap(
 	return schedulerapi.HostPriority{
 		Host: node.Name,
-		Score: int(score),
+		Score: score,
 	}, nil
 }


@@ -54,7 +54,7 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule
 	cpuScore := computeScore(podLimits.MilliCPU, allocatableResources.MilliCPU)
 	memScore := computeScore(podLimits.Memory, allocatableResources.Memory)
-	score := int(0)
+	score := int64(0)
 	if cpuScore == 1 || memScore == 1 {
 		score = 1
 	}


@@ -80,7 +80,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
 	if len(selectors) == 0 {
 		return schedulerapi.HostPriority{
 			Host: node.Name,
-			Score: int(0),
+			Score: 0,
 		}, nil
 	}
@@ -88,7 +88,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
 	return schedulerapi.HostPriority{
 		Host: node.Name,
-		Score: count,
+		Score: int64(count),
 	}, nil
 }
@@ -97,9 +97,9 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
 // where zone information is included on the nodes, it favors nodes
 // in zones with fewer existing matching pods.
 func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error {
-	countsByZone := make(map[string]int, 10)
-	maxCountByZone := int(0)
-	maxCountByNodeName := int(0)
+	countsByZone := make(map[string]int64, 10)
+	maxCountByZone := int64(0)
+	maxCountByNodeName := int64(0)
 	for i := range result {
 		if result[i].Score > maxCountByNodeName {
@@ -141,10 +141,10 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
 				fScore = (fScore * (1.0 - zoneWeighting)) + (zoneWeighting * zoneScore)
 			}
 		}
-		result[i].Score = int(fScore)
+		result[i].Score = int64(fScore)
 		if klog.V(10) {
 			klog.Infof(
-				"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Host, int(fScore),
+				"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Host, int64(fScore),
 			)
 		}
 	}
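The fScore assignment above blends the node-level score with its zone's score before the int64 truncation. A tiny sketch of that arithmetic, assuming the zoneWeighting constant defined elsewhere in this file is 2.0/3.0:

// blend mixes a node score with its zone score; with weighting 2/3,
// blend(9, 3) = 9*(1/3) + 3*(2/3) = 5.
func blend(nodeScore, zoneScore float64) int64 {
	const zoneWeighting = 2.0 / 3.0
	return int64(nodeScore*(1.0-zoneWeighting) + zoneWeighting*zoneScore)
}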
@@ -232,16 +232,16 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta
 	return schedulerapi.HostPriority{
 		Host: node.Name,
-		Score: score,
+		Score: int64(score),
 	}, nil
 }

 // CalculateAntiAffinityPriorityReduce computes each node score with the same value for a particular label.
 // The label to be considered is provided to the struct (ServiceAntiAffinity).
 func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error {
-	var numServicePods int
+	var numServicePods int64
 	var label string
-	podCounts := map[string]int{}
+	podCounts := map[string]int64{}
 	labelNodesStatus := map[string]string{}
 	maxPriorityFloat64 := float64(schedulerapi.MaxPriority)
@@ -261,7 +261,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, m
 		label, ok := labelNodesStatus[hostPriority.Host]
 		if !ok {
 			result[i].Host = hostPriority.Host
-			result[i].Score = int(0)
+			result[i].Score = 0
 			continue
 		}
 		// initializing to the default/max node score of maxPriority
@@ -270,7 +270,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, m
 			fScore = maxPriorityFloat64 * (float64(numServicePods-podCounts[label]) / float64(numServicePods))
 		}
 		result[i].Host = hostPriority.Host
-		result[i].Score = int(fScore)
+		result[i].Score = int64(fScore)
 	}
 	return nil


@@ -68,7 +68,7 @@ func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *
 	return schedulerapi.HostPriority{
 		Host: node.Name,
-		Score: countIntolerableTaintsPreferNoSchedule(node.Spec.Taints, tolerationsPreferNoSchedule),
+		Score: int64(countIntolerableTaintsPreferNoSchedule(node.Spec.Taints, tolerationsPreferNoSchedule)),
 	}, nil
 }


@@ -50,7 +50,7 @@ type PriorityConfig struct {
 	// TODO: Remove it after migrating all functions to
 	// Map-Reduce pattern.
 	Function PriorityFunction
-	Weight int
+	Weight int64
 }

 // EmptyPriorityMetadataProducer returns a no-op PriorityMetadataProducer type.


@@ -39,7 +39,7 @@ type SchedulerExtender interface {
 	// Prioritize based on extender-implemented priority functions. The returned scores & weight
 	// are used to compute the weighted score for an extender. The weighted scores are added to
 	// the scores computed by Kubernetes scheduler. The total scores are used to do the host selection.
-	Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error)
+	Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int64, err error)
 	// Bind delegates the action of binding a pod to a node to the extender.
 	Bind(binding *v1.Binding) error


@@ -17,6 +17,7 @@ limitations under the License.
 package api

 import (
+	"math"
 	"time"

 	v1 "k8s.io/api/core/v1"
@@ -30,11 +31,11 @@ const (
 	// MaxInt defines the max signed int value.
 	MaxInt = int(MaxUint >> 1)
 	// MaxTotalPriority defines the max total priority value.
-	MaxTotalPriority = MaxInt
+	MaxTotalPriority = int64(math.MaxInt64)
 	// MaxPriority defines the max priority value.
 	MaxPriority = 10
 	// MaxWeight defines the max weight value.
-	MaxWeight = MaxInt / MaxPriority
+	MaxWeight = int64(math.MaxInt64 / MaxPriority)
 	// DefaultPercentageOfNodesToScore defines the percentage of nodes of all nodes
 	// that once found feasible, the scheduler stops looking for more nodes.
 	DefaultPercentageOfNodesToScore = 50
@@ -86,7 +87,7 @@ type PriorityPolicy struct {
 	Name string
 	// The numeric multiplier for the node scores that the priority function generates
 	// The weight should be a positive integer
-	Weight int
+	Weight int64
 	// Holds the parameters to configure the given priority function
 	Argument *PriorityArgument
 }
@@ -157,9 +158,9 @@ type RequestedToCapacityRatioArguments struct {
 // UtilizationShapePoint represents single point of priority function shape
 type UtilizationShapePoint struct {
 	// Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100.
-	Utilization int
+	Utilization int32
 	// Score assigned to given utilization (y axis). Valid values are 0 to 10.
-	Score int
+	Score int32
 }
@@ -167,7 +168,7 @@ type ResourceSpec struct {
 	// Name of the resource to be managed by RequestedToCapacityRatio function.
 	Name v1.ResourceName
 	// Weight of the resource.
-	Weight int
+	Weight int64
 }
@@ -220,7 +221,7 @@ type ExtenderConfig struct {
 	PrioritizeVerb string
 	// The numeric multiplier for the node scores that the prioritize call generates.
 	// The weight should be a positive integer
-	Weight int
+	Weight int64
 	// Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender.
 	// If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender
 	// can implement this function.
@@ -271,7 +272,7 @@ type ExtenderPreemptionArgs struct {
 // numPDBViolations: the count of violations of PodDisruptionBudget
 type Victims struct {
 	Pods []*v1.Pod
-	NumPDBViolations int
+	NumPDBViolations int64
 }
@@ -285,7 +286,7 @@ type MetaPod struct {
 // numPDBViolations: the count of violations of PodDisruptionBudget
 type MetaVictims struct {
 	Pods []*MetaPod
-	NumPDBViolations int
+	NumPDBViolations int64
 }
@@ -341,7 +342,7 @@ type HostPriority struct {
 	// Name of the host
 	Host string
 	// Score associated with the host
-	Score int
+	Score int64
 }

 // HostPriorityList declares a []HostPriority type.
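The reworked constants preserve the old invariant in 64-bit terms: the largest legal weight times the largest priority still fits in MaxTotalPriority, so a single priority config cannot overflow on its own. A quick standalone check of that relationship (illustration only):

package main

import (
	"fmt"
	"math"
)

const (
	MaxPriority      = 10
	MaxTotalPriority = int64(math.MaxInt64)
	MaxWeight        = int64(math.MaxInt64 / MaxPriority)
)

func main() {
	// MaxWeight*MaxPriority == 9223372036854775800, which is within
	// MaxTotalPriority (9223372036854775807).
	fmt.Println(MaxWeight*MaxPriority <= MaxTotalPriority) // true
}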


@@ -39,7 +39,7 @@ type Policy struct {
 	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
 	// corresponding to every RequiredDuringScheduling affinity rule.
 	// HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 1-100.
-	HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"`
+	HardPodAffinitySymmetricWeight int32 `json:"hardPodAffinitySymmetricWeight"`

 	// When AlwaysCheckAllPredicates is set to true, scheduler checks all
 	// the configured predicates even after one or more of them fails.
@@ -66,7 +66,7 @@ type PriorityPolicy struct {
 	Name string `json:"name"`
 	// The numeric multiplier for the node scores that the priority function generates
 	// The weight should be non-zero and can be a positive or a negative integer
-	Weight int `json:"weight"`
+	Weight int64 `json:"weight"`
 	// Holds the parameters to configure the given priority function
 	Argument *PriorityArgument `json:"argument"`
 }
@@ -137,9 +137,9 @@ type RequestedToCapacityRatioArguments struct {
 // UtilizationShapePoint represents single point of priority function shape.
 type UtilizationShapePoint struct {
 	// Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100.
-	Utilization int `json:"utilization"`
+	Utilization int32 `json:"utilization"`
 	// Score assigned to given utilization (y axis). Valid values are 0 to 10.
-	Score int `json:"score"`
+	Score int32 `json:"score"`
 }

 // ResourceSpec represents single resource and weight for bin packing of priority RequestedToCapacityRatioArguments.
@@ -147,7 +147,7 @@ type ResourceSpec struct {
 	// Name of the resource to be managed by RequestedToCapacityRatio function.
 	Name apiv1.ResourceName `json:"name,casttype=ResourceName"`
 	// Weight of the resource.
-	Weight int `json:"weight,omitempty"`
+	Weight int64 `json:"weight,omitempty"`
 }
@@ -200,7 +200,7 @@ type ExtenderConfig struct {
 	PrioritizeVerb string `json:"prioritizeVerb,omitempty"`
 	// The numeric multiplier for the node scores that the prioritize call generates.
 	// The weight should be a positive integer
-	Weight int `json:"weight,omitempty"`
+	Weight int64 `json:"weight,omitempty"`
 	// Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender.
 	// If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender
 	// can implement this function.
@@ -276,7 +276,7 @@ type ExtenderPreemptionArgs struct {
 // numPDBViolations: the count of violations of PodDisruptionBudget
 type Victims struct {
 	Pods []*apiv1.Pod `json:"pods"`
-	NumPDBViolations int `json:"numPDBViolations"`
+	NumPDBViolations int64 `json:"numPDBViolations"`
 }
@@ -290,7 +290,7 @@ type MetaPod struct {
 // numPDBViolations: the count of violations of PodDisruptionBudget
 type MetaVictims struct {
 	Pods []*MetaPod `json:"pods"`
-	NumPDBViolations int `json:"numPDBViolations"`
+	NumPDBViolations int64 `json:"numPDBViolations"`
 }

 // FailedNodesMap represents the filtered out nodes, with node names and failure messages
@@ -333,7 +333,7 @@ type HostPriority struct {
 	// Name of the host
 	Host string `json:"host"`
 	// Score associated with the host
-	Score int `json:"score"`
+	Score int64 `json:"score"`
 }

 // HostPriorityList declares a []HostPriority type.


@@ -45,7 +45,7 @@ type HTTPExtender struct {
 	filterVerb string
 	prioritizeVerb string
 	bindVerb string
-	weight int
+	weight int64
 	client *http.Client
 	nodeCacheCapable bool
 	managedResources sets.String
@@ -321,7 +321,7 @@ func (h *HTTPExtender) Filter(
 // Prioritize based on extender implemented priority functions. Weight*priority is added
 // up for each such priority function. The returned score is added to the score computed
 // by Kubernetes scheduler. The total score is used to do the host selection.
-func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, int, error) {
+func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, int64, error) {
 	var (
 		result schedulerapi.HostPriorityList
 		nodeList *v1.NodeList
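With weight now int64, the accumulation the comment above describes (weight*score summed per host, then added to the scheduler's own score) can be sketched like this; hostScore is a stand-in for schedulerapi.HostPriority:

// hostScore mirrors schedulerapi.HostPriority after this migration.
type hostScore struct {
	Host  string
	Score int64
}

// combineExtenderScores folds one extender's scores into the running
// per-host totals: each host accumulates score*weight, which is later
// added to the score the scheduler computed itself.
func combineExtenderScores(totals map[string]int64, scores []hostScore, weight int64) {
	for _, s := range scores {
		totals[s.Host] += s.Score * weight
	}
}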


@@ -44,7 +44,7 @@ type priorityFunc func(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorit
 type priorityConfig struct {
 	function priorityFunc
-	weight int
+	weight int64
 }

 func errorPredicateExtender(pod *v1.Pod, node *v1.Node) (bool, error) {
@@ -84,7 +84,7 @@ func machine1PrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.H
 		if node.Name == "machine1" {
 			score = 10
 		}
-		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: score})
+		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int64(score)})
 	}
 	return &result, nil
 }
@@ -96,7 +96,7 @@ func machine2PrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.H
 		if node.Name == "machine2" {
 			score = 10
 		}
-		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: score})
+		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int64(score)})
 	}
 	return &result, nil
 }
@@ -108,7 +108,7 @@ func machine2Prioritizer(_ *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo
 		if node.Name == "machine2" {
 			score = 10
 		}
-		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: score})
+		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int64(score)})
 	}
 	return result, nil
 }
@@ -116,7 +116,7 @@ func machine2Prioritizer(_ *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo
 type FakeExtender struct {
 	predicates []fitPredicate
 	prioritizers []priorityConfig
-	weight int
+	weight int64
 	nodeCacheCapable bool
 	filteredNodes []*v1.Node
 	unInterested bool
@@ -167,7 +167,7 @@ func (f *FakeExtender) ProcessPreemption(
 		} else {
 			// Append new victims to original victims
 			nodeToVictimsCopy[node].Pods = append(victims.Pods, extenderVictimPods...)
-			nodeToVictimsCopy[node].NumPDBViolations = victims.NumPDBViolations + extendernPDBViolations
+			nodeToVictimsCopy[node].NumPDBViolations = victims.NumPDBViolations + int64(extendernPDBViolations)
 		}
 	}
 	return nodeToVictimsCopy, nil
@@ -292,9 +292,9 @@ func (f *FakeExtender) Filter(pod *v1.Pod, nodes []*v1.Node, nodeNameToInfo map[
 	return filtered, failedNodesMap, nil
 }

-func (f *FakeExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, int, error) {
+func (f *FakeExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, int64, error) {
 	result := schedulerapi.HostPriorityList{}
-	combinedScores := map[string]int{}
+	combinedScores := map[string]int64{}
 	for _, prioritizer := range f.prioritizers {
 		weight := prioritizer.weight
 		if weight == 0 {


@@ -799,12 +799,12 @@ func PrioritizeNodes(
 		}

 		for j := range scoresMap {
-			result[i].Score += scoresMap[j][i].Score
+			result[i].Score += int64(scoresMap[j][i].Score)
 		}
 	}

 	if len(extenders) != 0 && nodes != nil {
-		combinedScores := make(map[string]int, len(nodeNameToInfo))
+		combinedScores := make(map[string]int64, len(nodeNameToInfo))
 		for i := range extenders {
 			if !extenders[i].IsInterested(pod) {
 				continue
@@ -870,7 +870,7 @@ func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*schedulerapi.Victims)
 	if len(nodesToVictims) == 0 {
 		return nil
 	}
-	minNumPDBViolatingPods := math.MaxInt32
+	minNumPDBViolatingPods := int64(math.MaxInt32)
 	var minNodes1 []*v1.Node
 	lenNodes1 := 0
 	for node, victims := range nodesToVictims {
@@ -1021,7 +1021,7 @@ func (g *genericScheduler) selectNodesForPreemption(
 			resultLock.Lock()
 			victims := schedulerapi.Victims{
 				Pods: pods,
-				NumPDBViolations: numPDBViolations,
+				NumPDBViolations: int64(numPDBViolations),
 			}
 			nodeToVictims[potentialNodes[i]] = &victims
 			resultLock.Unlock()


@@ -88,7 +88,7 @@ func numericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.N
 		}
 		result = append(result, schedulerapi.HostPriority{
 			Host: node.Name,
-			Score: score,
+			Score: int64(score),
 		})
 	}
 	return result, nil
@@ -110,7 +110,7 @@ func reverseNumericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernod
 	for _, hostPriority := range result {
 		reverseResult = append(reverseResult, schedulerapi.HostPriority{
 			Host: hostPriority.Host,
-			Score: int(maxScore + minScore - float64(hostPriority.Score)),
+			Score: int64(maxScore + minScore - float64(hostPriority.Score)),
 		})
 	}
@@ -932,7 +932,7 @@ func TestZeroRequest(t *testing.T) {
 		pods []*v1.Pod
 		nodes []*v1.Node
 		name string
-		expectedScore int
+		expectedScore int64
 	}{
 		// The point of these next two tests is to show you get the same priority for a zero-request pod
 		// as for a pod with the defaults requests, both when the zero-request pod is already on the machine


@@ -539,7 +539,7 @@ func (f *fakeExtender) Filter(
 func (f *fakeExtender) Prioritize(
 	pod *v1.Pod,
 	nodes []*v1.Node,
-) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error) {
+) (hostPriorities *schedulerapi.HostPriorityList, weight int64, err error) {
 	return nil, 0, nil
 }


@@ -74,7 +74,7 @@ type PriorityFunctionFactory2 func(PluginFactoryArgs) (priorities.PriorityMapFun
 type PriorityConfigFactory struct {
 	Function PriorityFunctionFactory
 	MapReduceFunction PriorityFunctionFactory2
-	Weight int
+	Weight int64
 }

 var (
@@ -329,7 +329,7 @@ func RegisterPriorityFunction(name string, function priorities.PriorityFunction,
 		Function: func(PluginFactoryArgs) priorities.PriorityFunction {
 			return function
 		},
-		Weight: weight,
+		Weight: int64(weight),
 	})
 }
@@ -344,7 +344,7 @@ func RegisterPriorityMapReduceFunction(
 		MapReduceFunction: func(PluginFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) {
 			return mapFunction, reduceFunction
 		},
-		Weight: weight,
+		Weight: int64(weight),
 	})
 }
@@ -549,7 +549,7 @@ func getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) ([]pr
 // validateSelectedConfigs validates the config weights to avoid the overflow.
 func validateSelectedConfigs(configs []priorities.PriorityConfig) error {
-	var totalPriority int
+	var totalPriority int64
 	for _, config := range configs {
 		// Checks totalPriority against MaxTotalPriority to avoid overflow
 		if config.Weight*schedulerapi.MaxPriority > schedulerapi.MaxTotalPriority-totalPriority {
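The guard above checks against the remaining headroom (MaxTotalPriority - totalPriority) so the comparison stays in range once totalPriority has grown. A division-based variant (a hypothetical helper, not this commit's code) additionally keeps the weight*priority multiplication itself from wrapping:

// addWeighted accumulates weight*maxPriority into *total, refusing the
// addition instead of wrapping. Dividing the headroom by maxPriority
// means even the multiplication below cannot overflow when it runs.
func addWeighted(total *int64, weight, maxPriority, limit int64) bool {
	if weight > (limit-*total)/maxPriority {
		return false // would exceed the budget
	}
	*total += weight * maxPriority
	return true
}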


@@ -17,6 +17,7 @@ limitations under the License.
 package factory

 import (
+	"math"
 	"testing"

 	"github.com/stretchr/testify/assert"
@@ -58,13 +59,13 @@ func TestValidatePriorityConfigOverFlow(t *testing.T) {
 		expected bool
 	}{
 		{
-			description: "one of the weights is MaxInt",
-			configs: []priorities.PriorityConfig{{Weight: api.MaxInt}, {Weight: 5}},
+			description: "one of the weights is MaxInt64",
+			configs: []priorities.PriorityConfig{{Weight: math.MaxInt64}, {Weight: 5}},
 			expected: true,
 		},
 		{
 			description: "after multiplication with MaxPriority the weight is larger than MaxWeight",
-			configs: []priorities.PriorityConfig{{Weight: api.MaxInt/api.MaxPriority + api.MaxPriority}, {Weight: 5}},
+			configs: []priorities.PriorityConfig{{Weight: math.MaxInt64/api.MaxPriority + api.MaxPriority}, {Weight: 5}},
 			expected: true,
 		},
 		{


@@ -49,7 +49,7 @@ type priorityFunc func(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPrior
 type priorityConfig struct {
 	function priorityFunc
-	weight int
+	weight int64
 }

 type Extender struct {
@@ -191,7 +191,7 @@ func (e *Extender) Filter(args *schedulerapi.ExtenderArgs) (*schedulerapi.Extend
 func (e *Extender) Prioritize(args *schedulerapi.ExtenderArgs) (*schedulerapi.HostPriorityList, error) {
 	result := schedulerapi.HostPriorityList{}
-	combinedScores := map[string]int{}
+	combinedScores := map[string]int64{}

 	var nodes = &v1.NodeList{Items: []v1.Node{}}
 	if e.nodeCacheCapable {
@@ -257,7 +257,7 @@ func machine2Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPri
 		}
 		result = append(result, schedulerapi.HostPriority{
 			Host: node.Name,
-			Score: score,
+			Score: int64(score),
 		})
 	}
 	return &result, nil
@@ -272,7 +272,7 @@ func machine3Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPri
 		}
 		result = append(result, schedulerapi.HostPriority{
 			Host: node.Name,
-			Score: score,
+			Score: int64(score),
 		})
 	}
 	return &result, nil