diff --git a/pkg/scheduler/algorithm/priorities/even_pods_spread.go b/pkg/scheduler/algorithm/priorities/even_pods_spread.go
index 37cc073a158..f1f43f8fcb0 100644
--- a/pkg/scheduler/algorithm/priorities/even_pods_spread.go
+++ b/pkg/scheduler/algorithm/priorities/even_pods_spread.go
@@ -176,7 +176,7 @@ func CalculateEvenPodsSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*sch
 		// debugging purpose: print the value for each node
 		// score must be pointer here, otherwise it's always 0
 		if klog.V(10) {
-			defer func(score *int, nodeName string) {
+			defer func(score *int64, nodeName string) {
 				klog.Infof("%v -> %v: EvenPodsSpreadPriority, Score: (%d)", pod.Name, nodeName, *score)
 			}(&result[i].Score, node.Name)
 		}
@@ -190,7 +190,7 @@ func CalculateEvenPodsSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*sch
 			continue
 		}
 		fScore := float64(schedulerapi.MaxPriority) * (float64(total-t.nodeNameToPodCounts[node.Name]) / float64(maxMinDiff))
-		result[i].Score = int(fScore)
+		result[i].Score = int64(fScore)
 	}
 
 	return result, nil
diff --git a/pkg/scheduler/algorithm/priorities/image_locality.go b/pkg/scheduler/algorithm/priorities/image_locality.go
index cc1db725ad9..4785c993727 100644
--- a/pkg/scheduler/algorithm/priorities/image_locality.go
+++ b/pkg/scheduler/algorithm/priorities/image_locality.go
@@ -55,7 +55,7 @@ func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *scheduler
 
 	return schedulerapi.HostPriority{
 		Host:  node.Name,
-		Score: score,
+		Score: int64(score),
 	}, nil
 }
diff --git a/pkg/scheduler/algorithm/priorities/interpod_affinity.go b/pkg/scheduler/algorithm/priorities/interpod_affinity.go
index f720320a710..50737ed029a 100644
--- a/pkg/scheduler/algorithm/priorities/interpod_affinity.go
+++ b/pkg/scheduler/algorithm/priorities/interpod_affinity.go
@@ -236,7 +236,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
 		if maxMinDiff > 0 && pm.counts[node.Name] != nil {
 			fScore = float64(schedulerapi.MaxPriority) * (float64(*pm.counts[node.Name]-minCount) / float64(maxCount-minCount))
 		}
-		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
+		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int64(fScore)})
 		if klog.V(10) {
 			klog.Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore))
 		}
diff --git a/pkg/scheduler/algorithm/priorities/node_affinity.go b/pkg/scheduler/algorithm/priorities/node_affinity.go
index 870649d72fb..39774959b67 100644
--- a/pkg/scheduler/algorithm/priorities/node_affinity.go
+++ b/pkg/scheduler/algorithm/priorities/node_affinity.go
@@ -69,7 +69,7 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s
 
 	return schedulerapi.HostPriority{
 		Host:  node.Name,
-		Score: int(count),
+		Score: int64(count),
 	}, nil
 }
diff --git a/pkg/scheduler/algorithm/priorities/node_label.go b/pkg/scheduler/algorithm/priorities/node_label.go
index 2cedd13142b..7c838740a94 100644
--- a/pkg/scheduler/algorithm/priorities/node_label.go
+++ b/pkg/scheduler/algorithm/priorities/node_label.go
@@ -56,6 +56,6 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta i
 	}
 	return schedulerapi.HostPriority{
 		Host:  node.Name,
-		Score: score,
+		Score: int64(score),
 	}, nil
 }
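Note: the `score must be pointer here, otherwise it's always 0` comment in the even_pods_spread.go hunk above guards against a classic `defer` pitfall. A minimal standalone sketch of what it avoids (illustrative only, not part of the patch):

```go
package main

import "fmt"

func main() {
	var score int64

	// defer evaluates its arguments immediately, so the by-value variant
	// captures 0 here, before the real score is assigned below. The
	// by-pointer variant dereferences at run time and sees the final value.
	defer func(s *int64) { fmt.Println("by pointer:", *s) }(&score) // prints 10
	defer func(s int64) { fmt.Println("by value:", s) }(score)      // prints 0

	score = 10
}
```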
diff --git a/pkg/scheduler/algorithm/priorities/reduce.go b/pkg/scheduler/algorithm/priorities/reduce.go
index 416724cbea5..07d5bd07b50 100644
--- a/pkg/scheduler/algorithm/priorities/reduce.go
+++ b/pkg/scheduler/algorithm/priorities/reduce.go
@@ -25,14 +25,14 @@ import (
 // NormalizeReduce generates a PriorityReduceFunction that can normalize the result
 // scores to [0, maxPriority]. If reverse is set to true, it reverses the scores by
 // subtracting it from maxPriority.
-func NormalizeReduce(maxPriority int, reverse bool) PriorityReduceFunction {
+func NormalizeReduce(maxPriority int64, reverse bool) PriorityReduceFunction {
 	return func(
 		_ *v1.Pod,
 		_ interface{},
 		_ map[string]*schedulernodeinfo.NodeInfo,
 		result schedulerapi.HostPriorityList) error {
 
-		var maxCount int
+		var maxCount int64
 		for i := range result {
 			if result[i].Score > maxCount {
 				maxCount = result[i].Score
diff --git a/pkg/scheduler/algorithm/priorities/resource_allocation.go b/pkg/scheduler/algorithm/priorities/resource_allocation.go
index c6563a5dac6..408b4d2b718 100644
--- a/pkg/scheduler/algorithm/priorities/resource_allocation.go
+++ b/pkg/scheduler/algorithm/priorities/resource_allocation.go
@@ -92,7 +92,7 @@ func (r *ResourceAllocationPriority) PriorityMap(
 
 	return schedulerapi.HostPriority{
 		Host:  node.Name,
-		Score: int(score),
+		Score: score,
 	}, nil
 }
diff --git a/pkg/scheduler/algorithm/priorities/resource_limits.go b/pkg/scheduler/algorithm/priorities/resource_limits.go
index 994f4be0c2b..28c7b651385 100644
--- a/pkg/scheduler/algorithm/priorities/resource_limits.go
+++ b/pkg/scheduler/algorithm/priorities/resource_limits.go
@@ -54,7 +54,7 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule
 	cpuScore := computeScore(podLimits.MilliCPU, allocatableResources.MilliCPU)
 	memScore := computeScore(podLimits.Memory, allocatableResources.Memory)
 
-	score := int(0)
+	score := int64(0)
 	if cpuScore == 1 || memScore == 1 {
 		score = 1
 	}
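The reduce.go hunk above only shows the signature change and the max-score scan of NormalizeReduce. A standalone sketch of the normalization it performs, assuming the usual scale-then-optionally-reverse body (the `normalize` helper is hypothetical):

```go
package main

import "fmt"

// normalize scales every score into [0, maxPriority]; if reverse is set,
// each scaled score is subtracted from maxPriority, matching the doc
// comment on NormalizeReduce.
func normalize(scores []int64, maxPriority int64, reverse bool) {
	var maxCount int64
	for _, s := range scores {
		if s > maxCount {
			maxCount = s
		}
	}
	if maxCount == 0 {
		if reverse {
			for i := range scores {
				scores[i] = maxPriority
			}
		}
		return
	}
	for i, s := range scores {
		scores[i] = maxPriority * s / maxCount
		if reverse {
			scores[i] = maxPriority - scores[i]
		}
	}
}

func main() {
	scores := []int64{3, 6, 12}
	normalize(scores, 10, false)
	fmt.Println(scores) // [2 5 10]
}
```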
diff --git a/pkg/scheduler/algorithm/priorities/selector_spreading.go b/pkg/scheduler/algorithm/priorities/selector_spreading.go
index 3faba83810e..00b0e9730fd 100644
--- a/pkg/scheduler/algorithm/priorities/selector_spreading.go
+++ b/pkg/scheduler/algorithm/priorities/selector_spreading.go
@@ -80,7 +80,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
 	if len(selectors) == 0 {
 		return schedulerapi.HostPriority{
 			Host:  node.Name,
-			Score: int(0),
+			Score: 0,
 		}, nil
 	}
 
@@ -88,7 +88,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
 
 	return schedulerapi.HostPriority{
 		Host:  node.Name,
-		Score: count,
+		Score: int64(count),
 	}, nil
 }
 
@@ -97,9 +97,9 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
 // where zone information is included on the nodes, it favors nodes
 // in zones with fewer existing matching pods.
 func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error {
-	countsByZone := make(map[string]int, 10)
-	maxCountByZone := int(0)
-	maxCountByNodeName := int(0)
+	countsByZone := make(map[string]int64, 10)
+	maxCountByZone := int64(0)
+	maxCountByNodeName := int64(0)
 
 	for i := range result {
 		if result[i].Score > maxCountByNodeName {
@@ -141,10 +141,10 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
 				fScore = (fScore * (1.0 - zoneWeighting)) + (zoneWeighting * zoneScore)
 			}
 		}
-		result[i].Score = int(fScore)
+		result[i].Score = int64(fScore)
 		if klog.V(10) {
 			klog.Infof(
-				"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Host, int(fScore),
+				"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Host, int64(fScore),
 			)
 		}
 	}
@@ -232,16 +232,16 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta
 
 	return schedulerapi.HostPriority{
 		Host:  node.Name,
-		Score: score,
+		Score: int64(score),
 	}, nil
 }
 
 // CalculateAntiAffinityPriorityReduce computes each node score with the same value for a particular label.
 // The label to be considered is provided to the struct (ServiceAntiAffinity).
 func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error {
-	var numServicePods int
+	var numServicePods int64
 	var label string
-	podCounts := map[string]int{}
+	podCounts := map[string]int64{}
 	labelNodesStatus := map[string]string{}
 	maxPriorityFloat64 := float64(schedulerapi.MaxPriority)
 
@@ -261,7 +261,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, m
 		label, ok := labelNodesStatus[hostPriority.Host]
 		if !ok {
 			result[i].Host = hostPriority.Host
-			result[i].Score = int(0)
+			result[i].Score = 0
 			continue
 		}
 		// initializing to the default/max node score of maxPriority
@@ -270,7 +270,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, m
 			fScore = maxPriorityFloat64 * (float64(numServicePods-podCounts[label]) / float64(numServicePods))
 		}
 		result[i].Host = hostPriority.Host
-		result[i].Score = int(fScore)
+		result[i].Score = int64(fScore)
 	}
 
 	return nil
diff --git a/pkg/scheduler/algorithm/priorities/taint_toleration.go b/pkg/scheduler/algorithm/priorities/taint_toleration.go
index 85be011cabe..c3a52739d01 100644
--- a/pkg/scheduler/algorithm/priorities/taint_toleration.go
+++ b/pkg/scheduler/algorithm/priorities/taint_toleration.go
@@ -68,7 +68,7 @@ func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *
 
 	return schedulerapi.HostPriority{
 		Host:  node.Name,
-		Score: countIntolerableTaintsPreferNoSchedule(node.Spec.Taints, tolerationsPreferNoSchedule),
+		Score: int64(countIntolerableTaintsPreferNoSchedule(node.Spec.Taints, tolerationsPreferNoSchedule)),
 	}, nil
 }
diff --git a/pkg/scheduler/algorithm/priorities/types.go b/pkg/scheduler/algorithm/priorities/types.go
index 6c98a780aee..fb689e48ea2 100644
--- a/pkg/scheduler/algorithm/priorities/types.go
+++ b/pkg/scheduler/algorithm/priorities/types.go
@@ -50,7 +50,7 @@ type PriorityConfig struct {
 	// TODO: Remove it after migrating all functions to
 	// Map-Reduce pattern.
 	Function PriorityFunction
-	Weight   int
+	Weight   int64
}

 // EmptyPriorityMetadataProducer returns a no-op PriorityMetadataProducer type.
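A quick numeric check of the CalculateAntiAffinityPriorityReduce formula shown in the selector_spreading.go hunk above, `fScore = maxPriority * (numServicePods - podCounts[label]) / numServicePods`: label values hosting fewer of the service's pods score closer to MaxPriority. The zone names and counts below are made up for illustration:

```go
package main

import "fmt"

func main() {
	const maxPriority = 10.0
	numServicePods := int64(8)
	podCounts := map[string]int64{"zone-a": 6, "zone-b": 2}

	for label, count := range podCounts {
		fScore := maxPriority * (float64(numServicePods-count) / float64(numServicePods))
		// Truncation via int64() mirrors the patched code: prints 2 for
		// zone-a and 7 for zone-b (map iteration order may vary).
		fmt.Printf("%s -> %d\n", label, int64(fScore))
	}
}
```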
diff --git a/pkg/scheduler/algorithm/scheduler_interface.go b/pkg/scheduler/algorithm/scheduler_interface.go
index 81dedd42928..43c64758c39 100644
--- a/pkg/scheduler/algorithm/scheduler_interface.go
+++ b/pkg/scheduler/algorithm/scheduler_interface.go
@@ -39,7 +39,7 @@ type SchedulerExtender interface {
 	// Prioritize based on extender-implemented priority functions. The returned scores & weight
 	// are used to compute the weighted score for an extender. The weighted scores are added to
 	// the scores computed by Kubernetes scheduler. The total scores are used to do the host selection.
-	Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error)
+	Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int64, err error)
 
 	// Bind delegates the action of binding a pod to a node to the extender.
 	Bind(binding *v1.Binding) error
diff --git a/pkg/scheduler/api/types.go b/pkg/scheduler/api/types.go
index eb76ea39839..98e51512ed4 100644
--- a/pkg/scheduler/api/types.go
+++ b/pkg/scheduler/api/types.go
@@ -17,6 +17,7 @@ limitations under the License.
 package api
 
 import (
+	"math"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -30,11 +31,11 @@ const (
 	// MaxInt defines the max signed int value.
 	MaxInt = int(MaxUint >> 1)
 	// MaxTotalPriority defines the max total priority value.
-	MaxTotalPriority = MaxInt
+	MaxTotalPriority = int64(math.MaxInt64)
 	// MaxPriority defines the max priority value.
 	MaxPriority = 10
 	// MaxWeight defines the max weight value.
-	MaxWeight = MaxInt / MaxPriority
+	MaxWeight = int64(math.MaxInt64 / MaxPriority)
 	// DefaultPercentageOfNodesToScore defines the percentage of nodes of all nodes
 	// that once found feasible, the scheduler stops looking for more nodes.
 	DefaultPercentageOfNodesToScore = 50
@@ -86,7 +87,7 @@ type PriorityPolicy struct {
 	Name string
 	// The numeric multiplier for the node scores that the priority function generates
 	// The weight should be a positive integer
-	Weight int
+	Weight int64
 	// Holds the parameters to configure the given priority function
 	Argument *PriorityArgument
 }
@@ -157,9 +158,9 @@ type RequestedToCapacityRatioArguments struct {
 // UtilizationShapePoint represents single point of priority function shape
 type UtilizationShapePoint struct {
 	// Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100.
-	Utilization int
+	Utilization int32
 	// Score assigned to given utilization (y axis). Valid values are 0 to 10.
-	Score int
+	Score int32
 }
 
 // ResourceSpec represents single resource for bin packing of priority RequestedToCapacityRatioArguments.
@@ -167,7 +168,7 @@ type ResourceSpec struct {
 	// Name of the resource to be managed by RequestedToCapacityRatio function.
 	Name v1.ResourceName
 	// Weight of the resource.
-	Weight int
+	Weight int64
 }
 
 // ExtenderManagedResource describes the arguments of extended resources
@@ -220,7 +221,7 @@ type ExtenderConfig struct {
 	PrioritizeVerb string
 	// The numeric multiplier for the node scores that the prioritize call generates.
 	// The weight should be a positive integer
-	Weight int
+	Weight int64
 	// Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender.
 	// If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender
 	// can implement this function.
@@ -271,7 +272,7 @@ type ExtenderPreemptionArgs struct {
 // numPDBViolations: the count of violations of PodDisruptionBudget
 type Victims struct {
 	Pods             []*v1.Pod
-	NumPDBViolations int
+	NumPDBViolations int64
 }
 
 // MetaPod represent identifier for a v1.Pod
@@ -285,7 +286,7 @@ type MetaPod struct {
 // numPDBViolations: the count of violations of PodDisruptionBudget
 type MetaVictims struct {
 	Pods             []*MetaPod
-	NumPDBViolations int
+	NumPDBViolations int64
 }
 
 // ExtenderArgs represents the arguments needed by the extender to filter/prioritize
@@ -341,7 +342,7 @@ type HostPriority struct {
 	// Name of the host
 	Host string
 	// Score associated with the host
-	Score int
+	Score int64
 }
 
 // HostPriorityList declares a []HostPriority type.
diff --git a/pkg/scheduler/api/v1/types.go b/pkg/scheduler/api/v1/types.go
index f7a67a11067..85c88bc3101 100644
--- a/pkg/scheduler/api/v1/types.go
+++ b/pkg/scheduler/api/v1/types.go
@@ -39,7 +39,7 @@ type Policy struct {
 	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
 	// corresponding to every RequiredDuringScheduling affinity rule.
 	// HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 1-100.
-	HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"`
+	HardPodAffinitySymmetricWeight int32 `json:"hardPodAffinitySymmetricWeight"`
 
 	// When AlwaysCheckAllPredicates is set to true, scheduler checks all
 	// the configured predicates even after one or more of them fails.
@@ -66,7 +66,7 @@ type PriorityPolicy struct {
 	Name string `json:"name"`
 	// The numeric multiplier for the node scores that the priority function generates
 	// The weight should be non-zero and can be a positive or a negative integer
-	Weight int `json:"weight"`
+	Weight int64 `json:"weight"`
 	// Holds the parameters to configure the given priority function
 	Argument *PriorityArgument `json:"argument"`
 }
@@ -137,9 +137,9 @@ type RequestedToCapacityRatioArguments struct {
 // UtilizationShapePoint represents single point of priority function shape.
 type UtilizationShapePoint struct {
 	// Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100.
-	Utilization int `json:"utilization"`
+	Utilization int32 `json:"utilization"`
 	// Score assigned to given utilization (y axis). Valid values are 0 to 10.
-	Score int `json:"score"`
+	Score int32 `json:"score"`
 }
 
 // ResourceSpec represents single resource and weight for bin packing of priority RequestedToCapacityRatioArguments.
@@ -147,7 +147,7 @@ type ResourceSpec struct {
 	// Name of the resource to be managed by RequestedToCapacityRatio function.
 	Name apiv1.ResourceName `json:"name,casttype=ResourceName"`
 	// Weight of the resource.
-	Weight int `json:"weight,omitempty"`
+	Weight int64 `json:"weight,omitempty"`
 }
 
 // ExtenderManagedResource describes the arguments of extended resources
@@ -200,7 +200,7 @@ type ExtenderConfig struct {
 	PrioritizeVerb string `json:"prioritizeVerb,omitempty"`
 	// The numeric multiplier for the node scores that the prioritize call generates.
 	// The weight should be a positive integer
-	Weight int `json:"weight,omitempty"`
+	Weight int64 `json:"weight,omitempty"`
 	// Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender.
 	// If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender
 	// can implement this function.
@@ -276,7 +276,7 @@ type ExtenderPreemptionArgs struct {
 // numPDBViolations: the count of violations of PodDisruptionBudget
 type Victims struct {
 	Pods             []*apiv1.Pod `json:"pods"`
-	NumPDBViolations int          `json:"numPDBViolations"`
+	NumPDBViolations int64        `json:"numPDBViolations"`
 }
 
 // MetaPod represent identifier for a v1.Pod
@@ -290,7 +290,7 @@ type MetaPod struct {
 // numPDBViolations: the count of violations of PodDisruptionBudget
 type MetaVictims struct {
 	Pods             []*MetaPod `json:"pods"`
-	NumPDBViolations int        `json:"numPDBViolations"`
+	NumPDBViolations int64      `json:"numPDBViolations"`
 }
 
 // FailedNodesMap represents the filtered out nodes, with node names and failure messages
@@ -333,7 +333,7 @@ type HostPriority struct {
 	// Name of the host
 	Host string `json:"host"`
 	// Score associated with the host
-	Score int `json:"score"`
+	Score int64 `json:"score"`
 }
 
 // HostPriorityList declares a []HostPriority type.
diff --git a/pkg/scheduler/core/extender.go b/pkg/scheduler/core/extender.go
index b5a478a6caa..0b21d732ffa 100644
--- a/pkg/scheduler/core/extender.go
+++ b/pkg/scheduler/core/extender.go
@@ -45,7 +45,7 @@ type HTTPExtender struct {
 	filterVerb       string
 	prioritizeVerb   string
 	bindVerb         string
-	weight           int
+	weight           int64
 	client           *http.Client
 	nodeCacheCapable bool
 	managedResources sets.String
@@ -321,7 +321,7 @@ func (h *HTTPExtender) Filter(
 // Prioritize based on extender implemented priority functions. Weight*priority is added
 // up for each such priority function. The returned score is added to the score computed
 // by Kubernetes scheduler. The total score is used to do the host selection.
-func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, int, error) {
+func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, int64, error) {
 	var (
 		result   schedulerapi.HostPriorityList
 		nodeList *v1.NodeList
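The doc comment on HTTPExtender.Prioritize above describes the combination rule: each extender's per-host score is multiplied by the extender's weight and summed into the total used for host selection. A self-contained sketch of that rule (the `hostPriority`/`extenderResult` types and `combine` helper are hypothetical stand-ins, not the scheduler's API):

```go
package main

import "fmt"

type hostPriority struct {
	host  string
	score int64
}

type extenderResult struct {
	weight int64
	hosts  []hostPriority
}

// combine accumulates weight*score per host across all extenders.
func combine(results []extenderResult) map[string]int64 {
	combined := map[string]int64{}
	for _, r := range results {
		for _, hp := range r.hosts {
			combined[hp.host] += r.weight * hp.score
		}
	}
	return combined
}

func main() {
	results := []extenderResult{
		{weight: 1, hosts: []hostPriority{{"machine1", 10}, {"machine2", 3}}},
		{weight: 5, hosts: []hostPriority{{"machine1", 1}, {"machine2", 8}}},
	}
	fmt.Println(combine(results)) // map[machine1:15 machine2:43]
}
```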
diff --git a/pkg/scheduler/core/extender_test.go b/pkg/scheduler/core/extender_test.go
index 4c52350a815..9f7e7b7a80e 100644
--- a/pkg/scheduler/core/extender_test.go
+++ b/pkg/scheduler/core/extender_test.go
@@ -44,7 +44,7 @@ type priorityFunc func(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorit
 
 type priorityConfig struct {
 	function priorityFunc
-	weight   int
+	weight   int64
 }
 
 func errorPredicateExtender(pod *v1.Pod, node *v1.Node) (bool, error) {
@@ -84,7 +84,7 @@ func machine1PrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.H
 		if node.Name == "machine1" {
 			score = 10
 		}
-		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: score})
+		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int64(score)})
 	}
 	return &result, nil
 }
@@ -96,7 +96,7 @@ func machine2PrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.H
 		if node.Name == "machine2" {
 			score = 10
 		}
-		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: score})
+		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int64(score)})
 	}
 	return &result, nil
 }
@@ -108,7 +108,7 @@ func machine2Prioritizer(_ *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo
 		if node.Name == "machine2" {
 			score = 10
 		}
-		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: score})
+		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int64(score)})
 	}
 	return result, nil
 }
@@ -116,7 +116,7 @@ type FakeExtender struct {
 	predicates       []fitPredicate
 	prioritizers     []priorityConfig
-	weight           int
+	weight           int64
 	nodeCacheCapable bool
 	filteredNodes    []*v1.Node
 	unInterested     bool
@@ -167,7 +167,7 @@ func (f *FakeExtender) ProcessPreemption(
 		} else {
 			// Append new victims to original victims
 			nodeToVictimsCopy[node].Pods = append(victims.Pods, extenderVictimPods...)
-			nodeToVictimsCopy[node].NumPDBViolations = victims.NumPDBViolations + extendernPDBViolations
+			nodeToVictimsCopy[node].NumPDBViolations = victims.NumPDBViolations + int64(extendernPDBViolations)
 		}
 	}
 	return nodeToVictimsCopy, nil
@@ -292,9 +292,9 @@ func (f *FakeExtender) Filter(pod *v1.Pod, nodes []*v1.Node, nodeNameToInfo map[
 	return filtered, failedNodesMap, nil
 }
 
-func (f *FakeExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, int, error) {
+func (f *FakeExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, int64, error) {
 	result := schedulerapi.HostPriorityList{}
-	combinedScores := map[string]int{}
+	combinedScores := map[string]int64{}
 	for _, prioritizer := range f.prioritizers {
 		weight := prioritizer.weight
 		if weight == 0 {
diff --git a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go
index 070fcad461c..d9399609f61 100644
--- a/pkg/scheduler/core/generic_scheduler.go
+++ b/pkg/scheduler/core/generic_scheduler.go
@@ -799,12 +799,12 @@ func PrioritizeNodes(
 		}
 		for j := range scoresMap {
-			result[i].Score += scoresMap[j][i].Score
+			result[i].Score += int64(scoresMap[j][i].Score)
 		}
 	}
 
 	if len(extenders) != 0 && nodes != nil {
-		combinedScores := make(map[string]int, len(nodeNameToInfo))
+		combinedScores := make(map[string]int64, len(nodeNameToInfo))
 		for i := range extenders {
 			if !extenders[i].IsInterested(pod) {
 				continue
@@ -870,7 +870,7 @@ func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*schedulerapi.Victims)
 	if len(nodesToVictims) == 0 {
 		return nil
 	}
-	minNumPDBViolatingPods := math.MaxInt32
+	minNumPDBViolatingPods := int64(math.MaxInt32)
 	var minNodes1 []*v1.Node
 	lenNodes1 := 0
 	for node, victims := range nodesToVictims {
@@ -1021,7 +1021,7 @@ func (g *genericScheduler) selectNodesForPreemption(
 			resultLock.Lock()
 			victims := schedulerapi.Victims{
 				Pods:             pods,
-				NumPDBViolations: numPDBViolations,
+				NumPDBViolations: int64(numPDBViolations),
 			}
 			nodeToVictims[potentialNodes[i]] = &victims
 			resultLock.Unlock()
diff --git a/pkg/scheduler/core/generic_scheduler_test.go b/pkg/scheduler/core/generic_scheduler_test.go
index a9851679f25..b610be762ca 100644
--- a/pkg/scheduler/core/generic_scheduler_test.go
+++ b/pkg/scheduler/core/generic_scheduler_test.go
@@ -88,7 +88,7 @@ func numericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.N
 		}
 		result = append(result, schedulerapi.HostPriority{
 			Host:  node.Name,
-			Score: score,
+			Score: int64(score),
 		})
 	}
 	return result, nil
@@ -110,7 +110,7 @@ func reverseNumericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernod
 	for _, hostPriority := range result {
 		reverseResult = append(reverseResult, schedulerapi.HostPriority{
 			Host:  hostPriority.Host,
-			Score: int(maxScore + minScore - float64(hostPriority.Score)),
+			Score: int64(maxScore + minScore - float64(hostPriority.Score)),
 		})
 	}
 
@@ -932,7 +932,7 @@ func TestZeroRequest(t *testing.T) {
 		pods          []*v1.Pod
 		nodes         []*v1.Node
 		name          string
-		expectedScore int
+		expectedScore int64
 	}{
 		// The point of these next two tests is to show you get the same priority for a zero-request pod
 		// as for a pod with the defaults requests, both when the zero-request pod is already on the machine
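A plausible motivation for widening the accumulators in PrioritizeNodes above (not stated in the diff itself, but consistent with the overflow test cases later in this patch): summing weight*score across many priority functions can exceed a 32-bit range even when each individual product fits. A small illustration:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	const maxPriority = 10
	// A weight that is still legal when MaxWeight is derived from a
	// 32-bit int, as it was before this patch.
	weight := int64(math.MaxInt32 / maxPriority)

	var total32 int32
	var total64 int64
	for i := 0; i < 2; i++ {
		total32 += int32(weight * maxPriority) // wraps on the second add
		total64 += weight * maxPriority
	}
	fmt.Println(total32, total64) // -16 4294967280 (the 32-bit total wrapped)
}
```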
diff --git a/pkg/scheduler/factory/factory_test.go b/pkg/scheduler/factory/factory_test.go
index 21e2efd391f..3f152a27c9c 100644
--- a/pkg/scheduler/factory/factory_test.go
+++ b/pkg/scheduler/factory/factory_test.go
@@ -539,7 +539,7 @@ func (f *fakeExtender) Filter(
 func (f *fakeExtender) Prioritize(
 	pod *v1.Pod,
 	nodes []*v1.Node,
-) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error) {
+) (hostPriorities *schedulerapi.HostPriorityList, weight int64, err error) {
 	return nil, 0, nil
 }
 
diff --git a/pkg/scheduler/factory/plugins.go b/pkg/scheduler/factory/plugins.go
index e7f2e7be1cf..c829b62eee2 100644
--- a/pkg/scheduler/factory/plugins.go
+++ b/pkg/scheduler/factory/plugins.go
@@ -74,7 +74,7 @@ type PriorityFunctionFactory2 func(PluginFactoryArgs) (priorities.PriorityMapFun
 type PriorityConfigFactory struct {
 	Function          PriorityFunctionFactory
 	MapReduceFunction PriorityFunctionFactory2
-	Weight            int
+	Weight            int64
 }
 
 var (
@@ -329,7 +329,7 @@ func RegisterPriorityFunction(name string, function priorities.PriorityFunction,
 		Function: func(PluginFactoryArgs) priorities.PriorityFunction {
 			return function
 		},
-		Weight: weight,
+		Weight: int64(weight),
 	})
 }
 
@@ -344,7 +344,7 @@ func RegisterPriorityMapReduceFunction(
 		MapReduceFunction: func(PluginFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) {
 			return mapFunction, reduceFunction
 		},
-		Weight: weight,
+		Weight: int64(weight),
 	})
 }
 
@@ -549,7 +549,7 @@ func getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) ([]pr
 // validateSelectedConfigs validates the config weights to avoid the overflow.
 func validateSelectedConfigs(configs []priorities.PriorityConfig) error {
-	var totalPriority int
+	var totalPriority int64
 	for _, config := range configs {
 		// Checks totalPriority against MaxTotalPriority to avoid overflow
 		if config.Weight*schedulerapi.MaxPriority > schedulerapi.MaxTotalPriority-totalPriority {
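The guard in validateSelectedConfigs above is worth noting: it compares `config.Weight*schedulerapi.MaxPriority` against the remaining headroom `MaxTotalPriority - totalPriority` rather than adding first, so the comparison itself cannot overflow. A standalone sketch of the same pattern (the `validateWeights` helper is hypothetical):

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

const (
	maxPriority      = int64(10)
	maxTotalPriority = int64(math.MaxInt64)
)

func validateWeights(weights []int64) error {
	var total int64
	for _, w := range weights {
		// Subtracting on the right-hand side keeps the check overflow-free,
		// mirroring the patched validateSelectedConfigs.
		if w*maxPriority > maxTotalPriority-total {
			return errors.New("total priority of priority functions has overflown")
		}
		total += w * maxPriority
	}
	return nil
}

func main() {
	fmt.Println(validateWeights([]int64{1, 5}))                  // <nil>
	fmt.Println(validateWeights([]int64{math.MaxInt64 / 10, 5})) // overflow detected
}
```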
diff --git a/pkg/scheduler/factory/plugins_test.go b/pkg/scheduler/factory/plugins_test.go
index 9ddc1237062..4dd3812fc87 100644
--- a/pkg/scheduler/factory/plugins_test.go
+++ b/pkg/scheduler/factory/plugins_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package factory
 
 import (
+	"math"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -58,13 +59,13 @@ func TestValidatePriorityConfigOverFlow(t *testing.T) {
 		expected    bool
 	}{
 		{
-			description: "one of the weights is MaxInt",
-			configs:     []priorities.PriorityConfig{{Weight: api.MaxInt}, {Weight: 5}},
+			description: "one of the weights is MaxInt64",
+			configs:     []priorities.PriorityConfig{{Weight: math.MaxInt64}, {Weight: 5}},
 			expected:    true,
 		},
 		{
 			description: "after multiplication with MaxPriority the weight is larger than MaxWeight",
-			configs:     []priorities.PriorityConfig{{Weight: api.MaxInt/api.MaxPriority + api.MaxPriority}, {Weight: 5}},
+			configs:     []priorities.PriorityConfig{{Weight: math.MaxInt64/api.MaxPriority + api.MaxPriority}, {Weight: 5}},
 			expected:    true,
 		},
 		{
diff --git a/test/integration/scheduler/extender_test.go b/test/integration/scheduler/extender_test.go
index 082d940359f..3ed2a24a15c 100644
--- a/test/integration/scheduler/extender_test.go
+++ b/test/integration/scheduler/extender_test.go
@@ -49,7 +49,7 @@ type priorityFunc func(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPrior
 
 type priorityConfig struct {
 	function priorityFunc
-	weight   int
+	weight   int64
 }
 
 type Extender struct {
@@ -191,7 +191,7 @@ func (e *Extender) Filter(args *schedulerapi.ExtenderArgs) (*schedulerapi.Extend
 
 func (e *Extender) Prioritize(args *schedulerapi.ExtenderArgs) (*schedulerapi.HostPriorityList, error) {
 	result := schedulerapi.HostPriorityList{}
-	combinedScores := map[string]int{}
+	combinedScores := map[string]int64{}
 	var nodes = &v1.NodeList{Items: []v1.Node{}}
 
 	if e.nodeCacheCapable {
@@ -257,7 +257,7 @@ func machine2Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPri
 		}
 		result = append(result, schedulerapi.HostPriority{
 			Host:  node.Name,
-			Score: score,
+			Score: int64(score),
 		})
 	}
 	return &result, nil
@@ -272,7 +272,7 @@ func machine3Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPri
 		}
 		result = append(result, schedulerapi.HostPriority{
 			Host:  node.Name,
-			Score: score,
+			Score: int64(score),
 		})
 	}
 	return &result, nil
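Since HostPriority in api/v1/types.go is part of the extender wire format, widening Score to int64 leaves the JSON contract unchanged (JSON numbers carry no width), though consumers decoding into IEEE-754 doubles lose precision above 2^53. A round-trip sketch using a local copy of the type, not the scheduler package itself:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// hostPriority mirrors the wire shape of v1.HostPriority above.
type hostPriority struct {
	Host  string `json:"host"`
	Score int64  `json:"score"`
}

func main() {
	out, _ := json.Marshal([]hostPriority{{"machine1", 10}, {"machine2", 0}})
	fmt.Println(string(out)) // [{"host":"machine1","score":10},{"host":"machine2","score":0}]

	var in []hostPriority
	_ = json.Unmarshal(out, &in)
	fmt.Println(in) // [{machine1 10} {machine2 0}]
}
```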