feat(scheduler): remove MaxPriority in the scheduler api

draveness 2019-10-02 09:27:47 +08:00
parent 1fe922efcc
commit 9769d49bb5
32 changed files with 166 additions and 214 deletions
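
In short: this commit deletes the score-bound constants from pkg/scheduler/api and re-homes them, renamed and typed as int64, in the framework package; every caller then switches from schedulerapi.MaxPriority to framework.MaxNodeScore. Condensed from the hunks below:

// Before (pkg/scheduler/api/types.go, removed by this commit):
const (
	MaxTotalPriority = int64(math.MaxInt64)
	MaxPriority      = 10
	MaxWeight        = MaxTotalPriority / MaxPriority
)

// After (pkg/scheduler/framework/v1alpha1, added by this commit):
const (
	MaxNodeScore  int64 = 10
	MinNodeScore  int64 = 0
	MaxTotalScore int64 = math.MaxInt64
	MaxWeight     int64 = MaxTotalScore / MaxNodeScore
)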

View File

@ -36,7 +36,6 @@ go_library(
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/util:go_default_library",
@ -77,7 +76,6 @@ go_test(
"//pkg/features:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/testing:go_default_library",

View File

@ -22,7 +22,7 @@ import (
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
var (
@ -60,7 +60,7 @@ func balancedResourceScorer(requested, allocable ResourceToValueMap, includeVolu
// Since the variance is between positive fractions, it will be a positive fraction. 1-variance lets the
// score be higher for the node which has the least variance, and multiplying it by 10 provides the
// scaling factor needed.
return int64((1 - variance) * float64(schedulerapi.MaxPriority))
return int64((1 - variance) * float64(framework.MaxNodeScore))
}
// Upper and lower boundary of difference between cpuFraction and memoryFraction are -1 and 1
@ -68,7 +68,7 @@ func balancedResourceScorer(requested, allocable ResourceToValueMap, includeVolu
// 0-10, with 0 representing a well balanced allocation and 10 a poorly balanced one. Subtracting it
// from 10 leads to a score which also scales from 0 to 10, with 10 representing well balanced.
diff := math.Abs(cpuFraction - memoryFraction)
return int64((1 - diff) * float64(schedulerapi.MaxPriority))
return int64((1 - diff) * float64(framework.MaxNodeScore))
}
func fractionOfCapacity(requested, capacity int64) float64 {
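
The rename does not change the arithmetic here. A runnable sketch of the balance score, with illustrative request fractions (not values from this commit):

package main

import (
	"fmt"
	"math"
)

const maxNodeScore int64 = 10 // framework.MaxNodeScore after this commit

// balanceScore mirrors the (1 - diff) * MaxNodeScore line above.
func balanceScore(cpuFraction, memoryFraction float64) int64 {
	diff := math.Abs(cpuFraction - memoryFraction)
	return int64((1 - diff) * float64(maxNodeScore))
}

func main() {
	fmt.Println(balanceScore(0.5, 0.3)) // 50% CPU vs 30% memory requested -> 8
}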

View File

@ -26,7 +26,6 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@ -233,7 +232,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "nothing scheduled, nothing requested",
},
{
@ -250,7 +249,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 7}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 7}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "nothing scheduled, resources requested, differently sized machines",
},
{
@ -267,7 +266,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "no resources requested, pods scheduled",
pods: []*v1.Pod{
{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},

View File

@ -21,10 +21,9 @@ import (
"math"
"sync/atomic"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
@ -187,10 +186,10 @@ func CalculateEvenPodsSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*sch
continue
}
if maxMinDiff == 0 {
result[i].Score = schedulerapi.MaxPriority
result[i].Score = framework.MaxNodeScore
continue
}
fScore := float64(schedulerapi.MaxPriority) * (float64(total-t.nodeNameToPodCounts[node.Name]) / float64(maxMinDiff))
fScore := float64(framework.MaxNodeScore) * (float64(total-t.nodeNameToPodCounts[node.Name]) / float64(maxMinDiff))
result[i].Score = int64(fScore)
}
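
Reading the hunk above: when every candidate node ties (maxMinDiff == 0), each gets the maximum score; otherwise the matching-pod count is rescaled linearly. A hedged extraction of that branch, assuming the bookkeeping elided from this hunk keeps total-count within [0, maxMinDiff]:

// Sketch only: total, count and maxMinDiff are computed outside this hunk.
func spreadScore(total, count, maxMinDiff int64) int64 {
	const maxNodeScore int64 = 10 // framework.MaxNodeScore
	if maxMinDiff == 0 {
		return maxNodeScore
	}
	return int64(float64(maxNodeScore) * (float64(total-count) / float64(maxMinDiff)))
}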

View File

@ -20,8 +20,7 @@ import (
"fmt"
"strings"
"k8s.io/api/core/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
v1 "k8s.io/api/core/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/util/parsers"
@ -69,7 +68,7 @@ func calculatePriority(sumScores int64) int {
sumScores = maxThreshold
}
return int(int64(schedulerapi.MaxPriority) * (sumScores - minThreshold) / (maxThreshold - minThreshold))
return int(int64(framework.MaxNodeScore) * (sumScores - minThreshold) / (maxThreshold - minThreshold))
}
// sumImageScores returns the sum of image scores of all the containers that are already on the node.
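
calculatePriority is a clamped min-max normalization onto [0, MaxNodeScore]; only the scaling constant changes in this commit. A self-contained sketch, where the 23MB/1000MB thresholds are assumptions inferred from the test comments below, not part of this diff:

const (
	mb           int64 = 1024 * 1024
	minThreshold int64 = 23 * mb   // assumed
	maxThreshold int64 = 1000 * mb // assumed
	maxNodeScore int64 = 10        // framework.MaxNodeScore
)

func calculatePriority(sumScores int64) int {
	if sumScores < minThreshold {
		sumScores = minThreshold // reconstructed lower clamp
	} else if sumScores > maxThreshold {
		sumScores = maxThreshold
	}
	return int(maxNodeScore * (sumScores - minThreshold) / (maxThreshold - minThreshold))
}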

View File

@ -22,9 +22,8 @@ import (
"reflect"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/util/parsers"
@ -159,7 +158,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Score: 0 (10M/2 < 23M, min-threshold)
pod: &v1.Pod{Spec: testMinMax},
nodes: []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
name: "if exceed limit, use limit",
},
{

View File

@ -20,13 +20,12 @@ import (
"context"
"sync/atomic"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
@ -225,7 +224,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
for i, node := range nodes {
fScore := float64(0)
if maxMinDiff > 0 {
fScore = float64(schedulerapi.MaxPriority) * (float64(pm.counts[i]-minCount) / float64(maxCount-minCount))
fScore = float64(framework.MaxNodeScore) * (float64(pm.counts[i]-minCount) / float64(maxCount-minCount))
}
result = append(result, framework.NodeScore{Name: node.Name, Score: int64(fScore)})
if klog.V(10) {

View File

@ -21,9 +21,8 @@ import (
"reflect"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
st "k8s.io/kubernetes/pkg/scheduler/testing"
@ -295,7 +294,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
name: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" +
"which doesn't match either pods in nodes or in topology key",
},
@ -313,7 +312,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}},
name: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score",
},
// there are 2 regions, say regionChina(machine1,machine3,machine4) and regionIndia(machine2,machine5), both regions have nodes that match the preference.
@ -337,7 +336,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 5}, {Name: "machine3", Score: schedulerapi.MaxPriority}, {Name: "machine4", Score: schedulerapi.MaxPriority}, {Name: "machine5", Score: 5}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 5}, {Name: "machine3", Score: framework.MaxNodeScore}, {Name: "machine4", Score: framework.MaxNodeScore}, {Name: "machine5", Score: 5}},
name: "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score",
},
// Test with the different operators and values for pod affinity scheduling preference, including some match failures.
@ -353,7 +352,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}},
name: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ",
},
// Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods,
@ -369,7 +368,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}},
name: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
},
{
@ -383,7 +382,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}},
name: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
},
@ -403,7 +402,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "Anti Affinity: pod that doesnot match existing pods in node will get high score ",
},
{
@ -416,7 +415,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ",
},
{
@ -430,7 +429,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score",
},
// Test the symmetry cases for anti affinity
@ -444,7 +443,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz2}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score",
},
// Test both affinity and anti-affinity
@ -458,7 +457,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
name: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity",
},
// Combined cases considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels (they are in the same RC/service),
@ -483,7 +482,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 4}, {Name: "machine3", Score: schedulerapi.MaxPriority}, {Name: "machine4", Score: schedulerapi.MaxPriority}, {Name: "machine5", Score: 4}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 4}, {Name: "machine3", Score: framework.MaxNodeScore}, {Name: "machine4", Score: framework.MaxNodeScore}, {Name: "machine5", Score: 4}},
name: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels",
},
// Consider Affinity, Anti Affinity and symmetry together.
@ -505,7 +504,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelAzAz2}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: schedulerapi.MaxPriority}, {Name: "machine4", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: framework.MaxNodeScore}, {Name: "machine4", Score: 0}},
name: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry",
},
// Cover https://github.com/kubernetes/kubernetes/issues/82796 which panics upon:
@ -521,7 +520,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "Avoid panic when partial nodes in a topology don't have pods with affinity",
},
}
@ -594,7 +593,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
},
hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: 0}},
name: "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score",
},
{

View File

@ -16,9 +16,7 @@ limitations under the License.
package priorities
import (
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)
import framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
var (
leastRequestedRatioResources = DefaultRequestedRatioResources
@ -54,5 +52,5 @@ func leastRequestedScore(requested, capacity int64) int64 {
return 0
}
return ((capacity - requested) * int64(schedulerapi.MaxPriority)) / capacity
return ((capacity - requested) * int64(framework.MaxNodeScore)) / capacity
}
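
For a concrete feel of the formula above (the zero/over-capacity guards are reconstructed from the bare return 0, not shown in full in the hunk):

func leastRequestedScore(requested, capacity int64) int64 {
	const maxNodeScore int64 = 10 // framework.MaxNodeScore
	if capacity == 0 || requested > capacity {
		return 0 // reconstructed guard
	}
	return ((capacity - requested) * maxNodeScore) / capacity
}

// e.g. leastRequestedScore(3000, 4000) == 2: a node with 75% of its CPU
// requested scores 2 out of 10.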

View File

@ -20,10 +20,9 @@ import (
"reflect"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@ -111,7 +110,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "nothing scheduled, nothing requested",
},
{
@ -145,7 +144,7 @@ func TestLeastRequested(t *testing.T) {
*/
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "no resources requested, pods scheduled",
pods: []*v1.Pod{
{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},

View File

@ -16,9 +16,7 @@ limitations under the License.
package priorities
import (
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)
import framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
var (
mostRequestedRatioResources = DefaultRequestedRatioResources
@ -57,5 +55,5 @@ func mostRequestedScore(requested, capacity int64) int64 {
return 0
}
return (requested * schedulerapi.MaxPriority) / capacity
return (requested * framework.MaxNodeScore) / capacity
}
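
mostRequestedScore is the bin-packing mirror of leastRequestedScore: the fuller the node, the higher the score. Same caveat on the reconstructed guard:

func mostRequestedScore(requested, capacity int64) int64 {
	const maxNodeScore int64 = 10 // framework.MaxNodeScore
	if capacity == 0 || requested > capacity {
		return 0 // reconstructed guard
	}
	return (requested * maxNodeScore) / capacity
}

// e.g. mostRequestedScore(3000, 4000) == 7, where leastRequestedScore gives 2.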

View File

@ -19,10 +19,9 @@ package priorities
import (
"fmt"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@ -75,4 +74,4 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s
}
// CalculateNodeAffinityPriorityReduce is a reduce function for node affinity priority calculation.
var CalculateNodeAffinityPriorityReduce = NormalizeReduce(schedulerapi.MaxPriority, false)
var CalculateNodeAffinityPriorityReduce = NormalizeReduce(framework.MaxNodeScore, false)
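
NormalizeReduce, defined elsewhere in this package, rescales the per-node scores so the best raw value maps to the given maximum, optionally inverting the result. A hedged sketch of the behavior (illustrative names, not the package's actual code):

func normalize(scores []int64, maxNodeScore int64, reverse bool) {
	var max int64
	for _, s := range scores {
		if s > max {
			max = s
		}
	}
	for i := range scores {
		if max > 0 {
			scores[i] = maxNodeScore * scores[i] / max
		}
		if reverse {
			scores[i] = maxNodeScore - scores[i]
		}
	}
}

Node affinity passes reverse=false (more matched preferences, higher score); taint toleration further down passes reverse=true (more intolerable taints, lower score).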

View File

@ -20,9 +20,8 @@ import (
"reflect"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@ -147,7 +146,7 @@ func TestNodeAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
name: "only machine1 matches the preferred scheduling requirements of pod",
},
{
@ -161,7 +160,7 @@ func TestNodeAffinityPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: label5}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 1}, {Name: "machine5", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 3}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 1}, {Name: "machine5", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 3}},
name: "all machines matches the preferred scheduling requirements of pod but with different priorities ",
},
}

View File

@ -19,9 +19,8 @@ package priorities
import (
"fmt"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@ -51,12 +50,12 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta i
}
exists := labels.Set(node.Labels).Has(n.label)
score := 0
score := int64(0)
if (exists && n.presence) || (!exists && !n.presence) {
score = schedulerapi.MaxPriority
score = framework.MaxNodeScore
}
return framework.NodeScore{
Name: node.Name,
Score: int64(score),
Score: score,
}, nil
}

View File

@ -20,9 +20,8 @@ import (
"reflect"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@ -55,7 +54,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: framework.MaxNodeScore}},
label: "baz",
presence: false,
name: "no match found, presence false",
@ -66,7 +65,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
label: "foo",
presence: true,
name: "one match found, presence true",
@ -77,7 +76,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: framework.MaxNodeScore}},
label: "foo",
presence: false,
name: "one match found, presence false",
@ -88,7 +87,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: framework.MaxNodeScore}},
label: "bar",
presence: true,
name: "two matches found, presence true",
@ -99,7 +98,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
label: "bar",
presence: false,
name: "two matches found, presence false",

View File

@ -19,10 +19,9 @@ package priorities
import (
"fmt"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@ -50,13 +49,13 @@ func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, node
}
}
if controllerRef == nil {
return framework.NodeScore{Name: node.Name, Score: schedulerapi.MaxPriority}, nil
return framework.NodeScore{Name: node.Name, Score: framework.MaxNodeScore}, nil
}
avoids, err := v1helper.GetAvoidPodsFromNodeAnnotations(node.Annotations)
if err != nil {
// If we cannot get annotation, assume it's schedulable there.
return framework.NodeScore{Name: node.Name, Score: schedulerapi.MaxPriority}, nil
return framework.NodeScore{Name: node.Name, Score: framework.MaxNodeScore}, nil
}
for i := range avoids.PreferAvoidPods {
avoid := &avoids.PreferAvoidPods[i]
@ -64,5 +63,5 @@ func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, node
return framework.NodeScore{Name: node.Name, Score: 0}, nil
}
}
return framework.NodeScore{Name: node.Name, Score: schedulerapi.MaxPriority}, nil
return framework.NodeScore{Name: node.Name, Score: framework.MaxNodeScore}, nil
}

View File

@ -20,9 +20,8 @@ import (
"reflect"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@ -96,7 +95,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
},
},
nodes: testNodes,
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: framework.MaxNodeScore}},
name: "pod managed by ReplicationController should avoid a node, this node get lowest priority score",
},
{
@ -109,7 +108,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
},
},
nodes: testNodes,
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: framework.MaxNodeScore}},
name: "ownership by random controller should be ignored",
},
{
@ -122,7 +121,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
},
},
nodes: testNodes,
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}, {Name: "machine3", Score: framework.MaxNodeScore}},
name: "owner without Controller field set should be ignored",
},
{
@ -135,7 +134,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
},
},
nodes: testNodes,
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: framework.MaxNodeScore}},
name: "pod managed by ReplicaSet should avoid a node, this node get lowest priority score",
},
}

View File

@ -21,7 +21,7 @@ import (
"math"
"k8s.io/klog"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
// FunctionShape represents shape of scoring function.
@ -45,7 +45,7 @@ const (
minUtilization = 0
maxUtilization = 100
minScore = 0
maxScore = schedulerapi.MaxPriority
maxScore = framework.MaxNodeScore
)
// NewFunctionShape creates instance of FunctionShape in a safe way performing all

View File

@ -19,10 +19,9 @@ package priorities
import (
"fmt"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
utilnode "k8s.io/kubernetes/pkg/util/node"
@ -123,21 +122,21 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
maxCountByNodeNameFloat64 := float64(maxCountByNodeName)
maxCountByZoneFloat64 := float64(maxCountByZone)
MaxPriorityFloat64 := float64(schedulerapi.MaxPriority)
MaxNodeScoreFloat64 := float64(framework.MaxNodeScore)
for i := range result {
// initializing to the default/max node score of MaxNodeScore
fScore := MaxPriorityFloat64
fScore := MaxNodeScoreFloat64
if maxCountByNodeName > 0 {
fScore = MaxPriorityFloat64 * (float64(maxCountByNodeName-result[i].Score) / maxCountByNodeNameFloat64)
fScore = MaxNodeScoreFloat64 * (float64(maxCountByNodeName-result[i].Score) / maxCountByNodeNameFloat64)
}
// If there is zone information present, incorporate it
if haveZones {
zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Name].Node())
if zoneID != "" {
zoneScore := MaxPriorityFloat64
zoneScore := MaxNodeScoreFloat64
if maxCountByZone > 0 {
zoneScore = MaxPriorityFloat64 * (float64(maxCountByZone-countsByZone[zoneID]) / maxCountByZoneFloat64)
zoneScore = MaxNodeScoreFloat64 * (float64(maxCountByZone-countsByZone[zoneID]) / maxCountByZoneFloat64)
}
fScore = (fScore * (1.0 - zoneWeighting)) + (zoneWeighting * zoneScore)
}
@ -244,7 +243,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, m
var label string
podCounts := map[string]int64{}
labelNodesStatus := map[string]string{}
maxPriorityFloat64 := float64(schedulerapi.MaxPriority)
maxPriorityFloat64 := float64(framework.MaxNodeScore)
for _, hostPriority := range result {
numServicePods += hostPriority.Score
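
The zoneWeighting blend in CalculateSpreadPriorityReduce above deserves a worked number. zoneWeighting itself is defined outside this hunk (assumed here to be 2.0/3.0, per the surrounding file):

const zoneWeighting = 2.0 / 3.0 // assumption: not visible in this diff

func blend(nodeScore, zoneScore float64) float64 {
	return nodeScore*(1.0-zoneWeighting) + zoneWeighting*zoneScore
}

// e.g. blend(10, 0) ~= 3.33, truncated to 3: consistent with the
// "Already have pod in zone" score of 3 in the selector-spread tests below.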

View File

@ -21,9 +21,8 @@ import (
"testing"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
@ -67,14 +66,14 @@ func TestSelectorSpreadPriority(t *testing.T) {
{
pod: new(v1.Pod),
nodes: []string{"machine1", "machine2"},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "nothing scheduled",
},
{
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
pods: []*v1.Pod{{Spec: zone1Spec}},
nodes: []string{"machine1", "machine2"},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "no services",
},
{
@ -82,7 +81,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
pods: []*v1.Pod{{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}},
nodes: []string{"machine1", "machine2"},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
name: "different services",
},
{
@ -93,7 +92,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
name: "two pods, one service pod",
},
{
@ -107,7 +106,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
name: "five pods, one service pod in no namespace",
},
{
@ -120,7 +119,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
name: "four pods, one service pod in default namespace",
},
{
@ -134,7 +133,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
},
nodes: []string{"machine1", "machine2"},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"}}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
name: "five pods, one service pod in specific namespace",
},
{
@ -417,12 +416,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
{
pod: new(v1.Pod),
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone2, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone2, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine3Zone3, Score: framework.MaxNodeScore},
},
name: "nothing scheduled",
},
@ -430,12 +429,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
pod: buildPod("", labels1, nil),
pods: []*v1.Pod{buildPod(nodeMachine1Zone1, nil, nil)},
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone2, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone2, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine3Zone3, Score: framework.MaxNodeScore},
},
name: "no services",
},
@ -444,12 +443,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
pods: []*v1.Pod{buildPod(nodeMachine1Zone1, labels2, nil)},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone2, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone2, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine3Zone3, Score: framework.MaxNodeScore},
},
name: "different services",
},
@ -461,12 +460,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone2, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone2, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine3Zone3, Score: framework.MaxNodeScore},
},
name: "two pods, 0 matching",
},
@ -478,12 +477,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone2, Score: 0}, // Already have pod on machine
{Name: nodeMachine2Zone2, Score: 3}, // Already have pod in zone
{Name: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore},
{Name: nodeMachine3Zone3, Score: framework.MaxNodeScore},
},
name: "two pods, 1 matching (in z2)",
},
@ -498,7 +497,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
},
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []framework.NodeScore{
{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
{Name: nodeMachine1Zone2, Score: 0}, // Pod on node
{Name: nodeMachine2Zone2, Score: 0}, // Pod on node
{Name: nodeMachine1Zone3, Score: 6}, // Pod in zone
@ -561,12 +560,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
// We would probably prefer to see a bigger gap between putting a second
// pod on m1.z2 and putting a pod on m2.z2, but the ordering is correct.
// This is also consistent with what we have already.
{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority}, // No pods in zone
{Name: nodeMachine1Zone2, Score: 5}, // Pod on node
{Name: nodeMachine2Zone2, Score: 6}, // Pod in zone
{Name: nodeMachine1Zone3, Score: 0}, // Two pods on node
{Name: nodeMachine2Zone3, Score: 3}, // Pod in zone
{Name: nodeMachine3Zone3, Score: 3}, // Pod in zone
{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore}, // No pods in zone
{Name: nodeMachine1Zone2, Score: 5}, // Pod on node
{Name: nodeMachine2Zone2, Score: 6}, // Pod in zone
{Name: nodeMachine1Zone3, Score: 0}, // Two pods on node
{Name: nodeMachine2Zone3, Score: 3}, // Pod in zone
{Name: nodeMachine3Zone3, Score: 3}, // Pod in zone
},
name: "Replication controller spreading (z1=0, z2=1, z3=2)",
},
@ -646,8 +645,8 @@ func TestZoneSpreadPriority(t *testing.T) {
{
pod: new(v1.Pod),
nodes: labeledNodes,
expectedList: []framework.NodeScore{{Name: "machine11", Score: schedulerapi.MaxPriority}, {Name: "machine12", Score: schedulerapi.MaxPriority},
{Name: "machine21", Score: schedulerapi.MaxPriority}, {Name: "machine22", Score: schedulerapi.MaxPriority},
expectedList: []framework.NodeScore{{Name: "machine11", Score: framework.MaxNodeScore}, {Name: "machine12", Score: framework.MaxNodeScore},
{Name: "machine21", Score: framework.MaxNodeScore}, {Name: "machine22", Score: framework.MaxNodeScore},
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
name: "nothing scheduled",
},
@ -655,8 +654,8 @@ func TestZoneSpreadPriority(t *testing.T) {
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
pods: []*v1.Pod{{Spec: zone1Spec}},
nodes: labeledNodes,
expectedList: []framework.NodeScore{{Name: "machine11", Score: schedulerapi.MaxPriority}, {Name: "machine12", Score: schedulerapi.MaxPriority},
{Name: "machine21", Score: schedulerapi.MaxPriority}, {Name: "machine22", Score: schedulerapi.MaxPriority},
expectedList: []framework.NodeScore{{Name: "machine11", Score: framework.MaxNodeScore}, {Name: "machine12", Score: framework.MaxNodeScore},
{Name: "machine21", Score: framework.MaxNodeScore}, {Name: "machine22", Score: framework.MaxNodeScore},
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
name: "no services",
},
@ -665,8 +664,8 @@ func TestZoneSpreadPriority(t *testing.T) {
pods: []*v1.Pod{{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}},
nodes: labeledNodes,
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
expectedList: []framework.NodeScore{{Name: "machine11", Score: schedulerapi.MaxPriority}, {Name: "machine12", Score: schedulerapi.MaxPriority},
{Name: "machine21", Score: schedulerapi.MaxPriority}, {Name: "machine22", Score: schedulerapi.MaxPriority},
expectedList: []framework.NodeScore{{Name: "machine11", Score: framework.MaxNodeScore}, {Name: "machine12", Score: framework.MaxNodeScore},
{Name: "machine21", Score: framework.MaxNodeScore}, {Name: "machine22", Score: framework.MaxNodeScore},
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
name: "different services",
},
@ -679,7 +678,7 @@ func TestZoneSpreadPriority(t *testing.T) {
},
nodes: labeledNodes,
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
expectedList: []framework.NodeScore{{Name: "machine11", Score: schedulerapi.MaxPriority}, {Name: "machine12", Score: schedulerapi.MaxPriority},
expectedList: []framework.NodeScore{{Name: "machine11", Score: framework.MaxNodeScore}, {Name: "machine12", Score: framework.MaxNodeScore},
{Name: "machine21", Score: 0}, {Name: "machine22", Score: 0},
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
name: "three pods, one service pod",
@ -709,7 +708,7 @@ func TestZoneSpreadPriority(t *testing.T) {
nodes: labeledNodes,
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}},
expectedList: []framework.NodeScore{{Name: "machine11", Score: 0}, {Name: "machine12", Score: 0},
{Name: "machine21", Score: schedulerapi.MaxPriority}, {Name: "machine22", Score: schedulerapi.MaxPriority},
{Name: "machine21", Score: framework.MaxNodeScore}, {Name: "machine22", Score: framework.MaxNodeScore},
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
name: "three service label match pods in different namespaces",
},

View File

@ -19,9 +19,8 @@ package priorities
import (
"fmt"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@ -74,4 +73,4 @@ func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *
}
// ComputeTaintTolerationPriorityReduce calculates the score of each node based on the number of intolerable taints on the node
var ComputeTaintTolerationPriorityReduce = NormalizeReduce(schedulerapi.MaxPriority, true)
var ComputeTaintTolerationPriorityReduce = NormalizeReduce(framework.MaxNodeScore, true)

View File

@ -20,9 +20,8 @@ import (
"reflect"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@ -79,7 +78,7 @@ func TestTaintAndToleration(t *testing.T) {
}}),
},
expectedList: []framework.NodeScore{
{Name: "nodeA", Score: schedulerapi.MaxPriority},
{Name: "nodeA", Score: framework.MaxNodeScore},
{Name: "nodeB", Score: 0},
},
},
@ -121,9 +120,9 @@ func TestTaintAndToleration(t *testing.T) {
}),
},
expectedList: []framework.NodeScore{
{Name: "nodeA", Score: schedulerapi.MaxPriority},
{Name: "nodeB", Score: schedulerapi.MaxPriority},
{Name: "nodeC", Score: schedulerapi.MaxPriority},
{Name: "nodeA", Score: framework.MaxNodeScore},
{Name: "nodeB", Score: framework.MaxNodeScore},
{Name: "nodeC", Score: framework.MaxNodeScore},
},
},
// the count of taints on a node that are not tolerated by pod, matters.
@ -157,7 +156,7 @@ func TestTaintAndToleration(t *testing.T) {
}),
},
expectedList: []framework.NodeScore{
{Name: "nodeA", Score: schedulerapi.MaxPriority},
{Name: "nodeA", Score: framework.MaxNodeScore},
{Name: "nodeB", Score: 5},
{Name: "nodeC", Score: 0},
},
@ -200,8 +199,8 @@ func TestTaintAndToleration(t *testing.T) {
}),
},
expectedList: []framework.NodeScore{
{Name: "nodeA", Score: schedulerapi.MaxPriority},
{Name: "nodeB", Score: schedulerapi.MaxPriority},
{Name: "nodeA", Score: framework.MaxNodeScore},
{Name: "nodeB", Score: framework.MaxNodeScore},
{Name: "nodeC", Score: 0},
},
},
@ -221,7 +220,7 @@ func TestTaintAndToleration(t *testing.T) {
}),
},
expectedList: []framework.NodeScore{
{Name: "nodeA", Score: schedulerapi.MaxPriority},
{Name: "nodeA", Score: framework.MaxNodeScore},
{Name: "nodeB", Score: 0},
},
},

View File

@ -17,7 +17,6 @@ limitations under the License.
package api
import (
"math"
"time"
v1 "k8s.io/api/core/v1"
@ -25,12 +24,6 @@ import (
)
const (
// MaxTotalPriority defines the max total priority value.
MaxTotalPriority = int64(math.MaxInt64)
// MaxPriority defines the max priority value.
MaxPriority = 10
// MaxWeight defines the max weight value.
MaxWeight = MaxTotalPriority / MaxPriority
// DefaultPercentageOfNodesToScore defines the percentage of nodes of all nodes
// that once found feasible, the scheduler stops looking for more nodes.
DefaultPercentageOfNodesToScore = 50

View File

@ -13,6 +13,7 @@ go_library(
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
@ -24,7 +25,10 @@ go_test(
name = "go_default_test",
srcs = ["validation_test.go"],
embed = [":go_default_library"],
deps = ["//pkg/scheduler/api:go_default_library"],
deps = [
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
],
)
filegroup(

View File

@ -20,12 +20,13 @@ import (
"errors"
"fmt"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
// ValidatePolicy checks for errors in the Config
@ -34,7 +35,7 @@ func ValidatePolicy(policy schedulerapi.Policy) error {
var validationErrors []error
for _, priority := range policy.Priorities {
if priority.Weight <= 0 || priority.Weight >= schedulerapi.MaxWeight {
if priority.Weight <= 0 || priority.Weight >= framework.MaxWeight {
validationErrors = append(validationErrors, fmt.Errorf("Priority %s should have a positive weight applied to it or it has overflown", priority.Name))
}
}

View File

@ -22,6 +22,7 @@ import (
"testing"
"k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
func TestValidatePolicy(t *testing.T) {
@ -52,7 +53,7 @@ func TestValidatePolicy(t *testing.T) {
},
{
name: "policy weight exceeds maximum",
policy: api.Policy{Priorities: []api.PriorityPolicy{{Name: "WeightPriority", Weight: api.MaxWeight}}},
policy: api.Policy{Priorities: []api.PriorityPolicy{{Name: "WeightPriority", Weight: framework.MaxWeight}}},
expected: errors.New("Priority WeightPriority should have a positive weight applied to it or it has overflown"),
},
{

View File

@ -17,10 +17,18 @@ limitations under the License.
package v1
import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
)
const (
// MinExtenderPriority defines the min priority value for extender.
MinExtenderPriority int = 0
// MaxExtenderPriority defines the max priority value for extender.
MaxExtenderPriority int = 10
)
// ExtenderPreemptionResult represents the result returned by preemption phase of extender.
type ExtenderPreemptionResult struct {
NodeNameToMetaVictims map[string]*MetaVictims

View File

@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/volumebinder"
"k8s.io/klog"
@ -549,11 +550,11 @@ func getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) ([]pr
func validateSelectedConfigs(configs []priorities.PriorityConfig) error {
var totalPriority int64
for _, config := range configs {
// Checks totalPriority against MaxTotalPriority to avoid overflow
if config.Weight*schedulerapi.MaxPriority > schedulerapi.MaxTotalPriority-totalPriority {
// Checks totalPriority against MaxTotalScore to avoid overflow
if config.Weight*framework.MaxNodeScore > framework.MaxTotalScore-totalPriority {
return fmt.Errorf("total priority of priority functions has overflown")
}
totalPriority += config.Weight * schedulerapi.MaxPriority
totalPriority += config.Weight * framework.MaxNodeScore
}
return nil
}
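
The guard above keeps a running total of Weight*MaxNodeScore and rejects any config whose contribution would push the total past MaxTotalScore; writing the comparison as a subtraction avoids computing the overflowing sum, assuming each weight is already bounded by the MaxWeight check in validation. A standalone sketch:

const (
	maxNodeScore  int64 = 10        // framework.MaxNodeScore
	maxTotalScore int64 = 1<<63 - 1 // framework.MaxTotalScore (math.MaxInt64)
)

func wouldOverflow(weights []int64) bool {
	var total int64
	for _, w := range weights {
		if w*maxNodeScore > maxTotalScore-total {
			return true
		}
		total += w * maxNodeScore
	}
	return false
}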

View File

@ -51,44 +51,6 @@ func TestAlgorithmNameValidation(t *testing.T) {
}
}
func TestValidatePriorityConfigOverFlow(t *testing.T) {
tests := []struct {
description string
configs []priorities.PriorityConfig
expected bool
}{
{
description: "one of the weights is MaxTotalPriority(MaxInt64)",
configs: []priorities.PriorityConfig{{Weight: api.MaxTotalPriority}, {Weight: 5}},
expected: true,
},
{
description: "after multiplication with MaxPriority the weight is larger than MaxWeight",
configs: []priorities.PriorityConfig{{Weight: api.MaxWeight + api.MaxPriority}, {Weight: 5}},
expected: true,
},
{
description: "normal weights",
configs: []priorities.PriorityConfig{{Weight: 10000}, {Weight: 5}},
expected: false,
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
err := validateSelectedConfigs(test.configs)
if test.expected {
if err == nil {
t.Errorf("Expected Overflow")
}
} else {
if err != nil {
t.Errorf("Did not expect an overflow")
}
}
})
}
}
func TestBuildScoringFunctionShapeFromRequestedToCapacityRatioArguments(t *testing.T) {
arguments := api.RequestedToCapacityRatioArguments{
UtilizationShape: []api.UtilizationShapePoint{

View File

@ -12,7 +12,6 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1",
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/util:go_default_library",

View File

@ -20,11 +20,11 @@ package v1alpha1
import (
"errors"
"math"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@ -71,10 +71,16 @@ const (
const (
// MaxNodeScore is the maximum score a Score plugin is expected to return.
MaxNodeScore int = schedulerapi.MaxPriority
MaxNodeScore int64 = 10
// MinNodeScore is the minimum score a Score plugin is expected to return.
MinNodeScore int = 0
MinNodeScore int64 = 0
// MaxTotalScore is the maximum total score.
MaxTotalScore int64 = math.MaxInt64
// MaxWeight defines the max weight value.
MaxWeight int64 = MaxTotalScore / MaxNodeScore
)
// Status indicates the result of running a plugin. It consists of a code and a

View File

@ -157,7 +157,7 @@ func (sp *ScorePlugin) Score(pc *framework.PluginContext, p *v1.Pod, nodeName st
if sp.numScoreCalled == 1 {
// The first node is scored the highest, the rest is scored lower.
sp.highScoreNode = nodeName
score = framework.MaxNodeScore
score = int(framework.MaxNodeScore)
}
return score, nil
}