Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-22 19:31:44 +00:00)
Merge pull request #63658 from xchapter7x/pkg-scheduler-algorithm-priorities
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

use subtest for table units (pkg-scheduler-algorithm-priorities)

**What this PR does / why we need it**: Update the scheduler's unit table tests to use subtests.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:

**Special notes for your reviewer**: breaks up PR: https://github.com/kubernetes/kubernetes/pull/63281
/ref #63267

**Release note**:
```release-note
This PR leverages subtests on the existing table tests for the scheduler units. Some error/status messages and functions are refactored to align with the new approach.
```
Commit af9531b8a7
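For readers skimming the diff below: the whole change is one mechanical pattern. Each table-driven test renames its `test` field to `name`, wraps the loop body in `t.Run(test.name, ...)`, and drops the `%s`/`test.test` prefix from error messages, since the subtest name now appears in the failure output. A minimal sketch of the pattern (the `add` function and the test cases here are made up for illustration; they are not part of the PR):

```go
package example

import "testing"

// add is a stand-in function under test, used only to illustrate the pattern.
func add(a, b int) int { return a + b }

func TestAdd(t *testing.T) {
	tests := []struct {
		name     string // subtest name, passed to t.Run
		a, b     int
		expected int
	}{
		{name: "both positive", a: 1, b: 2, expected: 3},
		{name: "with zero", a: 0, b: 5, expected: 5},
	}
	for _, test := range tests {
		// Each table entry becomes a named subtest: failures are reported under
		// the entry's name, so the error message no longer needs a %s prefix.
		t.Run(test.name, func(t *testing.T) {
			if got := add(test.a, test.b); got != test.expected {
				t.Errorf("expected %d, got %d", test.expected, got)
			}
		})
	}
}
```

A single case can then be run on its own, e.g. `go test -run 'TestAdd/with_zero'`.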
@@ -216,7 +216,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 		pods []*v1.Pod
 		nodes []*v1.Node
 		expectedList schedulerapi.HostPriorityList
-		test string
+		name string
 	}{
 		{
 			/*
@@ -233,7 +233,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			pod: &v1.Pod{Spec: noResources},
 			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
-			test: "nothing scheduled, nothing requested",
+			name: "nothing scheduled, nothing requested",
 		},
 		{
 			/*
@@ -250,7 +250,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			pod: &v1.Pod{Spec: cpuAndMemory},
 			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
-			test: "nothing scheduled, resources requested, differently sized machines",
+			name: "nothing scheduled, resources requested, differently sized machines",
 		},
 		{
 			/*
@@ -267,7 +267,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			pod: &v1.Pod{Spec: noResources},
 			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
-			test: "no resources requested, pods scheduled",
+			name: "no resources requested, pods scheduled",
 			pods: []*v1.Pod{
 				{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
 				{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
@@ -290,7 +290,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			pod: &v1.Pod{Spec: noResources},
 			nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 6}},
-			test: "no resources requested, pods scheduled with resources",
+			name: "no resources requested, pods scheduled with resources",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
 				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
@@ -313,7 +313,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			pod: &v1.Pod{Spec: cpuAndMemory},
 			nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 9}},
-			test: "resources requested, pods scheduled with resources",
+			name: "resources requested, pods scheduled with resources",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly},
 				{Spec: cpuAndMemory},
@@ -334,7 +334,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			pod: &v1.Pod{Spec: cpuAndMemory},
 			nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 6}},
-			test: "resources requested, pods scheduled with resources, differently sized machines",
+			name: "resources requested, pods scheduled with resources, differently sized machines",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly},
 				{Spec: cpuAndMemory},
@@ -355,7 +355,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			pod: &v1.Pod{Spec: cpuOnly},
 			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
-			test: "requested resources exceed node capacity",
+			name: "requested resources exceed node capacity",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly},
 				{Spec: cpuAndMemory},
@@ -365,7 +365,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			pod: &v1.Pod{Spec: noResources},
 			nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
-			test: "zero node resources, pods scheduled with resources",
+			name: "zero node resources, pods scheduled with resources",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly},
 				{Spec: cpuAndMemory},
@@ -389,7 +389,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			},
 			nodes: []*v1.Node{makeNode("machine3", 3500, 40000), makeNode("machine4", 4000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine3", Score: 8}, {Host: "machine4", Score: 9}},
-			test: "Include volume count on a node for balanced resource allocation",
+			name: "Include volume count on a node for balanced resource allocation",
 			pods: []*v1.Pod{
 				{Spec: cpuAndMemory3},
 				{Spec: podwithVol1},
@@ -400,20 +400,22 @@ func TestBalancedResourceAllocation(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
-		if len(test.pod.Spec.Volumes) > 0 {
-			maxVolumes := 5
-			for _, info := range nodeNameToInfo {
-				info.TransientInfo.TransNodeInfo.AllocatableVolumesCount = getExistingVolumeCountForNode(info.Pods(), maxVolumes)
-				info.TransientInfo.TransNodeInfo.RequestedVolumes = len(test.pod.Spec.Volumes)
-			}
-		}
-		list, err := priorityFunction(BalancedResourceAllocationMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
-		if err != nil {
-			t.Errorf("unexpected error: %v", err)
-		}
-		if !reflect.DeepEqual(test.expectedList, list) {
-			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
+			if len(test.pod.Spec.Volumes) > 0 {
+				maxVolumes := 5
+				for _, info := range nodeNameToInfo {
+					info.TransientInfo.TransNodeInfo.AllocatableVolumesCount = getExistingVolumeCountForNode(info.Pods(), maxVolumes)
+					info.TransientInfo.TransNodeInfo.RequestedVolumes = len(test.pod.Spec.Volumes)
+				}
+			}
+			list, err := priorityFunction(BalancedResourceAllocationMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
+			if err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			if !reflect.DeepEqual(test.expectedList, list) {
+				t.Errorf("expected %#v, got %#v", test.expectedList, list)
+			}
+		})
 	}
 }
@@ -110,7 +110,7 @@ func TestImageLocalityPriority(t *testing.T) {
 		pods []*v1.Pod
 		nodes []*v1.Node
 		expectedList schedulerapi.HostPriorityList
-		test string
+		name string
 	}{
 		{
 			// Pod: gcr.io/40 gcr.io/250
@@ -125,7 +125,7 @@ func TestImageLocalityPriority(t *testing.T) {
 			pod: &v1.Pod{Spec: test40250},
 			nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 3}},
-			test: "two images spread on two nodes, prefer the larger image one",
+			name: "two images spread on two nodes, prefer the larger image one",
 		},
 		{
 			// Pod: gcr.io/40 gcr.io/140
@@ -140,7 +140,7 @@ func TestImageLocalityPriority(t *testing.T) {
 			pod: &v1.Pod{Spec: test40140},
 			nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 0}},
-			test: "two images on one node, prefer this node",
+			name: "two images on one node, prefer this node",
 		},
 		{
 			// Pod: gcr.io/2000 gcr.io/10
@@ -155,23 +155,25 @@ func TestImageLocalityPriority(t *testing.T) {
 			pod: &v1.Pod{Spec: testMinMax},
 			nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
-			test: "if exceed limit, use limit",
+			name: "if exceed limit, use limit",
 		},
 	}
 
 	for _, test := range tests {
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
-		list, err := priorityFunction(ImageLocalityPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
-		if err != nil {
-			t.Errorf("unexpected error: %v", err)
-		}
-
-		sort.Sort(test.expectedList)
-		sort.Sort(list)
-
-		if !reflect.DeepEqual(test.expectedList, list) {
-			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
+			list, err := priorityFunction(ImageLocalityPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
+			if err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+
+			sort.Sort(test.expectedList)
+			sort.Sort(list)
+
+			if !reflect.DeepEqual(test.expectedList, list) {
+				t.Errorf("expected %#v, got %#v", test.expectedList, list)
+			}
+		})
 	}
 }
 
@@ -267,7 +267,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 		pods []*v1.Pod
 		nodes []*v1.Node
 		expectedList schedulerapi.HostPriorityList
-		test string
+		name string
 	}{
 		{
 			pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
@@ -277,7 +277,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
-			test: "all machines are same priority as Affinity is nil",
+			name: "all machines are same priority as Affinity is nil",
 		},
 		// the node(machine1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector get high score
 		// the node(machine3) that don't have the label {"region": "whatever the value is"} (mismatch the topology key) but that have existing pods that match the labelSelector get low score
@@ -295,7 +295,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
-			test: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" +
+			name: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" +
 				"which doesn't match either pods in nodes or in topology key",
 		},
 		// the node1(machine1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector get high score
@@ -313,7 +313,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
-			test: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score",
+			name: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score",
 		},
 		// there are 2 regions, say regionChina(machine1,machine3,machine4) and regionIndia(machine2,machine5), both regions have nodes that match the preference.
 		// But there are more nodes(actually more existing pods) in regionChina that match the preference than regionIndia.
@@ -337,7 +337,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 5}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: schedulerapi.MaxPriority}, {Host: "machine5", Score: 5}},
-			test: "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score",
+			name: "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score",
 		},
 		// Test with the different operators and values for pod affinity scheduling preference, including some match failures.
 		{
@@ -353,7 +353,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
-			test: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ",
+			name: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ",
 		},
 		// Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods,
 		// but the existing pods have the inter pod affinity preference while the pod to schedule satisfy the preference.
@@ -369,7 +369,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
-			test: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
+			name: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
 		},
 		{
 			pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
@@ -383,7 +383,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
-			test: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
+			name: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
 		},
 
 		// The pod to schedule prefer to stay away from some existing pods at node level using the pod anti affinity.
@@ -403,7 +403,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
-			test: "Anti Affinity: pod that doesnot match existing pods in node will get high score ",
+			name: "Anti Affinity: pod that doesnot match existing pods in node will get high score ",
 		},
 		{
 			pod: &v1.Pod{Spec: v1.PodSpec{NodeName: "", Affinity: awayFromS1InAz}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
@@ -416,7 +416,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
-			test: "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ",
+			name: "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ",
 		},
 		{
 			pod: &v1.Pod{Spec: v1.PodSpec{NodeName: "", Affinity: awayFromS1InAz}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
@@ -430,7 +430,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
-			test: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score",
+			name: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score",
 		},
 		// Test the symmetry cases for anti affinity
 		{
@@ -444,7 +444,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz2}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
-			test: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score",
+			name: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score",
 		},
 		// Test both affinity and anti-affinity
 		{
@@ -458,7 +458,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
-			test: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity",
+			name: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity",
 		},
 		// Combined cases considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels (they are in the same RC/service),
 		// the pod prefer to run together with its brother pods in the same region, but wants to stay away from them at node level,
@@ -483,7 +483,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 4}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: schedulerapi.MaxPriority}, {Host: "machine5", Score: 4}},
-			test: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels",
+			name: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels",
 		},
 		// Consider Affinity, Anti Affinity and symmetry together.
 		// for Affinity, the weights are: 8, 0, 0, 0
@@ -505,24 +505,26 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelAzAz2}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: 0}},
-			test: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry",
+			name: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry",
 		},
 	}
 	for _, test := range tests {
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
-		interPodAffinity := InterPodAffinity{
-			info: FakeNodeListInfo(test.nodes),
-			nodeLister: schedulertesting.FakeNodeLister(test.nodes),
-			podLister: schedulertesting.FakePodLister(test.pods),
-			hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
-		}
-		list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes)
-		if err != nil {
-			t.Errorf("unexpected error: %v", err)
-		}
-		if !reflect.DeepEqual(test.expectedList, list) {
-			t.Errorf("%s: \nexpected \n\t%#v, \ngot \n\t%#v\n", test.test, test.expectedList, list)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
+			interPodAffinity := InterPodAffinity{
+				info: FakeNodeListInfo(test.nodes),
+				nodeLister: schedulertesting.FakeNodeLister(test.nodes),
+				podLister: schedulertesting.FakePodLister(test.pods),
+				hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
+			}
+			list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes)
+			if err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			if !reflect.DeepEqual(test.expectedList, list) {
+				t.Errorf("expected \n\t%#v, \ngot \n\t%#v\n", test.expectedList, list)
+			}
+		})
 	}
 }
 
@@ -563,7 +565,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 		nodes []*v1.Node
 		hardPodAffinityWeight int32
 		expectedList schedulerapi.HostPriorityList
-		test string
+		name string
 	}{
 		{
 			pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelServiceS1}},
@@ -578,7 +580,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 			},
 			hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
-			test: "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score",
+			name: "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score",
 		},
 		{
 			pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelServiceS1}},
@@ -593,23 +595,25 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 			},
 			hardPodAffinityWeight: 0,
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
-			test: "Hard Pod Affinity symmetry: hard pod affinity symmetry is closed(weights 0), then nodes that match the hard pod affinity symmetry rules, get same score with those not match",
+			name: "Hard Pod Affinity symmetry: hard pod affinity symmetry is closed(weights 0), then nodes that match the hard pod affinity symmetry rules, get same score with those not match",
 		},
 	}
 	for _, test := range tests {
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
-		ipa := InterPodAffinity{
-			info: FakeNodeListInfo(test.nodes),
-			nodeLister: schedulertesting.FakeNodeLister(test.nodes),
-			podLister: schedulertesting.FakePodLister(test.pods),
-			hardPodAffinityWeight: test.hardPodAffinityWeight,
-		}
-		list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes)
-		if err != nil {
-			t.Errorf("unexpected error: %v", err)
-		}
-		if !reflect.DeepEqual(test.expectedList, list) {
-			t.Errorf("%s: \nexpected \n\t%#v, \ngot \n\t%#v\n", test.test, test.expectedList, list)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
+			ipa := InterPodAffinity{
+				info: FakeNodeListInfo(test.nodes),
+				nodeLister: schedulertesting.FakeNodeLister(test.nodes),
+				podLister: schedulertesting.FakePodLister(test.pods),
+				hardPodAffinityWeight: test.hardPodAffinityWeight,
+			}
+			list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes)
+			if err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			if !reflect.DeepEqual(test.expectedList, list) {
+				t.Errorf("expected \n\t%#v, \ngot \n\t%#v\n", test.expectedList, list)
+			}
+		})
 	}
 }
@@ -94,7 +94,7 @@ func TestLeastRequested(t *testing.T) {
 		pods []*v1.Pod
 		nodes []*v1.Node
 		expectedList schedulerapi.HostPriorityList
-		test string
+		name string
 	}{
 		{
 			/*
@@ -111,7 +111,7 @@ func TestLeastRequested(t *testing.T) {
 			pod: &v1.Pod{Spec: noResources},
 			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
-			test: "nothing scheduled, nothing requested",
+			name: "nothing scheduled, nothing requested",
 		},
 		{
 			/*
@@ -128,7 +128,7 @@ func TestLeastRequested(t *testing.T) {
 			pod: &v1.Pod{Spec: cpuAndMemory},
 			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 5}},
-			test: "nothing scheduled, resources requested, differently sized machines",
+			name: "nothing scheduled, resources requested, differently sized machines",
 		},
 		{
 			/*
@@ -145,7 +145,7 @@ func TestLeastRequested(t *testing.T) {
 			pod: &v1.Pod{Spec: noResources},
 			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
-			test: "no resources requested, pods scheduled",
+			name: "no resources requested, pods scheduled",
 			pods: []*v1.Pod{
 				{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
 				{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
@@ -168,7 +168,7 @@ func TestLeastRequested(t *testing.T) {
 			pod: &v1.Pod{Spec: noResources},
 			nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: 5}},
-			test: "no resources requested, pods scheduled with resources",
+			name: "no resources requested, pods scheduled with resources",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
 				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
@@ -191,7 +191,7 @@ func TestLeastRequested(t *testing.T) {
 			pod: &v1.Pod{Spec: cpuAndMemory},
 			nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 4}},
-			test: "resources requested, pods scheduled with resources",
+			name: "resources requested, pods scheduled with resources",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly},
 				{Spec: cpuAndMemory},
@@ -212,7 +212,7 @@ func TestLeastRequested(t *testing.T) {
 			pod: &v1.Pod{Spec: cpuAndMemory},
 			nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 6}},
-			test: "resources requested, pods scheduled with resources, differently sized machines",
+			name: "resources requested, pods scheduled with resources, differently sized machines",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly},
 				{Spec: cpuAndMemory},
@@ -233,7 +233,7 @@ func TestLeastRequested(t *testing.T) {
 			pod: &v1.Pod{Spec: cpuOnly},
 			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 2}},
-			test: "requested resources exceed node capacity",
+			name: "requested resources exceed node capacity",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly},
 				{Spec: cpuAndMemory},
@@ -243,7 +243,7 @@ func TestLeastRequested(t *testing.T) {
 			pod: &v1.Pod{Spec: noResources},
 			nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
-			test: "zero node resources, pods scheduled with resources",
+			name: "zero node resources, pods scheduled with resources",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly},
 				{Spec: cpuAndMemory},
@@ -252,13 +252,15 @@ func TestLeastRequested(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
-		list, err := priorityFunction(LeastRequestedPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
-		if err != nil {
-			t.Errorf("unexpected error: %v", err)
-		}
-		if !reflect.DeepEqual(test.expectedList, list) {
-			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
+			list, err := priorityFunction(LeastRequestedPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
+			if err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			if !reflect.DeepEqual(test.expectedList, list) {
+				t.Errorf("expected %#v, got %#v", test.expectedList, list)
+			}
+		})
 	}
 }
@@ -117,13 +117,13 @@ func TestPriorityMetadata(t *testing.T) {
 	}
 	tests := []struct {
 		pod *v1.Pod
-		test string
+		name string
 		expected interface{}
 	}{
 		{
 			pod: nil,
 			expected: nil,
-			test: "pod is nil , priorityMetadata is nil",
+			name: "pod is nil , priorityMetadata is nil",
 		},
 		{
 			pod: podWithTolerationsAndAffinity,
@@ -132,7 +132,7 @@ func TestPriorityMetadata(t *testing.T) {
 				podTolerations: tolerations,
 				affinity: podAffinity,
 			},
-			test: "Produce a priorityMetadata with default requests",
+			name: "Produce a priorityMetadata with default requests",
 		},
 		{
 			pod: podWithTolerationsAndRequests,
@@ -141,7 +141,7 @@ func TestPriorityMetadata(t *testing.T) {
 				podTolerations: tolerations,
 				affinity: nil,
 			},
-			test: "Produce a priorityMetadata with specified requests",
+			name: "Produce a priorityMetadata with specified requests",
 		},
 		{
 			pod: podWithAffinityAndRequests,
@@ -150,7 +150,7 @@ func TestPriorityMetadata(t *testing.T) {
 				podTolerations: nil,
 				affinity: podAffinity,
 			},
-			test: "Produce a priorityMetadata with specified requests",
+			name: "Produce a priorityMetadata with specified requests",
 		},
 	}
 	mataDataProducer := NewPriorityMetadataFactory(
@@ -159,9 +159,11 @@ func TestPriorityMetadata(t *testing.T) {
 		schedulertesting.FakeReplicaSetLister([]*extensions.ReplicaSet{}),
 		schedulertesting.FakeStatefulSetLister([]*apps.StatefulSet{}))
 	for _, test := range tests {
-		ptData := mataDataProducer(test.pod, nil)
-		if !reflect.DeepEqual(test.expected, ptData) {
-			t.Errorf("%s: expected %#v, got %#v", test.test, test.expected, ptData)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			ptData := mataDataProducer(test.pod, nil)
+			if !reflect.DeepEqual(test.expected, ptData) {
+				t.Errorf("expected %#v, got %#v", test.expected, ptData)
+			}
+		})
 	}
 }
@@ -109,7 +109,7 @@ func TestMostRequested(t *testing.T) {
 		pods []*v1.Pod
 		nodes []*v1.Node
 		expectedList schedulerapi.HostPriorityList
-		test string
+		name string
 	}{
 		{
 			/*
@@ -126,7 +126,7 @@ func TestMostRequested(t *testing.T) {
 			pod: &v1.Pod{Spec: noResources},
 			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
-			test: "nothing scheduled, nothing requested",
+			name: "nothing scheduled, nothing requested",
 		},
 		{
 			/*
@@ -143,7 +143,7 @@ func TestMostRequested(t *testing.T) {
 			pod: &v1.Pod{Spec: cpuAndMemory},
 			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 5}},
-			test: "nothing scheduled, resources requested, differently sized machines",
+			name: "nothing scheduled, resources requested, differently sized machines",
 		},
 		{
 			/*
@@ -160,7 +160,7 @@ func TestMostRequested(t *testing.T) {
 			pod: &v1.Pod{Spec: noResources},
 			nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 4}},
-			test: "no resources requested, pods scheduled with resources",
+			name: "no resources requested, pods scheduled with resources",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
 				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
@@ -183,7 +183,7 @@ func TestMostRequested(t *testing.T) {
 			pod: &v1.Pod{Spec: cpuAndMemory},
 			nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 5}},
-			test: "resources requested, pods scheduled with resources",
+			name: "resources requested, pods scheduled with resources",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly},
 				{Spec: cpuAndMemory},
@@ -204,18 +204,20 @@ func TestMostRequested(t *testing.T) {
 			pod: &v1.Pod{Spec: bigCPUAndMemory},
 			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 10000, 8000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 2}},
-			test: "resources requested with more than the node, pods scheduled with resources",
+			name: "resources requested with more than the node, pods scheduled with resources",
 		},
 	}
 
 	for _, test := range tests {
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
-		list, err := priorityFunction(MostRequestedPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
-		if err != nil {
-			t.Errorf("unexpected error: %v", err)
-		}
-		if !reflect.DeepEqual(test.expectedList, list) {
-			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
+			list, err := priorityFunction(MostRequestedPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
+			if err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			if !reflect.DeepEqual(test.expectedList, list) {
+				t.Errorf("expected %#v, got %#v", test.expectedList, list)
+			}
+		})
 	}
 }
@@ -105,7 +105,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 		pod *v1.Pod
 		nodes []*v1.Node
 		expectedList schedulerapi.HostPriorityList
-		test string
+		name string
 	}{
 		{
 			pod: &v1.Pod{
@@ -119,7 +119,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
-			test: "all machines are same priority as NodeAffinity is nil",
+			name: "all machines are same priority as NodeAffinity is nil",
 		},
 		{
 			pod: &v1.Pod{
@@ -133,7 +133,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
-			test: "no machine macthes preferred scheduling requirements in NodeAffinity of pod so all machines' priority is zero",
+			name: "no machine macthes preferred scheduling requirements in NodeAffinity of pod so all machines' priority is zero",
 		},
 		{
 			pod: &v1.Pod{
@@ -147,7 +147,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
-			test: "only machine1 matches the preferred scheduling requirements of pod",
+			name: "only machine1 matches the preferred scheduling requirements of pod",
 		},
 		{
 			pod: &v1.Pod{
@@ -161,19 +161,21 @@ func TestNodeAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
 			},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine5", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 3}},
-			test: "all machines matches the preferred scheduling requirements of pod but with different priorities ",
+			name: "all machines matches the preferred scheduling requirements of pod but with different priorities ",
 		},
 	}
 
 	for _, test := range tests {
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes)
-		nap := priorityFunction(CalculateNodeAffinityPriorityMap, CalculateNodeAffinityPriorityReduce, nil)
-		list, err := nap(test.pod, nodeNameToInfo, test.nodes)
-		if err != nil {
-			t.Errorf("unexpected error: %v", err)
-		}
-		if !reflect.DeepEqual(test.expectedList, list) {
-			t.Errorf("%s: \nexpected %#v, \ngot %#v", test.test, test.expectedList, list)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes)
+			nap := priorityFunction(CalculateNodeAffinityPriorityMap, CalculateNodeAffinityPriorityReduce, nil)
+			list, err := nap(test.pod, nodeNameToInfo, test.nodes)
+			if err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			if !reflect.DeepEqual(test.expectedList, list) {
+				t.Errorf("expected %#v, \ngot %#v", test.expectedList, list)
+			}
+		})
 	}
 }
@@ -36,7 +36,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 		label        string
 		presence     bool
 		expectedList schedulerapi.HostPriorityList
-		test         string
+		name         string
 	}{
 		{
 			nodes: []*v1.Node{
@@ -47,7 +47,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
 			label:        "baz",
 			presence:     true,
-			test:         "no match found, presence true",
+			name:         "no match found, presence true",
 		},
 		{
 			nodes: []*v1.Node{
@@ -58,7 +58,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
 			label:        "baz",
 			presence:     false,
-			test:         "no match found, presence false",
+			name:         "no match found, presence false",
 		},
 		{
 			nodes: []*v1.Node{
@@ -69,7 +69,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
 			label:        "foo",
 			presence:     true,
-			test:         "one match found, presence true",
+			name:         "one match found, presence true",
 		},
 		{
 			nodes: []*v1.Node{
@@ -80,7 +80,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
 			label:        "foo",
 			presence:     false,
-			test:         "one match found, presence false",
+			name:         "one match found, presence false",
 		},
 		{
 			nodes: []*v1.Node{
@@ -91,7 +91,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
 			label:        "bar",
 			presence:     true,
-			test:         "two matches found, presence true",
+			name:         "two matches found, presence true",
 		},
 		{
 			nodes: []*v1.Node{
@@ -102,25 +102,27 @@ func TestNewNodeLabelPriority(t *testing.T) {
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
 			label:        "bar",
 			presence:     false,
-			test:         "two matches found, presence false",
+			name:         "two matches found, presence false",
 		},
 	}

 	for _, test := range tests {
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes)
-		labelPrioritizer := &NodeLabelPrioritizer{
-			label:    test.label,
-			presence: test.presence,
-		}
-		list, err := priorityFunction(labelPrioritizer.CalculateNodeLabelPriorityMap, nil, nil)(nil, nodeNameToInfo, test.nodes)
-		if err != nil {
-			t.Errorf("unexpected error: %v", err)
-		}
-		// sort the two lists to avoid failures on account of different ordering
-		sort.Sort(test.expectedList)
-		sort.Sort(list)
-		if !reflect.DeepEqual(test.expectedList, list) {
-			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes)
+			labelPrioritizer := &NodeLabelPrioritizer{
+				label:    test.label,
+				presence: test.presence,
+			}
+			list, err := priorityFunction(labelPrioritizer.CalculateNodeLabelPriorityMap, nil, nil)(nil, nodeNameToInfo, test.nodes)
+			if err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			// sort the two lists to avoid failures on account of different ordering
+			sort.Sort(test.expectedList)
+			sort.Sort(list)
+			if !reflect.DeepEqual(test.expectedList, list) {
+				t.Errorf("expected %#v, got %#v", test.expectedList, list)
+			}
+		})
 	}
 }
@@ -84,7 +84,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
 		pod          *v1.Pod
 		nodes        []*v1.Node
 		expectedList schedulerapi.HostPriorityList
-		test         string
+		name         string
 	}{
 		{
 			pod: &v1.Pod{
@@ -97,7 +97,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
 			},
 			nodes:        testNodes,
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
-			test:         "pod managed by ReplicationController should avoid a node, this node get lowest priority score",
+			name:         "pod managed by ReplicationController should avoid a node, this node get lowest priority score",
 		},
 		{
 			pod: &v1.Pod{
@@ -110,7 +110,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
 			},
 			nodes:        testNodes,
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
-			test:         "ownership by random controller should be ignored",
+			name:         "ownership by random controller should be ignored",
 		},
 		{
 			pod: &v1.Pod{
@@ -123,7 +123,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
 			},
 			nodes:        testNodes,
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
-			test:         "owner without Controller field set should be ignored",
+			name:         "owner without Controller field set should be ignored",
 		},
 		{
 			pod: &v1.Pod{
@@ -136,21 +136,23 @@ func TestNodePreferAvoidPriority(t *testing.T) {
 			},
 			nodes:        testNodes,
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
-			test:         "pod managed by ReplicaSet should avoid a node, this node get lowest priority score",
+			name:         "pod managed by ReplicaSet should avoid a node, this node get lowest priority score",
 		},
 	}

 	for _, test := range tests {
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes)
-		list, err := priorityFunction(CalculateNodePreferAvoidPodsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
-		if err != nil {
-			t.Errorf("unexpected error: %v", err)
-		}
-		// sort the two lists to avoid failures on account of different ordering
-		sort.Sort(test.expectedList)
-		sort.Sort(list)
-		if !reflect.DeepEqual(test.expectedList, list) {
-			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes)
+			list, err := priorityFunction(CalculateNodePreferAvoidPodsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
+			if err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			// sort the two lists to avoid failures on account of different ordering
+			sort.Sort(test.expectedList)
+			sort.Sort(list)
+			if !reflect.DeepEqual(test.expectedList, list) {
+				t.Errorf("expected %#v, got %#v", test.expectedList, list)
+			}
+		})
 	}
 }
@@ -103,49 +103,50 @@ func TestResourceLimistPriority(t *testing.T) {
 		pod          *v1.Pod
 		nodes        []*v1.Node
 		expectedList schedulerapi.HostPriorityList
-		test         string
+		name         string
 	}{
 		{
 			pod:          &v1.Pod{Spec: noResources},
 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 0), makeNode("machine3", 0, 10000), makeNode("machine4", 0, 0)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}, {Host: "machine4", Score: 0}},
-			test:         "pod does not specify its resource limits",
+			name:         "pod does not specify its resource limits",
 		},
 		{
 			pod:          &v1.Pod{Spec: cpuOnly},
 			nodes:        []*v1.Node{makeNode("machine1", 3000, 10000), makeNode("machine2", 2000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 0}},
-			test:         "pod only specifies cpu limits",
+			name:         "pod only specifies cpu limits",
 		},
 		{
 			pod:          &v1.Pod{Spec: memOnly},
 			nodes:        []*v1.Node{makeNode("machine1", 4000, 4000), makeNode("machine2", 5000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 1}},
-			test:         "pod only specifies mem limits",
+			name:         "pod only specifies mem limits",
 		},
 		{
 			pod:          &v1.Pod{Spec: cpuAndMemory},
 			nodes:        []*v1.Node{makeNode("machine1", 4000, 4000), makeNode("machine2", 5000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 1}},
-			test:         "pod specifies both cpu and mem limits",
+			name:         "pod specifies both cpu and mem limits",
 		},
 		{
 			pod:          &v1.Pod{Spec: cpuAndMemory},
 			nodes:        []*v1.Node{makeNode("machine1", 0, 0)},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}},
-			test:         "node does not advertise its allocatables",
+			name:         "node does not advertise its allocatables",
 		},
 	}

 	for _, test := range tests {
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes)
-		list, err := priorityFunction(ResourceLimitsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
-		if err != nil {
-			t.Errorf("unexpected error: %v", err)
-		}
-		if !reflect.DeepEqual(test.expectedList, list) {
-			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
-		}
+		t.Run(test.name, func(t *testing.T) {
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes)
+			list, err := priorityFunction(ResourceLimitsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
+			if err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			if !reflect.DeepEqual(test.expectedList, list) {
+				t.Errorf("expected %#v, got %#v", test.expectedList, list)
+			}
+		})
 	}

 }
@@ -64,20 +64,20 @@ func TestSelectorSpreadPriority(t *testing.T) {
 		services     []*v1.Service
 		sss          []*apps.StatefulSet
 		expectedList schedulerapi.HostPriorityList
-		test         string
+		name         string
 	}{
 		{
 			pod:          new(v1.Pod),
 			nodes:        []string{"machine1", "machine2"},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
-			test:         "nothing scheduled",
+			name:         "nothing scheduled",
 		},
 		{
 			pod:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
 			pods:         []*v1.Pod{{Spec: zone1Spec}},
 			nodes:        []string{"machine1", "machine2"},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
-			test:         "no services",
+			name:         "no services",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
@@ -85,7 +85,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
-			test:         "different services",
+			name:         "different services",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
@@ -96,7 +96,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
-			test:         "two pods, one service pod",
+			name:         "two pods, one service pod",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
@@ -110,7 +110,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
-			test:         "five pods, one service pod in no namespace",
+			name:         "five pods, one service pod in no namespace",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}},
@@ -123,7 +123,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
-			test:         "four pods, one service pod in default namespace",
+			name:         "four pods, one service pod in default namespace",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
@@ -137,7 +137,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"}}},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
-			test:         "five pods, one service pod in specific namespace",
+			name:         "five pods, one service pod in specific namespace",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
@@ -149,7 +149,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
-			test:         "three pods, two service pods on different machines",
+			name:         "three pods, two service pods on different machines",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
@@ -162,7 +162,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 0}},
-			test:         "four pods, three service pods",
+			name:         "four pods, three service pods",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
@@ -174,7 +174,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
-			test:         "service with partial pod label matches",
+			name:         "service with partial pod label matches",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
@@ -189,7 +189,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			// "baz=blah" matches both labels1 and labels2, and "foo=bar" matches only labels 1. This means that we assume that we want to
 			// do spreading between all pods. The result should be exactly as above.
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
-			test:         "service with partial pod label matches with service and replication controller",
+			name:         "service with partial pod label matches with service and replication controller",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
@@ -203,7 +203,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
 			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
-			test:         "service with partial pod label matches with service and replica set",
+			name:         "service with partial pod label matches with service and replica set",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
@@ -216,7 +216,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
 			sss:      []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
-			test:         "service with partial pod label matches with service and replica set",
+			name:         "service with partial pod label matches with service and replica set",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
@@ -230,7 +230,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
 			// Taken together Service and Replication Controller should match all Pods, hence result should be equal to one above.
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
-			test:         "disjoined service and replication controller should be treated equally",
+			name:         "disjoined service and replication controller should be treated equally",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
@@ -244,7 +244,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
 			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
-			test:         "disjoined service and replica set should be treated equally",
+			name:         "disjoined service and replica set should be treated equally",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
@@ -257,7 +257,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
 			sss:      []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
-			test:         "disjoined service and replica set should be treated equally",
+			name:         "disjoined service and replica set should be treated equally",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
@@ -270,7 +270,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
 			// Both Nodes have one pod from the given RC, hence both get 0 score.
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
-			test:         "Replication controller with partial pod label matches",
+			name:         "Replication controller with partial pod label matches",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
@@ -283,7 +283,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
 			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
-			test:         "Replica set with partial pod label matches",
+			name:         "Replica set with partial pod label matches",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
@@ -296,7 +296,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			sss: []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
 			// We use StatefulSet, instead of ReplicationController. The result should be exactly as above.
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
-			test:         "StatefulSet with partial pod label matches",
+			name:         "StatefulSet with partial pod label matches",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
@@ -308,7 +308,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes: []string{"machine1", "machine2"},
 			rcs:   []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}},
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
-			test:         "Another replication controller with partial pod label matches",
+			name:         "Another replication controller with partial pod label matches",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
@@ -321,7 +321,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
 			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
-			test:         "Another replication set with partial pod label matches",
+			name:         "Another replication set with partial pod label matches",
 		},
 		{
 			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
@@ -334,34 +334,36 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			sss: []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
 			// We use StatefulSet, instead of ReplicationController. The result should be exactly as above.
 			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
-			test:         "Another stateful set with partial pod label matches",
+			name:         "Another stateful set with partial pod label matches",
 		},
 	}

-	for i, test := range tests {
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeNodeList(test.nodes))
-		selectorSpread := SelectorSpread{
-			serviceLister:     schedulertesting.FakeServiceLister(test.services),
-			controllerLister:  schedulertesting.FakeControllerLister(test.rcs),
-			replicaSetLister:  schedulertesting.FakeReplicaSetLister(test.rss),
-			statefulSetLister: schedulertesting.FakeStatefulSetLister(test.sss),
-		}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeNodeList(test.nodes))
+			selectorSpread := SelectorSpread{
+				serviceLister:     schedulertesting.FakeServiceLister(test.services),
+				controllerLister:  schedulertesting.FakeControllerLister(test.rcs),
+				replicaSetLister:  schedulertesting.FakeReplicaSetLister(test.rss),
+				statefulSetLister: schedulertesting.FakeStatefulSetLister(test.sss),
+			}

 			mataDataProducer := NewPriorityMetadataFactory(
 				schedulertesting.FakeServiceLister(test.services),
 				schedulertesting.FakeControllerLister(test.rcs),
 				schedulertesting.FakeReplicaSetLister(test.rss),
 				schedulertesting.FakeStatefulSetLister(test.sss))
 			mataData := mataDataProducer(test.pod, nodeNameToInfo)

 			ttp := priorityFunction(selectorSpread.CalculateSpreadPriorityMap, selectorSpread.CalculateSpreadPriorityReduce, mataData)
 			list, err := ttp(test.pod, nodeNameToInfo, makeNodeList(test.nodes))
 			if err != nil {
-				t.Errorf("unexpected error: %v index : %d\n", err, i)
+				t.Errorf("unexpected error: %v \n", err)
 			}
 			if !reflect.DeepEqual(test.expectedList, list) {
-				t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
+				t.Errorf("expected %#v, got %#v", test.expectedList, list)
 			}
+		})
 	}
 }

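A practical side effect of the conversion shown in the hunks above is that a single table case can now be targeted by name: `go test -run` matches subtests using a `/` separator, and spaces in a case name appear as underscores in the reported subtest name. For example, something like `go test -run 'TestSelectorSpreadPriority/no_services' -v ./pkg/scheduler/algorithm/priorities` would run only that entry (the package path and case name here are illustrative, taken from this diff).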
@@ -413,7 +415,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 		services     []*v1.Service
 		sss          []*apps.StatefulSet
 		expectedList schedulerapi.HostPriorityList
-		test         string
+		name         string
 	}{
 		{
 			pod: new(v1.Pod),
@@ -425,7 +427,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
 				{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
 			},
-			test: "nothing scheduled",
+			name: "nothing scheduled",
 		},
 		{
 			pod: buildPod("", labels1, nil),
@@ -438,7 +440,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
 				{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
 			},
-			test: "no services",
+			name: "no services",
 		},
 		{
 			pod: buildPod("", labels1, nil),
@@ -452,7 +454,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
 				{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
 			},
-			test: "different services",
+			name: "different services",
 		},
 		{
 			pod: buildPod("", labels1, nil),
@@ -469,7 +471,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
 				{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
 			},
-			test: "two pods, 0 matching",
+			name: "two pods, 0 matching",
 		},
 		{
 			pod: buildPod("", labels1, nil),
@@ -486,7 +488,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
 				{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
 			},
-			test: "two pods, 1 matching (in z2)",
+			name: "two pods, 1 matching (in z2)",
 		},
 		{
 			pod: buildPod("", labels1, nil),
@@ -506,7 +508,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				{Host: nodeMachine2Zone3, Score: 3}, // Pod on node
 				{Host: nodeMachine3Zone3, Score: 6}, // Pod in zone
 			},
-			test: "five pods, 3 matching (z2=2, z3=1)",
+			name: "five pods, 3 matching (z2=2, z3=1)",
 		},
 		{
 			pod: buildPod("", labels1, nil),
@@ -525,7 +527,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				{Host: nodeMachine2Zone3, Score: 3}, // Pod in zone
 				{Host: nodeMachine3Zone3, Score: 3}, // Pod in zone
 			},
-			test: "four pods, 3 matching (z1=1, z2=1, z3=1)",
+			name: "four pods, 3 matching (z1=1, z2=1, z3=1)",
 		},
 		{
 			pod: buildPod("", labels1, nil),
@@ -544,7 +546,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				{Host: nodeMachine2Zone3, Score: 3}, // Pod in zone
 				{Host: nodeMachine3Zone3, Score: 3}, // Pod in zone
 			},
-			test: "four pods, 3 matching (z1=1, z2=1, z3=1)",
+			name: "four pods, 3 matching (z1=1, z2=1, z3=1)",
 		},
 		{
 			pod: buildPod("", labels1, controllerRef("ReplicationController", "name", "abc123")),
@@ -569,36 +571,38 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				{Host: nodeMachine2Zone3, Score: 3}, // Pod in zone
 				{Host: nodeMachine3Zone3, Score: 3}, // Pod in zone
 			},
-			test: "Replication controller spreading (z1=0, z2=1, z3=2)",
+			name: "Replication controller spreading (z1=0, z2=1, z3=2)",
 		},
 	}

-	for i, test := range tests {
-		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(labeledNodes))
-		selectorSpread := SelectorSpread{
-			serviceLister:     schedulertesting.FakeServiceLister(test.services),
-			controllerLister:  schedulertesting.FakeControllerLister(test.rcs),
-			replicaSetLister:  schedulertesting.FakeReplicaSetLister(test.rss),
-			statefulSetLister: schedulertesting.FakeStatefulSetLister(test.sss),
-		}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(labeledNodes))
+			selectorSpread := SelectorSpread{
+				serviceLister:     schedulertesting.FakeServiceLister(test.services),
+				controllerLister:  schedulertesting.FakeControllerLister(test.rcs),
+				replicaSetLister:  schedulertesting.FakeReplicaSetLister(test.rss),
+				statefulSetLister: schedulertesting.FakeStatefulSetLister(test.sss),
+			}

 			mataDataProducer := NewPriorityMetadataFactory(
 				schedulertesting.FakeServiceLister(test.services),
 				schedulertesting.FakeControllerLister(test.rcs),
 				schedulertesting.FakeReplicaSetLister(test.rss),
 				schedulertesting.FakeStatefulSetLister(test.sss))
 			mataData := mataDataProducer(test.pod, nodeNameToInfo)
 			ttp := priorityFunction(selectorSpread.CalculateSpreadPriorityMap, selectorSpread.CalculateSpreadPriorityReduce, mataData)
 			list, err := ttp(test.pod, nodeNameToInfo, makeLabeledNodeList(labeledNodes))
 			if err != nil {
-				t.Errorf("unexpected error: %v index : %d", err, i)
+				t.Errorf("unexpected error: %v", err)
 			}
 			// sort the two lists to avoid failures on account of different ordering
 			sort.Sort(test.expectedList)
 			sort.Sort(list)
 			if !reflect.DeepEqual(test.expectedList, list) {
-				t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
+				t.Errorf("expected %#v, got %#v", test.expectedList, list)
 			}
+		})
 	}
 }

@ -640,7 +644,7 @@ func TestZoneSpreadPriority(t *testing.T) {
|
|||||||
nodes map[string]map[string]string
|
nodes map[string]map[string]string
|
||||||
services []*v1.Service
|
services []*v1.Service
|
||||||
expectedList schedulerapi.HostPriorityList
|
expectedList schedulerapi.HostPriorityList
|
||||||
test string
|
name string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
pod: new(v1.Pod),
|
pod: new(v1.Pod),
|
||||||
@ -648,7 +652,7 @@ func TestZoneSpreadPriority(t *testing.T) {
|
|||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
|
||||||
{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
|
{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
|
||||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||||
test: "nothing scheduled",
|
name: "nothing scheduled",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
|
||||||
@ -657,7 +661,7 @@ func TestZoneSpreadPriority(t *testing.T) {
|
|||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
|
||||||
{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
|
{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
|
||||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||||
test: "no services",
|
name: "no services",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
|
||||||
@ -667,7 +671,7 @@ func TestZoneSpreadPriority(t *testing.T) {
|
|||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
|
||||||
{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
|
{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
|
||||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||||
test: "different services",
|
name: "different services",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
|
||||||
@ -681,7 +685,7 @@ func TestZoneSpreadPriority(t *testing.T) {
|
|||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
|
||||||
{Host: "machine21", Score: 0}, {Host: "machine22", Score: 0},
|
{Host: "machine21", Score: 0}, {Host: "machine22", Score: 0},
|
||||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||||
test: "three pods, one service pod",
|
name: "three pods, one service pod",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
|
||||||
@ -695,7 +699,7 @@ func TestZoneSpreadPriority(t *testing.T) {
|
|||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 5}, {Host: "machine12", Score: 5},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 5}, {Host: "machine12", Score: 5},
|
||||||
{Host: "machine21", Score: 5}, {Host: "machine22", Score: 5},
|
{Host: "machine21", Score: 5}, {Host: "machine22", Score: 5},
|
||||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||||
test: "three pods, two service pods on different machines",
|
name: "three pods, two service pods on different machines",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}},
|
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}},
|
||||||
@ -710,7 +714,7 @@ func TestZoneSpreadPriority(t *testing.T) {
|
|||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 0}, {Host: "machine12", Score: 0},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 0}, {Host: "machine12", Score: 0},
|
||||||
{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
|
{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
|
||||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||||
test: "three service label match pods in different namespaces",
|
name: "three service label match pods in different namespaces",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
|
||||||
@ -725,7 +729,7 @@ func TestZoneSpreadPriority(t *testing.T) {
|
|||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 6}, {Host: "machine12", Score: 6},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 6}, {Host: "machine12", Score: 6},
|
||||||
{Host: "machine21", Score: 3}, {Host: "machine22", Score: 3},
|
{Host: "machine21", Score: 3}, {Host: "machine22", Score: 3},
|
||||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||||
test: "four pods, three service pods",
|
name: "four pods, three service pods",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
|
||||||
@ -739,7 +743,7 @@ func TestZoneSpreadPriority(t *testing.T) {
|
|||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 3}, {Host: "machine12", Score: 3},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 3}, {Host: "machine12", Score: 3},
|
||||||
{Host: "machine21", Score: 6}, {Host: "machine22", Score: 6},
|
{Host: "machine21", Score: 6}, {Host: "machine22", Score: 6},
|
||||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||||
test: "service with partial pod label matches",
|
name: "service with partial pod label matches",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
|
||||||
@ -754,7 +758,7 @@ func TestZoneSpreadPriority(t *testing.T) {
|
|||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 7}, {Host: "machine12", Score: 7},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 7}, {Host: "machine12", Score: 7},
|
||||||
{Host: "machine21", Score: 5}, {Host: "machine22", Score: 5},
|
{Host: "machine21", Score: 5}, {Host: "machine22", Score: 5},
|
||||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||||
test: "service pod on non-zoned node",
|
name: "service pod on non-zoned node",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
// these local variables just make sure controllerLister\replicaSetLister\statefulSetLister not nil
|
// these local variables just make sure controllerLister\replicaSetLister\statefulSetLister not nil
|
||||||
@@ -763,28 +767,30 @@ func TestZoneSpreadPriority(t *testing.T) {
rcs := []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}
rss := []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}

-for i, test := range tests {
-nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(test.nodes))
-zoneSpread := ServiceAntiAffinity{podLister: schedulertesting.FakePodLister(test.pods), serviceLister: schedulertesting.FakeServiceLister(test.services), label: "zone"}
+for _, test := range tests {
+t.Run(test.name, func(t *testing.T) {
+nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(test.nodes))
+zoneSpread := ServiceAntiAffinity{podLister: schedulertesting.FakePodLister(test.pods), serviceLister: schedulertesting.FakeServiceLister(test.services), label: "zone"}

mataDataProducer := NewPriorityMetadataFactory(
schedulertesting.FakeServiceLister(test.services),
schedulertesting.FakeControllerLister(rcs),
schedulertesting.FakeReplicaSetLister(rss),
schedulertesting.FakeStatefulSetLister(sss))
mataData := mataDataProducer(test.pod, nodeNameToInfo)
ttp := priorityFunction(zoneSpread.CalculateAntiAffinityPriorityMap, zoneSpread.CalculateAntiAffinityPriorityReduce, mataData)
list, err := ttp(test.pod, nodeNameToInfo, makeLabeledNodeList(test.nodes))
if err != nil {
-t.Errorf("unexpected error: %v index : %d", err, i)
+t.Errorf("unexpected error: %v", err)
}

// sort the two lists to avoid failures on account of different ordering
sort.Sort(test.expectedList)
sort.Sort(list)
if !reflect.DeepEqual(test.expectedList, list) {
-t.Errorf("test index %d (%s): expected %#v, got %#v", i, test.test, test.expectedList, list)
+t.Errorf("expected %#v, got %#v", test.expectedList, list)
}
+})
}
}

@@ -54,11 +54,11 @@ func TestTaintAndToleration(t *testing.T) {
pod *v1.Pod
nodes []*v1.Node
expectedList schedulerapi.HostPriorityList
-test string
+name string
}{
// basic test case
{
-test: "node with taints tolerated by the pod, gets a higher score than those node with intolerable taints",
+name: "node with taints tolerated by the pod, gets a higher score than those node with intolerable taints",
pod: podWithTolerations([]v1.Toleration{{
Key: "foo",
Operator: v1.TolerationOpEqual,
@@ -84,7 +84,7 @@ func TestTaintAndToleration(t *testing.T) {
},
// the count of taints that are tolerated by pod, does not matter.
{
-test: "the nodes that all of their taints are tolerated by the pod, get the same score, no matter how many tolerable taints a node has",
+name: "the nodes that all of their taints are tolerated by the pod, get the same score, no matter how many tolerable taints a node has",
pod: podWithTolerations([]v1.Toleration{
{
Key: "cpu-type",
@@ -127,7 +127,7 @@ func TestTaintAndToleration(t *testing.T) {
},
// the count of taints on a node that are not tolerated by pod, matters.
{
-test: "the more intolerable taints a node has, the lower score it gets.",
+name: "the more intolerable taints a node has, the lower score it gets.",
pod: podWithTolerations([]v1.Toleration{{
Key: "foo",
Operator: v1.TolerationOpEqual,
@@ -163,7 +163,7 @@ func TestTaintAndToleration(t *testing.T) {
},
// taints-tolerations priority only takes care about the taints and tolerations that have effect PreferNoSchedule
{
-test: "only taints and tolerations that have effect PreferNoSchedule are checked by taints-tolerations priority function",
+name: "only taints and tolerations that have effect PreferNoSchedule are checked by taints-tolerations priority function",
pod: podWithTolerations([]v1.Toleration{
{
Key: "cpu-type",
@@ -205,7 +205,7 @@ func TestTaintAndToleration(t *testing.T) {
},
},
{
-test: "Default behaviour No taints and tolerations, lands on node with no taints",
+name: "Default behaviour No taints and tolerations, lands on node with no taints",
//pod without tolerations
pod: podWithTolerations([]v1.Toleration{}),
nodes: []*v1.Node{
@@ -226,16 +226,17 @@ func TestTaintAndToleration(t *testing.T) {
},
}
for _, test := range tests {
-nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes)
-ttp := priorityFunction(ComputeTaintTolerationPriorityMap, ComputeTaintTolerationPriorityReduce, nil)
-list, err := ttp(test.pod, nodeNameToInfo, test.nodes)
-if err != nil {
-t.Errorf("%s, unexpected error: %v", test.test, err)
-}
+t.Run(test.name, func(t *testing.T) {
+nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes)
+ttp := priorityFunction(ComputeTaintTolerationPriorityMap, ComputeTaintTolerationPriorityReduce, nil)
+list, err := ttp(test.pod, nodeNameToInfo, test.nodes)
+if err != nil {
+t.Errorf("unexpected error: %v", err)
+}

if !reflect.DeepEqual(test.expectedList, list) {
-t.Errorf("%s,\nexpected:\n\t%+v,\ngot:\n\t%+v", test.test, test.expectedList, list)
+t.Errorf("expected:\n\t%+v,\ngot:\n\t%+v", test.expectedList, list)
}
+})
}

}
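The diff above is a mechanical application of Go's table-driven subtest pattern: each table entry gains a `name` field, the loop body is wrapped in `t.Run`, and the error messages drop the index/name prefix because the subtest name already identifies the failing case. For readers less familiar with the pattern, here is a minimal, self-contained sketch; it is illustrative only and not code from this repository (the `Sum`/`TestSum` names are invented for the example, and the file would be saved as a `_test.go` file):

```go
package example

import "testing"

// Sum is a trivial function under test, used only to illustrate the pattern.
func Sum(a, b int) int { return a + b }

// TestSum runs each table entry as its own named subtest via t.Run.
func TestSum(t *testing.T) {
	tests := []struct {
		name     string
		a, b     int
		expected int
	}{
		{name: "both zero", a: 0, b: 0, expected: 0},
		{name: "negative inputs", a: -2, b: -3, expected: -5},
		{name: "mixed signs", a: 7, b: -4, expected: 3},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			if got := Sum(test.a, test.b); got != test.expected {
				// The subtest name already identifies the failing case,
				// so the message does not need to repeat it.
				t.Errorf("expected %d, got %d", test.expected, got)
			}
		})
	}
}
```

One practical payoff of the subtest form is that a single failing case can be re-run in isolation, for example `go test -run 'TestSum/negative_inputs'`, which is not possible with index-numbered table loops.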