E2E tests for PodTopologySpread

Wei Huang 2020-02-10 13:47:15 -08:00
parent 0e37bcedef
commit c93dffdfc4
7 changed files with 362 additions and 5 deletions
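
The tests below exercise the pod-level topologySpreadConstraints API behind the EvenPodsSpread gate that this commit promotes to Beta. Roughly, a DoNotSchedule constraint requires that, among pods matching the constraint's label selector, no eligible topology domain exceed the least-populated domain by more than maxSkew. A minimal sketch of that skew arithmetic follows (toy code with made-up domain counts, not code from this commit or from the scheduler plugin):

package main

import "fmt"

// skew returns the difference between the most and least populated topology
// domains for the pods matching a spread constraint's label selector.
func skew(podsPerDomain map[string]int) int {
	min, max := -1, 0
	for _, n := range podsPerDomain {
		if min < 0 || n < min {
			min = n
		}
		if n > max {
			max = n
		}
	}
	if min < 0 {
		return 0
	}
	return max - min
}

func main() {
	// The filtering test below expects 4 replicas with maxSkew=1 to land 2/2 on two
	// nodes: a 3/1 placement has skew 2 and is rejected under DoNotSchedule, while
	// the scoring test uses ScheduleAnyway, where higher skew only lowers the score.
	fmt.Println(skew(map[string]int{"node-a": 2, "node-b": 2})) // 0
	fmt.Println(skew(map[string]int{"node-a": 3, "node-b": 1})) // 2
}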


@@ -503,7 +503,7 @@ const (
EndpointSliceProxying featuregate.Feature = "EndpointSliceProxying"

// owner: @Huang-Wei
-// alpha: v1.16
+// beta: v1.18
//
// Schedule pods evenly across available topology domains.
EvenPodsSpread featuregate.Feature = "EvenPodsSpread"
@@ -618,7 +618,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
IPv6DualStack: {Default: false, PreRelease: featuregate.Alpha},
EndpointSlice: {Default: true, PreRelease: featuregate.Beta},
EndpointSliceProxying: {Default: false, PreRelease: featuregate.Alpha},
-EvenPodsSpread: {Default: false, PreRelease: featuregate.Alpha},
+EvenPodsSpread: {Default: true, PreRelease: featuregate.Beta},
StartupProbe: {Default: true, PreRelease: featuregate.Beta},
AllowInsecureBackendProxy: {Default: true, PreRelease: featuregate.Beta},
PodDisruptionBudget: {Default: true, PreRelease: featuregate.Beta},


@@ -56,6 +56,7 @@ func TestClusterAutoscalerProvider(t *testing.T) {
{Name: noderesources.FitName},
{Name: nodeports.Name},
{Name: interpodaffinity.Name},
+{Name: podtopologyspread.Name},
},
},
Filter: &schedulerapi.PluginSet{
@@ -74,6 +75,7 @@ func TestClusterAutoscalerProvider(t *testing.T) {
{Name: volumebinding.Name},
{Name: volumezone.Name},
{Name: interpodaffinity.Name},
+{Name: podtopologyspread.Name},
},
},
PreScore: &schedulerapi.PluginSet{
@@ -81,6 +83,7 @@ func TestClusterAutoscalerProvider(t *testing.T) {
{Name: interpodaffinity.Name},
{Name: defaultpodtopologyspread.Name},
{Name: tainttoleration.Name},
+{Name: podtopologyspread.Name},
},
},
Score: &schedulerapi.PluginSet{
@@ -93,6 +96,7 @@ func TestClusterAutoscalerProvider(t *testing.T) {
{Name: nodepreferavoidpods.Name, Weight: 10000},
{Name: defaultpodtopologyspread.Name, Weight: 1},
{Name: tainttoleration.Name, Weight: 1},
+{Name: podtopologyspread.Name, Weight: 1},
},
},
Bind: &schedulerapi.PluginSet{


@@ -1423,6 +1423,7 @@ func TestAlgorithmProviderCompatibility(t *testing.T) {
{Name: "NodeResourcesFit"},
{Name: "NodePorts"},
{Name: "InterPodAffinity"},
+{Name: "PodTopologySpread"},
},
"FilterPlugin": {
{Name: "NodeUnschedulable"},
@@ -1439,11 +1440,13 @@ func TestAlgorithmProviderCompatibility(t *testing.T) {
{Name: "VolumeBinding"},
{Name: "VolumeZone"},
{Name: "InterPodAffinity"},
+{Name: "PodTopologySpread"},
},
"PreScorePlugin": {
{Name: "InterPodAffinity"},
{Name: "DefaultPodTopologySpread"},
{Name: "TaintToleration"},
+{Name: "PodTopologySpread"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 1},
@@ -1454,6 +1457,7 @@ func TestAlgorithmProviderCompatibility(t *testing.T) {
{Name: "NodePreferAvoidPods", Weight: 10000},
{Name: "DefaultPodTopologySpread", Weight: 1},
{Name: "TaintToleration", Weight: 1},
+{Name: "PodTopologySpread", Weight: 1},
},
"BindPlugin": {{Name: "DefaultBinder"}},
}
@@ -1483,6 +1487,7 @@ func TestAlgorithmProviderCompatibility(t *testing.T) {
{Name: "NodeResourcesFit"},
{Name: "NodePorts"},
{Name: "InterPodAffinity"},
+{Name: "PodTopologySpread"},
},
"FilterPlugin": {
{Name: "NodeUnschedulable"},
@@ -1499,11 +1504,13 @@ func TestAlgorithmProviderCompatibility(t *testing.T) {
{Name: "VolumeBinding"},
{Name: "VolumeZone"},
{Name: "InterPodAffinity"},
+{Name: "PodTopologySpread"},
},
"PreScorePlugin": {
{Name: "InterPodAffinity"},
{Name: "DefaultPodTopologySpread"},
{Name: "TaintToleration"},
+{Name: "PodTopologySpread"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 1},
@@ -1514,6 +1521,7 @@ func TestAlgorithmProviderCompatibility(t *testing.T) {
{Name: "NodePreferAvoidPods", Weight: 10000},
{Name: "DefaultPodTopologySpread", Weight: 1},
{Name: "TaintToleration", Weight: 1},
+{Name: "PodTopologySpread", Weight: 1},
},
"BindPlugin": {{Name: "DefaultBinder"}},
},


@@ -67,6 +67,7 @@ type pausePodConfig struct {
OwnerReferences []metav1.OwnerReference
PriorityClassName string
DeletionGracePeriodSeconds *int64
+TopologySpreadConstraints []v1.TopologySpreadConstraint
}

var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
@@ -604,6 +605,84 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
ginkgo.By(fmt.Sprintf("Trying to create another pod(pod5) with hostport %v but hostIP 127.0.0.1 on the node which pod4 resides and expect not scheduled", port))
createHostPortPodOnNode(f, "pod5", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, false)
})
ginkgo.Context("PodTopologySpread Filtering", func() {
var nodeNames []string
topologyKey := "kubernetes.io/e2e-pts-filter"
ginkgo.BeforeEach(func() {
ginkgo.By("Trying to get 2 available nodes which can run pod")
nodeNames = Get2NodesThatCanRunPod(f)
ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey))
for _, nodeName := range nodeNames {
framework.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName)
}
})
ginkgo.AfterEach(func() {
for _, nodeName := range nodeNames {
framework.RemoveLabelOffNode(cs, nodeName, topologyKey)
}
})
ginkgo.It("validates 4 pods with MaxSkew=1 are evenly distributed into 2 nodes", func() {
podLabel := "e2e-pts-filter"
replicas := 4
rsConfig := pauseRSConfig{
Replicas: int32(replicas),
PodConfig: pausePodConfig{
Name: podLabel,
Namespace: ns,
Labels: map[string]string{podLabel: ""},
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: topologyKey,
Operator: v1.NodeSelectorOpIn,
Values: nodeNames,
},
},
},
},
},
},
},
TopologySpreadConstraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: topologyKey,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: podLabel,
Operator: metav1.LabelSelectorOpExists,
},
},
},
},
},
},
}
runPauseRS(f, rsConfig)
podList, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
numInNode1, numInNode2 := 0, 0
for _, pod := range podList.Items {
if pod.Spec.NodeName == nodeNames[0] {
numInNode1++
} else if pod.Spec.NodeName == nodeNames[1] {
numInNode2++
}
}
expected := replicas / len(nodeNames)
framework.ExpectEqual(numInNode1, expected, fmt.Sprintf("Pods are not distributed as expected on node %q", nodeNames[0]))
framework.ExpectEqual(numInNode2, expected, fmt.Sprintf("Pods are not distributed as expected on node %q", nodeNames[1]))
})
})
})

// printAllKubeletPods outputs status of all kubelet pods into log.
@@ -633,8 +712,9 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
OwnerReferences: conf.OwnerReferences,
},
Spec: v1.PodSpec{
NodeSelector: conf.NodeSelector,
Affinity: conf.Affinity,
+TopologySpreadConstraints: conf.TopologySpreadConstraints,
Containers: []v1.Container{
{
Name: conf.Name,
@@ -669,7 +749,7 @@ func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
pod := createPausePod(f, conf)
-framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod))
+framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PollShortTimeout))
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), conf.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
return pod
@@ -750,6 +830,30 @@ func GetNodeThatCanRunPod(f *framework.Framework) string {
return runPodAndGetNodeName(f, pausePodConfig{Name: "without-label"})
}

// Get2NodesThatCanRunPod returns the names of two nodes that are able to run a pod.
func Get2NodesThatCanRunPod(f *framework.Framework) []string {
firstNode := GetNodeThatCanRunPod(f)
ginkgo.By("Trying to launch a pod without a label to get a node which can launch it.")
pod := pausePodConfig{
Name: "without-label",
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{
{Key: "metadata.name", Operator: v1.NodeSelectorOpNotIn, Values: []string{firstNode}},
},
},
},
},
},
},
}
secondNode := runPodAndGetNodeName(f, pod)
return []string{firstNode, secondNode}
}

func getNodeThatCanRunPodWithoutToleration(f *framework.Framework) string {
ginkgo.By("Trying to launch a pod without a toleration to get a node which can launch it.")
return runPodAndGetNodeName(f, pausePodConfig{Name: "without-toleration"})


@@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
+"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
@@ -271,6 +272,150 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
framework.ExpectEqual(podPreempted, true)
})
ginkgo.Context("PodTopologySpread Preemption", func() {
var nodeNames []string
var nodes []*v1.Node
topologyKey := "kubernetes.io/e2e-pts-preemption"
var fakeRes v1.ResourceName = "example.com/fakePTSRes"
ginkgo.BeforeEach(func() {
ginkgo.By("Trying to get 2 available nodes which can run pod")
nodeNames = Get2NodesThatCanRunPod(f)
ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey))
for _, nodeName := range nodeNames {
framework.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName)
node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
// update Node API object with a fake resource
nodeCopy := node.DeepCopy()
// force it to update
nodeCopy.ResourceVersion = "0"
ginkgo.By(fmt.Sprintf("Apply 10 fake resource to node %v.", node.Name))
nodeCopy.Status.Capacity[fakeRes] = resource.MustParse("10")
node, err = cs.CoreV1().Nodes().UpdateStatus(context.TODO(), nodeCopy, metav1.UpdateOptions{})
framework.ExpectNoError(err)
nodes = append(nodes, node)
}
})
ginkgo.AfterEach(func() {
for _, nodeName := range nodeNames {
framework.RemoveLabelOffNode(cs, nodeName, topologyKey)
}
for _, node := range nodes {
nodeCopy := node.DeepCopy()
// force it to update
nodeCopy.ResourceVersion = "0"
delete(nodeCopy.Status.Capacity, fakeRes)
_, err := cs.CoreV1().Nodes().UpdateStatus(context.TODO(), nodeCopy, metav1.UpdateOptions{})
framework.ExpectNoError(err)
}
})
ginkgo.It("validates proper pods are preempted", func() {
podLabel := "e2e-pts-preemption"
nodeAffinity := &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: topologyKey,
Operator: v1.NodeSelectorOpIn,
Values: nodeNames,
},
},
},
},
},
},
}
highPodCfg := pausePodConfig{
Name: "high",
Namespace: ns,
Labels: map[string]string{podLabel: ""},
PriorityClassName: highPriorityClassName,
Affinity: nodeAffinity,
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{fakeRes: resource.MustParse("9")},
Limits: v1.ResourceList{fakeRes: resource.MustParse("9")},
},
}
lowPodCfg := pausePodConfig{
Namespace: ns,
Labels: map[string]string{podLabel: ""},
PriorityClassName: lowPriorityClassName,
Affinity: nodeAffinity,
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{fakeRes: resource.MustParse("3")},
Limits: v1.ResourceList{fakeRes: resource.MustParse("3")},
},
}
ginkgo.By("Create 1 High Pod and 3 Low Pods to occupy 9/10 of fake resources on both nodes.")
// Prepare 1 High Pod and 3 Low Pods
runPausePod(f, highPodCfg)
for i := 1; i <= 3; i++ {
lowPodCfg.Name = fmt.Sprintf("low-%v", i)
runPausePod(f, lowPodCfg)
}
ginkgo.By("Create 1 Medium Pod with TopologySpreadConstraints")
mediumPodCfg := pausePodConfig{
Name: "medium",
Namespace: ns,
Labels: map[string]string{podLabel: ""},
PriorityClassName: mediumPriorityClassName,
Affinity: nodeAffinity,
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{fakeRes: resource.MustParse("3")},
Limits: v1.ResourceList{fakeRes: resource.MustParse("3")},
},
TopologySpreadConstraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: topologyKey,
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: podLabel,
Operator: metav1.LabelSelectorOpExists,
},
},
},
},
},
}
// Each node exposes 10 units of the fake resource: the high Pod occupies 9 on one node and
// the three low Pods occupy 9 on the other, so the medium Pod (requesting 3) cannot be
// scheduled without preemption. To fulfil resource.requests it only needs to preempt one
// low Pod. However, in that case the Pods spread would become [<high>, <medium, low, low>],
// which violates the topology spread constraint (skew 2 > maxSkew 1). Hence it needs to
// preempt another low Pod so that the spread becomes [<high>, <medium, low>].
runPausePod(f, mediumPodCfg)
framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, mediumPodCfg.Name))
ginkgo.By("Verify there are 3 Pods left in this namespace")
wantPods := sets.NewString("high", "medium", "low")
podList, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
pods := podList.Items
framework.ExpectNoError(err)
framework.ExpectEqual(len(pods), 3)
for _, pod := range pods {
// Remove the ordinal index for low pod.
podName := strings.Split(pod.Name, "-")[0]
if wantPods.Has(podName) {
ginkgo.By(fmt.Sprintf("Pod %q is as expected to be running.", pod.Name))
wantPods.Delete(podName)
} else {
framework.Failf("Pod %q conflicted with expected PodSet %v", podName, wantPods)
}
}
})
})
})

// construct a fakecpu so as to set it to status of Node object
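
The preemption scenario above comes down to simple arithmetic; the sketch below is illustrative only, not part of the commit, and mirrors the test's numbers: each node advertises 10 units of example.com/fakePTSRes, the high pod requests 9, each low pod 3, and the medium pod 3.

package main

import "fmt"

func main() {
	const capacity = 10           // fake resource capacity patched onto each node
	freeOnNodeA := capacity - 9   // the high pod lands alone on one node
	freeOnNodeB := capacity - 3*3 // the three low pods end up on the other node
	fmt.Println(freeOnNodeA, freeOnNodeB) // 1 1: the medium pod (3 units) fits on neither node

	// Preempting a single low pod frees 3 units on node B, but the matching-pod spread
	// would become 1 (high) vs 3 (medium + two lows), i.e. skew 2 > maxSkew 1.
	// Preempting a second low pod gives 1 vs 2, which satisfies the constraint, so the
	// pods left running are exactly "high", "medium" and one "low", which is what the test asserts.
}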


@@ -339,6 +339,94 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
framework.ExpectNoError(err)
framework.ExpectEqual(tolePod.Spec.NodeName, nodeName)
})
ginkgo.Context("PodTopologySpread Scoring", func() {
var nodeNames []string
topologyKey := "kubernetes.io/e2e-pts-score"
ginkgo.BeforeEach(func() {
ginkgo.By("Trying to get 2 available nodes which can run pod")
nodeNames = Get2NodesThatCanRunPod(f)
ginkgo.By(fmt.Sprintf("Apply dedicated topologyKey %v for this test on the 2 nodes.", topologyKey))
for _, nodeName := range nodeNames {
framework.AddOrUpdateLabelOnNode(cs, nodeName, topologyKey, nodeName)
}
})
ginkgo.AfterEach(func() {
for _, nodeName := range nodeNames {
framework.RemoveLabelOffNode(cs, nodeName, topologyKey)
}
})
ginkgo.It("validates pod should be preferably scheduled to node which makes the matching pods more evenly distributed", func() {
var nodes []v1.Node
for _, nodeName := range nodeNames {
node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
nodes = append(nodes, *node)
}
// Make the nodes have balanced cpu,mem usage.
err := createBalancedPodForNodes(f, cs, ns, nodes, podRequestedResource, 0.5)
framework.ExpectNoError(err)
replicas := 4
podLabel := "e2e-pts-score"
ginkgo.By(fmt.Sprintf("Run a ReplicaSet with %v replicas on node %q", replicas, nodeNames[0]))
rsConfig := pauseRSConfig{
Replicas: int32(replicas),
PodConfig: pausePodConfig{
Name: podLabel,
Namespace: ns,
Labels: map[string]string{podLabel: ""},
NodeSelector: map[string]string{topologyKey: nodeNames[0]},
},
}
runPauseRS(f, rsConfig)
// Run a Pod with WhenUnsatisfiable:ScheduleAnyway.
podCfg := pausePodConfig{
Name: "test-pod",
Namespace: ns,
Labels: map[string]string{podLabel: ""},
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: topologyKey,
Operator: v1.NodeSelectorOpIn,
Values: nodeNames,
},
},
},
},
},
},
},
TopologySpreadConstraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: topologyKey,
WhenUnsatisfiable: v1.ScheduleAnyway,
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: podLabel,
Operator: metav1.LabelSelectorOpExists,
},
},
},
},
},
}
testPod := runPausePod(f, podCfg)
ginkgo.By(fmt.Sprintf("Verifying if the test-pod lands on node %q", nodeNames[1]))
framework.ExpectEqual(nodeNames[1], testPod.Spec.NodeName)
})
})
})

// createBalancedPodForNodes creates a pod per node that asks for enough resources to make all nodes have the same mem/cpu usage ratio.


@@ -105,6 +105,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
"PreFilterPlugin": {
{Name: "NodeResourcesFit"},
{Name: "NodePorts"},
+{Name: "PodTopologySpread"},
{Name: "InterPodAffinity"},
},
"FilterPlugin": {
@@ -121,15 +122,18 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
{Name: "AzureDiskLimits"},
{Name: "VolumeBinding"},
{Name: "VolumeZone"},
+{Name: "PodTopologySpread"},
{Name: "InterPodAffinity"},
},
"PreScorePlugin": {
+{Name: "PodTopologySpread"},
{Name: "InterPodAffinity"},
{Name: "DefaultPodTopologySpread"},
{Name: "TaintToleration"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 1},
+{Name: "PodTopologySpread", Weight: 1},
{Name: "ImageLocality", Weight: 1},
{Name: "InterPodAffinity", Weight: 1},
{Name: "NodeResourcesLeastAllocated", Weight: 1},
@@ -191,6 +195,7 @@ kind: Policy
"PreFilterPlugin": {
{Name: "NodeResourcesFit"},
{Name: "NodePorts"},
+{Name: "PodTopologySpread"},
{Name: "InterPodAffinity"},
},
"FilterPlugin": {
@@ -207,15 +212,18 @@ kind: Policy
{Name: "AzureDiskLimits"},
{Name: "VolumeBinding"},
{Name: "VolumeZone"},
+{Name: "PodTopologySpread"},
{Name: "InterPodAffinity"},
},
"PreScorePlugin": {
+{Name: "PodTopologySpread"},
{Name: "InterPodAffinity"},
{Name: "DefaultPodTopologySpread"},
{Name: "TaintToleration"},
},
"ScorePlugin": {
{Name: "NodeResourcesBalancedAllocation", Weight: 1},
+{Name: "PodTopologySpread", Weight: 1},
{Name: "ImageLocality", Weight: 1},
{Name: "InterPodAffinity", Weight: 1},
{Name: "NodeResourcesLeastAllocated", Weight: 1},