EvenPodsSpread: integration test

Wei Huang 2019-07-10 15:59:07 -07:00
parent 270d9529e0
commit caab8b74ba
7 changed files with 356 additions and 17 deletions

View File

@@ -17,10 +17,14 @@ limitations under the License.
package testing
import (
"fmt"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var zero int64
// NodeSelectorWrapper wraps a NodeSelector inside.
type NodeSelectorWrapper struct{ v1.NodeSelector }
@@ -152,6 +156,27 @@ func (p *PodWrapper) Namespace(s string) *PodWrapper {
return p
}
// Container appends a container to the PodSpec of the inner pod.
func (p *PodWrapper) Container(s string) *PodWrapper {
p.Spec.Containers = append(p.Spec.Containers, v1.Container{
Name: fmt.Sprintf("con%d", len(p.Spec.Containers)),
Image: s,
})
return p
}
// Priority sets a priority value in the PodSpec of the inner pod.
func (p *PodWrapper) Priority(val int32) *PodWrapper {
p.Spec.Priority = &val
return p
}
// ZeroTerminationGracePeriod sets the TerminationGracePeriodSeconds of the inner pod to zero.
func (p *PodWrapper) ZeroTerminationGracePeriod() *PodWrapper {
p.Spec.TerminationGracePeriodSeconds = &zero
return p
}
// Node sets `s` as the nodeName of the inner pod.
func (p *PodWrapper) Node(s string) *PodWrapper {
p.Spec.NodeName = s
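
For orientation: the wrapper methods above compose fluently. Each call mutates the embedded pod and returns the wrapper, and Obj() (used throughout the tests below) unwraps the final *v1.Pod. A minimal sketch of the resulting style, assuming the kubernetes test packages that the test files in this commit import:

package example

import (
	v1 "k8s.io/api/core/v1"

	st "k8s.io/kubernetes/pkg/scheduler/testing"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// buildPod chains the new wrapper methods to produce a ready-to-create pod.
func buildPod() *v1.Pod {
	return st.MakePod().
		Namespace("test-ns"). // hypothetical namespace
		Name("p").
		Label("foo", "").
		Container(imageutils.GetPauseImageName()).
		Priority(100).
		ZeroTerminationGracePeriod().
		Node("node-1").
		Obj()
}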

View File

@@ -18,7 +18,6 @@ package util
import (
"sort"
"time"
"k8s.io/api/core/v1"

View File

@@ -37,6 +37,7 @@ go_test(
"//pkg/scheduler/factory:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/testing:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//plugin/pkg/admission/podtolerationrestriction:go_default_library",

View File

@@ -17,6 +17,7 @@ limitations under the License.
package scheduler
import (
"fmt"
"testing"
"time"
@@ -24,6 +25,11 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
st "k8s.io/kubernetes/pkg/scheduler/testing"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -920,3 +926,165 @@ func TestNodePIDPressure(t *testing.T) {
cleanupPods(cs, t, []*v1.Pod{testPod})
}
// TestEvenPodsSpreadPredicate verifies that the EvenPodsSpread predicate works as expected.
func TestEvenPodsSpreadPredicate(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EvenPodsSpread, true)()
// Apply feature gates to enable EvenPodsSpread
defer algorithmprovider.ApplyFeatureGates()()
context := initTest(t, "eps-predicate")
cs := context.clientSet
ns := context.ns.Name
defer cleanupTest(t, context)
// Add 4 nodes.
nodes, err := createNodes(cs, "node", nil, 4)
if err != nil {
t.Fatalf("Cannot create nodes: %v", err)
}
for i, node := range nodes {
// Apply labels "zone: zone-{0,1}" and "node: <node name>" to each node.
labels := map[string]string{
"zone": fmt.Sprintf("zone-%d", i/2),
"node": node.Name,
}
if err = testutils.AddLabelsToNode(cs, node.Name, labels); err != nil {
t.Fatalf("Cannot add labels to node: %v", err)
}
if err = waitForNodeLabels(cs, node.Name, labels); err != nil {
t.Fatalf("Failed to poll node labels: %v", err)
}
}
pause := imageutils.GetPauseImageName()
tests := []struct {
name string
incomingPod *v1.Pod
existingPods []*v1.Pod
fits bool
candidateNodes []string // nodes that the pod is expected to be scheduled onto
}{
// Note: pod and node names in the cases below are indexed from 0.
{
name: "place pod on a 1/1/0/1 cluster with MaxSkew=1, node-2 is the only fit",
incomingPod: st.MakePod().Namespace(ns).Name("p").Label("foo", "").Container(pause).
SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
Obj(),
existingPods: []*v1.Pod{
st.MakePod().Namespace(ns).Name("p0").Node("node-0").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p1").Node("node-1").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p3").Node("node-3").Label("foo", "").Container(pause).Obj(),
},
fits: true,
candidateNodes: []string{"node-2"},
},
{
name: "place pod on a 2/0/0/1 cluster with MaxSkew=2, node-{1,2,3} are good fits",
incomingPod: st.MakePod().Namespace(ns).Name("p").Label("foo", "").Container(pause).
SpreadConstraint(2, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
Obj(),
existingPods: []*v1.Pod{
st.MakePod().Namespace(ns).Name("p0a").Node("node-0").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p0b").Node("node-0").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p3").Node("node-3").Label("foo", "").Container(pause).Obj(),
},
fits: true,
candidateNodes: []string{"node-1", "node-2", "node-3"},
},
{
name: "pod is required to be placed on zone0, so only node-1 fits",
incomingPod: st.MakePod().Namespace(ns).Name("p").Label("foo", "").Container(pause).
NodeAffinityIn("zone", []string{"zone-0"}).
SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
Obj(),
existingPods: []*v1.Pod{
st.MakePod().Namespace(ns).Name("p0").Node("node-0").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p3").Node("node-3").Label("foo", "").Container(pause).Obj(),
},
fits: true,
candidateNodes: []string{"node-1"},
},
{
name: "two constraints: pod can only be placed to zone-1/node-2",
incomingPod: st.MakePod().Namespace(ns).Name("p").Label("foo", "").Container(pause).
SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
Obj(),
existingPods: []*v1.Pod{
st.MakePod().Namespace(ns).Name("p0").Node("node-0").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p1").Node("node-1").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p3a").Node("node-3").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p3b").Node("node-3").Label("foo", "").Container(pause).Obj(),
},
fits: true,
candidateNodes: []string{"node-2"},
},
{
name: "pod cannot be placed onto any node",
incomingPod: st.MakePod().Namespace(ns).Name("p").Label("foo", "").Container(pause).
NodeAffinityNotIn("node", []string{"node-0"}). // mock a 3-node cluster
SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
Obj(),
existingPods: []*v1.Pod{
st.MakePod().Namespace(ns).Name("p1a").Node("node-1").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p1b").Node("node-1").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p2a").Node("node-2").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p2b").Node("node-2").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p3").Node("node-3").Label("foo", "").Container(pause).Obj(),
},
fits: false,
},
{
name: "high priority pod can preempt others",
incomingPod: st.MakePod().Namespace(ns).Name("p").Label("foo", "").Container(pause).Priority(100).
NodeAffinityNotIn("node", []string{"node-0"}). // mock a 3-node cluster
SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
Obj(),
existingPods: []*v1.Pod{
st.MakePod().ZeroTerminationGracePeriod().Namespace(ns).Name("p1a").Node("node-1").Label("foo", "").Container(pause).Obj(),
st.MakePod().ZeroTerminationGracePeriod().Namespace(ns).Name("p1b").Node("node-1").Label("foo", "").Container(pause).Obj(),
st.MakePod().ZeroTerminationGracePeriod().Namespace(ns).Name("p2a").Node("node-2").Label("foo", "").Container(pause).Obj(),
st.MakePod().ZeroTerminationGracePeriod().Namespace(ns).Name("p2b").Node("node-2").Label("foo", "").Container(pause).Obj(),
st.MakePod().ZeroTerminationGracePeriod().Namespace(ns).Name("p3").Node("node-3").Label("foo", "").Container(pause).Obj(),
},
fits: true,
candidateNodes: []string{"node-1", "node-2", "node-3"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
allPods := append(tt.existingPods, tt.incomingPod)
defer cleanupPods(cs, t, allPods)
for _, pod := range tt.existingPods {
createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod)
if err != nil {
t.Fatalf("Test Failed: error while creating pod during test: %v", err)
}
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduled(cs, createdPod.Namespace, createdPod.Name))
if err != nil {
t.Errorf("Test Failed: error while waiting for pod during test: %v", err)
}
}
testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(tt.incomingPod)
if err != nil && !errors.IsInvalid(err) {
t.Fatalf("Test Failed: error while creating pod during test: %v", err)
}
if tt.fits {
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduledIn(cs, testPod.Namespace, testPod.Name, tt.candidateNodes))
} else {
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
}
if err != nil {
t.Errorf("Test Failed: %v", err)
}
})
}
}
var (
hardSpread = v1.DoNotSchedule
softSpread = v1.ScheduleAnyway
)
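
The slash notation in the case names above counts matching pods per node (node-0/node-1/node-2/node-3). As a sanity check on the expectations, the following self-contained sketch reproduces the skew arithmetic for a DoNotSchedule constraint. It is illustrative only, not the scheduler's implementation: a placement fits when, after adding the pod, the target node's matching-pod count stays within maxSkew of the smallest count across nodes.

package main

import "fmt"

// fits reports whether adding one matching pod to domain i keeps the
// constraint satisfied: after the placement, counts[i] minus the global
// minimum must not exceed maxSkew.
func fits(counts []int, i, maxSkew int) bool {
	updated := append([]int(nil), counts...)
	updated[i]++
	minCount := updated[0]
	for _, c := range updated {
		if c < minCount {
			minCount = c
		}
	}
	return updated[i]-minCount <= maxSkew
}

func main() {
	// First case above: a 1/1/0/1 cluster with MaxSkew=1.
	counts := []int{1, 1, 0, 1}
	for i := range counts {
		fmt.Printf("node-%d fits: %v\n", i, fits(counts, i, 1))
	}
	// Prints true only for node-2, matching candidateNodes: []string{"node-2"}.
}

The same arithmetic reproduces the 2/0/0/1 case with MaxSkew=2, where node-1, node-2, and node-3 all fit but node-0 does not.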

View File

@@ -273,10 +273,10 @@ func TestPreemption(t *testing.T) {
}
}
}
// Also check that the preemptor pod gets the annotation for nominated node name.
// Also check that the preemptor pod gets the NominatedNodeName field set.
if len(test.preemptedPodIndexes) > 0 {
if err := waitForNominatedNodeName(cs, preemptor); err != nil {
t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v: %v", test.description, preemptor.Name, err)
t.Errorf("Test [%v]: NominatedNodeName field was not set for pod %v: %v", test.description, preemptor.Name, err)
}
}
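
The check above relies on waitForNominatedNodeName polling the preemptor until the scheduler populates pod.Status.NominatedNodeName. That helper's body is not shown in this diff, so the following is an assumed sketch of such a condition, written in the style of the condition functions in the util changes below:

// Assumed shape, mirroring podScheduled below; not the helper's actual body.
func nominatedNodeNameSet(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
		if err != nil {
			// This could be a connection error so we want to retry.
			return false, nil
		}
		return len(pod.Status.NominatedNodeName) > 0, nil
	}
}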

View File

@@ -17,12 +17,21 @@ limitations under the License.
package scheduler
import (
"fmt"
"strings"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
st "k8s.io/kubernetes/pkg/scheduler/testing"
testutils "k8s.io/kubernetes/test/utils"
"strings"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// This file tests the scheduler priority functions.
@@ -234,3 +243,121 @@ func makeContainersWithImages(images []string) []v1.Container {
}
return containers
}
// TestEvenPodsSpreadPriority verifies that the EvenPodsSpread priority works as expected.
func TestEvenPodsSpreadPriority(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EvenPodsSpread, true)()
// Apply feature gates to enable EvenPodsSpread
defer algorithmprovider.ApplyFeatureGates()()
context := initTest(t, "eps-priority")
cs := context.clientSet
ns := context.ns.Name
defer cleanupTest(t, context)
// Add 4 nodes.
nodes, err := createNodes(cs, "node", nil, 4)
if err != nil {
t.Fatalf("Cannot create nodes: %v", err)
}
for i, node := range nodes {
// Apply labels "zone: zone-{0,1}" and "node: <node name>" to each node.
labels := map[string]string{
"zone": fmt.Sprintf("zone-%d", i/2),
"node": node.Name,
}
if err = testutils.AddLabelsToNode(cs, node.Name, labels); err != nil {
t.Fatalf("Cannot add labels to node: %v", err)
}
if err = waitForNodeLabels(cs, node.Name, labels); err != nil {
t.Fatalf("Adding labels to node failed: %v", err)
}
}
// Taint the 0th node
taint := v1.Taint{
Key: "k1",
Value: "v1",
Effect: v1.TaintEffectNoSchedule,
}
if err = addTaintToNode(cs, nodes[0].Name, taint); err != nil {
t.Fatalf("Adding taint to node failed: %v", err)
}
if err = waitForNodeTaints(cs, nodes[0], []v1.Taint{taint}); err != nil {
t.Fatalf("Taint not seen on node: %v", err)
}
pause := imageutils.GetPauseImageName()
tests := []struct {
name string
incomingPod *v1.Pod
existingPods []*v1.Pod
fits bool
want []string // nodes that the pod is expected to be scheduled onto
}{
// Note: pod and node names in the cases below are indexed from 0.
// The symbol ~X~ marks a node that is infeasible (node-0 is tainted).
{
name: "place pod on a ~0~/1/2/3 cluster with MaxSkew=1, node-1 is the preferred fit",
incomingPod: st.MakePod().Namespace(ns).Name("p").Label("foo", "").Container(pause).
SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()).
Obj(),
existingPods: []*v1.Pod{
st.MakePod().Namespace(ns).Name("p1").Node("node-1").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p2a").Node("node-2").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p2b").Node("node-2").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p3a").Node("node-3").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p3b").Node("node-3").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p3c").Node("node-3").Label("foo", "").Container(pause).Obj(),
},
fits: true,
want: []string{"node-1"},
},
{
name: "combined with hardSpread constraint on a ~4~/0/1/2 cluster",
incomingPod: st.MakePod().Namespace(ns).Name("p").Label("foo", "").Container(pause).
SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()).
SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
Obj(),
existingPods: []*v1.Pod{
st.MakePod().Namespace(ns).Name("p0a").Node("node-0").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p0b").Node("node-0").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p0c").Node("node-0").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p0d").Node("node-0").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p2").Node("node-2").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p3a").Node("node-3").Label("foo", "").Container(pause).Obj(),
st.MakePod().Namespace(ns).Name("p3b").Node("node-3").Label("foo", "").Container(pause).Obj(),
},
fits: true,
want: []string{"node-2"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
allPods := append(tt.existingPods, tt.incomingPod)
defer cleanupPods(cs, t, allPods)
for _, pod := range tt.existingPods {
createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(pod)
if err != nil {
t.Fatalf("Test Failed: error while creating pod during test: %v", err)
}
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduled(cs, createdPod.Namespace, createdPod.Name))
if err != nil {
t.Errorf("Test Failed: error while waiting for pod during test: %v", err)
}
}
testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(tt.incomingPod)
if err != nil && !errors.IsInvalid(err) {
t.Fatalf("Test Failed: error while creating pod during test: %v", err)
}
if tt.fits {
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduledIn(cs, testPod.Namespace, testPod.Name, tt.want))
} else {
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
}
if err != nil {
t.Errorf("Test Failed: %v", err)
}
})
}
}
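
For a soft (ScheduleAnyway) constraint the scheduler prefers feasible nodes whose topology domain holds the fewest matching pods. In the ~0~/1/2/3 case, node-0 is filtered out by the taint, and node-1's single matching pod beats node-2's two and node-3's three. A self-contained sketch of that preference, illustrative only and not the actual priority function:

package main

import "fmt"

// preferredNode returns the feasible node whose domain has the fewest
// matching pods; ties go to the first node in feasible order.
func preferredNode(counts map[string]int, feasible []string) string {
	best := ""
	for _, node := range feasible {
		if best == "" || counts[node] < counts[best] {
			best = node
		}
	}
	return best
}

func main() {
	// The ~0~/1/2/3 case: node-0 is tainted and therefore not feasible.
	counts := map[string]int{"node-1": 1, "node-2": 2, "node-3": 3}
	fmt.Println(preferredNode(counts, []string{"node-1", "node-2", "node-3"}))
	// Prints node-1, matching want: []string{"node-1"}.
}

In the second case, the hard zone constraint eliminates node-1 first (placing there would raise zone-0's count to 5 against zone-1's 3, a skew of 2), which is why node-2 wins even though node-1 holds no matching pods.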

View File

@@ -23,7 +23,7 @@ import (
"testing"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@@ -334,9 +334,6 @@ func waitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key stri
func nodeHasLabels(cs clientset.Interface, nodeName string, labels map[string]string) wait.ConditionFunc {
return func() (bool, error) {
node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if errors.IsNotFound(err) {
return false, nil
}
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
@@ -430,6 +427,17 @@ func nodeTainted(cs clientset.Interface, nodeName string, taints []v1.Taint) wai
}
}
func addTaintToNode(cs clientset.Interface, nodeName string, taint v1.Taint) error {
node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
return err
}
nodeCopy := node.DeepCopy()
nodeCopy.Spec.Taints = append(nodeCopy.Spec.Taints, taint)
_, err = cs.CoreV1().Nodes().Update(nodeCopy)
return err
}
// waitForNodeTaints waits for a node to have the target taints and returns
// an error if it does not have taints within the given timeout.
func waitForNodeTaints(cs clientset.Interface, node *v1.Node, taints []v1.Taint) error {
@@ -602,9 +610,6 @@ func podIsGettingEvicted(c clientset.Interface, podNamespace, podName string) wa
func podScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
if errors.IsNotFound(err) {
return false, nil
}
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
@@ -616,14 +621,31 @@ func podScheduled(c clientset.Interface, podNamespace, podName string) wait.Cond
}
}
// podScheduledIn returns a condition function that returns true if the given pod is scheduled onto one of the expected nodes.
func podScheduledIn(c clientset.Interface, podNamespace, podName string, nodeNames []string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
}
if pod.Spec.NodeName == "" {
return false, nil
}
for _, nodeName := range nodeNames {
if pod.Spec.NodeName == nodeName {
return true, nil
}
}
return false, nil
}
}
// podUnschedulable returns a condition function that returns true if the given pod
// gets unschedulable status.
func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
if errors.IsNotFound(err) {
return false, nil
}
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
@@ -640,9 +662,6 @@ func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.
func podSchedulingError(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
if errors.IsNotFound(err) {
return false, nil
}
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
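
Taken together, the tests above consume these condition functions through wait.Poll. A small convenience wrapper (hypothetical name, assuming the package-level pollInterval and the helpers in this file) shows the pattern:

// expectScheduledIn is a hypothetical convenience wrapper around the pattern
// used in TestEvenPodsSpreadPredicate and TestEvenPodsSpreadPriority above.
func expectScheduledIn(t *testing.T, cs clientset.Interface, ns, name string, nodes []string) {
	if err := wait.Poll(pollInterval, wait.ForeverTestTimeout,
		podScheduledIn(cs, ns, name, nodes)); err != nil {
		t.Errorf("pod %s/%s did not land on one of %v: %v", ns, name, nodes, err)
	}
}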