From 505ae6930f84c24a65fa8e66528b3989ea464cfc Mon Sep 17 00:00:00 2001 From: Alex Wang Date: Fri, 29 May 2020 11:54:03 +0800 Subject: [PATCH] add integration-test for NonPreemption --- test/integration/scheduler/preemption_test.go | 84 +++++++++++++++++++ test/integration/scheduler/util.go | 23 +++++ 2 files changed, 107 insertions(+) diff --git a/test/integration/scheduler/preemption_test.go b/test/integration/scheduler/preemption_test.go index 836058a5696..e3fffcfc9dd 100644 --- a/test/integration/scheduler/preemption_test.go +++ b/test/integration/scheduler/preemption_test.go @@ -33,13 +33,16 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" + featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/klog/v2" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/apis/scheduling" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler" schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" @@ -434,6 +437,87 @@ func TestPreemption(t *testing.T) { } } +// TestNonPreemption tests NonPreempt option of PriorityClass of scheduler works as expected. +func TestNonPreemption(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NonPreemptingPriority, true)() + + var preemptNever = v1.PreemptNever + // Initialize scheduler. 
+ testCtx := initTest(t, "non-preemption") + defer testutils.CleanupTest(t, testCtx) + cs := testCtx.ClientSet + tests := []struct { + name string + PreemptionPolicy *v1.PreemptionPolicy + }{ + { + name: "pod preemption will happen", + PreemptionPolicy: nil, + }, + { + name: "pod preemption will not happen", + PreemptionPolicy: &preemptNever, + }, + } + victim := initPausePod(&pausePodConfig{ + Name: "victim-pod", + Namespace: testCtx.NS.Name, + Priority: &lowPriority, + Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)}, + }, + }) + + preemptor := initPausePod(&pausePodConfig{ + Name: "preemptor-pod", + Namespace: testCtx.NS.Name, + Priority: &highPriority, + Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)}, + }, + }) + + // Create a node with some resources and a label. 
+ nodeRes := &v1.ResourceList{ + v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI), + v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI), + } + _, err := createNode(testCtx.ClientSet, "node1", nodeRes) + if err != nil { + t.Fatalf("Error creating nodes: %v", err) + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + defer testutils.CleanupPods(cs, t, []*v1.Pod{preemptor, victim}) + preemptor.Spec.PreemptionPolicy = test.PreemptionPolicy + victimPod, err := createPausePod(cs, victim) + if err != nil { + t.Fatalf("Error while creating victim: %v", err) + } + if err := waitForPodToScheduleWithTimeout(cs, victimPod, 5*time.Second); err != nil { + t.Fatalf("victim %v should be become scheduled", victimPod.Name) + } + + preemptorPod, err := createPausePod(cs, preemptor) + if err != nil { + t.Fatalf("Error while creating preemptor: %v", err) + } + + err = waitForNominatedNodeNameWithTimeout(cs, preemptorPod, 5*time.Second) + // test.PreemptionPolicy == nil means we expect the preemptor to be nominated. + expect := test.PreemptionPolicy == nil + // err == nil indicates the preemptor is indeed nominated. + got := err == nil + if got != expect { + t.Errorf("Expect preemptor to be nominated=%v, but got=%v", expect, got) + } + }) + } +} + // TestDisablePreemption tests disable pod preemption of scheduler works as expected. func TestDisablePreemption(t *testing.T) { // Initialize scheduler, and disable preemption. 
diff --git a/test/integration/scheduler/util.go b/test/integration/scheduler/util.go index 0f9ac4402b4..343725ee9ba 100644 --- a/test/integration/scheduler/util.go +++ b/test/integration/scheduler/util.go @@ -204,6 +204,7 @@ type pausePodConfig struct { NodeName string SchedulerName string Priority *int32 + PreemptionPolicy *v1.PreemptionPolicy PriorityClassName string } @@ -230,6 +231,7 @@ func initPausePod(conf *pausePodConfig) *v1.Pod { NodeName: conf.NodeName, SchedulerName: conf.SchedulerName, Priority: conf.Priority, + PreemptionPolicy: conf.PreemptionPolicy, PriorityClassName: conf.PriorityClassName, }, } @@ -398,6 +400,12 @@ func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error { return waitForPodUnschedulableWithTimeout(cs, pod, 30*time.Second) } +// waitForPodToScheduleWithTimeout waits for a pod to get scheduled and returns +// an error if it is not scheduled within the given timeout. +func waitForPodToScheduleWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error { + return wait.Poll(100*time.Millisecond, timeout, podScheduled(cs, pod.Namespace, pod.Name)) +} + // waitForPDBsStable waits for PDBs to have "CurrentHealthy" status equal to // the expected values. func waitForPDBsStable(testCtx *testutils.TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error { @@ -485,3 +493,18 @@ func cleanupPodsInNamespace(cs clientset.Interface, t *testing.T, ns string) { t.Errorf("error while waiting for pods in namespace %v: %v", ns, err) } } + +// podScheduled returns true if a node is assigned to the given pod. +func podScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc { + return func() (bool, error) { + pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{}) + if err != nil { + // This could be a connection error so we want to retry. + return false, nil + } + if pod.Spec.NodeName == "" { + return false, nil + } + return true, nil + } +}