From 818e180300a7903a05090ad1818ef80d43956aaf Mon Sep 17 00:00:00 2001
From: Michal Wozniak
Date: Tue, 8 Nov 2022 15:18:55 +0100
Subject: [PATCH] Add e2e test for adding DisruptionTarget condition to the
 preemption victim pod

---
 test/e2e/scheduling/predicates.go |  2 +
 test/e2e/scheduling/preemption.go | 73 +++++++++++++++++++++++++++++++
 2 files changed, 75 insertions(+)

diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go
index 10204c9a552..7775b77596e 100644
--- a/test/e2e/scheduling/predicates.go
+++ b/test/e2e/scheduling/predicates.go
@@ -62,6 +62,7 @@ var workerNodes = sets.String{}
 type pausePodConfig struct {
 	Name                              string
 	Namespace                         string
+	Finalizers                        []string
 	Affinity                          *v1.Affinity
 	Annotations, Labels, NodeSelector map[string]string
 	Resources                         *v1.ResourceRequirements
@@ -897,6 +898,7 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
 			Labels:          map[string]string{},
 			Annotations:     map[string]string{},
 			OwnerReferences: conf.OwnerReferences,
+			Finalizers:      conf.Finalizers,
 		},
 		Spec: v1.PodSpec{
 			SecurityContext: e2epod.GetRestrictedPodSecurityContext(),
diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go
index 1c3e424fc5a..2c282214d8d 100644
--- a/test/e2e/scheduling/preemption.go
+++ b/test/e2e/scheduling/preemption.go
@@ -60,6 +60,10 @@ type priorityPair struct {
 
 var testExtendedResource = v1.ResourceName("scheduling.k8s.io/foo")
 
+const (
+	testFinalizer = "example.com/test-finalizer"
+)
+
 var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 	var cs clientset.Interface
 	var nodeList *v1.NodeList
@@ -313,6 +317,75 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 		}
 	})
 
+	// 1. Run a low priority pod with a finalizer which consumes 1/1 of node resources
+	// 2. Schedule a higher priority pod which also consumes 1/1 of node resources
+	// 3. See if the pod with lower priority is preempted and has the pod disruption condition
+	// 4. Remove the finalizer so that the pod can be deleted by GC
+	ginkgo.It("validates pod disruption condition is added to the preempted pod", func() {
+		podRes := v1.ResourceList{testExtendedResource: resource.MustParse("1")}
+
+		ginkgo.By("Select a node to run the lower and higher priority pods")
+		framework.ExpectNotEqual(len(nodeList.Items), 0, "We need at least one node for the test to run")
+		node := nodeList.Items[0]
+		nodeCopy := node.DeepCopy()
+		nodeCopy.Status.Capacity[testExtendedResource] = resource.MustParse("1")
+		err := patchNode(cs, &node, nodeCopy)
+		framework.ExpectNoError(err)
+
+		// prepare node affinity to make sure both the lower and higher priority pods are scheduled on the same node
+		testNodeAffinity := v1.Affinity{
+			NodeAffinity: &v1.NodeAffinity{
+				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
+					NodeSelectorTerms: []v1.NodeSelectorTerm{
+						{
+							MatchFields: []v1.NodeSelectorRequirement{
+								{Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{node.Name}},
+							},
+						},
+					},
+				},
+			},
+		}
+
+		ginkgo.By("Create a low priority pod that consumes 1/1 of node resources")
+		victimPod := createPausePod(f, pausePodConfig{
+			Name:              "victim-pod",
+			PriorityClassName: lowPriorityClassName,
+			Resources: &v1.ResourceRequirements{
+				Requests: podRes,
+				Limits:   podRes,
+			},
+			Finalizers: []string{testFinalizer},
+			Affinity:   &testNodeAffinity,
+		})
+		framework.Logf("Created pod: %v", victimPod.Name)
+
+		ginkgo.By("Wait for the victim pod to be scheduled")
+		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, victimPod))
+
+		// Remove the finalizer so that the victim pod can be GCed
+		defer e2epod.NewPodClient(f).RemoveFinalizer(victimPod.Name, testFinalizer)
+
+		ginkgo.By("Create a high priority pod to trigger preemption of the lower priority pod")
+		preemptorPod := createPausePod(f, pausePodConfig{
+			Name:              "preemptor-pod",
+			PriorityClassName: highPriorityClassName,
+			Resources: &v1.ResourceRequirements{
+				Requests: podRes,
+				Limits:   podRes,
+			},
+			Affinity: &testNodeAffinity,
+		})
+		framework.Logf("Created pod: %v", preemptorPod.Name)
+
+		ginkgo.By("Waiting for the victim pod to be terminating")
+		err = e2epod.WaitForPodTerminatingInNamespaceTimeout(f.ClientSet, victimPod.Name, victimPod.Namespace, framework.PodDeleteTimeout)
+		framework.ExpectNoError(err)
+
+		ginkgo.By("Verifying the pod has the pod disruption condition")
+		e2epod.VerifyPodHasConditionWithType(f, victimPod, v1.DisruptionTarget)
+	})
+
 	ginkgo.Context("PodTopologySpread Preemption", func() {
 		var nodeNames []string
 		var nodes []*v1.Node
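
Note for reviewers: the final assertion is that the victim pod, held in Terminating by the test finalizer, carries the new DisruptionTarget condition in its status. Outside the e2e framework, the equivalent check reduces to reading pod.Status.Conditions with plain client-go; the sketch below shows that (the package and helper name hasDisruptionTarget are illustrative, not part of this patch).

package sketch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// hasDisruptionTarget reports whether the pod carries a DisruptionTarget
// condition, which the scheduler's preemption path is expected to add
// (with Status=True) before it deletes the victim.
func hasDisruptionTarget(ctx context.Context, cs kubernetes.Interface, namespace, name string) (bool, error) {
	pod, err := cs.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	for _, cond := range pod.Status.Conditions {
		if cond.Type == v1.DisruptionTarget {
			return true, nil
		}
	}
	return false, nil
}

The e2epod.VerifyPodHasConditionWithType helper used in the test wraps an equivalent condition lookup with the framework's logging and failure handling.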
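Similarly, the deferred RemoveFinalizer call is what lets the victim pod actually go away after the assertion; without it the pod would stay Terminating and block namespace cleanup. Functionally this amounts to a get-modify-update with a retry on write conflicts. A minimal sketch under the same assumptions (removeFinalizer is an illustrative name, not the framework helper):

package sketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// removeFinalizer strips one finalizer from the pod so its deletion can
// complete. Update can fail with a 409 Conflict if another writer races
// us, hence the RetryOnConflict wrapper.
func removeFinalizer(ctx context.Context, cs kubernetes.Interface, namespace, name, finalizer string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		pod, err := cs.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		// Keep every finalizer except the one being removed.
		kept := pod.Finalizers[:0]
		for _, f := range pod.Finalizers {
			if f != finalizer {
				kept = append(kept, f)
			}
		}
		pod.Finalizers = kept
		_, err = cs.CoreV1().Pods(namespace).Update(ctx, pod, metav1.UpdateOptions{})
		return err
	})
}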