Mirror of https://github.com/k3s-io/kubernetes.git
Add e2e test for adding DisruptionTarget condition to the preemption victim pod

Commit 818e180300, parent c803892bd8
@@ -62,6 +62,7 @@ var workerNodes = sets.String{}
 type pausePodConfig struct {
 	Name string
 	Namespace string
+	Finalizers []string
 	Affinity *v1.Affinity
 	Annotations, Labels, NodeSelector map[string]string
 	Resources *v1.ResourceRequirements
@@ -897,6 +898,7 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
 			Labels:          map[string]string{},
 			Annotations:     map[string]string{},
 			OwnerReferences: conf.OwnerReferences,
+			Finalizers:      conf.Finalizers,
 		},
 		Spec: v1.PodSpec{
 			SecurityContext: e2epod.GetRestrictedPodSecurityContext(),
@@ -60,6 +60,10 @@ type priorityPair struct {
 
 var testExtendedResource = v1.ResourceName("scheduling.k8s.io/foo")
 
+const (
+	testFinalizer = "example.com/test-finalizer"
+)
+
 var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 	var cs clientset.Interface
 	var nodeList *v1.NodeList
@@ -313,6 +317,75 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 		}
 	})
 
+	// 1. Run a low priority pod with finalizer which consumes 1/1 of node resources
+	// 2. Schedule a higher priority pod which also consumes 1/1 of node resources
+	// 3. See if the pod with lower priority is preempted and has the pod disruption condition
+	// 4. Remove the finalizer so that the pod can be deleted by GC
+	ginkgo.It("validates pod disruption condition is added to the preempted pod", func() {
+		podRes := v1.ResourceList{testExtendedResource: resource.MustParse("1")}
+
+		ginkgo.By("Select a node to run the lower and higher priority pods")
+		framework.ExpectNotEqual(len(nodeList.Items), 0, "We need at least one node for the test to run")
+		node := nodeList.Items[0]
+		nodeCopy := node.DeepCopy()
+		nodeCopy.Status.Capacity[testExtendedResource] = resource.MustParse("1")
+		err := patchNode(cs, &node, nodeCopy)
+		framework.ExpectNoError(err)
+
+		// prepare node affinity to make sure both the lower and higher priority pods are scheduled on the same node
+		testNodeAffinity := v1.Affinity{
+			NodeAffinity: &v1.NodeAffinity{
+				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
+					NodeSelectorTerms: []v1.NodeSelectorTerm{
+						{
+							MatchFields: []v1.NodeSelectorRequirement{
+								{Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{node.Name}},
+							},
+						},
+					},
+				},
+			},
+		}
+
+		ginkgo.By("Create a low priority pod that consumes 1/1 of node resources")
+		victimPod := createPausePod(f, pausePodConfig{
+			Name:              "victim-pod",
+			PriorityClassName: lowPriorityClassName,
+			Resources: &v1.ResourceRequirements{
+				Requests: podRes,
+				Limits:   podRes,
+			},
+			Finalizers: []string{testFinalizer},
+			Affinity:   &testNodeAffinity,
+		})
+		framework.Logf("Created pod: %v", victimPod.Name)
+
+		ginkgo.By("Wait for the victim pod to be scheduled")
+		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, victimPod))
+
+		// Remove the finalizer so that the victim pod can be GCed
+		defer e2epod.NewPodClient(f).RemoveFinalizer(victimPod.Name, testFinalizer)
+
+		ginkgo.By("Create a high priority pod to trigger preemption of the lower priority pod")
+		preemptorPod := createPausePod(f, pausePodConfig{
+			Name:              "preemptor-pod",
+			PriorityClassName: highPriorityClassName,
+			Resources: &v1.ResourceRequirements{
+				Requests: podRes,
+				Limits:   podRes,
+			},
+			Affinity: &testNodeAffinity,
+		})
+		framework.Logf("Created pod: %v", preemptorPod.Name)
+
+		ginkgo.By("Waiting for the victim pod to be terminating")
+		err = e2epod.WaitForPodTerminatingInNamespaceTimeout(f.ClientSet, victimPod.Name, victimPod.Namespace, framework.PodDeleteTimeout)
+		framework.ExpectNoError(err)
+
+		ginkgo.By("Verifying the pod has the pod disruption condition")
+		e2epod.VerifyPodHasConditionWithType(f, victimPod, v1.DisruptionTarget)
+	})
+
 	ginkgo.Context("PodTopologySpread Preemption", func() {
 		var nodeNames []string
 		var nodes []*v1.Node
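The test advertises exactly one unit of the test-only extended resource on the chosen node by patching node status, which is what the suite's patchNode helper does with the modified node copy. As a rough illustration of the same step with plain client-go rather than the e2e helper, assuming a kubernetes.Interface clientset and a hypothetical addExtendedResourceCapacity function:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// addExtendedResourceCapacity (hypothetical) merges a one-unit capacity entry
// for the test-only resource into the node's status subresource, so that a
// single pod requesting "1" of it fills the node.
func addExtendedResourceCapacity(ctx context.Context, cs kubernetes.Interface, nodeName string) error {
	patch := []byte(`{"status":{"capacity":{"scheduling.k8s.io/foo":"1"}}}`)
	if _, err := cs.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "status"); err != nil {
		return fmt.Errorf("patching capacity of node %s: %w", nodeName, err)
	}
	return nil
}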
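The final assertion goes through the e2epod.VerifyPodHasConditionWithType helper. Outside the e2e framework the same check is just a read of pod.Status.Conditions; a minimal sketch, assuming a client-go clientset and a hypothetical hasDisruptionTarget helper (the helper name and error handling are illustrative, not from the commit):

package example

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// hasDisruptionTarget (hypothetical) reports whether the pod carries a
// DisruptionTarget condition with status True, which the scheduler's
// preemption path is expected to set on the victim before deleting it.
func hasDisruptionTarget(ctx context.Context, cs kubernetes.Interface, namespace, name string) (bool, error) {
	pod, err := cs.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return false, fmt.Errorf("getting pod %s/%s: %w", namespace, name, err)
	}
	for _, cond := range pod.Status.Conditions {
		if cond.Type == v1.DisruptionTarget && cond.Status == v1.ConditionTrue {
			return true, nil
		}
	}
	return false, nil
}

Requiring the condition status to be True is slightly stricter than only checking that a condition of that type is present.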
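The example.com/test-finalizer is what keeps the preempted Pod object around after the preemptor triggers its deletion, so the test can still inspect its conditions; the deferred RemoveFinalizer call then lets garbage collection finish. A rough client-go equivalent of that cleanup, using a hypothetical removeFinalizer helper with conflict retries:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// removeFinalizer (hypothetical) drops the given finalizer from the pod's
// metadata so that the already-issued deletion can complete.
func removeFinalizer(ctx context.Context, cs kubernetes.Interface, namespace, name, finalizer string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		pod, err := cs.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		var kept []string
		for _, f := range pod.Finalizers {
			if f != finalizer {
				kept = append(kept, f)
			}
		}
		pod.Finalizers = kept
		_, err = cs.CoreV1().Pods(namespace).Update(ctx, pod, metav1.UpdateOptions{})
		return err
	})
}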