Merge pull request #117586 from mimowo/preemption-for-critical-pods

Add DisruptionTarget condition when preempting for critical pod
Kubernetes Prow Robot 2023-05-23 11:22:27 -07:00 committed by GitHub
commit 8b8dfcad12
2 changed files with 55 additions and 0 deletions
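With the PodDisruptionConditions feature gate enabled, a pod that the kubelet preempts to admit a critical pod now carries a DisruptionTarget condition in its status. As a hypothetical illustration only (not part of this PR; the kubeconfig setup, the "default" namespace, and the "guaranteed-pod" name are placeholders), a client-go consumer could detect that condition roughly like this:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the local kubeconfig (placeholder setup).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// Fetch the pod that was presumably preempted; namespace and name are placeholders.
	pod, err := client.CoreV1().Pods("default").Get(context.Background(), "guaranteed-pod", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}

	// Look for the DisruptionTarget condition that the kubelet attaches on preemption.
	for _, cond := range pod.Status.Conditions {
		if cond.Type == v1.DisruptionTarget && cond.Status == v1.ConditionTrue {
			fmt.Printf("pod %s/%s disrupted: reason=%s message=%q\n",
				pod.Namespace, pod.Name, cond.Reason, cond.Message)
		}
	}
}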

View File

@@ -21,10 +21,13 @@ import (
"math"
v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/api/v1/resource"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/eviction"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
@@ -103,6 +106,14 @@ func (c *CriticalPodAdmissionHandler) evictPodsToFreeRequests(admitPod *v1.Pod,
status.Phase = v1.PodFailed
status.Reason = events.PreemptContainer
status.Message = message
if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) {
podutil.UpdatePodCondition(status, &v1.PodCondition{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
Reason: v1.PodReasonTerminationByKubelet,
Message: "Pod was preempted by Kubelet to accommodate a critical pod.",
})
}
})
if err != nil {
klog.ErrorS(err, "Failed to evict pod", "pod", klog.KObj(pod))

View File

@@ -23,6 +23,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
kubeapi "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/scheduling"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
@@ -87,6 +88,49 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]
}
}
})
ginkgo.It("should add DisruptionTarget condition to the preempted pod [NodeFeature:PodDisruptionConditions]", func(ctx context.Context) {
// Because the Priority admission plugin is enabled, the Pod is rejected if the priority class is not found.
node := getNodeName(ctx, f)
nonCriticalGuaranteed := getTestPod(false, guaranteedPodName, v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
},
}, node)
criticalPod := getTestPod(true, criticalPodName, v1.ResourceRequirements{
// request the entire resource capacity of the node, so that
// admitting this pod requires the other pod to be preempted
Requests: getNodeCPUAndMemoryCapacity(ctx, f),
}, node)
criticalPod.Namespace = kubeapi.NamespaceSystem
ginkgo.By(fmt.Sprintf("create the non-critical pod %q", klog.KObj(nonCriticalGuaranteed)))
e2epod.NewPodClient(f).CreateSync(ctx, nonCriticalGuaranteed)
ginkgo.By(fmt.Sprintf("create the critical pod %q", klog.KObj(criticalPod)))
e2epod.PodClientNS(f, kubeapi.NamespaceSystem).Create(ctx, criticalPod)
ginkgo.By(fmt.Sprintf("await for the critical pod %q to be ready", klog.KObj(criticalPod)))
err := e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, criticalPod.Name, kubeapi.NamespaceSystem)
framework.ExpectNoError(err, "Failed to await for the pod to be running: %q", klog.KObj(criticalPod))
// Check that the non-critical pod has been preempted and carries the DisruptionTarget condition
updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err)
for _, p := range updatedPodList.Items {
ginkgo.By(fmt.Sprintf("verify that the non-critical pod %q is preempted and has the DisruptionTarget condition", klog.KObj(&p)))
framework.ExpectEqual(p.Status.Phase, v1.PodSucceeded, fmt.Sprintf("pod: %v should be preempted with status: %#v", p.Name, p.Status))
if condition := e2epod.FindPodConditionByType(&p.Status, v1.DisruptionTarget); condition == nil {
framework.Failf("pod %q should have the condition: %q, pod status: %v", klog.KObj(&p), v1.DisruptionTarget, p.Status)
}
}
})
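The critical pod in this test requests the node's entire CPU and memory (via the test's getNodeCPUAndMemoryCapacity helper), so the kubelet can only admit it by preempting the guaranteed pod, which is exactly the code path changed in the first file. As an illustrative sketch only (not the helper used here, and assuming the node's allocatable figures are the right ones to copy), such a lookup could be written with client-go as:

package criticalpodsketch // hypothetical package name

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// nodeCPUAndMemory returns the node's allocatable CPU and memory as a
// ResourceList; a pod requesting exactly this amount leaves no headroom
// for other non-critical pods on the node.
func nodeCPUAndMemory(ctx context.Context, c kubernetes.Interface, nodeName string) (v1.ResourceList, error) {
	node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	return v1.ResourceList{
		v1.ResourceCPU:    node.Status.Allocatable[v1.ResourceCPU],
		v1.ResourceMemory: node.Status.Allocatable[v1.ResourceMemory],
	}, nil
}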
ginkgo.AfterEach(func(ctx context.Context) {
// Delete Pods
e2epod.NewPodClient(f).DeleteSync(ctx, guaranteedPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)