Merge pull request #113360 from mimowo/handling-pod-failures-beta-enable

Enable the "Retriable and non-retriable pod failures for jobs" feature into beta
Merged by Kubernetes Prow Robot on 2022-11-09 08:30:24 -08:00 (committed by GitHub)
commit 7e0e0c8ec3
45 changed files with 399 additions and 139 deletions
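
For orientation before the file-by-file diff: with the JobPodFailurePolicy and PodDisruptionConditions gates on by default, a Job can declare per-rule handling of pod failures. The following Go sketch is illustrative only (it is not part of this PR; names such as example-job and the exit code 42 are made up) and uses the k8s.io/api types that the diff below renames:

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/pointer"
)

// buildExampleJob sketches a Job that uses the now-beta pod failure policy:
// failures caused by disruptions (DisruptionTarget=True) are ignored and do
// not count against backoffLimit, while exit code 42 fails the Job early.
func buildExampleJob() *batchv1.Job {
	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "example-job"},
		Spec: batchv1.JobSpec{
			BackoffLimit: pointer.Int32(3),
			PodFailurePolicy: &batchv1.PodFailurePolicy{
				Rules: []batchv1.PodFailurePolicyRule{
					{
						Action: batchv1.PodFailurePolicyActionIgnore,
						OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{
							{Type: corev1.DisruptionTarget, Status: corev1.ConditionTrue},
						},
					},
					{
						Action: batchv1.PodFailurePolicyActionFailJob,
						OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{
							Operator: batchv1.PodFailurePolicyOnExitCodesOpIn,
							Values:   []int32{42},
						},
					},
				},
			},
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{
						{Name: "main", Image: "busybox", Command: []string{"sh", "-c", "exit 0"}},
					},
				},
			},
		},
	}
}

The first rule keeps disruption-driven failures from counting against backoffLimit; the second fails the Job immediately on the matching exit code.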


@@ -440,8 +440,7 @@ const (
 // JobFailed means the job has failed its execution.
 JobFailed JobConditionType = "Failed"
 // FailureTarget means the job is about to fail its execution.
-// The constant is to be renamed once the name is accepted within the KEP-3329.
-AlphaNoCompatGuaranteeJobFailureTarget JobConditionType = "FailureTarget"
+JobFailureTarget JobConditionType = "FailureTarget"
 )

 // JobCondition describes current state of a job.


@@ -52,7 +52,7 @@ func TestSetDefaultJob(t *testing.T) {
 Action: batchv1.PodFailurePolicyActionFailJob,
 OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 {
@@ -75,7 +75,7 @@ func TestSetDefaultJob(t *testing.T) {
 Action: batchv1.PodFailurePolicyActionFailJob,
 OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 },
 },
 },
@@ -96,7 +96,7 @@ func TestSetDefaultJob(t *testing.T) {
 Action: batchv1.PodFailurePolicyActionFailJob,
 OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 {
@@ -120,7 +120,7 @@ func TestSetDefaultJob(t *testing.T) {
 Action: batchv1.PodFailurePolicyActionFailJob,
 OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },


@@ -118,7 +118,7 @@ func TestValidateJob(t *testing.T) {
 Action: batch.PodFailurePolicyActionIgnore,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: api.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: api.DisruptionTarget,
 Status: api.ConditionTrue,
 },
 },
@@ -456,7 +456,7 @@ func TestValidateJob(t *testing.T) {
 },
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: api.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: api.DisruptionTarget,
 Status: api.ConditionTrue,
 },
 },
@@ -558,7 +558,7 @@ func TestValidateJob(t *testing.T) {
 Action: batch.PodFailurePolicyActionIgnore,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: api.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: api.DisruptionTarget,
 },
 },
 },
@@ -577,7 +577,7 @@ func TestValidateJob(t *testing.T) {
 Action: batch.PodFailurePolicyActionIgnore,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: api.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: api.DisruptionTarget,
 Status: "UnknownStatus",
 },
 },
@@ -968,7 +968,7 @@ func TestValidateJobUpdate(t *testing.T) {
 Action: batch.PodFailurePolicyActionIgnore,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: api.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: api.DisruptionTarget,
 Status: api.ConditionTrue,
 },
 },
@@ -993,7 +993,7 @@ func TestValidateJobUpdate(t *testing.T) {
 Action: batch.PodFailurePolicyActionIgnore,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: api.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: api.DisruptionTarget,
 Status: api.ConditionTrue,
 },
 },
@@ -1007,7 +1007,7 @@ func TestValidateJobUpdate(t *testing.T) {
 Action: batch.PodFailurePolicyActionCount,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: api.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: api.DisruptionTarget,
 Status: api.ConditionTrue,
 },
 },
@@ -1030,7 +1030,7 @@ func TestValidateJobUpdate(t *testing.T) {
 Action: batch.PodFailurePolicyActionIgnore,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: api.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: api.DisruptionTarget,
 Status: api.ConditionTrue,
 },
 },


@@ -2433,10 +2433,9 @@ const (
 PodReasonSchedulingGated = "SchedulingGated"
 // ContainersReady indicates whether all containers in the pod are ready.
 ContainersReady PodConditionType = "ContainersReady"
-// AlphaNoCompatGuaranteeDisruptionTarget indicates the pod is about to be terminated due to a
+// DisruptionTarget indicates the pod is about to be terminated due to a
 // disruption (such as preemption, eviction API or garbage-collection).
-// The constant is to be renamed once the name is accepted within the KEP-3329.
-AlphaNoCompatGuaranteeDisruptionTarget PodConditionType = "DisruptionTarget"
+DisruptionTarget PodConditionType = "DisruptionTarget"
 )

 // PodCondition represents pod's condition


@@ -755,7 +755,7 @@ func (dc *DisruptionController) syncStalePodDisruption(ctx context.Context, key
 WithStatus(corev1apply.PodStatus()).
 WithResourceVersion(pod.ResourceVersion)
 podApply.Status.WithConditions(corev1apply.PodCondition().
-WithType(v1.AlphaNoCompatGuaranteeDisruptionTarget).
+WithType(v1.DisruptionTarget).
 WithStatus(v1.ConditionFalse).
 WithLastTransitionTime(metav1.Now()),
 )
@@ -998,11 +998,11 @@ func (dc *DisruptionController) nonTerminatingPodHasStaleDisruptionCondition(pod
 if pod.DeletionTimestamp != nil {
 return false, 0
 }
-_, cond := apipod.GetPodCondition(&pod.Status, v1.AlphaNoCompatGuaranteeDisruptionTarget)
+_, cond := apipod.GetPodCondition(&pod.Status, v1.DisruptionTarget)
 // Pod disruption conditions added by kubelet are never considered stale because the condition might take
 // arbitrarily long before the pod is terminating (has deletion timestamp). Also, pod conditions present
 // on pods in terminal phase are not stale to avoid unnecessary status updates.
-if cond == nil || cond.Status != v1.ConditionTrue || cond.Reason == v1.AlphaNoCompatGuaranteePodReasonTerminationByKubelet || apipod.IsPodPhaseTerminal(pod.Status.Phase) {
+if cond == nil || cond.Status != v1.ConditionTrue || cond.Reason == v1.PodReasonTerminationByKubelet || apipod.IsPodPhaseTerminal(pod.Status.Phase) {
 return false, 0
 }
 waitFor := dc.stalePodDisruptionTimeout - dc.clock.Since(cond.LastTransitionTime.Time)


@@ -1403,7 +1403,7 @@ func TestStalePodDisruption(t *testing.T) {
 Status: v1.PodStatus{
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 LastTransitionTime: metav1.Time{Time: now},
 },
@@ -1413,7 +1413,7 @@ func TestStalePodDisruption(t *testing.T) {
 timePassed: 2*time.Minute + time.Second,
 wantConditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionFalse,
 },
 },
@@ -1427,7 +1427,7 @@ func TestStalePodDisruption(t *testing.T) {
 Status: v1.PodStatus{
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 LastTransitionTime: metav1.Time{Time: now},
 },
@@ -1437,7 +1437,7 @@ func TestStalePodDisruption(t *testing.T) {
 timePassed: 2*time.Minute - time.Second,
 wantConditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -1452,7 +1452,7 @@ func TestStalePodDisruption(t *testing.T) {
 Status: v1.PodStatus{
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 LastTransitionTime: metav1.Time{Time: now},
 },
@@ -1462,7 +1462,7 @@ func TestStalePodDisruption(t *testing.T) {
 timePassed: 2*time.Minute + time.Second,
 wantConditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -1487,7 +1487,7 @@ func TestStalePodDisruption(t *testing.T) {
 Status: v1.PodStatus{
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionFalse,
 },
 },
@@ -1496,7 +1496,7 @@ func TestStalePodDisruption(t *testing.T) {
 timePassed: 2*time.Minute + time.Second,
 wantConditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionFalse,
 },
 },


@@ -758,12 +758,12 @@ func (jm *Controller) syncJob(ctx context.Context, key string) (forget bool, rEr
 (failed > *job.Spec.BackoffLimit)
 if feature.DefaultFeatureGate.Enabled(features.JobPodFailurePolicy) {
-if failureTargetCondition := findConditionByType(job.Status.Conditions, batch.AlphaNoCompatGuaranteeJobFailureTarget); failureTargetCondition != nil {
+if failureTargetCondition := findConditionByType(job.Status.Conditions, batch.JobFailureTarget); failureTargetCondition != nil {
 finishedCondition = newFailedConditionForFailureTarget(failureTargetCondition)
 } else if failJobMessage := getFailJobMessage(&job, pods, uncounted.Failed()); failJobMessage != nil {
 if uncounted != nil {
 // Prepare the interim FailureTarget condition to record the failure message before the finalizers (allowing removal of the pods) are removed.
-finishedCondition = newCondition(batch.AlphaNoCompatGuaranteeJobFailureTarget, v1.ConditionTrue, jobConditionReasonPodFailurePolicy, *failJobMessage)
+finishedCondition = newCondition(batch.JobFailureTarget, v1.ConditionTrue, jobConditionReasonPodFailurePolicy, *failJobMessage)
 } else {
 // Prepare the Failed job condition for the legacy path without finalizers (don't use the interim FailureTarget condition).
 finishedCondition = newCondition(batch.JobFailed, v1.ConditionTrue, jobConditionReasonPodFailurePolicy, *failJobMessage)
@@ -1090,7 +1090,7 @@ func (jm *Controller) trackJobStatusAndRemoveFinalizers(ctx context.Context, job
 job.Status.CompletedIndexes = succeededIndexes.String()
 }
 if feature.DefaultFeatureGate.Enabled(features.JobPodFailurePolicy) {
-if finishedCond != nil && finishedCond.Type == batch.AlphaNoCompatGuaranteeJobFailureTarget {
+if finishedCond != nil && finishedCond.Type == batch.JobFailureTarget {
 // Append the interim FailureTarget condition to update the job status with before finalizers are removed.
 job.Status.Conditions = append(job.Status.Conditions, *finishedCond)


@@ -2192,7 +2192,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 Status: batch.JobStatus{
 Conditions: []batch.JobCondition{
 {
-Type: batch.AlphaNoCompatGuaranteeJobFailureTarget,
+Type: batch.JobFailureTarget,
 Status: v1.ConditionTrue,
 Reason: "PodFailurePolicy",
 Message: "Container main-container for pod default/mypod-0 failed with exit code 5 matching FailJob rule at index 1",
@@ -2245,7 +2245,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 Status: batch.JobStatus{
 Conditions: []batch.JobCondition{
 {
-Type: batch.AlphaNoCompatGuaranteeJobFailureTarget,
+Type: batch.JobFailureTarget,
 Status: v1.ConditionTrue,
 Reason: "PodFailurePolicy",
 Message: "Container main-container for pod default/already-deleted-pod failed with exit code 5 matching FailJob rule at index 1",
@@ -2751,7 +2751,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 Action: batch.PodFailurePolicyActionIgnore,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -2769,7 +2769,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 Status: v1.ConditionTrue,
 },
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -2797,7 +2797,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 Action: batch.PodFailurePolicyActionIgnore,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -2811,7 +2811,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 Phase: v1.PodFailed,
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -2839,7 +2839,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 Action: batch.PodFailurePolicyActionFailJob,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -2853,7 +2853,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 Phase: v1.PodFailed,
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },


@@ -89,7 +89,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Action: "UnkonwnAction",
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -98,7 +98,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Action: batch.PodFailurePolicyActionIgnore,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -111,7 +111,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Phase: v1.PodFailed,
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -410,7 +410,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Action: batch.PodFailurePolicyActionIgnore,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -423,7 +423,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Phase: v1.PodFailed,
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -440,7 +440,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Action: batch.PodFailurePolicyActionIgnore,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionFalse,
 },
 },
@@ -453,7 +453,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Phase: v1.PodFailed,
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionFalse,
 },
 },
@@ -470,7 +470,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Action: batch.PodFailurePolicyActionIgnore,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionUnknown,
 },
 },
@@ -483,7 +483,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Phase: v1.PodFailed,
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionUnknown,
 },
 },
@@ -500,7 +500,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Action: batch.PodFailurePolicyActionIgnore,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionFalse,
 },
 },
@@ -513,7 +513,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Phase: v1.PodFailed,
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -529,7 +529,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Action: batch.PodFailurePolicyActionIgnore,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -542,7 +542,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Phase: v1.PodFailed,
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionFalse,
 },
 },
@@ -558,7 +558,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Action: batch.PodFailurePolicyActionIgnore,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -571,7 +571,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Phase: v1.PodFailed,
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionFalse,
 },
 },
@@ -587,7 +587,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Action: batch.PodFailurePolicyActionFailJob,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -600,7 +600,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Phase: v1.PodFailed,
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -617,7 +617,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Action: batch.PodFailurePolicyActionCount,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -630,7 +630,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Phase: v1.PodFailed,
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },
@@ -677,7 +677,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 Action: batch.PodFailurePolicyActionIgnore,
 OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 },
 },


@@ -129,7 +129,7 @@ func addConditionAndDeletePod(ctx context.Context, c clientset.Interface, name,
 }
 podApply := corev1apply.Pod(pod.Name, pod.Namespace).WithStatus(corev1apply.PodStatus())
 podApply.Status.WithConditions(corev1apply.PodCondition().
-WithType(v1.AlphaNoCompatGuaranteeDisruptionTarget).
+WithType(v1.DisruptionTarget).
 WithStatus(v1.ConditionTrue).
 WithReason("DeletionByTaintManager").
 WithMessage("Taint manager: deleting due to NoExecute taint").


@@ -327,6 +327,7 @@ func TestCreateNode(t *testing.T) {
 description string
 pods []v1.Pod
 node *v1.Node
+expectPatch bool
 expectDelete bool
 }{
 {
@@ -335,6 +336,7 @@ func TestCreateNode(t *testing.T) {
 *testutil.NewPod("pod1", "node1"),
 },
 node: testutil.NewNode("node1"),
+expectPatch: false,
 expectDelete: false,
 },
 {
@@ -343,6 +345,7 @@ func TestCreateNode(t *testing.T) {
 *testutil.NewPod("pod1", "node1"),
 },
 node: addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}),
+expectPatch: true,
 expectDelete: true,
 },
 {
@@ -351,6 +354,7 @@ func TestCreateNode(t *testing.T) {
 *addToleration(testutil.NewPod("pod1", "node1"), 1, -1),
 },
 node: addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}),
+expectPatch: false,
 expectDelete: false,
 },
 }
@@ -366,7 +370,7 @@ func TestCreateNode(t *testing.T) {
 // wait a bit
 time.Sleep(timeForControllerToProgress)
-verifyPodActions(t, item.description, fakeClientset, false, item.expectDelete)
+verifyPodActions(t, item.description, fakeClientset, item.expectPatch, item.expectDelete)
 cancel()
 }
@@ -766,6 +770,7 @@ func TestEventualConsistency(t *testing.T) {
 newPod *v1.Pod
 oldNode *v1.Node
 newNode *v1.Node
+expectPatch bool
 expectDelete bool
 }{
 {
@@ -777,6 +782,7 @@ func TestEventualConsistency(t *testing.T) {
 newPod: testutil.NewPod("pod2", "node1"),
 oldNode: testutil.NewNode("node1"),
 newNode: addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}),
+expectPatch: true,
 expectDelete: true,
 },
 {
@@ -788,6 +794,7 @@ func TestEventualConsistency(t *testing.T) {
 newPod: addToleration(testutil.NewPod("pod2", "node1"), 1, 100),
 oldNode: testutil.NewNode("node1"),
 newNode: addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}),
+expectPatch: true,
 expectDelete: true,
 },
 {
@@ -799,6 +806,7 @@ func TestEventualConsistency(t *testing.T) {
 newPod: testutil.NewPod("pod2", "node1"),
 oldNode: testutil.NewNode("node1"),
 newNode: addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}),
+expectPatch: true,
 expectDelete: true,
 },
 {
@@ -810,6 +818,7 @@ func TestEventualConsistency(t *testing.T) {
 newPod: addToleration(testutil.NewPod("pod2", "node1"), 1, 100),
 oldNode: testutil.NewNode("node1"),
 newNode: addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}),
+expectPatch: true,
 expectDelete: true,
 },
 }
@@ -835,7 +844,7 @@ func TestEventualConsistency(t *testing.T) {
 // TODO(mborsz): Remove this sleep and other sleeps in this file.
 time.Sleep(timeForControllerToProgress)
-verifyPodActions(t, item.description, fakeClientset, false, item.expectDelete)
+verifyPodActions(t, item.description, fakeClientset, item.expectPatch, item.expectDelete)
 fakeClientset.ClearActions()

 // And now the delayed update of 'pod2' comes to the TaintManager. We should delete it as well.


@@ -246,12 +246,11 @@ func (gcc *PodGCController) gcOrphaned(ctx context.Context, pods []*v1.Pod, node
 }
 klog.V(2).InfoS("Found orphaned Pod assigned to the Node, deleting.", "pod", klog.KObj(pod), "node", pod.Spec.NodeName)
 condition := corev1apply.PodCondition().
-WithType(v1.AlphaNoCompatGuaranteeDisruptionTarget).
+WithType(v1.DisruptionTarget).
 WithStatus(v1.ConditionTrue).
 WithReason("DeletionByPodGC").
 WithMessage("PodGC: node no longer exists").
 WithLastTransitionTime(metav1.Now())
 if err := gcc.markFailedAndDeletePodWithCondition(ctx, pod, condition); err != nil {
 utilruntime.HandleError(err)
 } else {


@@ -436,6 +436,7 @@ const (
 // owner: @mimowo
 // kep: https://kep.k8s.io/3329
 // alpha: v1.25
+// beta: v1.26
 //
 // Allow users to specify handling of pod failures based on container exit codes
 // and pod conditions.
@@ -643,6 +644,7 @@ const (
 // owner: @mimowo
 // kep: https://kep.k8s.io/3329
 // alpha: v1.25
+// beta: v1.26
 //
 // Enables support for appending a dedicated pod condition indicating that
 // the pod is being deleted due to a disruption.
@@ -993,7 +995,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 IPTablesOwnershipCleanup: {Default: false, PreRelease: featuregate.Alpha},
-JobPodFailurePolicy: {Default: false, PreRelease: featuregate.Alpha},
+JobPodFailurePolicy: {Default: true, PreRelease: featuregate.Beta},
 JobMutableNodeSchedulingDirectives: {Default: true, PreRelease: featuregate.Beta},
@@ -1049,7 +1051,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 PodDeletionCost: {Default: true, PreRelease: featuregate.Beta},
-PodDisruptionConditions: {Default: false, PreRelease: featuregate.Alpha},
+PodDisruptionConditions: {Default: true, PreRelease: featuregate.Beta},
 PodHasNetworkCondition: {Default: false, PreRelease: featuregate.Alpha},
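
Because both gates now default to true, code paths and tests that need the pre-beta behavior must disable them explicitly rather than rely on the old defaults. A minimal, hypothetical test sketch (not part of this PR) using the same featuregatetesting helper that appears later in this diff:

package example

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

// TestLegacyBehaviorWithGatesOff pins the now-beta gates to false for the
// duration of the test instead of relying on the new defaults; the returned
// functions restore the previous values when the test ends.
func TestLegacyBehaviorWithGatesOff(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodFailurePolicy, false)()
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, false)()

	// ... exercise the gate-off code path here ...
}

On a running cluster the equivalent opt-out is the component --feature-gates flag, e.g. --feature-gates=JobPodFailurePolicy=false,PodDisruptionConditions=false.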


@@ -392,9 +392,9 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act
 var condition *v1.PodCondition
 if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) {
 condition = &v1.PodCondition{
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
-Reason: v1.AlphaNoCompatGuaranteePodReasonTerminationByKubelet,
+Reason: v1.PodReasonTerminationByKubelet,
 Message: message,
 }
 }


@@ -1517,7 +1517,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
 // updated during the eviciton (due to either node resource pressure or
 // node graceful shutdown). We do not re-generate the conditions based
 // on the container statuses as they are added based on one-time events.
-cType := v1.AlphaNoCompatGuaranteeDisruptionTarget
+cType := v1.DisruptionTarget
 if _, condition := podutil.GetPodConditionFromList(oldPodStatus.Conditions, cType); condition != nil {
 s.Conditions = utilpod.ReplaceOrAppendPodCondition(s.Conditions, condition)
 }


@@ -2522,7 +2522,7 @@ func Test_generateAPIPodStatus(t *testing.T) {
 runningState("containerB"),
 },
 Conditions: []v1.PodCondition{{
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 LastTransitionTime: normalized_now,
 }},
@@ -2536,7 +2536,7 @@ func Test_generateAPIPodStatus(t *testing.T) {
 runningState("containerB"),
 },
 Conditions: []v1.PodCondition{{
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 LastTransitionTime: normalized_now,
 }},
@@ -2558,7 +2558,7 @@ func Test_generateAPIPodStatus(t *testing.T) {
 },
 },
 expectedPodDisruptionCondition: v1.PodCondition{
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 LastTransitionTime: normalized_now,
 },


@@ -383,9 +383,9 @@ func (m *managerImpl) processShutdownEvent() error {
 status.Reason = nodeShutdownReason
 if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) {
 podutil.UpdatePodCondition(status, &v1.PodCondition{
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
-Reason: v1.AlphaNoCompatGuaranteePodReasonTerminationByKubelet,
+Reason: v1.PodReasonTerminationByKubelet,
 Message: nodeShutdownMessage,
 })
 }


@@ -170,7 +170,7 @@ func TestManager(t *testing.T) {
 Reason: "Terminated",
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "TerminationByKubelet",
 Message: "Pod was terminated in response to imminent node shutdown.",
@@ -183,7 +183,7 @@ func TestManager(t *testing.T) {
 Reason: "Terminated",
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "TerminationByKubelet",
 Message: "Pod was terminated in response to imminent node shutdown.",
@@ -196,7 +196,7 @@ func TestManager(t *testing.T) {
 Reason: "Terminated",
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "TerminationByKubelet",
 Message: "Pod was terminated in response to imminent node shutdown.",


@@ -506,7 +506,7 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp
 if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) {
 // Set DisruptionTarget.LastTransitionTime.
-updateLastTransitionTime(&status, &oldStatus, v1.AlphaNoCompatGuaranteeDisruptionTarget)
+updateLastTransitionTime(&status, &oldStatus, v1.DisruptionTarget)
 }

 // ensure that the start time does not change across updates.
@@ -895,7 +895,7 @@ func mergePodStatus(oldPodStatus, newPodStatus v1.PodStatus, couldHaveRunningCon
 podConditions = append(podConditions, c)
 } else if kubetypes.PodConditionSharedByKubelet(c.Type) {
 // we replace or append all the "shared by kubelet" conditions
-if c.Type == v1.AlphaNoCompatGuaranteeDisruptionTarget {
+if c.Type == v1.DisruptionTarget {
 // guard the update of the DisruptionTarget condition with a check to ensure
 // it will only be sent once all containers have terminated and the phase
 // is terminal. This avoids sending an unnecessary patch request to add


@@ -1426,7 +1426,7 @@ func TestMergePodStatus(t *testing.T) {
 func(input v1.PodStatus) v1.PodStatus {
 input.Phase = v1.PodFailed
 input.Conditions = append(input.Conditions, v1.PodCondition{
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "TerminationByKubelet",
 })
@@ -1436,7 +1436,7 @@ func TestMergePodStatus(t *testing.T) {
 Phase: v1.PodFailed,
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "TerminationByKubelet",
 },
@@ -1466,7 +1466,7 @@ func TestMergePodStatus(t *testing.T) {
 func(input v1.PodStatus) v1.PodStatus {
 input.Phase = v1.PodFailed
 input.Conditions = append(input.Conditions, v1.PodCondition{
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "TerminationByKubelet",
 })
@@ -1493,7 +1493,7 @@ func TestMergePodStatus(t *testing.T) {
 false,
 func(input v1.PodStatus) v1.PodStatus {
 input.Conditions = append(input.Conditions, v1.PodCondition{
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "TerminationByKubelet",
 })
@@ -1514,7 +1514,7 @@ func TestMergePodStatus(t *testing.T) {
 Status: v1.ConditionTrue,
 },
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "TerminationByKubelet",
 },
@@ -1528,7 +1528,7 @@ func TestMergePodStatus(t *testing.T) {
 false,
 func(input v1.PodStatus) v1.PodStatus {
 input.Conditions = append(input.Conditions, v1.PodCondition{
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "TerminationByKubelet",
 })
@@ -1549,7 +1549,7 @@ func TestMergePodStatus(t *testing.T) {
 Status: v1.ConditionTrue,
 },
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "TerminationByKubelet",
 },
@@ -1563,7 +1563,7 @@ func TestMergePodStatus(t *testing.T) {
 false,
 func(input v1.PodStatus) v1.PodStatus {
 input.Conditions = append(input.Conditions, v1.PodCondition{
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "EvictedByEvictionAPI",
 })
@@ -1572,7 +1572,7 @@ func TestMergePodStatus(t *testing.T) {
 func(input v1.PodStatus) v1.PodStatus {
 input.Phase = v1.PodFailed
 input.Conditions = append(input.Conditions, v1.PodCondition{
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "TerminationByKubelet",
 })
@@ -1596,7 +1596,7 @@ func TestMergePodStatus(t *testing.T) {
 Status: v1.ConditionTrue,
 },
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "TerminationByKubelet",
 },
@@ -1610,7 +1610,7 @@ func TestMergePodStatus(t *testing.T) {
 false,
 func(input v1.PodStatus) v1.PodStatus {
 input.Conditions = append(input.Conditions, v1.PodCondition{
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "EvictedByEvictionAPI",
 })
@@ -1618,7 +1618,7 @@ func TestMergePodStatus(t *testing.T) {
 },
 func(input v1.PodStatus) v1.PodStatus {
 input.Conditions = append(input.Conditions, v1.PodCondition{
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "TerminationByKubelet",
 })
@@ -1628,7 +1628,7 @@ func TestMergePodStatus(t *testing.T) {
 Phase: v1.PodRunning,
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "EvictedByEvictionAPI",
 },
@@ -1650,7 +1650,7 @@ func TestMergePodStatus(t *testing.T) {
 true,
 func(input v1.PodStatus) v1.PodStatus {
 input.Conditions = append(input.Conditions, v1.PodCondition{
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "EvictedByEvictionAPI",
 })
@@ -1659,7 +1659,7 @@ func TestMergePodStatus(t *testing.T) {
 func(input v1.PodStatus) v1.PodStatus {
 input.Phase = v1.PodFailed
 input.Conditions = append(input.Conditions, v1.PodCondition{
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "TerminationByKubelet",
 })
@@ -1669,7 +1669,7 @@ func TestMergePodStatus(t *testing.T) {
 Phase: v1.PodRunning,
 Conditions: []v1.PodCondition{
 {
-Type: v1.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: v1.DisruptionTarget,
 Status: v1.ConditionTrue,
 Reason: "EvictedByEvictionAPI",
 },


@@ -48,7 +48,7 @@ func PodConditionByKubelet(conditionType v1.PodConditionType) bool {
 // PodConditionSharedByKubelet returns if the pod condition type is shared by kubelet
 func PodConditionSharedByKubelet(conditionType v1.PodConditionType) bool {
 if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) {
-if conditionType == v1.AlphaNoCompatGuaranteeDisruptionTarget {
+if conditionType == v1.DisruptionTarget {
 return true
 }
 }


@@ -59,7 +59,7 @@ func TestPodConditionSharedByKubelet(t *testing.T) {
 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, true)()

 trueCases := []v1.PodConditionType{
-v1.AlphaNoCompatGuaranteeDisruptionTarget,
+v1.DisruptionTarget,
 }

 for _, tc := range trueCases {


@@ -302,7 +302,7 @@ func addConditionAndDeletePod(r *EvictionREST, ctx context.Context, name string,
 conditionAppender := func(_ context.Context, newObj, _ runtime.Object) (runtime.Object, error) {
 podObj := newObj.(*api.Pod)
 podutil.UpdatePodCondition(&podObj.Status, &api.PodCondition{
-Type: api.AlphaNoCompatGuaranteeDisruptionTarget,
+Type: api.DisruptionTarget,
 Status: api.ConditionTrue,
 Reason: "EvictionByEvictionAPI",
 Message: "Eviction API: evicting",


@@ -328,18 +328,18 @@ func TestPostFilter(t *testing.T) {
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-cs := clientsetfake.NewSimpleClientset()
+// index the potential victim pods in the fake client so that the victims deletion logic does not fail
+podItems := []v1.Pod{}
+for _, pod := range tt.pods {
+podItems = append(podItems, *pod)
+}
+cs := clientsetfake.NewSimpleClientset(&v1.PodList{Items: podItems})
 informerFactory := informers.NewSharedInformerFactory(cs, 0)
 podInformer := informerFactory.Core().V1().Pods().Informer()
 podInformer.GetStore().Add(tt.pod)
 for i := range tt.pods {
 podInformer.GetStore().Add(tt.pods[i])
 }
-// As we use a bare clientset above, it's needed to add a reactor here
-// to not fail Victims deletion logic.
-cs.PrependReactor("delete", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
-return true, nil, nil
-})
 // Register NodeResourceFit as the Filter & PreFilter plugin.
 registeredPlugins := []st.RegisterPluginFunc{
 st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
@@ -1642,6 +1642,11 @@ func TestPreempt(t *testing.T) {
 }
 deletedPodNames := make(sets.String)
+patchedPodNames := make(sets.String)
+client.PrependReactor("patch", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
+patchedPodNames.Insert(action.(clienttesting.PatchAction).GetName())
+return true, nil, nil
+})
 client.PrependReactor("delete", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
 deletedPodNames.Insert(action.(clienttesting.DeleteAction).GetName())
 return true, nil, nil
@@ -1729,6 +1734,9 @@ func TestPreempt(t *testing.T) {
 if len(deletedPodNames) != len(test.expectedPods) {
 t.Errorf("expected %v pods, got %v.", len(test.expectedPods), len(deletedPodNames))
 }
+if diff := cmp.Diff(patchedPodNames.List(), deletedPodNames.List()); diff != "" {
+t.Errorf("unexpected difference in the set of patched and deleted pods: %s", diff)
+}
 for victimName := range deletedPodNames {
 found := false
 for _, expPod := range test.expectedPods {


@@ -359,7 +359,7 @@ func (ev *Evaluator) prepareCandidate(ctx context.Context, c Candidate, pod *v1.
 if feature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) {
 victimPodApply := corev1apply.Pod(victim.Name, victim.Namespace).WithStatus(corev1apply.PodStatus())
 victimPodApply.Status.WithConditions(corev1apply.PodCondition().
-WithType(v1.AlphaNoCompatGuaranteeDisruptionTarget).
+WithType(v1.DisruptionTarget).
 WithStatus(v1.ConditionTrue).
 WithReason("PreemptionByKubeScheduler").
 WithMessage(fmt.Sprintf("Kube-scheduler: preempting to accommodate a higher priority pod: %s", klog.KObj(pod))).


@@ -254,7 +254,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
 rbacv1helpers.NewRule("get", "list", "update", "delete", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
 rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
 // used for pod deletion
-rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
+rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
 rbacv1helpers.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
 rbacv1helpers.NewRule("get", "list", "create", "update").Groups(networkingGroup).Resources("clustercidrs").RuleOrDie(),
 eventsRule(),
@@ -262,7 +262,6 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
 }
 if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) {
 role.Rules = append(role.Rules, rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("pods").RuleOrDie())
-role.Rules = append(role.Rules, rbacv1helpers.NewRule("patch").Groups(legacyGroup).Resources("pods/status").RuleOrDie())
 }
 return role
 }())


@@ -430,6 +430,12 @@ items:
     - create
     - patch
     - update
+  - apiGroups:
+    - ""
+    resources:
+    - pods/status
+    verbs:
+    - patch
 - apiVersion: rbac.authorization.k8s.io/v1
   kind: ClusterRole
   metadata:
@@ -899,6 +905,7 @@ items:
     resources:
     - pods/status
     verbs:
+    - patch
     - update
   - apiGroups:
     - ""
@@ -925,6 +932,12 @@ items:
     - create
     - patch
     - update
+  - apiGroups:
+    - ""
+    resources:
+    - pods
+    verbs:
+    - get
 - apiVersion: rbac.authorization.k8s.io/v1
   kind: ClusterRole
   metadata:
@@ -1055,6 +1068,12 @@ items:
     verbs:
     - get
    - list
+  - apiGroups:
+    - ""
+    resources:
+    - pods/status
+    verbs:
+    - patch
 - apiVersion: rbac.authorization.k8s.io/v1
   kind: ClusterRole
   metadata:


@ -425,8 +425,7 @@ const (
// JobFailed means the job has failed its execution. // JobFailed means the job has failed its execution.
JobFailed JobConditionType = "Failed" JobFailed JobConditionType = "Failed"
// FailureTarget means the job is about to fail its execution. // FailureTarget means the job is about to fail its execution.
// The constant is to be renamed once the name is accepted within the KEP-3329. JobFailureTarget JobConditionType = "FailureTarget"
AlphaNoCompatGuaranteeJobFailureTarget JobConditionType = "FailureTarget"
) )
// JobCondition describes current state of a job. // JobCondition describes current state of a job.
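
As a quick illustration of how consumers read these condition types (an assumption about typical usage, not code from this change), a caller can scan the Job's status conditions for FailureTarget or Failed:

import (
	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
)

// jobIsFailingOrFailed is an illustrative helper: it reports whether the Job
// carries a true FailureTarget or Failed condition.
func jobIsFailingOrFailed(job *batchv1.Job) bool {
	for _, c := range job.Status.Conditions {
		if (c.Type == batchv1.JobFailureTarget || c.Type == batchv1.JobFailed) && c.Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}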

View File

@ -2654,10 +2654,9 @@ const (
PodReady PodConditionType = "Ready" PodReady PodConditionType = "Ready"
// PodScheduled represents status of the scheduling process for this pod. // PodScheduled represents status of the scheduling process for this pod.
PodScheduled PodConditionType = "PodScheduled" PodScheduled PodConditionType = "PodScheduled"
// AlphaNoCompatGuaranteeDisruptionTarget indicates the pod is about to be terminated due to a // DisruptionTarget indicates the pod is about to be terminated due to a
// disruption (such as preemption, eviction API or garbage-collection). // disruption (such as preemption, eviction API or garbage-collection).
// The constant is to be renamed once the name is accepted within the KEP-3329. DisruptionTarget PodConditionType = "DisruptionTarget"
AlphaNoCompatGuaranteeDisruptionTarget PodConditionType = "DisruptionTarget"
) )
// These are reasons for a pod's transition to a condition. // These are reasons for a pod's transition to a condition.
@ -2676,7 +2675,7 @@ const (
// TerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination // TerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination
// is initiated by kubelet // is initiated by kubelet
AlphaNoCompatGuaranteePodReasonTerminationByKubelet = "TerminationByKubelet" PodReasonTerminationByKubelet = "TerminationByKubelet"
) )
// PodCondition contains details for the current condition of this pod. // PodCondition contains details for the current condition of this pod.
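
Later hunks in this commit check the renamed pod condition through podutil.GetPodCondition; a minimal illustrative helper following that pattern (imports assumed as shown, not part of this change) would be:

import (
	v1 "k8s.io/api/core/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

// hasDisruptionTarget is an illustrative helper: it reports whether the pod
// already carries a true DisruptionTarget condition.
func hasDisruptionTarget(pod *v1.Pod) bool {
	_, cond := podutil.GetPodCondition(&pod.Status, v1.DisruptionTarget)
	return cond != nil && cond.Status == v1.ConditionTrue
}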

View File

@ -100,6 +100,75 @@ var _ = SIGDescribe("Job", func() {
framework.ExpectEqual(successes, completions, "expected %d successful job pods, but got %d", completions, successes) framework.ExpectEqual(successes, completions, "expected %d successful job pods, but got %d", completions, successes)
}) })
ginkgo.It("should allow to use the pod failure policy on exit code to fail the job early", func() {
// We fail the Job's pod only once to ensure the backoffLimit is not
// reached and thus the job is failed due to the pod failure policy
// with FailJob action.
// In order to ensure a Job's pod fails once before succeeding we force
// the Job's Pods to be scheduled to a single Node and use a hostPath
// volume to persist data across new Pods.
ginkgo.By("Looking for a node to schedule job pod")
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
framework.ExpectNoError(err)
ginkgo.By("Creating a job")
job := e2ejob.NewTestJobOnNode("failOnce", "pod-failure-failjob", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit, node.Name)
job.Spec.PodFailurePolicy = &batchv1.PodFailurePolicy{
Rules: []batchv1.PodFailurePolicyRule{
{
Action: batchv1.PodFailurePolicyActionFailJob,
OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{
Operator: batchv1.PodFailurePolicyOnExitCodesOpIn,
Values: []int32{1},
},
},
},
}
job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job fails")
err = e2ejob.WaitForJobFailed(f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to ensure job failure in namespace: %s", f.Namespace.Name)
})
ginkgo.It("should allow to use the pod failure policy to not count the failure towards the backoffLimit", func() {
// We set the backoffLimit to 0 so that any pod failure would trigger
// job failure if not for the pod failure policy to ignore the failed
// pods from counting them towards the backoffLimit. Also, we fail the
// pod only once so that the job eventually succeeds.
// In order to ensure a Job's pod fails once before succeeding we force
// the Job's Pods to be scheduled to a single Node and use a hostPath
// volume to persist data across new Pods.
backoffLimit := int32(0)
ginkgo.By("Looking for a node to schedule job pod")
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
framework.ExpectNoError(err)
ginkgo.By("Creating a job")
job := e2ejob.NewTestJobOnNode("failOnce", "pod-failure-ignore", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit, node.Name)
job.Spec.PodFailurePolicy = &batchv1.PodFailurePolicy{
Rules: []batchv1.PodFailurePolicyRule{
{
Action: batchv1.PodFailurePolicyActionIgnore,
OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{
Operator: batchv1.PodFailurePolicyOnExitCodesOpIn,
Values: []int32{1},
},
},
},
}
job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job reaches completions")
err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
})
ginkgo.It("should not create pods when created in suspend state", func() { ginkgo.It("should not create pods when created in suspend state", func() {
ginkgo.By("Creating a job with suspend=true") ginkgo.By("Creating a job with suspend=true")
job := e2ejob.NewTestJob("succeed", "suspend-true-to-false", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) job := e2ejob.NewTestJob("succeed", "suspend-true-to-false", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)

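For comparison, the same podFailurePolicy mechanism exercised above can also match on pod conditions instead of exit codes, for example to keep disruption-driven failures from counting against backoffLimit. The snippet below is an illustrative sketch (the job variable is assumed) mirroring rules used in the integration tests later in this commit:

// Illustrative only: ignore pod failures caused by disruptions, so they do
// not count against the Job's backoffLimit.
job.Spec.PodFailurePolicy = &batchv1.PodFailurePolicy{
	Rules: []batchv1.PodFailurePolicyRule{
		{
			Action: batchv1.PodFailurePolicyActionIgnore,
			OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{
				{
					Type:   v1.DisruptionTarget,
					Status: v1.ConditionTrue,
				},
			},
		},
	},
}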
View File

@ -58,6 +58,27 @@ func WaitForJobComplete(c clientset.Interface, ns, jobName string, completions i
}) })
} }
// WaitForJobFailed uses c to wait for the Job jobName in namespace ns to fail
func WaitForJobFailed(c clientset.Interface, ns, jobName string) error {
return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
return isJobFailed(curr), nil
})
}
func isJobFailed(j *batchv1.Job) bool {
for _, c := range j.Status.Conditions {
if c.Type == batchv1.JobFailed && c.Status == v1.ConditionTrue {
return true
}
}
return false
}
// WaitForJobFinish uses c to wait for the Job jobName in namespace ns to finish (either Failed or Complete). // WaitForJobFinish uses c to wait for the Job jobName in namespace ns to finish (either Failed or Complete).
func WaitForJobFinish(c clientset.Interface, ns, jobName string) error { func WaitForJobFinish(c clientset.Interface, ns, jobName string) error {
return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) { return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) {

View File

@ -40,6 +40,7 @@ import (
"github.com/onsi/gomega" "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/util/slice"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
) )
@ -303,3 +304,11 @@ func (c *PodClient) PodIsReady(name string) bool {
framework.ExpectNoError(err) framework.ExpectNoError(err)
return podutils.IsPodReady(pod) return podutils.IsPodReady(pod)
} }
// RemoveFinalizer removes the named finalizer from the pod
func (c *PodClient) RemoveFinalizer(podName string, finalizerName string) {
framework.Logf("Removing pod's %q finalizer: %q", podName, finalizerName)
c.Update(podName, func(pod *v1.Pod) {
pod.ObjectMeta.Finalizers = slice.RemoveString(pod.ObjectMeta.Finalizers, finalizerName, nil)
})
}
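
The disruption tests below pair this helper with a test finalizer so the terminal pod object survives long enough for its conditions to be inspected; condensed from those tests, the pattern is:

// Condensed usage pattern (taken from the taint-eviction test below): the
// finalizer keeps the evicted pod from being garbage-collected before its
// DisruptionTarget condition can be verified.
pod.Finalizers = append(pod.Finalizers, testFinalizer)
// ... create the pod and trigger the disruption ...
defer e2epod.NewPodClient(f).RemoveFinalizer(pod.Name, testFinalizer)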

View File

@ -623,6 +623,15 @@ func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration {
return podLogTimeout return podLogTimeout
} }
// VerifyPodHasConditionWithType verifies the pod has the expected condition by type
func VerifyPodHasConditionWithType(f *framework.Framework, pod *v1.Pod, cType v1.PodConditionType) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get the recent pod object for name: %q", pod.Name)
if condition := FindPodConditionByType(&pod.Status, cType); condition == nil {
framework.Failf("pod %q should have the condition: %q, pod status: %v", pod.Name, cType, pod.Status)
}
}
func getNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) { func getNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) {
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil || len(nodes.Items) == 0 { if err != nil || len(nodes.Items) == 0 {

View File

@ -454,6 +454,16 @@ func WaitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, nam
}) })
} }
// WaitForPodTerminatingInNamespaceTimeout returns nil once the pod is terminating, or an error if it is still not terminating after the timeout.
func WaitForPodTerminatingInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return WaitForPodCondition(c, namespace, podName, "is terminating", timeout, func(pod *v1.Pod) (bool, error) {
if pod.DeletionTimestamp != nil {
return true, nil
}
return false, nil
})
}
// WaitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long. // WaitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func WaitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error { func WaitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return WaitForPodCondition(c, namespace, podName, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, func(pod *v1.Pod) (bool, error) { return WaitForPodCondition(c, namespace, podName, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, func(pod *v1.Pod) (bool, error) {

View File

@ -43,6 +43,10 @@ var (
pauseImage = imageutils.GetE2EImage(imageutils.Pause) pauseImage = imageutils.GetE2EImage(imageutils.Pause)
) )
const (
testFinalizer = "example.com/test-finalizer"
)
func getTestTaint() v1.Taint { func getTestTaint() v1.Taint {
now := metav1.Now() now := metav1.Now()
return v1.Taint{ return v1.Taint{
@ -337,6 +341,37 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
framework.Failf("Pod was evicted despite toleration") framework.Failf("Pod was evicted despite toleration")
} }
}) })
// 1. Run a pod with finalizer
// 2. Taint the node running this pod with a no-execute taint
// 3. See if pod will get evicted and has the pod disruption condition
// 4. Remove the finalizer so that the pod can be deleted by GC
ginkgo.It("pods evicted from tainted nodes have pod disruption condition", func() {
podName := "taint-eviction-pod-disruption"
pod := createPodForTaintsTest(false, 0, podName, podName, ns)
pod.Finalizers = append(pod.Finalizers, testFinalizer)
ginkgo.By("Starting pod...")
nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute)
framework.ExpectNoError(err)
framework.Logf("Pod is running on %v. Tainting Node", nodeName)
defer e2epod.NewPodClient(f).RemoveFinalizer(pod.Name, testFinalizer)
ginkgo.By("Trying to apply a taint on the Node")
testTaint := getTestTaint()
e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint)
defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint)
ginkgo.By("Waiting for Pod to be terminating")
timeout := time.Duration(kubeletPodDeletionDelaySeconds+3*additionalWaitPerDeleteSeconds) * time.Second
err = e2epod.WaitForPodTerminatingInNamespaceTimeout(f.ClientSet, pod.Name, pod.Namespace, timeout)
framework.ExpectNoError(err)
ginkgo.By("Verifying the pod has the pod disruption condition")
e2epod.VerifyPodHasConditionWithType(f, pod, v1.DisruptionTarget)
})
}) })
var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {

View File

@ -62,6 +62,7 @@ var workerNodes = sets.String{}
type pausePodConfig struct { type pausePodConfig struct {
Name string Name string
Namespace string Namespace string
Finalizers []string
Affinity *v1.Affinity Affinity *v1.Affinity
Annotations, Labels, NodeSelector map[string]string Annotations, Labels, NodeSelector map[string]string
Resources *v1.ResourceRequirements Resources *v1.ResourceRequirements
@ -897,6 +898,7 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
Labels: map[string]string{}, Labels: map[string]string{},
Annotations: map[string]string{}, Annotations: map[string]string{},
OwnerReferences: conf.OwnerReferences, OwnerReferences: conf.OwnerReferences,
Finalizers: conf.Finalizers,
}, },
Spec: v1.PodSpec{ Spec: v1.PodSpec{
SecurityContext: e2epod.GetRestrictedPodSecurityContext(), SecurityContext: e2epod.GetRestrictedPodSecurityContext(),

View File

@ -60,6 +60,10 @@ type priorityPair struct {
var testExtendedResource = v1.ResourceName("scheduling.k8s.io/foo") var testExtendedResource = v1.ResourceName("scheduling.k8s.io/foo")
const (
testFinalizer = "example.com/test-finalizer"
)
var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
var cs clientset.Interface var cs clientset.Interface
var nodeList *v1.NodeList var nodeList *v1.NodeList
@ -313,6 +317,75 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
} }
}) })
// 1. Run a low priority pod with finalizer which consumes 1/1 of node resources
// 2. Schedule a higher priority pod which also consumes 1/1 of node resources
// 3. See if the pod with lower priority is preempted and has the pod disruption condition
// 4. Remove the finalizer so that the pod can be deleted by GC
ginkgo.It("validates pod disruption condition is added to the preempted pod", func() {
podRes := v1.ResourceList{testExtendedResource: resource.MustParse("1")}
ginkgo.By("Select a node to run the lower and higher priority pods")
framework.ExpectNotEqual(len(nodeList.Items), 0, "We need at least one node for the test to run")
node := nodeList.Items[0]
nodeCopy := node.DeepCopy()
nodeCopy.Status.Capacity[testExtendedResource] = resource.MustParse("1")
err := patchNode(cs, &node, nodeCopy)
framework.ExpectNoError(err)
// prepare node affinity to make sure both the lower and higher priority pods are scheduled on the same node
testNodeAffinity := v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{
{Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{node.Name}},
},
},
},
},
},
}
ginkgo.By("Create a low priority pod that consumes 1/1 of node resources")
victimPod := createPausePod(f, pausePodConfig{
Name: "victim-pod",
PriorityClassName: lowPriorityClassName,
Resources: &v1.ResourceRequirements{
Requests: podRes,
Limits: podRes,
},
Finalizers: []string{testFinalizer},
Affinity: &testNodeAffinity,
})
framework.Logf("Created pod: %v", victimPod.Name)
ginkgo.By("Wait for the victim pod to be scheduled")
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, victimPod))
// Remove the finalizer so that the victim pod can be GCed
defer e2epod.NewPodClient(f).RemoveFinalizer(victimPod.Name, testFinalizer)
ginkgo.By("Create a high priority pod to trigger preemption of the lower priority pod")
preemptorPod := createPausePod(f, pausePodConfig{
Name: "preemptor-pod",
PriorityClassName: highPriorityClassName,
Resources: &v1.ResourceRequirements{
Requests: podRes,
Limits: podRes,
},
Affinity: &testNodeAffinity,
})
framework.Logf("Created pod: %v", preemptorPod.Name)
ginkgo.By("Waiting for the victim pod to be terminating")
err = e2epod.WaitForPodTerminatingInNamespaceTimeout(f.ClientSet, victimPod.Name, victimPod.Namespace, framework.PodDeleteTimeout)
framework.ExpectNoError(err)
ginkgo.By("Verifying the pod has the pod disruption condition")
e2epod.VerifyPodHasConditionWithType(f, victimPod, v1.DisruptionTarget)
})
ginkgo.Context("PodTopologySpread Preemption", func() { ginkgo.Context("PodTopologySpread Preemption", func() {
var nodeNames []string var nodeNames []string
var nodes []*v1.Node var nodes []*v1.Node

View File

@ -513,7 +513,7 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Disruptive][No
string(features.PodDisruptionConditions): true, string(features.PodDisruptionConditions): true,
} }
}) })
disruptionTarget := v1.AlphaNoCompatGuaranteeDisruptionTarget disruptionTarget := v1.DisruptionTarget
specs := []podEvictSpec{ specs := []podEvictSpec{
{ {
evictionPriority: 1, evictionPriority: 1,

View File

@ -148,9 +148,9 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
framework.Logf("Expecting pod to be shutdown, but it's not currently: Pod: %q, Pod Status %+v", pod.Name, pod.Status) framework.Logf("Expecting pod to be shutdown, but it's not currently: Pod: %q, Pod Status %+v", pod.Name, pod.Status)
return fmt.Errorf("pod should be shutdown, phase: %s", pod.Status.Phase) return fmt.Errorf("pod should be shutdown, phase: %s", pod.Status.Phase)
} }
podDisruptionCondition := e2epod.FindPodConditionByType(&pod.Status, v1.AlphaNoCompatGuaranteeDisruptionTarget) podDisruptionCondition := e2epod.FindPodConditionByType(&pod.Status, v1.DisruptionTarget)
if podDisruptionCondition == nil { if podDisruptionCondition == nil {
framework.Failf("pod %q should have the condition: %q, pod status: %v", pod.Name, v1.AlphaNoCompatGuaranteeDisruptionTarget, pod.Status) framework.Failf("pod %q should have the condition: %q, pod status: %v", pod.Name, v1.DisruptionTarget, pod.Status)
} }
} }
return nil return nil

View File

@ -676,7 +676,7 @@ func TestStalePodDisruption(t *testing.T) {
podPhase: v1.PodRunning, podPhase: v1.PodRunning,
wantConditions: []v1.PodCondition{ wantConditions: []v1.PodCondition{
{ {
Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, Type: v1.DisruptionTarget,
Status: v1.ConditionFalse, Status: v1.ConditionFalse,
}, },
}, },
@ -686,19 +686,19 @@ func TestStalePodDisruption(t *testing.T) {
deletePod: true, deletePod: true,
wantConditions: []v1.PodCondition{ wantConditions: []v1.PodCondition{
{ {
Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, Type: v1.DisruptionTarget,
Status: v1.ConditionTrue, Status: v1.ConditionTrue,
}, },
}, },
}, },
"disruption-condition-by-kubelet": { "disruption-condition-by-kubelet": {
podPhase: v1.PodFailed, podPhase: v1.PodFailed,
reason: v1.AlphaNoCompatGuaranteePodReasonTerminationByKubelet, reason: v1.PodReasonTerminationByKubelet,
wantConditions: []v1.PodCondition{ wantConditions: []v1.PodCondition{
{ {
Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, Type: v1.DisruptionTarget,
Status: v1.ConditionTrue, Status: v1.ConditionTrue,
Reason: v1.AlphaNoCompatGuaranteePodReasonTerminationByKubelet, Reason: v1.PodReasonTerminationByKubelet,
}, },
}, },
}, },
@ -706,7 +706,7 @@ func TestStalePodDisruption(t *testing.T) {
podPhase: v1.PodFailed, podPhase: v1.PodFailed,
wantConditions: []v1.PodCondition{ wantConditions: []v1.PodCondition{
{ {
Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, Type: v1.DisruptionTarget,
Status: v1.ConditionTrue, Status: v1.ConditionTrue,
}, },
}, },
@ -728,7 +728,7 @@ func TestStalePodDisruption(t *testing.T) {
pod.Status.Phase = tc.podPhase pod.Status.Phase = tc.podPhase
pod.Status.Conditions = append(pod.Status.Conditions, v1.PodCondition{ pod.Status.Conditions = append(pod.Status.Conditions, v1.PodCondition{
Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, Type: v1.DisruptionTarget,
Status: v1.ConditionTrue, Status: v1.ConditionTrue,
Reason: tc.reason, Reason: tc.reason,
LastTransitionTime: metav1.Now(), LastTransitionTime: metav1.Now(),

View File

@ -409,11 +409,11 @@ func TestEvictionWithFinalizers(t *testing.T) {
if e != nil { if e != nil {
t.Fatalf("Failed to get the pod %q with error: %q", klog.KObj(pod), e) t.Fatalf("Failed to get the pod %q with error: %q", klog.KObj(pod), e)
} }
_, cond := podutil.GetPodCondition(&updatedPod.Status, v1.PodConditionType(v1.AlphaNoCompatGuaranteeDisruptionTarget)) _, cond := podutil.GetPodCondition(&updatedPod.Status, v1.PodConditionType(v1.DisruptionTarget))
if tc.enablePodDisruptionConditions == true && cond == nil { if tc.enablePodDisruptionConditions == true && cond == nil {
t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(updatedPod), v1.AlphaNoCompatGuaranteeDisruptionTarget) t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(updatedPod), v1.DisruptionTarget)
} else if tc.enablePodDisruptionConditions == false && cond != nil { } else if tc.enablePodDisruptionConditions == false && cond != nil {
t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(updatedPod), v1.AlphaNoCompatGuaranteeDisruptionTarget) t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(updatedPod), v1.DisruptionTarget)
} }
}) })
} }

View File

@ -481,7 +481,7 @@ func TestJobPodFailurePolicy(t *testing.T) {
Action: batchv1.PodFailurePolicyActionIgnore, Action: batchv1.PodFailurePolicyActionIgnore,
OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{ OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{
{ {
Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, Type: v1.DisruptionTarget,
}, },
}, },
}, },
@ -533,7 +533,7 @@ func TestJobPodFailurePolicy(t *testing.T) {
Phase: v1.PodFailed, Phase: v1.PodFailed,
Conditions: []v1.PodCondition{ Conditions: []v1.PodCondition{
{ {
Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, Type: v1.DisruptionTarget,
Status: v1.ConditionTrue, Status: v1.ConditionTrue,
}, },
}, },

View File

@ -168,11 +168,11 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Test Failed: error: %q, while getting updated pod", err) t.Fatalf("Test Failed: error: %q, while getting updated pod", err)
} }
_, cond := podutil.GetPodCondition(&testPod.Status, v1.AlphaNoCompatGuaranteeDisruptionTarget) _, cond := podutil.GetPodCondition(&testPod.Status, v1.DisruptionTarget)
if test.enablePodDisruptionConditions == true && cond == nil { if test.enablePodDisruptionConditions == true && cond == nil {
t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(testPod), v1.AlphaNoCompatGuaranteeDisruptionTarget) t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(testPod), v1.DisruptionTarget)
} else if test.enablePodDisruptionConditions == false && cond != nil { } else if test.enablePodDisruptionConditions == false && cond != nil {
t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(testPod), v1.AlphaNoCompatGuaranteeDisruptionTarget) t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(testPod), v1.DisruptionTarget)
} }
}) })
} }

View File

@ -110,11 +110,11 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Error: '%v' while updating pod info: '%v'", err, klog.KObj(pod)) t.Fatalf("Error: '%v' while updating pod info: '%v'", err, klog.KObj(pod))
} }
_, cond := podutil.GetPodCondition(&pod.Status, v1.AlphaNoCompatGuaranteeDisruptionTarget) _, cond := podutil.GetPodCondition(&pod.Status, v1.DisruptionTarget)
if test.enablePodDisruptionConditions == true && cond == nil { if test.enablePodDisruptionConditions == true && cond == nil {
t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(pod), v1.AlphaNoCompatGuaranteeDisruptionTarget) t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(pod), v1.DisruptionTarget)
} else if test.enablePodDisruptionConditions == false && cond != nil { } else if test.enablePodDisruptionConditions == false && cond != nil {
t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.AlphaNoCompatGuaranteeDisruptionTarget) t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.DisruptionTarget)
} }
if pod.Status.Phase != test.wantPhase { if pod.Status.Phase != test.wantPhase {
t.Errorf("Unexpected phase for pod %q. Got: %q, want: %q", klog.KObj(pod), pod.Status.Phase, test.wantPhase) t.Errorf("Unexpected phase for pod %q. Got: %q, want: %q", klog.KObj(pod), pod.Status.Phase, test.wantPhase)
@ -232,9 +232,9 @@ func TestTerminatingOnOutOfServiceNode(t *testing.T) {
if err != nil { if err != nil {
t.Errorf("Error %q while waiting for the pod %q to be in expected phase", err, klog.KObj(pod)) t.Errorf("Error %q while waiting for the pod %q to be in expected phase", err, klog.KObj(pod))
} }
_, cond := podutil.GetPodCondition(&pod.Status, v1.AlphaNoCompatGuaranteeDisruptionTarget) _, cond := podutil.GetPodCondition(&pod.Status, v1.DisruptionTarget)
if cond != nil { if cond != nil {
t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.AlphaNoCompatGuaranteeDisruptionTarget) t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.DisruptionTarget)
} }
} else { } else {
// wait until the pod is deleted // wait until the pod is deleted

View File

@ -468,11 +468,11 @@ func TestPreemption(t *testing.T) {
if err != nil { if err != nil {
t.Errorf("Error %v when getting the updated status for pod %v/%v ", err, p.Namespace, p.Name) t.Errorf("Error %v when getting the updated status for pod %v/%v ", err, p.Namespace, p.Name)
} }
_, cond := podutil.GetPodCondition(&pod.Status, v1.AlphaNoCompatGuaranteeDisruptionTarget) _, cond := podutil.GetPodCondition(&pod.Status, v1.DisruptionTarget)
if test.enablePodDisruptionConditions == true && cond == nil { if test.enablePodDisruptionConditions == true && cond == nil {
t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(pod), v1.AlphaNoCompatGuaranteeDisruptionTarget) t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(pod), v1.DisruptionTarget)
} else if test.enablePodDisruptionConditions == false && cond != nil { } else if test.enablePodDisruptionConditions == false && cond != nil {
t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.AlphaNoCompatGuaranteeDisruptionTarget) t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.DisruptionTarget)
} }
} else { } else {
if p.DeletionTimestamp != nil { if p.DeletionTimestamp != nil {