diff --git a/pkg/apis/batch/types.go b/pkg/apis/batch/types.go index bc88b938b5d..cd3dd9656a2 100644 --- a/pkg/apis/batch/types.go +++ b/pkg/apis/batch/types.go @@ -440,8 +440,7 @@ const ( // JobFailed means the job has failed its execution. JobFailed JobConditionType = "Failed" // FailureTarget means the job is about to fail its execution. - // The constant is to be renamed once the name is accepted within the KEP-3329. - AlphaNoCompatGuaranteeJobFailureTarget JobConditionType = "FailureTarget" + JobFailureTarget JobConditionType = "FailureTarget" ) // JobCondition describes current state of a job. diff --git a/pkg/apis/batch/v1/defaults_test.go b/pkg/apis/batch/v1/defaults_test.go index 73574c13282..deaf088516e 100644 --- a/pkg/apis/batch/v1/defaults_test.go +++ b/pkg/apis/batch/v1/defaults_test.go @@ -52,7 +52,7 @@ func TestSetDefaultJob(t *testing.T) { Action: batchv1.PodFailurePolicyActionFailJob, OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, { @@ -75,7 +75,7 @@ func TestSetDefaultJob(t *testing.T) { Action: batchv1.PodFailurePolicyActionFailJob, OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, }, }, }, @@ -96,7 +96,7 @@ func TestSetDefaultJob(t *testing.T) { Action: batchv1.PodFailurePolicyActionFailJob, OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, { @@ -120,7 +120,7 @@ func TestSetDefaultJob(t *testing.T) { Action: batchv1.PodFailurePolicyActionFailJob, OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, diff --git a/pkg/apis/batch/validation/validation_test.go b/pkg/apis/batch/validation/validation_test.go index b98a6399e93..af2a17b9406 100644 --- a/pkg/apis/batch/validation/validation_test.go +++ b/pkg/apis/batch/validation/validation_test.go @@ -118,7 +118,7 @@ func TestValidateJob(t *testing.T) { Action: batch.PodFailurePolicyActionIgnore, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: api.AlphaNoCompatGuaranteeDisruptionTarget, + Type: api.DisruptionTarget, Status: api.ConditionTrue, }, }, @@ -456,7 +456,7 @@ func TestValidateJob(t *testing.T) { }, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: api.AlphaNoCompatGuaranteeDisruptionTarget, + Type: api.DisruptionTarget, Status: api.ConditionTrue, }, }, @@ -558,7 +558,7 @@ func TestValidateJob(t *testing.T) { Action: batch.PodFailurePolicyActionIgnore, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: api.AlphaNoCompatGuaranteeDisruptionTarget, + Type: api.DisruptionTarget, }, }, }, @@ -577,7 +577,7 @@ func TestValidateJob(t *testing.T) { Action: batch.PodFailurePolicyActionIgnore, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: api.AlphaNoCompatGuaranteeDisruptionTarget, + Type: api.DisruptionTarget, Status: "UnknownStatus", }, }, @@ -968,7 +968,7 @@ func TestValidateJobUpdate(t *testing.T) { Action: batch.PodFailurePolicyActionIgnore, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: api.AlphaNoCompatGuaranteeDisruptionTarget, + Type: api.DisruptionTarget, Status: api.ConditionTrue, }, }, @@ -993,7 +993,7 @@ func 
TestValidateJobUpdate(t *testing.T) { Action: batch.PodFailurePolicyActionIgnore, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: api.AlphaNoCompatGuaranteeDisruptionTarget, + Type: api.DisruptionTarget, Status: api.ConditionTrue, }, }, @@ -1007,7 +1007,7 @@ func TestValidateJobUpdate(t *testing.T) { Action: batch.PodFailurePolicyActionCount, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: api.AlphaNoCompatGuaranteeDisruptionTarget, + Type: api.DisruptionTarget, Status: api.ConditionTrue, }, }, @@ -1030,7 +1030,7 @@ func TestValidateJobUpdate(t *testing.T) { Action: batch.PodFailurePolicyActionIgnore, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: api.AlphaNoCompatGuaranteeDisruptionTarget, + Type: api.DisruptionTarget, Status: api.ConditionTrue, }, }, diff --git a/pkg/apis/core/types.go b/pkg/apis/core/types.go index d9f2c250ffd..1746640f1db 100644 --- a/pkg/apis/core/types.go +++ b/pkg/apis/core/types.go @@ -2433,10 +2433,9 @@ const ( PodReasonSchedulingGated = "SchedulingGated" // ContainersReady indicates whether all containers in the pod are ready. ContainersReady PodConditionType = "ContainersReady" - // AlphaNoCompatGuaranteeDisruptionTarget indicates the pod is about to be terminated due to a + // DisruptionTarget indicates the pod is about to be terminated due to a // disruption (such as preemption, eviction API or garbage-collection). - // The constant is to be renamed once the name is accepted within the KEP-3329. - AlphaNoCompatGuaranteeDisruptionTarget PodConditionType = "DisruptionTarget" + DisruptionTarget PodConditionType = "DisruptionTarget" ) // PodCondition represents pod's condition diff --git a/pkg/controller/disruption/disruption.go b/pkg/controller/disruption/disruption.go index ca8d24665a5..a11f267262e 100644 --- a/pkg/controller/disruption/disruption.go +++ b/pkg/controller/disruption/disruption.go @@ -755,7 +755,7 @@ func (dc *DisruptionController) syncStalePodDisruption(ctx context.Context, key WithStatus(corev1apply.PodStatus()). WithResourceVersion(pod.ResourceVersion) podApply.Status.WithConditions(corev1apply.PodCondition(). - WithType(v1.AlphaNoCompatGuaranteeDisruptionTarget). + WithType(v1.DisruptionTarget). WithStatus(v1.ConditionFalse). WithLastTransitionTime(metav1.Now()), ) @@ -998,11 +998,11 @@ func (dc *DisruptionController) nonTerminatingPodHasStaleDisruptionCondition(pod if pod.DeletionTimestamp != nil { return false, 0 } - _, cond := apipod.GetPodCondition(&pod.Status, v1.AlphaNoCompatGuaranteeDisruptionTarget) + _, cond := apipod.GetPodCondition(&pod.Status, v1.DisruptionTarget) // Pod disruption conditions added by kubelet are never considered stale because the condition might take // arbitrarily long before the pod is terminating (has deletion timestamp). Also, pod conditions present // on pods in terminal phase are not stale to avoid unnecessary status updates. 
- if cond == nil || cond.Status != v1.ConditionTrue || cond.Reason == v1.AlphaNoCompatGuaranteePodReasonTerminationByKubelet || apipod.IsPodPhaseTerminal(pod.Status.Phase) { + if cond == nil || cond.Status != v1.ConditionTrue || cond.Reason == v1.PodReasonTerminationByKubelet || apipod.IsPodPhaseTerminal(pod.Status.Phase) { return false, 0 } waitFor := dc.stalePodDisruptionTimeout - dc.clock.Since(cond.LastTransitionTime.Time) diff --git a/pkg/controller/disruption/disruption_test.go b/pkg/controller/disruption/disruption_test.go index d5d60f33bf3..0d6f837eb2c 100644 --- a/pkg/controller/disruption/disruption_test.go +++ b/pkg/controller/disruption/disruption_test.go @@ -1403,7 +1403,7 @@ func TestStalePodDisruption(t *testing.T) { Status: v1.PodStatus{ Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, LastTransitionTime: metav1.Time{Time: now}, }, @@ -1413,7 +1413,7 @@ func TestStalePodDisruption(t *testing.T) { timePassed: 2*time.Minute + time.Second, wantConditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionFalse, }, }, @@ -1427,7 +1427,7 @@ func TestStalePodDisruption(t *testing.T) { Status: v1.PodStatus{ Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, LastTransitionTime: metav1.Time{Time: now}, }, @@ -1437,7 +1437,7 @@ func TestStalePodDisruption(t *testing.T) { timePassed: 2*time.Minute - time.Second, wantConditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -1452,7 +1452,7 @@ func TestStalePodDisruption(t *testing.T) { Status: v1.PodStatus{ Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, LastTransitionTime: metav1.Time{Time: now}, }, @@ -1462,7 +1462,7 @@ func TestStalePodDisruption(t *testing.T) { timePassed: 2*time.Minute + time.Second, wantConditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -1487,7 +1487,7 @@ func TestStalePodDisruption(t *testing.T) { Status: v1.PodStatus{ Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionFalse, }, }, @@ -1496,7 +1496,7 @@ func TestStalePodDisruption(t *testing.T) { timePassed: 2*time.Minute + time.Second, wantConditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionFalse, }, }, diff --git a/pkg/controller/job/job_controller.go b/pkg/controller/job/job_controller.go index d49afac2f8b..5ca13544c03 100644 --- a/pkg/controller/job/job_controller.go +++ b/pkg/controller/job/job_controller.go @@ -758,12 +758,12 @@ func (jm *Controller) syncJob(ctx context.Context, key string) (forget bool, rEr (failed > *job.Spec.BackoffLimit) if feature.DefaultFeatureGate.Enabled(features.JobPodFailurePolicy) { - if failureTargetCondition := findConditionByType(job.Status.Conditions, batch.AlphaNoCompatGuaranteeJobFailureTarget); failureTargetCondition != nil { + if failureTargetCondition := findConditionByType(job.Status.Conditions, batch.JobFailureTarget); failureTargetCondition != nil { finishedCondition = newFailedConditionForFailureTarget(failureTargetCondition) } else if 
failJobMessage := getFailJobMessage(&job, pods, uncounted.Failed()); failJobMessage != nil { if uncounted != nil { // Prepare the interim FailureTarget condition to record the failure message before the finalizers (allowing removal of the pods) are removed. - finishedCondition = newCondition(batch.AlphaNoCompatGuaranteeJobFailureTarget, v1.ConditionTrue, jobConditionReasonPodFailurePolicy, *failJobMessage) + finishedCondition = newCondition(batch.JobFailureTarget, v1.ConditionTrue, jobConditionReasonPodFailurePolicy, *failJobMessage) } else { // Prepare the Failed job condition for the legacy path without finalizers (don't use the interim FailureTarget condition). finishedCondition = newCondition(batch.JobFailed, v1.ConditionTrue, jobConditionReasonPodFailurePolicy, *failJobMessage) @@ -1090,7 +1090,7 @@ func (jm *Controller) trackJobStatusAndRemoveFinalizers(ctx context.Context, job job.Status.CompletedIndexes = succeededIndexes.String() } if feature.DefaultFeatureGate.Enabled(features.JobPodFailurePolicy) { - if finishedCond != nil && finishedCond.Type == batch.AlphaNoCompatGuaranteeJobFailureTarget { + if finishedCond != nil && finishedCond.Type == batch.JobFailureTarget { // Append the interim FailureTarget condition to update the job status with before finalizers are removed. job.Status.Conditions = append(job.Status.Conditions, *finishedCond) diff --git a/pkg/controller/job/job_controller_test.go b/pkg/controller/job/job_controller_test.go index cf71b119b8c..c019f80a734 100644 --- a/pkg/controller/job/job_controller_test.go +++ b/pkg/controller/job/job_controller_test.go @@ -2192,7 +2192,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Status: batch.JobStatus{ Conditions: []batch.JobCondition{ { - Type: batch.AlphaNoCompatGuaranteeJobFailureTarget, + Type: batch.JobFailureTarget, Status: v1.ConditionTrue, Reason: "PodFailurePolicy", Message: "Container main-container for pod default/mypod-0 failed with exit code 5 matching FailJob rule at index 1", @@ -2245,7 +2245,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Status: batch.JobStatus{ Conditions: []batch.JobCondition{ { - Type: batch.AlphaNoCompatGuaranteeJobFailureTarget, + Type: batch.JobFailureTarget, Status: v1.ConditionTrue, Reason: "PodFailurePolicy", Message: "Container main-container for pod default/already-deleted-pod failed with exit code 5 matching FailJob rule at index 1", @@ -2751,7 +2751,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Action: batch.PodFailurePolicyActionIgnore, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -2769,7 +2769,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Status: v1.ConditionTrue, }, { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -2797,7 +2797,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Action: batch.PodFailurePolicyActionIgnore, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -2811,7 +2811,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Phase: v1.PodFailed, Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -2839,7 +2839,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { 
Action: batch.PodFailurePolicyActionFailJob, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -2853,7 +2853,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) { Phase: v1.PodFailed, Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, diff --git a/pkg/controller/job/pod_failure_policy_test.go b/pkg/controller/job/pod_failure_policy_test.go index 3d872b48d29..e2d1440f752 100644 --- a/pkg/controller/job/pod_failure_policy_test.go +++ b/pkg/controller/job/pod_failure_policy_test.go @@ -89,7 +89,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Action: "UnkonwnAction", OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -98,7 +98,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Action: batch.PodFailurePolicyActionIgnore, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -111,7 +111,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Phase: v1.PodFailed, Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -410,7 +410,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Action: batch.PodFailurePolicyActionIgnore, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -423,7 +423,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Phase: v1.PodFailed, Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -440,7 +440,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Action: batch.PodFailurePolicyActionIgnore, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionFalse, }, }, @@ -453,7 +453,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Phase: v1.PodFailed, Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionFalse, }, }, @@ -470,7 +470,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Action: batch.PodFailurePolicyActionIgnore, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionUnknown, }, }, @@ -483,7 +483,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Phase: v1.PodFailed, Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionUnknown, }, }, @@ -500,7 +500,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Action: batch.PodFailurePolicyActionIgnore, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionFalse, }, }, @@ -513,7 +513,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Phase: v1.PodFailed, Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: 
v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -529,7 +529,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Action: batch.PodFailurePolicyActionIgnore, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -542,7 +542,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Phase: v1.PodFailed, Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionFalse, }, }, @@ -558,7 +558,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Action: batch.PodFailurePolicyActionIgnore, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -571,7 +571,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Phase: v1.PodFailed, Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionFalse, }, }, @@ -587,7 +587,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Action: batch.PodFailurePolicyActionFailJob, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -600,7 +600,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Phase: v1.PodFailed, Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -617,7 +617,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Action: batch.PodFailurePolicyActionCount, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -630,7 +630,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Phase: v1.PodFailed, Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -677,7 +677,7 @@ func TestMatchPodFailurePolicy(t *testing.T) { Action: batch.PodFailurePolicyActionIgnore, OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, diff --git a/pkg/controller/nodelifecycle/scheduler/taint_manager.go b/pkg/controller/nodelifecycle/scheduler/taint_manager.go index bc50f8eb697..f147a44aadb 100644 --- a/pkg/controller/nodelifecycle/scheduler/taint_manager.go +++ b/pkg/controller/nodelifecycle/scheduler/taint_manager.go @@ -129,7 +129,7 @@ func addConditionAndDeletePod(ctx context.Context, c clientset.Interface, name, } podApply := corev1apply.Pod(pod.Name, pod.Namespace).WithStatus(corev1apply.PodStatus()) podApply.Status.WithConditions(corev1apply.PodCondition(). - WithType(v1.AlphaNoCompatGuaranteeDisruptionTarget). + WithType(v1.DisruptionTarget). WithStatus(v1.ConditionTrue). WithReason("DeletionByTaintManager"). WithMessage("Taint manager: deleting due to NoExecute taint"). 
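Note on the pattern above: the taint manager, like the disruption controller, PodGC, and the scheduler preemption change elsewhere in this diff, appends the DisruptionTarget condition with a server-side apply against the pod's status subresource before issuing the delete. A minimal, self-contained sketch of that shared pattern, assuming the caller supplies a clientset; the helper name and field-manager string are illustrative, not the controllers' actual names:

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1apply "k8s.io/client-go/applyconfigurations/core/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// appendDisruptionTarget (hypothetical helper) builds an apply configuration
// that owns only the DisruptionTarget condition and patches pods/status via
// server-side apply.
func appendDisruptionTarget(ctx context.Context, c clientset.Interface, pod *v1.Pod, reason, message string) error {
	podApply := corev1apply.Pod(pod.Name, pod.Namespace).WithStatus(corev1apply.PodStatus())
	podApply.Status.WithConditions(corev1apply.PodCondition().
		WithType(v1.DisruptionTarget).
		WithStatus(v1.ConditionTrue).
		WithReason(reason). // e.g. "DeletionByTaintManager"
		WithMessage(message).
		WithLastTransitionTime(metav1.Now()))
	// Patching the status subresource is why this PR also grants the
	// controllers the "patch" verb on pods/status in the bootstrap RBAC policy.
	_, err := c.CoreV1().Pods(pod.Namespace).ApplyStatus(ctx, podApply, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}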
diff --git a/pkg/controller/nodelifecycle/scheduler/taint_manager_test.go b/pkg/controller/nodelifecycle/scheduler/taint_manager_test.go index 82f4b956574..c990e8ef5f5 100644 --- a/pkg/controller/nodelifecycle/scheduler/taint_manager_test.go +++ b/pkg/controller/nodelifecycle/scheduler/taint_manager_test.go @@ -327,6 +327,7 @@ func TestCreateNode(t *testing.T) { description string pods []v1.Pod node *v1.Node + expectPatch bool expectDelete bool }{ { @@ -335,6 +336,7 @@ func TestCreateNode(t *testing.T) { *testutil.NewPod("pod1", "node1"), }, node: testutil.NewNode("node1"), + expectPatch: false, expectDelete: false, }, { @@ -343,6 +345,7 @@ func TestCreateNode(t *testing.T) { *testutil.NewPod("pod1", "node1"), }, node: addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}), + expectPatch: true, expectDelete: true, }, { @@ -351,6 +354,7 @@ func TestCreateNode(t *testing.T) { *addToleration(testutil.NewPod("pod1", "node1"), 1, -1), }, node: addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}), + expectPatch: false, expectDelete: false, }, } @@ -366,7 +370,7 @@ func TestCreateNode(t *testing.T) { // wait a bit time.Sleep(timeForControllerToProgress) - verifyPodActions(t, item.description, fakeClientset, false, item.expectDelete) + verifyPodActions(t, item.description, fakeClientset, item.expectPatch, item.expectDelete) cancel() } @@ -766,6 +770,7 @@ func TestEventualConsistency(t *testing.T) { newPod *v1.Pod oldNode *v1.Node newNode *v1.Node + expectPatch bool expectDelete bool }{ { @@ -777,6 +782,7 @@ func TestEventualConsistency(t *testing.T) { newPod: testutil.NewPod("pod2", "node1"), oldNode: testutil.NewNode("node1"), newNode: addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}), + expectPatch: true, expectDelete: true, }, { @@ -788,6 +794,7 @@ func TestEventualConsistency(t *testing.T) { newPod: addToleration(testutil.NewPod("pod2", "node1"), 1, 100), oldNode: testutil.NewNode("node1"), newNode: addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}), + expectPatch: true, expectDelete: true, }, { @@ -799,6 +806,7 @@ func TestEventualConsistency(t *testing.T) { newPod: testutil.NewPod("pod2", "node1"), oldNode: testutil.NewNode("node1"), newNode: addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}), + expectPatch: true, expectDelete: true, }, { @@ -810,6 +818,7 @@ func TestEventualConsistency(t *testing.T) { newPod: addToleration(testutil.NewPod("pod2", "node1"), 1, 100), oldNode: testutil.NewNode("node1"), newNode: addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}), + expectPatch: true, expectDelete: true, }, } @@ -835,7 +844,7 @@ func TestEventualConsistency(t *testing.T) { // TODO(mborsz): Remove this sleep and other sleeps in this file. time.Sleep(timeForControllerToProgress) - verifyPodActions(t, item.description, fakeClientset, false, item.expectDelete) + verifyPodActions(t, item.description, fakeClientset, item.expectPatch, item.expectDelete) fakeClientset.ClearActions() // And now the delayed update of 'pod2' comes to the TaintManager. We should delete it as well. 
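The test updates above now expect a status patch to accompany each delete whenever the condition is appended, hence the new expectPatch fields. A rough sketch of how that can be asserted against a fake clientset's recorded actions (a hypothetical helper; the repo's verifyPodActions is not shown in this diff):

import (
	"k8s.io/client-go/kubernetes/fake"
)

// podActionsSeen reports whether the fake client recorded a patch (the
// DisruptionTarget status update) and/or a delete against pods.
func podActionsSeen(c *fake.Clientset) (patched, deleted bool) {
	for _, action := range c.Actions() {
		if action.GetResource().Resource != "pods" {
			continue
		}
		switch action.GetVerb() {
		case "patch":
			patched = true
		case "delete":
			deleted = true
		}
	}
	return patched, deleted
}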
diff --git a/pkg/controller/podgc/gc_controller.go b/pkg/controller/podgc/gc_controller.go index db1c62d2693..f6efaf7aed3 100644 --- a/pkg/controller/podgc/gc_controller.go +++ b/pkg/controller/podgc/gc_controller.go @@ -246,12 +246,11 @@ func (gcc *PodGCController) gcOrphaned(ctx context.Context, pods []*v1.Pod, node } klog.V(2).InfoS("Found orphaned Pod assigned to the Node, deleting.", "pod", klog.KObj(pod), "node", pod.Spec.NodeName) condition := corev1apply.PodCondition(). - WithType(v1.AlphaNoCompatGuaranteeDisruptionTarget). + WithType(v1.DisruptionTarget). WithStatus(v1.ConditionTrue). WithReason("DeletionByPodGC"). WithMessage("PodGC: node no longer exists"). WithLastTransitionTime(metav1.Now()) - if err := gcc.markFailedAndDeletePodWithCondition(ctx, pod, condition); err != nil { utilruntime.HandleError(err) } else { diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index dd307cacd54..81d06b4b731 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -436,6 +436,7 @@ const ( // owner: @mimowo // kep: https://kep.k8s.io/3329 // alpha: v1.25 + // beta: v1.26 // // Allow users to specify handling of pod failures based on container exit codes // and pod conditions. @@ -643,6 +644,7 @@ const ( // owner: @mimowo // kep: https://kep.k8s.io/3329 // alpha: v1.25 + // beta: v1.26 // // Enables support for appending a dedicated pod condition indicating that // the pod is being deleted due to a disruption. @@ -993,7 +995,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS IPTablesOwnershipCleanup: {Default: false, PreRelease: featuregate.Alpha}, - JobPodFailurePolicy: {Default: false, PreRelease: featuregate.Alpha}, + JobPodFailurePolicy: {Default: true, PreRelease: featuregate.Beta}, JobMutableNodeSchedulingDirectives: {Default: true, PreRelease: featuregate.Beta}, @@ -1049,7 +1051,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS PodDeletionCost: {Default: true, PreRelease: featuregate.Beta}, - PodDisruptionConditions: {Default: false, PreRelease: featuregate.Alpha}, + PodDisruptionConditions: {Default: true, PreRelease: featuregate.Beta}, PodHasNetworkCondition: {Default: false, PreRelease: featuregate.Alpha}, diff --git a/pkg/kubelet/eviction/eviction_manager.go b/pkg/kubelet/eviction/eviction_manager.go index 308ab482c90..9056507c00e 100644 --- a/pkg/kubelet/eviction/eviction_manager.go +++ b/pkg/kubelet/eviction/eviction_manager.go @@ -392,9 +392,9 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act var condition *v1.PodCondition if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) { condition = &v1.PodCondition{ - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, - Reason: v1.AlphaNoCompatGuaranteePodReasonTerminationByKubelet, + Reason: v1.PodReasonTerminationByKubelet, Message: message, } } diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 53ec75b87be..881492a1506 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -1517,7 +1517,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po // updated during the eviciton (due to either node resource pressure or // node graceful shutdown). We do not re-generate the conditions based // on the container statuses as they are added based on one-time events. 
- cType := v1.AlphaNoCompatGuaranteeDisruptionTarget + cType := v1.DisruptionTarget if _, condition := podutil.GetPodConditionFromList(oldPodStatus.Conditions, cType); condition != nil { s.Conditions = utilpod.ReplaceOrAppendPodCondition(s.Conditions, condition) } diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go index 7af53dc40c8..edb4f00f8f1 100644 --- a/pkg/kubelet/kubelet_pods_test.go +++ b/pkg/kubelet/kubelet_pods_test.go @@ -2522,7 +2522,7 @@ func Test_generateAPIPodStatus(t *testing.T) { runningState("containerB"), }, Conditions: []v1.PodCondition{{ - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, LastTransitionTime: normalized_now, }}, @@ -2536,7 +2536,7 @@ func Test_generateAPIPodStatus(t *testing.T) { runningState("containerB"), }, Conditions: []v1.PodCondition{{ - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, LastTransitionTime: normalized_now, }}, @@ -2558,7 +2558,7 @@ func Test_generateAPIPodStatus(t *testing.T) { }, }, expectedPodDisruptionCondition: v1.PodCondition{ - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, LastTransitionTime: normalized_now, }, diff --git a/pkg/kubelet/nodeshutdown/nodeshutdown_manager_linux.go b/pkg/kubelet/nodeshutdown/nodeshutdown_manager_linux.go index 30f15b152f4..8469cecdc0e 100644 --- a/pkg/kubelet/nodeshutdown/nodeshutdown_manager_linux.go +++ b/pkg/kubelet/nodeshutdown/nodeshutdown_manager_linux.go @@ -383,9 +383,9 @@ func (m *managerImpl) processShutdownEvent() error { status.Reason = nodeShutdownReason if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) { podutil.UpdatePodCondition(status, &v1.PodCondition{ - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, - Reason: v1.AlphaNoCompatGuaranteePodReasonTerminationByKubelet, + Reason: v1.PodReasonTerminationByKubelet, Message: nodeShutdownMessage, }) } diff --git a/pkg/kubelet/nodeshutdown/nodeshutdown_manager_linux_test.go b/pkg/kubelet/nodeshutdown/nodeshutdown_manager_linux_test.go index 2372c87b215..a7b54c08921 100644 --- a/pkg/kubelet/nodeshutdown/nodeshutdown_manager_linux_test.go +++ b/pkg/kubelet/nodeshutdown/nodeshutdown_manager_linux_test.go @@ -170,7 +170,7 @@ func TestManager(t *testing.T) { Reason: "Terminated", Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "TerminationByKubelet", Message: "Pod was terminated in response to imminent node shutdown.", @@ -183,7 +183,7 @@ func TestManager(t *testing.T) { Reason: "Terminated", Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "TerminationByKubelet", Message: "Pod was terminated in response to imminent node shutdown.", @@ -196,7 +196,7 @@ func TestManager(t *testing.T) { Reason: "Terminated", Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "TerminationByKubelet", Message: "Pod was terminated in response to imminent node shutdown.", diff --git a/pkg/kubelet/status/status_manager.go b/pkg/kubelet/status/status_manager.go index 219bfc90e6f..4022b7e806a 100644 --- a/pkg/kubelet/status/status_manager.go +++ b/pkg/kubelet/status/status_manager.go @@ -506,7 +506,7 @@ func 
(m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) { // Set DisruptionTarget.LastTransitionTime. - updateLastTransitionTime(&status, &oldStatus, v1.AlphaNoCompatGuaranteeDisruptionTarget) + updateLastTransitionTime(&status, &oldStatus, v1.DisruptionTarget) } // ensure that the start time does not change across updates. @@ -895,7 +895,7 @@ func mergePodStatus(oldPodStatus, newPodStatus v1.PodStatus, couldHaveRunningCon podConditions = append(podConditions, c) } else if kubetypes.PodConditionSharedByKubelet(c.Type) { // we replace or append all the "shared by kubelet" conditions - if c.Type == v1.AlphaNoCompatGuaranteeDisruptionTarget { + if c.Type == v1.DisruptionTarget { // guard the update of the DisruptionTarget condition with a check to ensure // it will only be sent once all containers have terminated and the phase // is terminal. This avoids sending an unnecessary patch request to add diff --git a/pkg/kubelet/status/status_manager_test.go b/pkg/kubelet/status/status_manager_test.go index 1aa605cb9ba..c6dbde9fe42 100644 --- a/pkg/kubelet/status/status_manager_test.go +++ b/pkg/kubelet/status/status_manager_test.go @@ -1426,7 +1426,7 @@ func TestMergePodStatus(t *testing.T) { func(input v1.PodStatus) v1.PodStatus { input.Phase = v1.PodFailed input.Conditions = append(input.Conditions, v1.PodCondition{ - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "TerminationByKubelet", }) @@ -1436,7 +1436,7 @@ func TestMergePodStatus(t *testing.T) { Phase: v1.PodFailed, Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "TerminationByKubelet", }, @@ -1466,7 +1466,7 @@ func TestMergePodStatus(t *testing.T) { func(input v1.PodStatus) v1.PodStatus { input.Phase = v1.PodFailed input.Conditions = append(input.Conditions, v1.PodCondition{ - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "TerminationByKubelet", }) @@ -1493,7 +1493,7 @@ func TestMergePodStatus(t *testing.T) { false, func(input v1.PodStatus) v1.PodStatus { input.Conditions = append(input.Conditions, v1.PodCondition{ - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "TerminationByKubelet", }) @@ -1514,7 +1514,7 @@ func TestMergePodStatus(t *testing.T) { Status: v1.ConditionTrue, }, { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "TerminationByKubelet", }, @@ -1528,7 +1528,7 @@ func TestMergePodStatus(t *testing.T) { false, func(input v1.PodStatus) v1.PodStatus { input.Conditions = append(input.Conditions, v1.PodCondition{ - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "TerminationByKubelet", }) @@ -1549,7 +1549,7 @@ func TestMergePodStatus(t *testing.T) { Status: v1.ConditionTrue, }, { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "TerminationByKubelet", }, @@ -1563,7 +1563,7 @@ func TestMergePodStatus(t *testing.T) { false, func(input v1.PodStatus) v1.PodStatus { input.Conditions = append(input.Conditions, v1.PodCondition{ - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "EvictedByEvictionAPI", 
}) @@ -1572,7 +1572,7 @@ func TestMergePodStatus(t *testing.T) { func(input v1.PodStatus) v1.PodStatus { input.Phase = v1.PodFailed input.Conditions = append(input.Conditions, v1.PodCondition{ - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "TerminationByKubelet", }) @@ -1596,7 +1596,7 @@ func TestMergePodStatus(t *testing.T) { Status: v1.ConditionTrue, }, { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "TerminationByKubelet", }, @@ -1610,7 +1610,7 @@ func TestMergePodStatus(t *testing.T) { false, func(input v1.PodStatus) v1.PodStatus { input.Conditions = append(input.Conditions, v1.PodCondition{ - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "EvictedByEvictionAPI", }) @@ -1618,7 +1618,7 @@ func TestMergePodStatus(t *testing.T) { }, func(input v1.PodStatus) v1.PodStatus { input.Conditions = append(input.Conditions, v1.PodCondition{ - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "TerminationByKubelet", }) @@ -1628,7 +1628,7 @@ func TestMergePodStatus(t *testing.T) { Phase: v1.PodRunning, Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "EvictedByEvictionAPI", }, @@ -1650,7 +1650,7 @@ func TestMergePodStatus(t *testing.T) { true, func(input v1.PodStatus) v1.PodStatus { input.Conditions = append(input.Conditions, v1.PodCondition{ - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "EvictedByEvictionAPI", }) @@ -1659,7 +1659,7 @@ func TestMergePodStatus(t *testing.T) { func(input v1.PodStatus) v1.PodStatus { input.Phase = v1.PodFailed input.Conditions = append(input.Conditions, v1.PodCondition{ - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "TerminationByKubelet", }) @@ -1669,7 +1669,7 @@ func TestMergePodStatus(t *testing.T) { Phase: v1.PodRunning, Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: "EvictedByEvictionAPI", }, diff --git a/pkg/kubelet/types/pod_status.go b/pkg/kubelet/types/pod_status.go index 93a12c6165a..a1894aedf7a 100644 --- a/pkg/kubelet/types/pod_status.go +++ b/pkg/kubelet/types/pod_status.go @@ -48,7 +48,7 @@ func PodConditionByKubelet(conditionType v1.PodConditionType) bool { // PodConditionSharedByKubelet returns if the pod condition type is shared by kubelet func PodConditionSharedByKubelet(conditionType v1.PodConditionType) bool { if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) { - if conditionType == v1.AlphaNoCompatGuaranteeDisruptionTarget { + if conditionType == v1.DisruptionTarget { return true } } diff --git a/pkg/kubelet/types/pod_status_test.go b/pkg/kubelet/types/pod_status_test.go index 2e8bd26031c..91b8a264227 100644 --- a/pkg/kubelet/types/pod_status_test.go +++ b/pkg/kubelet/types/pod_status_test.go @@ -59,7 +59,7 @@ func TestPodConditionSharedByKubelet(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, true)() trueCases := []v1.PodConditionType{ - v1.AlphaNoCompatGuaranteeDisruptionTarget, + v1.DisruptionTarget, } for _, tc := range trueCases { diff --git 
a/pkg/registry/core/pod/storage/eviction.go b/pkg/registry/core/pod/storage/eviction.go index 844c27e475c..05469782e8f 100644 --- a/pkg/registry/core/pod/storage/eviction.go +++ b/pkg/registry/core/pod/storage/eviction.go @@ -302,7 +302,7 @@ func addConditionAndDeletePod(r *EvictionREST, ctx context.Context, name string, conditionAppender := func(_ context.Context, newObj, _ runtime.Object) (runtime.Object, error) { podObj := newObj.(*api.Pod) podutil.UpdatePodCondition(&podObj.Status, &api.PodCondition{ - Type: api.AlphaNoCompatGuaranteeDisruptionTarget, + Type: api.DisruptionTarget, Status: api.ConditionTrue, Reason: "EvictionByEvictionAPI", Message: "Eviction API: evicting", diff --git a/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption_test.go b/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption_test.go index b73c6194b2c..cec8f0dd734 100644 --- a/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption_test.go +++ b/pkg/scheduler/framework/plugins/defaultpreemption/default_preemption_test.go @@ -337,6 +337,9 @@ func TestPostFilter(t *testing.T) { } // As we use a bare clientset above, it's needed to add a reactor here // to not fail Victims deletion logic. + cs.PrependReactor("patch", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) { + return true, nil, nil + }) cs.PrependReactor("delete", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) { return true, nil, nil }) @@ -1642,6 +1645,11 @@ func TestPreempt(t *testing.T) { } deletedPodNames := make(sets.String) + patchedPodNames := make(sets.String) + client.PrependReactor("patch", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) { + patchedPodNames.Insert(action.(clienttesting.PatchAction).GetName()) + return true, nil, nil + }) client.PrependReactor("delete", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) { deletedPodNames.Insert(action.(clienttesting.DeleteAction).GetName()) return true, nil, nil @@ -1729,6 +1737,9 @@ func TestPreempt(t *testing.T) { if len(deletedPodNames) != len(test.expectedPods) { t.Errorf("expected %v pods, got %v.", len(test.expectedPods), len(deletedPodNames)) } + if diff := cmp.Diff(patchedPodNames.List(), deletedPodNames.List()); diff != "" { + t.Errorf("unexpected difference in the set of patched and deleted pods: %s", diff) + } for victimName := range deletedPodNames { found := false for _, expPod := range test.expectedPods { diff --git a/pkg/scheduler/framework/preemption/preemption.go b/pkg/scheduler/framework/preemption/preemption.go index 8d0c51479ef..32e1e1ba743 100644 --- a/pkg/scheduler/framework/preemption/preemption.go +++ b/pkg/scheduler/framework/preemption/preemption.go @@ -359,7 +359,7 @@ func (ev *Evaluator) prepareCandidate(ctx context.Context, c Candidate, pod *v1. if feature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) { victimPodApply := corev1apply.Pod(victim.Name, victim.Namespace).WithStatus(corev1apply.PodStatus()) victimPodApply.Status.WithConditions(corev1apply.PodCondition(). - WithType(v1.AlphaNoCompatGuaranteeDisruptionTarget). + WithType(v1.DisruptionTarget). WithStatus(v1.ConditionTrue). WithReason("PreemptionByKubeScheduler"). WithMessage(fmt.Sprintf("Kube-scheduler: preempting to accommodate a higher priority pod: %s", klog.KObj(pod))). 
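With this change kube-scheduler marks preemption victims as well, so the eviction API, taint-based eviction, PodGC, kubelet evictions, graceful node shutdown, and preemption all surface through the single DisruptionTarget condition. The consumer-side check used throughout the tests below (podutil.GetPodCondition) reduces to a scan of the status conditions; a minimal sketch against only the core API types:

import v1 "k8s.io/api/core/v1"

// isDisruptionTarget reports whether the pod currently carries a true
// DisruptionTarget condition, whichever component set it.
func isDisruptionTarget(status *v1.PodStatus) bool {
	for _, cond := range status.Conditions {
		if cond.Type == v1.DisruptionTarget {
			return cond.Status == v1.ConditionTrue
		}
	}
	return false
}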
diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml index d1d6e37c08a..e5bfccfef9f 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml @@ -430,6 +430,12 @@ items: - create - patch - update + - apiGroups: + - "" + resources: + - pods/status + verbs: + - patch - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -925,6 +931,18 @@ items: - create - patch - update + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - pods/status + verbs: + - patch - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -1055,6 +1073,12 @@ items: verbs: - get - list + - apiGroups: + - "" + resources: + - pods/status + verbs: + - patch - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: diff --git a/staging/src/k8s.io/api/batch/v1/types.go b/staging/src/k8s.io/api/batch/v1/types.go index 0b8e30d4b0a..dcb15728f99 100644 --- a/staging/src/k8s.io/api/batch/v1/types.go +++ b/staging/src/k8s.io/api/batch/v1/types.go @@ -425,8 +425,7 @@ const ( // JobFailed means the job has failed its execution. JobFailed JobConditionType = "Failed" // FailureTarget means the job is about to fail its execution. - // The constant is to be renamed once the name is accepted within the KEP-3329. - AlphaNoCompatGuaranteeJobFailureTarget JobConditionType = "FailureTarget" + JobFailureTarget JobConditionType = "FailureTarget" ) // JobCondition describes current state of a job. diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index 507c499afc8..bae7e8940b1 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -2654,10 +2654,9 @@ const ( PodReady PodConditionType = "Ready" // PodScheduled represents status of the scheduling process for this pod. PodScheduled PodConditionType = "PodScheduled" - // AlphaNoCompatGuaranteeDisruptionTarget indicates the pod is about to be terminated due to a + // DisruptionTarget indicates the pod is about to be terminated due to a // disruption (such as preemption, eviction API or garbage-collection). - // The constant is to be renamed once the name is accepted within the KEP-3329. - AlphaNoCompatGuaranteeDisruptionTarget PodConditionType = "DisruptionTarget" + DisruptionTarget PodConditionType = "DisruptionTarget" ) // These are reasons for a pod's transition to a condition. @@ -2676,7 +2675,7 @@ const ( // TerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination // is initiated by kubelet - AlphaNoCompatGuaranteePodReasonTerminationByKubelet = "TerminationByKubelet" + PodReasonTerminationByKubelet = "TerminationByKubelet" ) // PodCondition contains details for the current condition of this pod. 
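Dropping the AlphaNoCompatGuarantee prefix makes DisruptionTarget, JobFailureTarget, and PodReasonTerminationByKubelet supported API names, which is what lets users reference the condition in a Job's pod failure policy. A rough sketch of that user-facing shape, mirroring the fixtures in the tests above and the e2e tests that follow (the Ignore action keeps disruption-driven failures from counting towards backoffLimit):

import (
	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
)

var podFailurePolicy = &batchv1.PodFailurePolicy{
	Rules: []batchv1.PodFailurePolicyRule{
		{
			// Do not count pod failures caused by disruptions.
			Action: batchv1.PodFailurePolicyActionIgnore,
			OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{
				{Type: v1.DisruptionTarget, Status: v1.ConditionTrue},
			},
		},
	},
}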
diff --git a/test/e2e/apps/job.go b/test/e2e/apps/job.go index bc47c2c311b..dc445a4a982 100644 --- a/test/e2e/apps/job.go +++ b/test/e2e/apps/job.go @@ -100,6 +100,75 @@ var _ = SIGDescribe("Job", func() { framework.ExpectEqual(successes, completions, "expected %d successful job pods, but got %d", completions, successes) }) + ginkgo.It("should allow using the pod failure policy on exit code to fail the job early", func() { + + // We fail the Job's pod only once to ensure the backoffLimit is not + // reached and thus the job is failed due to the pod failure policy + // with FailJob action. + // In order to ensure a Job's pod fails once before succeeding, we force + // the Job's Pods to be scheduled to a single Node and use a hostPath + // volume to persist data across new Pods. + ginkgo.By("Looking for a node to schedule job pod") + node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + framework.ExpectNoError(err) + + ginkgo.By("Creating a job") + job := e2ejob.NewTestJobOnNode("failOnce", "pod-failure-failjob", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit, node.Name) + job.Spec.PodFailurePolicy = &batchv1.PodFailurePolicy{ + Rules: []batchv1.PodFailurePolicyRule{ + { + Action: batchv1.PodFailurePolicyActionFailJob, + OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{ + Operator: batchv1.PodFailurePolicyOnExitCodesOpIn, + Values: []int32{1}, + }, + }, + }, + } + job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) + + ginkgo.By("Ensuring job fails") + err = e2ejob.WaitForJobFailed(f.ClientSet, f.Namespace.Name, job.Name) + framework.ExpectNoError(err, "failed to ensure job failure in namespace: %s", f.Namespace.Name) + }) + + ginkgo.It("should allow using the pod failure policy to not count the failure towards the backoffLimit", func() { + + // We set the backoffLimit to 0 so that any pod failure would trigger + // job failure, were it not for the pod failure policy ignoring the failed + // pods so they do not count towards the backoffLimit. Also, we fail the + // pod only once so that the job eventually succeeds. + // In order to ensure a Job's pod fails once before succeeding, we force + // the Job's Pods to be scheduled to a single Node and use a hostPath + // volume to persist data across new Pods.
+ backoffLimit := int32(0) + + ginkgo.By("Looking for a node to schedule job pod") + node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet) + framework.ExpectNoError(err) + + ginkgo.By("Creating a job") + job := e2ejob.NewTestJobOnNode("failOnce", "pod-failure-ignore", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit, node.Name) + job.Spec.PodFailurePolicy = &batchv1.PodFailurePolicy{ + Rules: []batchv1.PodFailurePolicyRule{ + { + Action: batchv1.PodFailurePolicyActionIgnore, + OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{ + Operator: batchv1.PodFailurePolicyOnExitCodesOpIn, + Values: []int32{1}, + }, + }, + }, + } + job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job) + framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) + + ginkgo.By("Ensuring job reaches completions") + err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions) + framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name) + }) + ginkgo.It("should not create pods when created in suspend state", func() { ginkgo.By("Creating a job with suspend=true") job := e2ejob.NewTestJob("succeed", "suspend-true-to-false", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit) diff --git a/test/e2e/framework/job/wait.go b/test/e2e/framework/job/wait.go index 96bd4230427..de7fa9257d2 100644 --- a/test/e2e/framework/job/wait.go +++ b/test/e2e/framework/job/wait.go @@ -58,6 +58,27 @@ func WaitForJobComplete(c clientset.Interface, ns, jobName string, completions i }) } +// WaitForJobFailed uses c to wait for the Job jobName in namespace ns to fail +func WaitForJobFailed(c clientset.Interface, ns, jobName string) error { + return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) { + curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{}) + if err != nil { + return false, err + } + + return isJobFailed(curr), nil + }) +} + +func isJobFailed(j *batchv1.Job) bool { + for _, c := range j.Status.Conditions { + if (c.Type == batchv1.JobFailed) && c.Status == v1.ConditionTrue { + return true + } + } + return false +} + // WaitForJobFinish uses c to wait for the Job jobName in namespace ns to finish (either Failed or Complete). 
func WaitForJobFinish(c clientset.Interface, ns, jobName string) error { return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) { diff --git a/test/e2e/framework/pod/pod_client.go b/test/e2e/framework/pod/pod_client.go index dcc597a2d52..54b52dd2351 100644 --- a/test/e2e/framework/pod/pod_client.go +++ b/test/e2e/framework/pod/pod_client.go @@ -40,6 +40,7 @@ import ( "github.com/onsi/gomega" "k8s.io/kubernetes/pkg/kubelet/util/format" + "k8s.io/kubernetes/pkg/util/slice" "k8s.io/kubernetes/test/e2e/framework" ) @@ -303,3 +304,11 @@ func (c *PodClient) PodIsReady(name string) bool { framework.ExpectNoError(err) return podutils.IsPodReady(pod) } + +// RemoveFinalizer removes the named finalizer from the pod +func (c *PodClient) RemoveFinalizer(podName string, finalizerName string) { + framework.Logf("Removing pod's %q finalizer: %q", podName, finalizerName) + c.Update(podName, func(pod *v1.Pod) { + pod.ObjectMeta.Finalizers = slice.RemoveString(pod.ObjectMeta.Finalizers, finalizerName, nil) + }) +} diff --git a/test/e2e/framework/pod/resource.go b/test/e2e/framework/pod/resource.go index f85371fe3fa..64c3447f8e6 100644 --- a/test/e2e/framework/pod/resource.go +++ b/test/e2e/framework/pod/resource.go @@ -623,6 +623,15 @@ func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration { return podLogTimeout } +// VerifyPodHasConditionWithType verifies the pod has the expected condition by type +func VerifyPodHasConditionWithType(f *framework.Framework, pod *v1.Pod, cType v1.PodConditionType) { + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + framework.ExpectNoError(err, "Failed to get the latest pod object for name: %q", pod.Name) + if condition := FindPodConditionByType(&pod.Status, cType); condition == nil { + framework.Failf("pod %q should have the condition: %q, pod status: %v", pod.Name, cType, pod.Status) + } +} + func getNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) { nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil || len(nodes.Items) == 0 { diff --git a/test/e2e/framework/pod/wait.go b/test/e2e/framework/pod/wait.go index e4e8de51829..1305416d135 100644 --- a/test/e2e/framework/pod/wait.go +++ b/test/e2e/framework/pod/wait.go @@ -454,6 +454,16 @@ func WaitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, nam }) } +// WaitForPodTerminatingInNamespaceTimeout returns nil once the pod is terminating (has a deletion timestamp), or an error if it is still not terminating after the timeout. +func WaitForPodTerminatingInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error { + return WaitForPodCondition(c, namespace, podName, "is terminating", timeout, func(pod *v1.Pod) (bool, error) { + if pod.DeletionTimestamp != nil { + return true, nil + } + return false, nil + }) +} + // WaitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func WaitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error { return WaitForPodCondition(c, namespace, podName, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, func(pod *v1.Pod) (bool, error) { diff --git a/test/e2e/node/taints.go b/test/e2e/node/taints.go index cbff5547515..f7b4b25e153 100644 --- a/test/e2e/node/taints.go +++ b/test/e2e/node/taints.go @@ -43,6 +43,10 @@ var ( pauseImage = imageutils.GetE2EImage(imageutils.Pause) ) +const ( + testFinalizer = "example.com/test-finalizer" +) + func getTestTaint() v1.Taint { now := metav1.Now() return v1.Taint{ @@ -337,6 +341,37 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { framework.Failf("Pod was evicted despite toleration") } }) + + // 1. Run a pod with a finalizer + // 2. Taint the node running this pod with a no-execute taint + // 3. Verify that the pod gets evicted and has the pod disruption condition + // 4. Remove the finalizer so that the pod can be deleted by GC + ginkgo.It("pods evicted from tainted nodes have pod disruption condition", func() { + podName := "taint-eviction-pod-disruption" + pod := createPodForTaintsTest(false, 0, podName, podName, ns) + pod.Finalizers = append(pod.Finalizers, testFinalizer) + + ginkgo.By("Starting pod...") + nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute) + framework.ExpectNoError(err) + framework.Logf("Pod is running on %v. Tainting Node", nodeName) + + defer e2epod.NewPodClient(f).RemoveFinalizer(pod.Name, testFinalizer) + + ginkgo.By("Trying to apply a taint on the Node") + testTaint := getTestTaint() + e2enode.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) + e2enode.ExpectNodeHasTaint(cs, nodeName, &testTaint) + defer e2enode.RemoveTaintOffNode(cs, nodeName, testTaint) + + ginkgo.By("Waiting for Pod to be terminating") + timeout := time.Duration(kubeletPodDeletionDelaySeconds+3*additionalWaitPerDeleteSeconds) * time.Second + err = e2epod.WaitForPodTerminatingInNamespaceTimeout(f.ClientSet, pod.Name, pod.Namespace, timeout) + framework.ExpectNoError(err) + + ginkgo.By("Verifying the pod has the pod disruption condition") + e2epod.VerifyPodHasConditionWithType(f, pod, v1.DisruptionTarget) + }) }) var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index 174c2a5ef0b..dfd66b1678d 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -513,7 +513,7 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Disruptive][No string(features.PodDisruptionConditions): true, } }) - disruptionTarget := v1.AlphaNoCompatGuaranteeDisruptionTarget + disruptionTarget := v1.DisruptionTarget specs := []podEvictSpec{ { evictionPriority: 1, diff --git a/test/e2e_node/node_shutdown_linux_test.go b/test/e2e_node/node_shutdown_linux_test.go index f25e50606a8..22b4d57265b 100644 --- a/test/e2e_node/node_shutdown_linux_test.go +++ b/test/e2e_node/node_shutdown_linux_test.go @@ -148,9 +148,9 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut framework.Logf("Expecting pod to be shutdown, but it's not currently: Pod: %q, Pod Status %+v", pod.Name, pod.Status) return fmt.Errorf("pod should be shutdown, phase: %s", pod.Status.Phase) } - podDisruptionCondition := e2epod.FindPodConditionByType(&pod.Status, v1.AlphaNoCompatGuaranteeDisruptionTarget) + podDisruptionCondition := e2epod.FindPodConditionByType(&pod.Status,
v1.DisruptionTarget) if podDisruptionCondition == nil { - framework.Failf("pod %q should have the condition: %q, pod status: %v", pod.Name, v1.AlphaNoCompatGuaranteeDisruptionTarget, pod.Status) + framework.Failf("pod %q should have the condition: %q, pod status: %v", pod.Name, v1.DisruptionTarget, pod.Status) } } return nil diff --git a/test/integration/disruption/disruption_test.go b/test/integration/disruption/disruption_test.go index 62edf29cdbb..8d2b47b3c2d 100644 --- a/test/integration/disruption/disruption_test.go +++ b/test/integration/disruption/disruption_test.go @@ -676,7 +676,7 @@ func TestStalePodDisruption(t *testing.T) { podPhase: v1.PodRunning, wantConditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionFalse, }, }, @@ -686,19 +686,19 @@ func TestStalePodDisruption(t *testing.T) { deletePod: true, wantConditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, }, "disruption-condition-by-kubelet": { podPhase: v1.PodFailed, - reason: v1.AlphaNoCompatGuaranteePodReasonTerminationByKubelet, + reason: v1.PodReasonTerminationByKubelet, wantConditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, - Reason: v1.AlphaNoCompatGuaranteePodReasonTerminationByKubelet, + Reason: v1.PodReasonTerminationByKubelet, }, }, }, @@ -706,7 +706,7 @@ func TestStalePodDisruption(t *testing.T) { podPhase: v1.PodFailed, wantConditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, @@ -728,7 +728,7 @@ func TestStalePodDisruption(t *testing.T) { pod.Status.Phase = tc.podPhase pod.Status.Conditions = append(pod.Status.Conditions, v1.PodCondition{ - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, Reason: tc.reason, LastTransitionTime: metav1.Now(), diff --git a/test/integration/evictions/evictions_test.go b/test/integration/evictions/evictions_test.go index 5392d461ead..aee67c024b8 100644 --- a/test/integration/evictions/evictions_test.go +++ b/test/integration/evictions/evictions_test.go @@ -409,11 +409,11 @@ func TestEvictionWithFinalizers(t *testing.T) { if e != nil { t.Fatalf("Failed to get the pod %q with error: %q", klog.KObj(pod), e) } - _, cond := podutil.GetPodCondition(&updatedPod.Status, v1.PodConditionType(v1.AlphaNoCompatGuaranteeDisruptionTarget)) + _, cond := podutil.GetPodCondition(&updatedPod.Status, v1.PodConditionType(v1.DisruptionTarget)) if tc.enablePodDisruptionConditions == true && cond == nil { - t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(updatedPod), v1.AlphaNoCompatGuaranteeDisruptionTarget) + t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(updatedPod), v1.DisruptionTarget) } else if tc.enablePodDisruptionConditions == false && cond != nil { - t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(updatedPod), v1.AlphaNoCompatGuaranteeDisruptionTarget) + t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(updatedPod), v1.DisruptionTarget) } }) } diff --git a/test/integration/job/job_test.go b/test/integration/job/job_test.go index 29fa518ef0a..bed18498274 100644 --- a/test/integration/job/job_test.go +++ b/test/integration/job/job_test.go @@ -481,7 +481,7 @@ func TestJobPodFailurePolicy(t *testing.T) { Action: 
batchv1.PodFailurePolicyActionIgnore, OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, }, }, }, @@ -533,7 +533,7 @@ func TestJobPodFailurePolicy(t *testing.T) { Phase: v1.PodFailed, Conditions: []v1.PodCondition{ { - Type: v1.AlphaNoCompatGuaranteeDisruptionTarget, + Type: v1.DisruptionTarget, Status: v1.ConditionTrue, }, }, diff --git a/test/integration/node/lifecycle_test.go b/test/integration/node/lifecycle_test.go index 0dccc7be30b..2128ec44157 100644 --- a/test/integration/node/lifecycle_test.go +++ b/test/integration/node/lifecycle_test.go @@ -168,11 +168,11 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) { if err != nil { t.Fatalf("Test Failed: error: %q, while getting updated pod", err) } - _, cond := podutil.GetPodCondition(&testPod.Status, v1.AlphaNoCompatGuaranteeDisruptionTarget) + _, cond := podutil.GetPodCondition(&testPod.Status, v1.DisruptionTarget) if test.enablePodDisruptionConditions == true && cond == nil { - t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(testPod), v1.AlphaNoCompatGuaranteeDisruptionTarget) + t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(testPod), v1.DisruptionTarget) } else if test.enablePodDisruptionConditions == false && cond != nil { - t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(testPod), v1.AlphaNoCompatGuaranteeDisruptionTarget) + t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(testPod), v1.DisruptionTarget) } }) } diff --git a/test/integration/podgc/podgc_test.go b/test/integration/podgc/podgc_test.go index da9858ac5aa..e08a96a4400 100644 --- a/test/integration/podgc/podgc_test.go +++ b/test/integration/podgc/podgc_test.go @@ -110,11 +110,11 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) { if err != nil { t.Fatalf("Error: '%v' while updating pod info: '%v'", err, klog.KObj(pod)) } - _, cond := podutil.GetPodCondition(&pod.Status, v1.AlphaNoCompatGuaranteeDisruptionTarget) + _, cond := podutil.GetPodCondition(&pod.Status, v1.DisruptionTarget) if test.enablePodDisruptionConditions == true && cond == nil { - t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(pod), v1.AlphaNoCompatGuaranteeDisruptionTarget) + t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(pod), v1.DisruptionTarget) } else if test.enablePodDisruptionConditions == false && cond != nil { - t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.AlphaNoCompatGuaranteeDisruptionTarget) + t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.DisruptionTarget) } if pod.Status.Phase != test.wantPhase { t.Errorf("Unexpected phase for pod %q. 
Got: %q, want: %q", klog.KObj(pod), pod.Status.Phase, test.wantPhase) @@ -232,9 +232,9 @@ func TestTerminatingOnOutOfServiceNode(t *testing.T) { if err != nil { t.Errorf("Error %q while waiting for the pod %q to be in expected phase", err, klog.KObj(pod)) } - _, cond := podutil.GetPodCondition(&pod.Status, v1.AlphaNoCompatGuaranteeDisruptionTarget) + _, cond := podutil.GetPodCondition(&pod.Status, v1.DisruptionTarget) if cond != nil { - t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.AlphaNoCompatGuaranteeDisruptionTarget) + t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.DisruptionTarget) } } else { // wait until the pod is deleted diff --git a/test/integration/scheduler/preemption/preemption_test.go b/test/integration/scheduler/preemption/preemption_test.go index fd87b15419a..6adedbb2779 100644 --- a/test/integration/scheduler/preemption/preemption_test.go +++ b/test/integration/scheduler/preemption/preemption_test.go @@ -468,11 +468,11 @@ func TestPreemption(t *testing.T) { if err != nil { t.Errorf("Error %v when getting the updated status for pod %v/%v ", err, p.Namespace, p.Name) } - _, cond := podutil.GetPodCondition(&pod.Status, v1.AlphaNoCompatGuaranteeDisruptionTarget) + _, cond := podutil.GetPodCondition(&pod.Status, v1.DisruptionTarget) if test.enablePodDisruptionConditions == true && cond == nil { - t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(pod), v1.AlphaNoCompatGuaranteeDisruptionTarget) + t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(pod), v1.DisruptionTarget) } else if test.enablePodDisruptionConditions == false && cond != nil { - t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.AlphaNoCompatGuaranteeDisruptionTarget) + t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.DisruptionTarget) } } else { if p.DeletionTimestamp != nil {
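The integration tests above toggle behavior via an enablePodDisruptionConditions field; the gate-flipping pattern they rely on is the one already visible in pod_status_test.go earlier in this diff. A hypothetical skeleton of that pattern (the test name and placeholder body are illustrative):

import (
	"fmt"
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

func TestDisruptionConditionGate(t *testing.T) {
	for _, enabled := range []bool{true, false} {
		t.Run(fmt.Sprintf("PodDisruptionConditions=%v", enabled), func(t *testing.T) {
			// Flip the beta gate for the duration of the subtest; the returned
			// func restores the previous value.
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, enabled)()
			// Exercise eviction/preemption/GC here, then assert: with the gate
			// on, the DisruptionTarget condition should be present; with it
			// off, it should be absent.
		})
	}
}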