Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-05 10:19:50 +00:00
switch feature flag to beta for pod replacement policy and add e2e test
update pod replacement policy feature flag comment and refactor the e2e test for pod replacement policy
minor fixes for pod replacement policy and e2e test
fix wrong assertions for pod replacement policy e2e test
more fixes to pod replacement policy e2e test
refactor PodReplacementPolicy e2e test to use finalizers
fix unit tests when pod replacement policy feature flag is promoted to beta
fix podgc controller unit tests when pod replacement feature is enabled
fix lint issue in pod replacement policy e2e test
assert no error in defer function for removing finalizer in pod replacement policy e2e test
implement test using a sh trap for pod replacement policy
reduce sleep after SIGTERM in pod replacement policy e2e test to 5s
This commit is contained in:
parent 191abe34b8
commit e98c33bfaf
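For context, the policy this commit promotes is opted into on the Job spec itself. Below is a minimal sketch of such a Job, using the batch/v1 types that also appear in the diff; the object name, image, and command are illustrative and not taken from this commit.

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

// failedOnlyJob returns a Job that creates replacement pods only once the
// previous pod has fully failed, never while it is still terminating.
// Requires the JobPodReplacementPolicy feature gate (beta and on by default
// after this commit).
func failedOnlyJob() *batchv1.Job {
	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "pod-recreate-failed-example"},
		Spec: batchv1.JobSpec{
			PodReplacementPolicy: ptr.To(batchv1.Failed),
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{{
						Name:    "main",
						Image:   "busybox",
						Command: []string{"sh", "-c", "sleep 30"},
					}},
				},
			},
		},
	}
}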
@@ -3452,6 +3452,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
wantStatus: batch.JobStatus{
Failed: 1,
Succeeded: 2,
Terminating: ptr.To[int32](0),
CompletedIndexes: "0,1",
FailedIndexes: ptr.To(""),
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
@@ -3483,6 +3484,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
},
wantStatus: batch.JobStatus{
Active: 2,
Terminating: ptr.To[int32](0),
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
FailedIndexes: ptr.To(""),
},
@@ -3509,6 +3511,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
wantStatus: batch.JobStatus{
Active: 2,
Failed: 1,
Terminating: ptr.To[int32](0),
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
FailedIndexes: ptr.To(""),
},
@@ -3535,6 +3538,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
Active: 1,
Failed: 1,
FailedIndexes: ptr.To("0"),
Terminating: ptr.To[int32](0),
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
},
},
@@ -3583,6 +3587,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
Active: 1,
Failed: 1,
FailedIndexes: ptr.To("0"),
Terminating: ptr.To[int32](0),
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
},
},
@@ -3632,6 +3637,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
Active: 0,
Failed: 1,
FailedIndexes: ptr.To(""),
Terminating: ptr.To[int32](0),
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
@@ -3695,6 +3701,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
Active: 2,
Failed: 0,
FailedIndexes: ptr.To(""),
Terminating: ptr.To[int32](0),
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
},
},
@@ -3721,6 +3728,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
Failed: 2,
Succeeded: 0,
FailedIndexes: ptr.To(""),
Terminating: ptr.To[int32](0),
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
@@ -3754,6 +3762,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
wantStatus: batch.JobStatus{
Failed: 1,
Succeeded: 1,
Terminating: ptr.To[int32](0),
FailedIndexes: ptr.To("0"),
CompletedIndexes: "1",
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
@@ -3792,6 +3801,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
wantStatus: batch.JobStatus{
Failed: 3,
Succeeded: 1,
Terminating: ptr.To[int32](0),
FailedIndexes: ptr.To("0,2"),
CompletedIndexes: "1",
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
@@ -3830,6 +3840,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
wantStatus: batch.JobStatus{
Active: 2,
Succeeded: 1,
Terminating: ptr.To[int32](0),
CompletedIndexes: "1",
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
},
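The only change in each hunk above is the added Terminating: ptr.To[int32](0) expectation: with the gate on by default, the job controller now always reports a terminating count, even when it is zero. A hypothetical helper (not part of this commit) illustrates how such an expectation depends on the gate state:

package example

import "k8s.io/utils/ptr"

// expectedTerminating is a hypothetical test helper: when the
// JobPodReplacementPolicy gate is off the controller leaves
// .status.terminating nil, and when it is on the field is reported
// even if the count is zero.
func expectedTerminating(gateEnabled bool, count int32) *int32 {
	if !gateEnabled {
		return nil
	}
	return ptr.To(count)
}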
@@ -295,6 +295,7 @@ func TestGCOrphaned(t *testing.T) {
},
itemsInQueue: 1,
deletedPodNames: sets.NewString("a"),
patchedPodNames: sets.NewString("a"),
},
{
name: "some nodes missing",
@@ -308,6 +309,7 @@ func TestGCOrphaned(t *testing.T) {
},
itemsInQueue: 1,
deletedPodNames: sets.NewString("a", "c", "d"),
patchedPodNames: sets.NewString("d"),
},
{
name: "node added to client after quarantine",
@@ -457,6 +459,7 @@ func TestGCUnscheduledTerminating(t *testing.T) {
{name: "c", phase: v1.PodRunning, deletionTimeStamp: &metav1.Time{}, nodeName: ""},
},
deletedPodNames: sets.NewString("a", "b", "c"),
patchedPodNames: sets.NewString("c"),
},
{
name: "Scheduled pod in any phase must not be deleted",
@@ -607,6 +610,7 @@ func TestGCTerminating(t *testing.T) {
{name: "e6", phase: v1.PodUnknown, nodeName: "worker-5"},
},
deletedPodNames: sets.NewString("b1", "b4", "b5", "b6"),
patchedPodNames: sets.NewString("b1", "b4", "b5", "b6"),
},
{
name: "pods deleted from node tained out-of-service; PodDisruptionConditions enabled",
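The new patchedPodNames sets assert which pods the PodGC controller is expected to patch to a terminal state before deleting them, so that controllers counting terminal pods (such as the job controller with pod replacement policy enabled) still see them as failed. A rough, simplified sketch of that patch-then-delete order follows; the helper name is hypothetical and the plain phase patch stands in for what the real controller does:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/kubernetes"
)

// gcPod is a hypothetical, simplified version of the flow the assertions
// above exercise: non-terminal pods are first patched to a terminal phase,
// then deleted; both steps are recorded so a test can compare against
// patchedPodNames and deletedPodNames.
func gcPod(ctx context.Context, c kubernetes.Interface, pod *v1.Pod, patched, deleted sets.String) error {
	if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
		patch := []byte(`{"status":{"phase":"Failed"}}`)
		if _, err := c.CoreV1().Pods(pod.Namespace).Patch(ctx, pod.Name,
			types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "status"); err != nil {
			return err
		}
		patched.Insert(pod.Name)
	}
	if err := c.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil {
		return err
	}
	deleted.Insert(pod.Name)
	return nil
}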
@@ -389,6 +389,7 @@ const (
// owner: @kannon92
// kep : https://kep.k8s.io/3939
// alpha: v1.28
// beta: v1.29
//
// Allow users to specify recreating pods of a job only when
// pods have fully terminated.
@@ -990,7 +991,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

JobPodFailurePolicy: {Default: true, PreRelease: featuregate.Beta},

JobPodReplacementPolicy: {Default: false, PreRelease: featuregate.Alpha},
JobPodReplacementPolicy: {Default: true, PreRelease: featuregate.Beta},

JobReadyPods: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
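Flipping the default to Beta/true means code paths guarded by the gate now run out of the box. A minimal sketch of the guard pattern; the helper name is hypothetical, while the gate accessor and feature constant are the real in-tree ones:

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

// replacementWaitsForFailure is a hypothetical helper: with the gate enabled
// (now the default) and the Job opting in via spec.podReplacementPolicy:
// Failed, replacements should only be created for pods that have fully
// failed, not for pods that are merely terminating.
func replacementWaitsForFailure(job *batchv1.Job) bool {
	return utilfeature.DefaultFeatureGate.Enabled(features.JobPodReplacementPolicy) &&
		job.Spec.PodReplacementPolicy != nil &&
		*job.Spec.PodReplacementPolicy == batchv1.Failed
}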
@@ -344,6 +344,61 @@ var _ = SIGDescribe("Job", func() {
}
})

ginkgo.It("should recreate pods only after they have failed if pod replacement policy is set to Failed", func(ctx context.Context) {
ginkgo.By("Creating a job")
job := e2ejob.NewTestJob("", "pod-recreate-failed", v1.RestartPolicyNever, 1, 1, nil, 1)
job.Spec.PodReplacementPolicy = ptr.To(batchv1.Failed)
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", `_term(){
sleep 5
exit 143
}
trap _term SIGTERM
while true; do
sleep 1
done`}
job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

err = e2ejob.WaitForJobPodsRunning(ctx, f.ClientSet, f.Namespace.Name, job.Name, 1)
framework.ExpectNoError(err, "failed to wait for job pod to become running in namespace: %s", f.Namespace.Name)

ginkgo.By("Deleting job pod")
pods, err := e2ejob.GetJobPods(ctx, f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to get pod list for job %s in namespace: %s", job.Name, f.Namespace.Name)

framework.ExpectNoError(e2epod.DeletePodsWithGracePeriod(ctx, f.ClientSet, pods.Items, 30), "failed to delete pods in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensuring pod does not get recreated while it is in terminating state")
err = e2ejob.WaitForJobState(ctx, f.ClientSet, f.Namespace.Name, job.Name, f.Timeouts.PodDelete, func(job *batchv1.Job) string {
if job.Status.Active == 0 && job.Status.Failed == 0 && *job.Status.Terminating == 1 {
return ""
} else {
return fmt.Sprintf(
"expected job to have 0 active pod, 0 failed pod and 1 terminating pods, but got %d active pods, %d failed pods and %d terminating pods",
job.Status.Active,
job.Status.Failed,
*job.Status.Terminating,
)
}
})
framework.ExpectNoError(err, "failed to ensure pod is not recreated while it is in terminating state")

ginkgo.By("Ensuring pod gets recreated after it has failed")
err = e2ejob.WaitForJobState(ctx, f.ClientSet, f.Namespace.Name, job.Name, f.Timeouts.PodDelete, func(job *batchv1.Job) string {
if job.Status.Active == 1 && job.Status.Failed == 1 && *job.Status.Terminating == 0 {
return ""
} else {
return fmt.Sprintf(
"expected job to have 1 active pods, 1 failed pods and 0 terminating pod, but got %d active pods, %d failed pods and %d terminating pods",
job.Status.Active,
job.Status.Failed,
*job.Status.Terminating,
)
}
})
framework.ExpectNoError(err, "failed to wait for pod to get recreated")
})

/*
Release: v1.24
Testname: Ensure Pods of an Indexed Job get a unique index.
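The new e2e case keeps the pod in a Terminating state for a few seconds (the sh trap sleeps 5s and exits 143 on SIGTERM) and asserts that no replacement appears until the pod has actually failed. Outside the e2e framework, the same window can be observed with plain client-go by polling the Job status; a rough sketch, with illustrative poll interval and timeout:

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForTerminatingOnly polls until the Job reports exactly one terminating
// pod and no active or failed pods: the window in which a Job with
// podReplacementPolicy: Failed must not create a replacement.
// .status.terminating is only populated while the feature gate is enabled.
func waitForTerminatingOnly(ctx context.Context, c kubernetes.Interface, ns, name string) error {
	return wait.PollUntilContextTimeout(ctx, time.Second, 2*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			job, err := c.BatchV1().Jobs(ns).Get(ctx, name, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			t := job.Status.Terminating
			return job.Status.Active == 0 && job.Status.Failed == 0 && t != nil && *t == 1, nil
		})
}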
@@ -177,6 +177,7 @@ func TestJobPodFailurePolicyWithFailedPodDeletedDuringControllerRestart(t *testi
validateJobPodsStatus(ctx, t, cs, jobObj, podsByStatus{
Active: count,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})

jobPods, err := getJobPods(ctx, t, cs, jobObj, func(s v1.PodStatus) bool {
@@ -454,6 +455,7 @@ func TestJobPodFailurePolicy(t *testing.T) {
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: 1,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})

op := func(p *v1.Pod) bool {
@@ -474,6 +476,7 @@ func TestJobPodFailurePolicy(t *testing.T) {
Active: test.wantActive,
Failed: test.wantFailed,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})

if test.wantJobConditionType == batchv1.JobComplete {
@@ -519,6 +522,7 @@ func TestBackoffLimitPerIndex_DelayedPodDeletion(t *testing.T) {
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: 1,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0), "", ptr.To(""))

@@ -539,6 +543,7 @@ func TestBackoffLimitPerIndex_DelayedPodDeletion(t *testing.T) {
Active: 1,
Failed: 1,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0), "", ptr.To(""))

@@ -563,6 +568,7 @@ func TestBackoffLimitPerIndex_DelayedPodDeletion(t *testing.T) {
Succeeded: 1,
Failed: 1,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateJobSucceeded(ctx, t, clientSet, jobObj)
}
@@ -593,6 +599,7 @@ func TestBackoffLimitPerIndex_Reenabling(t *testing.T) {
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: 3,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1, 2), "", ptr.To(""))

@@ -604,6 +611,7 @@ func TestBackoffLimitPerIndex_Reenabling(t *testing.T) {
Active: 2,
Failed: 1,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(1, 2), "", ptr.To("0"))

@@ -618,6 +626,7 @@ func TestBackoffLimitPerIndex_Reenabling(t *testing.T) {
Active: 3,
Failed: 2,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1, 2), "", nil)

@@ -635,6 +644,7 @@ func TestBackoffLimitPerIndex_Reenabling(t *testing.T) {
Active: 2,
Failed: 3,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1), "", ptr.To("2"))

@@ -678,6 +688,7 @@ func TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff(t *testing.T)
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: 2,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1), "", ptr.To(""))

@@ -689,6 +700,7 @@ func TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff(t *testing.T)
Active: 2,
Failed: 1,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1), "", ptr.To(""))

@@ -700,6 +712,7 @@ func TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff(t *testing.T)
Active: 2,
Failed: 2,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1), "", ptr.To(""))

@@ -711,6 +724,7 @@ func TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff(t *testing.T)
Active: 2,
Failed: 3,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1), "", ptr.To(""))

@@ -723,6 +737,7 @@ func TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff(t *testing.T)
Failed: 3,
Succeeded: 1,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(1), "0", ptr.To(""))

@@ -735,6 +750,7 @@ func TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff(t *testing.T)
Failed: 4,
Succeeded: 1,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(1), "0", ptr.To(""))

@@ -747,6 +763,7 @@ func TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff(t *testing.T)
Failed: 4,
Succeeded: 2,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New[int](), "0,1", ptr.To(""))
validateJobSucceeded(ctx, t, clientSet, jobObj)
@@ -1108,6 +1125,7 @@ func TestBackoffLimitPerIndex(t *testing.T) {
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: int(*test.job.Spec.Parallelism),
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
for _, podTermination := range test.podTerminations {
pod, err := getActivePodForIndex(ctx, clientSet, jobObj, podTermination.index)
@@ -1123,6 +1141,7 @@ func TestBackoffLimitPerIndex(t *testing.T) {
Succeeded: podTermination.wantSucceeded,
Failed: podTermination.wantFailed,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, podTermination.wantActiveIndexes, podTermination.wantCompletedIndexes, podTermination.wantFailedIndexes)
if podTermination.wantReplacementPodFailureCount != nil {
@@ -1189,6 +1208,7 @@ func TestNonParallelJob(t *testing.T) {
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: 1,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})

// Restarting controller.
@@ -1203,6 +1223,7 @@ func TestNonParallelJob(t *testing.T) {
Active: 1,
Failed: 1,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateCounterMetric(ctx, t, metrics.JobPodsFinished, metricLabelsWithValue{
Labels: []string{"NonIndexed", "failed"},
@@ -1222,6 +1243,7 @@ func TestNonParallelJob(t *testing.T) {
Failed: 1,
Succeeded: 1,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateFinishedPodsNoFinalizer(ctx, t, clientSet, jobObj)
validateCounterMetric(ctx, t, metrics.JobFinishedNum, metricLabelsWithValue{
@@ -1253,6 +1275,7 @@ func TestParallelJob(t *testing.T) {
want := podsByStatus{
Active: 5,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
}
validateJobPodsStatus(ctx, t, clientSet, jobObj, want)

@@ -1271,6 +1294,7 @@ func TestParallelJob(t *testing.T) {
Active: 5,
Failed: 2,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
}
validateJobPodsStatus(ctx, t, clientSet, jobObj, want)
// Once one Pod succeeds, no more Pods are created, even if some fail.
@@ -1282,6 +1306,7 @@ func TestParallelJob(t *testing.T) {
Succeeded: 1,
Active: 4,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
}
validateJobPodsStatus(ctx, t, clientSet, jobObj, want)
if err, _ := setJobPodsPhase(ctx, clientSet, jobObj, v1.PodFailed, 2); err != nil {
@@ -1292,6 +1317,7 @@ func TestParallelJob(t *testing.T) {
Succeeded: 1,
Active: 2,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
}
validateJobPodsStatus(ctx, t, clientSet, jobObj, want)
// No more Pods are created after remaining Pods succeed.
@@ -1303,6 +1329,7 @@ func TestParallelJob(t *testing.T) {
Failed: 4,
Succeeded: 3,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
}
validateJobPodsStatus(ctx, t, clientSet, jobObj, want)
validateFinishedPodsNoFinalizer(ctx, t, clientSet, jobObj)
@@ -1339,6 +1366,7 @@ func TestParallelJobChangingParallelism(t *testing.T) {
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: 5,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})

// Reduce parallelism by a number greater than backoffLimit.
@@ -1350,6 +1378,7 @@ func TestParallelJobChangingParallelism(t *testing.T) {
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: 2,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})

// Increase parallelism again.
@@ -1361,6 +1390,7 @@ func TestParallelJobChangingParallelism(t *testing.T) {
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: 4,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})

// Succeed Job
@@ -1371,6 +1401,7 @@ func TestParallelJobChangingParallelism(t *testing.T) {
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Succeeded: 4,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateFinishedPodsNoFinalizer(ctx, t, clientSet, jobObj)
}
@@ -1399,6 +1430,7 @@ func TestParallelJobWithCompletions(t *testing.T) {
want := podsByStatus{
Active: 54,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
}
validateJobPodsStatus(ctx, t, clientSet, jobObj, want)
// Tracks ready pods, if enabled.
@@ -1416,6 +1448,7 @@ func TestParallelJobWithCompletions(t *testing.T) {
Active: 54,
Failed: 2,
Ready: ptr.To[int32](50),
Terminating: ptr.To[int32](0),
}
validateJobPodsStatus(ctx, t, clientSet, jobObj, want)
// Pods are created until the number of succeeded Pods equals completions.
@@ -1427,6 +1460,7 @@ func TestParallelJobWithCompletions(t *testing.T) {
Succeeded: 53,
Active: 3,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
}
validateJobPodsStatus(ctx, t, clientSet, jobObj, want)
// No more Pods are created after the Job completes.
@@ -1438,6 +1472,7 @@ func TestParallelJobWithCompletions(t *testing.T) {
Failed: 2,
Succeeded: 56,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
}
validateJobPodsStatus(ctx, t, clientSet, jobObj, want)
validateFinishedPodsNoFinalizer(ctx, t, clientSet, jobObj)
@@ -1477,6 +1512,7 @@ func TestIndexedJob(t *testing.T) {
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: 3,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1, 2), "", nil)
validateCounterMetric(ctx, t, metrics.JobFinishedIndexesTotal, metricLabelsWithValue{
@@ -1492,6 +1528,7 @@ func TestIndexedJob(t *testing.T) {
Active: 3,
Succeeded: 1,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 2, 3), "1", nil)
validateCounterMetric(ctx, t, metrics.JobFinishedIndexesTotal, metricLabelsWithValue{
@@ -1508,6 +1545,7 @@ func TestIndexedJob(t *testing.T) {
Failed: 1,
Succeeded: 1,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 2, 3), "1", nil)
validateCounterMetric(ctx, t, metrics.JobFinishedIndexesTotal, metricLabelsWithValue{
@@ -1524,6 +1562,7 @@ func TestIndexedJob(t *testing.T) {
Failed: 1,
Succeeded: 4,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, nil, "0-3", nil)
validateJobSucceeded(ctx, t, clientSet, jobObj)
@@ -2032,6 +2071,7 @@ func TestElasticIndexedJob(t *testing.T) {
Succeeded: len(update.succeedIndexes),
Failed: update.wantFailed,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, update.wantRemainingIndexes, update.wantSucceededIndexes, nil)
}
@@ -2149,6 +2189,7 @@ func TestOrphanPodsFinalizersClearedWithGC(t *testing.T) {
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: 2,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})

// Delete Job. The GC should delete the pods in cascade.
@@ -2224,6 +2265,7 @@ func TestJobPodsCreatedWithExponentialBackoff(t *testing.T) {
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: 1,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})

// Fail the first pod
@@ -2234,6 +2276,7 @@ func TestJobPodsCreatedWithExponentialBackoff(t *testing.T) {
Active: 1,
Ready: ptr.To[int32](0),
Failed: 1,
Terminating: ptr.To[int32](0),
})

// Fail the second pod
@@ -2244,6 +2287,7 @@ func TestJobPodsCreatedWithExponentialBackoff(t *testing.T) {
Active: 1,
Ready: ptr.To[int32](0),
Failed: 2,
Terminating: ptr.To[int32](0),
})

jobPods, err := getJobPods(ctx, t, clientSet, jobObj, func(ps v1.PodStatus) bool { return true })
@@ -2322,6 +2366,7 @@ func TestJobFailedWithInterrupts(t *testing.T) {
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: 10,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
t.Log("Finishing pods")
if err, _ := setJobPodsPhase(ctx, clientSet, jobObj, v1.PodFailed, 1); err != nil {
@@ -2387,6 +2432,7 @@ func TestOrphanPodsFinalizersClearedOnRestart(t *testing.T) {
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: 1,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})

// Step 2: Delete the Job while the controller is stopped.
@@ -2455,6 +2501,7 @@ func TestSuspendJob(t *testing.T) {
validateJobPodsStatus(ctx, t, clientSet, job, podsByStatus{
Active: active,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
job, err = clientSet.BatchV1().Jobs(ns.Name).Get(ctx, job.Name, metav1.GetOptions{})
if err != nil {
@@ -2498,6 +2545,7 @@ func TestSuspendJobControllerRestart(t *testing.T) {
validateJobPodsStatus(ctx, t, clientSet, job, podsByStatus{
Active: 0,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
}
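As in the unit tests, every integration-test expectation gains a Terminating field next to Ready. The test-local podsByStatus aggregate (approximate shape, reconstructed from the call sites above rather than copied from the file) pairs plain counters with pointer-typed fields so that "not reported" (gate off, nil) stays distinguishable from "reported as zero":

package example

// podsByStatus approximates the shape used by validateJobPodsStatus in the
// hunks above; the pointer fields (Ready, Terminating) are nil when the
// corresponding feature does not report the value, and ptr.To[int32](0)
// when it reports zero.
type podsByStatus struct {
	Active      int
	Ready       *int32
	Succeeded   int
	Failed      int
	Terminating *int32
}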