Merge pull request #130536 from tenzen-y/promote-successpolicy-to-ga
KEP-3998: Promote JobSuccessPolicy to Stable
Commit 04fb7ac18b
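For context, the sketch below is not part of this diff (the job name, namespace-free metadata, and image are illustrative); it shows what the now-stable field looks like in practice: an Indexed Job whose successPolicy declares the whole Job succeeded once completion index 0 succeeds, after which the remaining pods are terminated. With this promotion the field works on a default 1.33+ control plane without touching the `JobSuccessPolicy` feature gate.

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

// exampleJob builds an Indexed Job that is declared succeeded as soon as
// completion index 0 succeeds (a successPolicy rule), even if other indexes
// are still running.
func exampleJob() *batchv1.Job {
	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "leader-elected-job"},
		Spec: batchv1.JobSpec{
			CompletionMode: ptr.To(batchv1.IndexedCompletion),
			Completions:    ptr.To[int32](3),
			Parallelism:    ptr.To[int32](3),
			SuccessPolicy: &batchv1.SuccessPolicy{
				Rules: []batchv1.SuccessPolicyRule{
					{SucceededIndexes: ptr.To("0")},
				},
			},
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{
						{Name: "main", Image: "busybox", Command: []string{"sh", "-c", "exit 0"}},
					},
				},
			},
		},
	}
}

func main() {
	fmt.Println(exampleJob().Name)
}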
api/openapi-spec/swagger.json (generated)
@@ -4695,7 +4695,7 @@
         },
         "successPolicy": {
           "$ref": "#/definitions/io.k8s.api.batch.v1.SuccessPolicy",
-          "description": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default)."
+          "description": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated."
         },
         "suspend": {
           "description": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.",
api/openapi-spec/v3/apis__batch__v1_openapi.json (generated)
@@ -388,7 +388,7 @@
               "$ref": "#/components/schemas/io.k8s.api.batch.v1.SuccessPolicy"
             }
           ],
-          "description": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default)."
+          "description": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated."
         },
         "suspend": {
           "description": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.",
@@ -336,8 +336,6 @@ type JobSpec struct {
 	// When the field is specified, it must be immutable and works only for the Indexed Jobs.
 	// Once the Job meets the SuccessPolicy, the lingering pods are terminated.
 	//
-	// This field is beta-level. To use this field, you must enable the
-	// `JobSuccessPolicy` feature gate (enabled by default).
 	// +optional
 	SuccessPolicy *SuccessPolicy
@@ -1253,6 +1253,9 @@ func TestControllerSyncJob(t *testing.T) {
 			if tc.podIndexLabelDisabled {
 				// TODO: this will be removed in 1.35 when 1.31 will fall out of support matrix
 				featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, feature.DefaultFeatureGate, utilversion.MustParse("1.31"))
+			} else if !tc.jobSuccessPolicy {
+				// TODO: this will be removed in 1.36.
+				featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, feature.DefaultFeatureGate, utilversion.MustParse("1.32"))
 			}
 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodIndexLabel, !tc.podIndexLabelDisabled)
 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobPodReplacementPolicy, tc.jobPodReplacementPolicy)
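The pattern added in this and the following test hunks deserves a note: once the gate is GA and locked to its default at the current version, SetFeatureGateDuringTest can no longer force it off, so cases that still cover the disabled path first lower the emulated version. A condensed, hedged sketch of that pattern (the helper name and the boolean parameter stand in for the per-case fields used in the actual tests):

package jobtest

import (
	"testing"

	utilversion "k8s.io/apimachinery/pkg/util/version"
	"k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

// setUpSuccessPolicyGate disables JobSuccessPolicy for a test case by first
// emulating a release (1.32) where the gate is not yet locked to default;
// with the 1.33 GA+LockToDefault entry the override would otherwise fail.
// Per the TODOs in the diff, this workaround is expected to go away in 1.36.
func setUpSuccessPolicyGate(t *testing.T, enabled bool) {
	if !enabled {
		featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, feature.DefaultFeatureGate, utilversion.MustParse("1.32"))
	}
	featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobSuccessPolicy, enabled)
}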
@@ -2437,7 +2440,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 	}
 	for name, tc := range cases {
 		t.Run(name, func(t *testing.T) {
-			if !tc.enableJobBackoffLimitPerIndex {
+			if !tc.enableJobBackoffLimitPerIndex || !tc.enableJobSuccessPolicy {
 				// TODO: this will be removed in 1.36
 				featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, feature.DefaultFeatureGate, utilversion.MustParse("1.32"))
 			}
@@ -5175,7 +5178,7 @@ func TestSyncJobWithJobSuccessPolicy(t *testing.T) {
 	}
 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
-			if !tc.enableBackoffLimitPerIndex {
+			if !tc.enableBackoffLimitPerIndex || !tc.enableJobSuccessPolicy {
 				// TODO: this will be removed in 1.36
 				featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, feature.DefaultFeatureGate, utilversion.MustParse("1.32"))
 			}
@@ -21,6 +21,7 @@ import (

 	"github.com/google/go-cmp/cmp"
 	batch "k8s.io/api/batch/v1"
+	utilversion "k8s.io/apimachinery/pkg/util/version"
 	"k8s.io/apiserver/pkg/util/feature"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/klog/v2/ktesting"
@@ -391,6 +392,10 @@ func TestMatchSuccessPolicy(t *testing.T) {
 	}
 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
+			if !tc.enableJobSuccessPolicy {
+				// TODO: this will be removed in 1.36.
+				featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, feature.DefaultFeatureGate, utilversion.MustParse("1.32"))
+			}
 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobSuccessPolicy, tc.enableJobSuccessPolicy)
 			logger := ktesting.NewLogger(t,
 				ktesting.NewConfig(
@@ -424,6 +424,7 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
 	JobSuccessPolicy: {
 		{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
 		{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
+		{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.36
 	},

 	KubeletCgroupDriverFromCRI: {
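With the new 1.33 entry, the gate is both enabled by default and locked, so it reads as on at the default emulation version and attempts to set it to false are rejected; callers keep consulting it the usual way. A minimal sketch, not from this diff:

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

func main() {
	// On a default 1.33+ control plane this prints true; because the spec is
	// LockToDefault, the gate can only be flipped off by emulating an older
	// minor version.
	fmt.Println(feature.DefaultFeatureGate.Enabled(features.JobSuccessPolicy))
}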
pkg/generated/openapi/zz_generated.openapi.go (generated)
@@ -17724,7 +17724,7 @@ func schema_k8sio_api_batch_v1_JobSpec(ref common.ReferenceCallback) common.Open
 			},
 			"successPolicy": {
 				SchemaProps: spec.SchemaProps{
-					Description: "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default).",
+					Description: "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.",
 					Ref:         ref("k8s.io/api/batch/v1.SuccessPolicy"),
 				},
 			},
@@ -513,7 +513,7 @@ func TestJobStrategy_PrepareForUpdate(t *testing.T) {

 	for name, tc := range cases {
 		t.Run(name, func(t *testing.T) {
-			if !tc.enableJobBackoffLimitPerIndex {
+			if !tc.enableJobBackoffLimitPerIndex || !tc.enableJobSuccessPolicy {
 				// TODO: this will be removed in 1.36
 				featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, utilfeature.DefaultFeatureGate, utilversion.MustParse("1.32"))
 			}
@@ -898,7 +898,7 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) {

 	for name, tc := range cases {
 		t.Run(name, func(t *testing.T) {
-			if !tc.enableJobBackoffLimitPerIndex {
+			if !tc.enableJobBackoffLimitPerIndex || !tc.enableJobSuccessPolicy {
 				// TODO: this will be removed in 1.36
 				featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, utilfeature.DefaultFeatureGate, utilversion.MustParse("1.32"))
 			}
@@ -3538,6 +3538,10 @@ func TestStatusStrategy_ValidateUpdate(t *testing.T) {
 	}
 	for name, tc := range cases {
 		t.Run(name, func(t *testing.T) {
+			if !tc.enableJobSuccessPolicy {
+				// TODO: this will be removed in 1.36.
+				featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, utilfeature.DefaultFeatureGate, utilversion.MustParse("1.32"))
+			}
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobManagedBy, tc.enableJobManagedBy)
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobSuccessPolicy, tc.enableJobSuccessPolicy)
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodReplacementPolicy, tc.enableJobPodReplacementPolicy)
@@ -222,8 +222,6 @@ message JobSpec {
   // When the field is specified, it must be immutable and works only for the Indexed Jobs.
   // Once the Job meets the SuccessPolicy, the lingering pods are terminated.
   //
-  // This field is beta-level. To use this field, you must enable the
-  // `JobSuccessPolicy` feature gate (enabled by default).
   // +optional
   optional SuccessPolicy successPolicy = 16;
@@ -343,8 +343,6 @@ type JobSpec struct {
 	// When the field is specified, it must be immutable and works only for the Indexed Jobs.
 	// Once the Job meets the SuccessPolicy, the lingering pods are terminated.
 	//
-	// This field is beta-level. To use this field, you must enable the
-	// `JobSuccessPolicy` feature gate (enabled by default).
 	// +optional
 	SuccessPolicy *SuccessPolicy `json:"successPolicy,omitempty" protobuf:"bytes,16,opt,name=successPolicy"`
@@ -638,13 +636,9 @@ const (
 	JobReasonFailedIndexes string = "FailedIndexes"
 	// JobReasonSuccessPolicy reason indicates a SuccessCriteriaMet condition is added due to
 	// a Job met successPolicy.
 	// https://kep.k8s.io/3998
-	// This is currently a beta field.
 	JobReasonSuccessPolicy string = "SuccessPolicy"
 	// JobReasonCompletionsReached reason indicates a SuccessCriteriaMet condition is added due to
 	// a number of succeeded Job pods met completions.
 	// - https://kep.k8s.io/3998
-	// This is currently a beta field.
 	JobReasonCompletionsReached string = "CompletionsReached"
 )
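These reasons surface on Job conditions, which is how a client distinguishes a Job that finished early via its success policy from one that simply reached its completions. A hedged sketch of that check, not from this diff (the helper name is illustrative):

package jobstatus

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// finishedBySuccessPolicy reports whether the Job gained a SuccessCriteriaMet
// condition with reason "SuccessPolicy", i.e. a successPolicy rule matched
// before all completions were reached.
func finishedBySuccessPolicy(job *batchv1.Job) bool {
	for _, cond := range job.Status.Conditions {
		if cond.Type == batchv1.JobSuccessCriteriaMet &&
			cond.Status == corev1.ConditionTrue &&
			cond.Reason == batchv1.JobReasonSuccessPolicy {
			return true
		}
	}
	return false
}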
@@ -116,7 +116,7 @@ var map_JobSpec = map[string]string{
 	"completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
 	"activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.",
 	"podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.",
-	"successPolicy": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default).",
+	"successPolicy": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.",
 	"backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6",
 	"backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.",
 	"maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.",
@@ -85,7 +85,7 @@ var _ = SIGDescribe("Job", func() {
 		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

 		ginkgo.By("Ensuring job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonCompletionsReached), completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobReasonCompletionsReached, completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)

 		ginkgo.By("Ensuring pods for job exist")
@@ -182,7 +182,7 @@ var _ = SIGDescribe("Job", func() {
 		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

 		ginkgo.By("Ensuring job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, nil, completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobReasonCompletionsReached, completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 	})
@@ -260,7 +260,7 @@ var _ = SIGDescribe("Job", func() {
 		})

 		ginkgo.By("Ensuring job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, nil, completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobReasonCompletionsReached, completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 	})
@@ -292,7 +292,7 @@ var _ = SIGDescribe("Job", func() {
 		framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name)

 		ginkgo.By("Waiting for job to complete")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonCompletionsReached), completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobReasonCompletionsReached, completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 	})
@@ -417,7 +417,7 @@ done`}
 		framework.ExpectNoError(err, "failed to create indexed job in namespace %s", f.Namespace.Name)

 		ginkgo.By("Ensuring job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, nil, completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobReasonCompletionsReached, completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)

 		ginkgo.By("Ensuring pods with index for job exist")
@@ -458,7 +458,7 @@ done`}
 		framework.ExpectNoError(err, "failed to create indexed job in namespace %s", f.Namespace.Name)

 		ginkgo.By("Ensuring job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, nil, completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobReasonCompletionsReached, completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)

 		ginkgo.By("Ensuring all pods have the required index labels")
@@ -502,7 +502,7 @@ done`}
 		framework.ExpectNoError(err, "failed to ensure that job has SuccessCriteriaMet with SuccessPolicy reason condition")

 		ginkgo.By("Ensure that the job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonSuccessPolicy), completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobReasonSuccessPolicy, completions)
 		framework.ExpectNoError(err, "failed to ensure that job completed")

 		ginkgo.By("Verifying that the job status to ensure correct final state")
@@ -541,7 +541,7 @@ done`}
 		framework.ExpectNoError(err, "failed to ensure that job has SuccessCriteriaMet with SuccessPolicy reason condition")

 		ginkgo.By("Ensure that the job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonSuccessPolicy), 1)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobReasonSuccessPolicy, 1)
 		framework.ExpectNoError(err, "failed to ensure that job completed")

 		ginkgo.By("Verifying that the only appropriately index succeeded")
@@ -580,7 +580,7 @@ done`}
 		framework.ExpectNoError(err, "failed to ensure that the job has SuccessCriteriaMet condition with SuccessPolicy rule")

 		ginkgo.By("Ensure that the job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonSuccessPolicy), 1)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobReasonSuccessPolicy, 1)
 		framework.ExpectNoError(err, "failed to ensure that job completed")

 		ginkgo.By("Verifying that the job status to ensure correct final state")
@@ -809,7 +809,7 @@ done`}
 		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

 		ginkgo.By("Ensuring job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, nil, completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobReasonCompletionsReached, completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 	})
@@ -840,7 +840,7 @@ done`}
 		framework.ExpectNoError(err, "failed to ensure job has the interim success condition: %s", f.Namespace.Name)

 		ginkgo.By("Ensuring job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonCompletionsReached), *job.Spec.Completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobReasonCompletionsReached, *job.Spec.Completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)

 		ginkgo.By("Verifying the Job status fields to ensure correct final state")
@@ -1035,7 +1035,7 @@ done`}
 		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

 		ginkgo.By("Ensuring job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonCompletionsReached), completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobReasonCompletionsReached, completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)

 		ginkgo.By("Ensuring pods for job exist")
@@ -1241,7 +1241,7 @@ done`}
 		framework.Logf("Job: %v as labels: %v", testJob.Name, testJob.Labels)

 		ginkgo.By("Waiting for job to complete")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, ns, jobName, nil, completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, ns, jobName, batchv1.JobReasonCompletionsReached, completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", ns)

 		ginkgo.By("Delete a job collection with a labelselector")
@@ -87,12 +87,7 @@ func waitForJobPodsInPhase(ctx context.Context, c clientset.Interface, ns, jobNa
 // WaitForJobComplete uses c to wait for completions to complete for the Job jobName in namespace ns.
 // This function checks if the number of succeeded Job Pods reached expected completions and
 // the Job has a "Complete" condition with the expected reason.
-// The pointer "reason" argument allows us to skip "Complete" condition reason verifications.
-// The conformance test cases have the different expected "Complete" condition reason ("CompletionsReached" vs "")
-// between conformance CI jobs and e2e CI jobs since the e2e conformance test cases are performed in
-// both conformance CI jobs with GA-only features and e2e CI jobs with all default-enabled features.
-// So, we need to skip "Complete" condition reason verifications in the e2e conformance test cases.
-func WaitForJobComplete(ctx context.Context, c clientset.Interface, ns, jobName string, reason *string, completions int32) error {
+func WaitForJobComplete(ctx context.Context, c clientset.Interface, ns, jobName string, reason string, completions int32) error {
 	// This function is called by HandleRetry, which will retry
 	// on transient API errors or stop polling in the case of other errors.
 	get := func(ctx context.Context) (*batchv1.Job, error) {
@@ -121,7 +116,7 @@ func WaitForJobComplete(ctx context.Context, c clientset.Interface, ns, jobName
 	if err != nil {
 		return err
 	}
-	return WaitForJobCondition(ctx, c, ns, jobName, batchv1.JobComplete, reason)
+	return WaitForJobCondition(ctx, c, ns, jobName, batchv1.JobComplete, &reason)
 }

 // WaitForJobReady waits for particular value of the Job .status.ready field
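The conformance-versus-e2e discrepancy described in the removed comment disappears once the feature is GA, so the helper now always verifies the "Complete" condition reason and takes it as a plain string. A hedged sketch of a call site under the new signature, not from this diff (the namespace and job names are illustrative):

package e2eexample

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
)

// waitForTwoCompletions blocks until the Job "sample-job" has two succeeded
// pods and a Complete condition whose reason is "CompletionsReached"; the old
// *string parameter (and its nil "skip the reason check" escape hatch) is gone.
func waitForTwoCompletions(ctx context.Context, c clientset.Interface) error {
	return e2ejob.WaitForJobComplete(ctx, c, "e2e-jobs", "sample-job", batchv1.JobReasonCompletionsReached, 2)
}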
@@ -600,6 +600,10 @@
     lockToDefault: false
     preRelease: Beta
     version: "1.31"
+  - default: true
+    lockToDefault: true
+    preRelease: GA
+    version: "1.33"
 - name: KMSv1
   versionedSpecs:
   - default: true
@@ -824,11 +824,11 @@ func TestSuccessPolicy(t *testing.T) {
 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
 			resetMetrics()
-			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobSuccessPolicy, tc.enableJobSuccessPolicy)
-			if !tc.enableBackoffLimitPerIndex {
+			if !tc.enableBackoffLimitPerIndex || !tc.enableJobSuccessPolicy {
 				// TODO: this will be removed in 1.36
 				featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, feature.DefaultFeatureGate, utilversion.MustParse("1.32"))
 			}
+			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobSuccessPolicy, tc.enableJobSuccessPolicy)
 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobBackoffLimitPerIndex, tc.enableBackoffLimitPerIndex)

 			ctx, cancel := startJobControllerAndWaitForCaches(t, restConfig)
@@ -875,6 +875,7 @@ func TestSuccessPolicy(t *testing.T) {
 // TestSuccessPolicy_ReEnabling tests handling of pod successful when
 // re-enabling the JobSuccessPolicy feature.
 func TestSuccessPolicy_ReEnabling(t *testing.T) {
+	featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, feature.DefaultFeatureGate, utilversion.MustParse("1.32"))
 	featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobSuccessPolicy, true)
 	closeFn, resetConfig, clientSet, ns := setup(t, "success-policy-re-enabling")
 	t.Cleanup(closeFn)
@@ -1582,6 +1583,10 @@ func TestDelayTerminalPhaseCondition(t *testing.T) {
 	for name, test := range testCases {
 		t.Run(name, func(t *testing.T) {
 			resetMetrics()
+			if !test.enableJobSuccessPolicy {
+				// TODO: this will be removed in 1.36.
+				featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, feature.DefaultFeatureGate, utilversion.MustParse("1.32"))
+			}
 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobPodReplacementPolicy, test.enableJobPodReplacementPolicy)
 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobManagedBy, test.enableJobManagedBy)
 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.ElasticIndexedJob, true)