diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 9844f75a2ee..147e5111776 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -3756,6 +3756,11 @@ "format": "int32", "type": "integer" }, + "backoffLimitPerIndex": { + "description": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "format": "int32", + "type": "integer" + }, "completionMode": { "description": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", "type": "string" @@ -3769,6 +3774,11 @@ "description": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", "type": "boolean" }, + "maxFailedIndexes": { + "description": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "format": "int32", + "type": "integer" + }, "parallelism": { "description": "Specifies the maximum desired number of pods the job should run at any given time. 
The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "format": "int32", @@ -3832,6 +3842,10 @@ "format": "int32", "type": "integer" }, + "failedIndexes": { + "description": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "type": "string" + }, "ready": { "description": "The number of pods which have a Ready condition.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobReadyPods is enabled (enabled by default).", "format": "int32", @@ -3933,7 +3947,7 @@ "description": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.", "properties": { "action": { - "description": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", + "description": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is alpha-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", "type": "string" }, "onExitCodes": { diff --git a/api/openapi-spec/v3/apis__batch__v1_openapi.json b/api/openapi-spec/v3/apis__batch__v1_openapi.json index d55a73fc7d1..2ff99a5bbbb 100644 --- a/api/openapi-spec/v3/apis__batch__v1_openapi.json +++ b/api/openapi-spec/v3/apis__batch__v1_openapi.json @@ -332,6 +332,11 @@ "format": "int32", "type": "integer" }, + "backoffLimitPerIndex": { + "description": "Specifies the limit for the number of retries within an index before marking this index as failed. 
When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "format": "int32", + "type": "integer" + }, "completionMode": { "description": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", "type": "string" @@ -345,6 +350,11 @@ "description": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", "type": "boolean" }, + "maxFailedIndexes": { + "description": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "format": "int32", + "type": "integer" + }, "parallelism": { "description": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "format": "int32", @@ -430,6 +440,10 @@ "format": "int32", "type": "integer" }, + "failedIndexes": { + "description": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "type": "string" + }, "ready": { "description": "The number of pods which have a Ready condition.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobReadyPods is enabled (enabled by default).", "format": "int32", @@ -559,7 +573,7 @@ "properties": { "action": { "default": "", - "description": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", + "description": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is alpha-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", "type": "string" }, "onExitCodes": { diff --git a/pkg/apis/batch/fuzzer/fuzzer.go b/pkg/apis/batch/fuzzer/fuzzer.go index c8ff43fa2ec..1bae3fc833d 100644 --- a/pkg/apis/batch/fuzzer/fuzzer.go +++ b/pkg/apis/batch/fuzzer/fuzzer.go @@ -17,6 +17,8 @@ limitations under the License. 
package fuzzer import ( + "math" + fuzz "github.com/google/gofuzz" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/kubernetes/pkg/apis/batch" @@ -51,6 +53,11 @@ var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} { mode := batch.NonIndexedCompletion if c.RandBool() { mode = batch.IndexedCompletion + j.BackoffLimitPerIndex = pointer.Int32(c.Rand.Int31()) + j.MaxFailedIndexes = pointer.Int32(c.Rand.Int31()) + } + if c.RandBool() { + j.BackoffLimit = pointer.Int32(math.MaxInt32) } j.CompletionMode = &mode // We're fuzzing the internal JobSpec type, not the v1 type, so we don't diff --git a/pkg/apis/batch/types.go b/pkg/apis/batch/types.go index fe5b451c6a8..e1a6c8ff675 100644 --- a/pkg/apis/batch/types.go +++ b/pkg/apis/batch/types.go @@ -44,6 +44,9 @@ const ( JobNameLabel = labelPrefix + LegacyJobNameLabel // Controller UID is used for selectors and labels for jobs ControllerUidLabel = labelPrefix + LegacyControllerUidLabel + // Annotation indicating the number of failures for the index corresponding + // to the pod. + JobIndexFailureCountAnnotation = labelPrefix + "job-index-failure-count" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -119,6 +122,12 @@ const ( // pod's job as Failed and terminate all running pods. PodFailurePolicyActionFailJob PodFailurePolicyAction = "FailJob" + // This is an action which might be taken on a pod failure - mark the + // Job's index as failed to avoid restarts within this index. This action + // can only be used when backoffLimitPerIndex is set. + // This value is alpha-level. + PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex" + // This is an action which might be taken on a pod failure - the counter towards // .backoffLimit, represented by the job's .status.failed field, is not // incremented and a replacement pod is created. @@ -195,6 +204,10 @@ type PodFailurePolicyRule struct { // // - FailJob: indicates that the pod's job is marked as Failed and all // running pods are terminated. + // - FailIndex: indicates that the pod's index is marked as Failed and will + // not be restarted. + // This value is alpha-level. It can be used when the + // `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). // - Ignore: indicates that the counter towards the .backoffLimit is not // incremented and a replacement pod is created. // - Count: indicates that the pod is handled in the default way - the @@ -269,6 +282,30 @@ type JobSpec struct { // +optional BackoffLimit *int32 + // Specifies the limit for the number of retries within an + // index before marking this index as failed. When enabled the number of + // failures per index is kept in the pod's + // batch.kubernetes.io/job-index-failure-count annotation. It can only + // be set when Job's completionMode=Indexed, and the Pod's restart + // policy is Never. The field is immutable. + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). + // +optional + BackoffLimitPerIndex *int32 + + // Specifies the maximal number of failed indexes before marking the Job as + // failed, when backoffLimitPerIndex is set. Once the number of failed + // indexes exceeds this number the entire Job is marked as Failed and its + // execution is terminated. When left as null the job continues execution of + // all of its indexes and is marked with the `Complete` Job condition. + // It can only be specified when backoffLimitPerIndex is set. 
+ // It can be null or up to completions. It is required and must be + // less than or equal to 10^4 when completions is greater than 10^5. + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). + // +optional + MaxFailedIndexes *int32 + // TODO enabled it when https://github.com/kubernetes/kubernetes/issues/28486 has been fixed // Optional number of failed pods to retain. // +optional @@ -397,6 +434,19 @@ type JobStatus struct { // +optional CompletedIndexes string + // FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. + // The indexes are represented in the text format analogous to the + // `completedIndexes` field, i.e. they are kept as decimal integers + // separated by commas. The numbers are listed in increasing order. Three or + // more consecutive numbers are compressed and represented by the first and + // last element of the series, separated by a hyphen. + // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are + // represented as "1,3-5,7". + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). + // +optional + FailedIndexes *string + // uncountedTerminatedPods holds the UIDs of Pods that have terminated but // the job controller hasn't yet accounted for in the status counters. // diff --git a/pkg/apis/batch/v1/defaults.go b/pkg/apis/batch/v1/defaults.go index 622f6f12397..737d898f266 100644 --- a/pkg/apis/batch/v1/defaults.go +++ b/pkg/apis/batch/v1/defaults.go @@ -17,6 +17,8 @@ limitations under the License. package v1 import ( + "math" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -38,7 +40,11 @@ func SetDefaults_Job(obj *batchv1.Job) { obj.Spec.Parallelism = utilpointer.Int32(1) } if obj.Spec.BackoffLimit == nil { - obj.Spec.BackoffLimit = utilpointer.Int32(6) + if obj.Spec.BackoffLimitPerIndex != nil { + obj.Spec.BackoffLimit = utilpointer.Int32(math.MaxInt32) + } else { + obj.Spec.BackoffLimit = utilpointer.Int32(6) + } } labels := obj.Spec.Template.Labels if labels != nil && len(obj.Labels) == 0 { diff --git a/pkg/apis/batch/v1/defaults_test.go b/pkg/apis/batch/v1/defaults_test.go index fc1cd4d8c0e..dbe81ef8f61 100644 --- a/pkg/apis/batch/v1/defaults_test.go +++ b/pkg/apis/batch/v1/defaults_test.go @@ -17,6 +17,7 @@ limitations under the License.
package v1_test import ( + "math" "reflect" "testing" @@ -35,6 +36,9 @@ import ( func TestSetDefaultJob(t *testing.T) { defaultLabels := map[string]string{"default": "default"} + validPodTemplateSpec := v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels}, + } tests := map[string]struct { original *batchv1.Job expected *batchv1.Job @@ -339,6 +343,55 @@ func TestSetDefaultJob(t *testing.T) { }, expectLabels: true, }, + "BackoffLimitPerIndex specified, but no BackoffLimit -> default BackoffLimit to max int32": { + original: &batchv1.Job{ + Spec: batchv1.JobSpec{ + Completions: pointer.Int32(11), + Parallelism: pointer.Int32(10), + BackoffLimitPerIndex: pointer.Int32(1), + CompletionMode: completionModePtr(batchv1.IndexedCompletion), + Template: validPodTemplateSpec, + Suspend: pointer.Bool(true), + }, + }, + expected: &batchv1.Job{ + Spec: batchv1.JobSpec{ + Completions: pointer.Int32(11), + Parallelism: pointer.Int32(10), + BackoffLimit: pointer.Int32(math.MaxInt32), + BackoffLimitPerIndex: pointer.Int32(1), + CompletionMode: completionModePtr(batchv1.IndexedCompletion), + Template: validPodTemplateSpec, + Suspend: pointer.Bool(true), + }, + }, + expectLabels: true, + }, + "BackoffLimitPerIndex and BackoffLimit specified -> no change": { + original: &batchv1.Job{ + Spec: batchv1.JobSpec{ + Completions: pointer.Int32(11), + Parallelism: pointer.Int32(10), + BackoffLimit: pointer.Int32(3), + BackoffLimitPerIndex: pointer.Int32(1), + CompletionMode: completionModePtr(batchv1.IndexedCompletion), + Template: validPodTemplateSpec, + Suspend: pointer.Bool(true), + }, + }, + expected: &batchv1.Job{ + Spec: batchv1.JobSpec{ + Completions: pointer.Int32(11), + Parallelism: pointer.Int32(10), + BackoffLimit: pointer.Int32(3), + BackoffLimitPerIndex: pointer.Int32(1), + CompletionMode: completionModePtr(batchv1.IndexedCompletion), + Template: validPodTemplateSpec, + Suspend: pointer.Bool(true), + }, + }, + expectLabels: true, + }, } for name, test := range tests { diff --git a/pkg/apis/batch/v1/zz_generated.conversion.go b/pkg/apis/batch/v1/zz_generated.conversion.go index a7c425fc500..fbdaa0d800d 100644 --- a/pkg/apis/batch/v1/zz_generated.conversion.go +++ b/pkg/apis/batch/v1/zz_generated.conversion.go @@ -441,6 +441,8 @@ func autoConvert_v1_JobSpec_To_batch_JobSpec(in *v1.JobSpec, out *batch.JobSpec, out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) out.PodFailurePolicy = (*batch.PodFailurePolicy)(unsafe.Pointer(in.PodFailurePolicy)) out.BackoffLimit = (*int32)(unsafe.Pointer(in.BackoffLimit)) + out.BackoffLimitPerIndex = (*int32)(unsafe.Pointer(in.BackoffLimitPerIndex)) + out.MaxFailedIndexes = (*int32)(unsafe.Pointer(in.MaxFailedIndexes)) out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) out.ManualSelector = (*bool)(unsafe.Pointer(in.ManualSelector)) if err := apiscorev1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { @@ -458,6 +460,8 @@ func autoConvert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *v1.JobSpec, out.PodFailurePolicy = (*v1.PodFailurePolicy)(unsafe.Pointer(in.PodFailurePolicy)) out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) out.BackoffLimit = (*int32)(unsafe.Pointer(in.BackoffLimit)) + out.BackoffLimitPerIndex = (*int32)(unsafe.Pointer(in.BackoffLimitPerIndex)) + out.MaxFailedIndexes = (*int32)(unsafe.Pointer(in.MaxFailedIndexes)) out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) out.ManualSelector = 
(*bool)(unsafe.Pointer(in.ManualSelector)) if err := apiscorev1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { @@ -477,6 +481,7 @@ func autoConvert_v1_JobStatus_To_batch_JobStatus(in *v1.JobStatus, out *batch.Jo out.Succeeded = in.Succeeded out.Failed = in.Failed out.CompletedIndexes = in.CompletedIndexes + out.FailedIndexes = (*string)(unsafe.Pointer(in.FailedIndexes)) out.UncountedTerminatedPods = (*batch.UncountedTerminatedPods)(unsafe.Pointer(in.UncountedTerminatedPods)) out.Ready = (*int32)(unsafe.Pointer(in.Ready)) return nil @@ -496,6 +501,7 @@ func autoConvert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *v1.Jo out.Succeeded = in.Succeeded out.Failed = in.Failed out.CompletedIndexes = in.CompletedIndexes + out.FailedIndexes = (*string)(unsafe.Pointer(in.FailedIndexes)) out.UncountedTerminatedPods = (*v1.UncountedTerminatedPods)(unsafe.Pointer(in.UncountedTerminatedPods)) return nil } diff --git a/pkg/apis/batch/validation/validation.go b/pkg/apis/batch/validation/validation.go index d67c1c7e348..fdb365754fb 100644 --- a/pkg/apis/batch/validation/validation.go +++ b/pkg/apis/batch/validation/validation.go @@ -42,7 +42,16 @@ import ( // .status.completedIndexes. const maxParallelismForIndexedJob = 100000 +// maxFailedIndexesForIndexedJob is the maximum number of failed indexes that +// an Indexed Job is allowed to have. This threshold allows to cap the length of +// .status.completedIndexes and .status.failedIndexes. +const maxFailedIndexesForIndexedJob = 100_000 + const ( + completionsSoftLimit = 100_000 + parallelismLimitForHighCompletions = 10_000 + maxFailedIndexesLimitForHighCompletions = 10_000 + // maximum number of rules in pod failure policy maxPodFailurePolicyRules = 20 @@ -56,6 +65,7 @@ const ( var ( supportedPodFailurePolicyActions = sets.New( string(batch.PodFailurePolicyActionCount), + string(batch.PodFailurePolicyActionFailIndex), string(batch.PodFailurePolicyActionFailJob), string(batch.PodFailurePolicyActionIgnore)) @@ -182,6 +192,15 @@ func validateJobSpec(spec *batch.JobSpec, fldPath *field.Path, opts apivalidatio if spec.TTLSecondsAfterFinished != nil { allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.TTLSecondsAfterFinished), fldPath.Child("ttlSecondsAfterFinished"))...) } + if spec.BackoffLimitPerIndex != nil { + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.BackoffLimitPerIndex), fldPath.Child("backoffLimitPerIndex"))...) + } + if spec.MaxFailedIndexes != nil { + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.MaxFailedIndexes), fldPath.Child("maxFailedIndexes"))...) 
+ if spec.BackoffLimitPerIndex == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("backoffLimitPerIndex"), fmt.Sprintf("when maxFailedIndexes is specified"))) + } + } if spec.CompletionMode != nil { if *spec.CompletionMode != batch.NonIndexedCompletion && *spec.CompletionMode != batch.IndexedCompletion { allErrs = append(allErrs, field.NotSupported(fldPath.Child("completionMode"), spec.CompletionMode, []string{string(batch.NonIndexedCompletion), string(batch.IndexedCompletion)})) @@ -193,6 +212,31 @@ func validateJobSpec(spec *batch.JobSpec, fldPath *field.Path, opts apivalidatio if spec.Parallelism != nil && *spec.Parallelism > maxParallelismForIndexedJob { allErrs = append(allErrs, field.Invalid(fldPath.Child("parallelism"), *spec.Parallelism, fmt.Sprintf("must be less than or equal to %d when completion mode is %s", maxParallelismForIndexedJob, batch.IndexedCompletion))) } + if spec.Completions != nil && spec.MaxFailedIndexes != nil && *spec.MaxFailedIndexes > *spec.Completions { + allErrs = append(allErrs, field.Invalid(fldPath.Child("maxFailedIndexes"), *spec.MaxFailedIndexes, "must be less than or equal to completions")) + } + if spec.MaxFailedIndexes != nil && *spec.MaxFailedIndexes > maxFailedIndexesForIndexedJob { + allErrs = append(allErrs, field.Invalid(fldPath.Child("maxFailedIndexes"), *spec.MaxFailedIndexes, fmt.Sprintf("must be less than or equal to %d", maxFailedIndexesForIndexedJob))) + } + if spec.Completions != nil && *spec.Completions > completionsSoftLimit && spec.BackoffLimitPerIndex != nil { + if spec.MaxFailedIndexes == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("maxFailedIndexes"), fmt.Sprintf("must be specified when completions is above %d", completionsSoftLimit))) + } + if spec.Parallelism != nil && *spec.Parallelism > parallelismLimitForHighCompletions { + allErrs = append(allErrs, field.Invalid(fldPath.Child("parallelism"), *spec.Parallelism, fmt.Sprintf("must be less than or equal to %d when completions are above %d and used with backoff limit per index", parallelismLimitForHighCompletions, completionsSoftLimit))) + } + if spec.MaxFailedIndexes != nil && *spec.MaxFailedIndexes > maxFailedIndexesLimitForHighCompletions { + allErrs = append(allErrs, field.Invalid(fldPath.Child("maxFailedIndexes"), *spec.MaxFailedIndexes, fmt.Sprintf("must be less than or equal to %d when completions are above %d and used with backoff limit per index", maxFailedIndexesLimitForHighCompletions, completionsSoftLimit))) + } + } + } + } + if spec.CompletionMode == nil || *spec.CompletionMode == batch.NonIndexedCompletion { + if spec.BackoffLimitPerIndex != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("backoffLimitPerIndex"), *spec.BackoffLimitPerIndex, "requires indexed completion mode")) + } + if spec.MaxFailedIndexes != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("maxFailedIndexes"), *spec.MaxFailedIndexes, "requires indexed completion mode")) } } @@ -232,16 +276,20 @@ func validatePodFailurePolicy(spec *batch.JobSpec, fldPath *field.Path) field.Er containerNames.Insert(containerSpec.Name) } for i, rule := range spec.PodFailurePolicy.Rules { - allErrs = append(allErrs, validatePodFailurePolicyRule(&rule, rulesPath.Index(i), containerNames)...) + allErrs = append(allErrs, validatePodFailurePolicyRule(spec, &rule, rulesPath.Index(i), containerNames)...) 
} return allErrs } -func validatePodFailurePolicyRule(rule *batch.PodFailurePolicyRule, rulePath *field.Path, containerNames sets.String) field.ErrorList { +func validatePodFailurePolicyRule(spec *batch.JobSpec, rule *batch.PodFailurePolicyRule, rulePath *field.Path, containerNames sets.String) field.ErrorList { var allErrs field.ErrorList actionPath := rulePath.Child("action") if rule.Action == "" { allErrs = append(allErrs, field.Required(actionPath, fmt.Sprintf("valid values: %q", sets.List(supportedPodFailurePolicyActions)))) + } else if rule.Action == batch.PodFailurePolicyActionFailIndex { + if spec.BackoffLimitPerIndex == nil { + allErrs = append(allErrs, field.Invalid(actionPath, rule.Action, "requires the backoffLimitPerIndex to be set")) + } } else if !supportedPodFailurePolicyActions.Has(string(rule.Action)) { allErrs = append(allErrs, field.NotSupported(actionPath, rule.Action, sets.List(supportedPodFailurePolicyActions))) } @@ -377,6 +425,7 @@ func ValidateJobSpecUpdate(spec, oldSpec batch.JobSpec, fldPath *field.Path, opt allErrs = append(allErrs, validatePodTemplateUpdate(spec, oldSpec, fldPath, opts)...) allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.CompletionMode, oldSpec.CompletionMode, fldPath.Child("completionMode"))...) allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.PodFailurePolicy, oldSpec.PodFailurePolicy, fldPath.Child("podFailurePolicy"))...) + allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.BackoffLimitPerIndex, oldSpec.BackoffLimitPerIndex, fldPath.Child("backoffLimitPerIndex"))...) return allErrs } diff --git a/pkg/apis/batch/validation/validation_test.go b/pkg/apis/batch/validation/validation_test.go index 4dd17cb9e33..7247089f167 100644 --- a/pkg/apis/batch/validation/validation_test.go +++ b/pkg/apis/batch/validation/validation_test.go @@ -160,6 +160,28 @@ func TestValidateJob(t *testing.T) { }, }, }, + "valid pod failure policy with FailIndex": { + job: batch.Job{ + ObjectMeta: validJobObjectMeta, + Spec: batch.JobSpec{ + CompletionMode: completionModePtr(batch.IndexedCompletion), + Completions: pointer.Int32(2), + BackoffLimitPerIndex: pointer.Int32(1), + Selector: validGeneratedSelector, + ManualSelector: pointer.Bool(true), + Template: validPodTemplateSpecForGeneratedRestartPolicyNever, + PodFailurePolicy: &batch.PodFailurePolicy{ + Rules: []batch.PodFailurePolicyRule{{ + Action: batch.PodFailurePolicyActionFailIndex, + OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{ + Operator: batch.PodFailurePolicyOnExitCodesOpIn, + Values: []int32{10}, + }, + }}, + }, + }, + }, + }, "valid manual selector": { opts: JobValidationOptions{RequirePrefixedLabels: true}, job: batch.Job{ @@ -236,6 +258,36 @@ func TestValidateJob(t *testing.T) { }, }, }, + "valid parallelism and maxFailedIndexes for high completions when backoffLimitPerIndex is used": { + job: batch.Job{ + ObjectMeta: validJobObjectMeta, + Spec: batch.JobSpec{ + Completions: pointer.Int32(100_000), + Parallelism: pointer.Int32(100_000), + MaxFailedIndexes: pointer.Int32(100_000), + BackoffLimitPerIndex: pointer.Int32(1), + CompletionMode: completionModePtr(batch.IndexedCompletion), + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + }, + }, + opts: JobValidationOptions{RequirePrefixedLabels: true}, + }, + "valid parallelism and maxFailedIndexes for unlimited completions when backoffLimitPerIndex is used": { + job: batch.Job{ + ObjectMeta: validJobObjectMeta, + Spec: batch.JobSpec{ + Completions: 
pointer.Int32(1_000_000_000), + Parallelism: pointer.Int32(10_000), + MaxFailedIndexes: pointer.Int32(10_000), + BackoffLimitPerIndex: pointer.Int32(1), + CompletionMode: completionModePtr(batch.IndexedCompletion), + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + }, + }, + opts: JobValidationOptions{RequirePrefixedLabels: true}, + }, "valid job tracking annotation": { opts: JobValidationOptions{ RequirePrefixedLabels: true, @@ -476,7 +528,7 @@ func TestValidateJob(t *testing.T) { }, opts: JobValidationOptions{RequirePrefixedLabels: true}, }, - `spec.podFailurePolicy.rules[0].action: Required value: valid values: ["Count" "FailJob" "Ignore"]`: { + `spec.podFailurePolicy.rules[0].action: Required value: valid values: ["Count" "FailIndex" "FailJob" "Ignore"]`: { job: batch.Job{ ObjectMeta: validJobObjectMeta, Spec: batch.JobSpec{ @@ -584,7 +636,7 @@ func TestValidateJob(t *testing.T) { }, opts: JobValidationOptions{RequirePrefixedLabels: true}, }, - `spec.podFailurePolicy.rules[0].action: Unsupported value: "UnknownAction": supported values: "Count", "FailJob", "Ignore"`: { + `spec.podFailurePolicy.rules[0].action: Unsupported value: "UnknownAction": supported values: "Count", "FailIndex", "FailJob", "Ignore"`: { job: batch.Job{ ObjectMeta: validJobObjectMeta, Spec: batch.JobSpec{ @@ -749,6 +801,124 @@ func TestValidateJob(t *testing.T) { }, opts: JobValidationOptions{RequirePrefixedLabels: true}, }, + "spec.backoffLimitPerIndex: Invalid value: 1: requires indexed completion mode": { + job: batch.Job{ + ObjectMeta: validJobObjectMeta, + Spec: batch.JobSpec{ + BackoffLimitPerIndex: pointer.Int32(1), + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + }, + }, + opts: JobValidationOptions{RequirePrefixedLabels: true}, + }, + "spec.backoffLimitPerIndex:must be greater than or equal to 0": { + job: batch.Job{ + ObjectMeta: validJobObjectMeta, + Spec: batch.JobSpec{ + BackoffLimitPerIndex: pointer.Int32(-1), + CompletionMode: completionModePtr(batch.IndexedCompletion), + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + }, + }, + opts: JobValidationOptions{RequirePrefixedLabels: true}, + }, + "spec.maxFailedIndexes: Invalid value: 11: must be less than or equal to completions": { + job: batch.Job{ + ObjectMeta: validJobObjectMeta, + Spec: batch.JobSpec{ + Completions: pointer.Int32(10), + MaxFailedIndexes: pointer.Int32(11), + BackoffLimitPerIndex: pointer.Int32(1), + CompletionMode: completionModePtr(batch.IndexedCompletion), + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + }, + }, + opts: JobValidationOptions{RequirePrefixedLabels: true}, + }, + "spec.maxFailedIndexes: Required value: must be specified when completions is above 100000": { + job: batch.Job{ + ObjectMeta: validJobObjectMeta, + Spec: batch.JobSpec{ + Completions: pointer.Int32(100_001), + BackoffLimitPerIndex: pointer.Int32(1), + CompletionMode: completionModePtr(batch.IndexedCompletion), + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + }, + }, + opts: JobValidationOptions{RequirePrefixedLabels: true}, + }, + "spec.parallelism: Invalid value: 50000: must be less than or equal to 10000 when completions are above 100000 and used with backoff limit per index": { + job: batch.Job{ + ObjectMeta: validJobObjectMeta, + Spec: batch.JobSpec{ + Completions: pointer.Int32(100_001), + Parallelism: pointer.Int32(50_000), + BackoffLimitPerIndex: pointer.Int32(1), + 
MaxFailedIndexes: pointer.Int32(1), + CompletionMode: completionModePtr(batch.IndexedCompletion), + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + }, + }, + opts: JobValidationOptions{RequirePrefixedLabels: true}, + }, + "spec.maxFailedIndexes: Invalid value: 100001: must be less than or equal to 100000": { + job: batch.Job{ + ObjectMeta: validJobObjectMeta, + Spec: batch.JobSpec{ + Completions: pointer.Int32(100_001), + BackoffLimitPerIndex: pointer.Int32(1), + MaxFailedIndexes: pointer.Int32(100_001), + CompletionMode: completionModePtr(batch.IndexedCompletion), + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + }, + }, + opts: JobValidationOptions{RequirePrefixedLabels: true}, + }, + "spec.maxFailedIndexes: Invalid value: 50000: must be less than or equal to 10000 when completions are above 100000 and used with backoff limit per index": { + job: batch.Job{ + ObjectMeta: validJobObjectMeta, + Spec: batch.JobSpec{ + Completions: pointer.Int32(100_001), + BackoffLimitPerIndex: pointer.Int32(1), + MaxFailedIndexes: pointer.Int32(50_000), + CompletionMode: completionModePtr(batch.IndexedCompletion), + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + }, + }, + opts: JobValidationOptions{RequirePrefixedLabels: true}, + }, + "spec.maxFailedIndexes:must be greater than or equal to 0": { + job: batch.Job{ + ObjectMeta: validJobObjectMeta, + Spec: batch.JobSpec{ + BackoffLimitPerIndex: pointer.Int32(1), + MaxFailedIndexes: pointer.Int32(-1), + CompletionMode: completionModePtr(batch.IndexedCompletion), + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + }, + }, + opts: JobValidationOptions{RequirePrefixedLabels: true}, + }, + "spec.backoffLimitPerIndex: Required value: when maxFailedIndexes is specified": { + job: batch.Job{ + ObjectMeta: validJobObjectMeta, + Spec: batch.JobSpec{ + MaxFailedIndexes: pointer.Int32(1), + CompletionMode: completionModePtr(batch.IndexedCompletion), + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + }, + }, + opts: JobValidationOptions{RequirePrefixedLabels: true}, + }, "spec.completions:must be greater than or equal to 0": { job: batch.Job{ ObjectMeta: metav1.ObjectMeta{ @@ -1243,12 +1413,117 @@ func TestValidateJobUpdate(t *testing.T) { Field: "spec.podFailurePolicy", }, }, + "set backoff limit per index": { + old: batch.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, + Spec: batch.JobSpec{ + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGeneratedRestartPolicyNever, + Completions: pointer.Int32(3), + CompletionMode: completionModePtr(batch.IndexedCompletion), + }, + }, + update: func(job *batch.Job) { + job.Spec.BackoffLimitPerIndex = pointer.Int32(1) + }, + err: &field.Error{ + Type: field.ErrorTypeInvalid, + Field: "spec.backoffLimitPerIndex", + }, + }, + "unset backoff limit per index": { + old: batch.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, + Spec: batch.JobSpec{ + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGeneratedRestartPolicyNever, + Completions: pointer.Int32(3), + CompletionMode: completionModePtr(batch.IndexedCompletion), + BackoffLimitPerIndex: pointer.Int32(1), + }, + }, + update: func(job *batch.Job) { + job.Spec.BackoffLimitPerIndex = nil + }, + err: &field.Error{ + Type: field.ErrorTypeInvalid, + Field: "spec.backoffLimitPerIndex", + }, + }, + "update 
backoff limit per index": { + old: batch.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, + Spec: batch.JobSpec{ + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGeneratedRestartPolicyNever, + Completions: pointer.Int32(3), + CompletionMode: completionModePtr(batch.IndexedCompletion), + BackoffLimitPerIndex: pointer.Int32(1), + }, + }, + update: func(job *batch.Job) { + job.Spec.BackoffLimitPerIndex = pointer.Int32(2) + }, + err: &field.Error{ + Type: field.ErrorTypeInvalid, + Field: "spec.backoffLimitPerIndex", + }, + }, + "set max failed indexes": { + old: batch.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, + Spec: batch.JobSpec{ + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGeneratedRestartPolicyNever, + Completions: pointer.Int32(3), + CompletionMode: completionModePtr(batch.IndexedCompletion), + BackoffLimitPerIndex: pointer.Int32(1), + }, + }, + update: func(job *batch.Job) { + job.Spec.MaxFailedIndexes = pointer.Int32(1) + }, + }, + "unset max failed indexes": { + old: batch.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, + Spec: batch.JobSpec{ + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGeneratedRestartPolicyNever, + Completions: pointer.Int32(3), + CompletionMode: completionModePtr(batch.IndexedCompletion), + BackoffLimitPerIndex: pointer.Int32(1), + MaxFailedIndexes: pointer.Int32(1), + }, + }, + update: func(job *batch.Job) { + job.Spec.MaxFailedIndexes = nil + }, + }, + "update max failed indexes": { + old: batch.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, + Spec: batch.JobSpec{ + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGeneratedRestartPolicyNever, + Completions: pointer.Int32(3), + CompletionMode: completionModePtr(batch.IndexedCompletion), + BackoffLimitPerIndex: pointer.Int32(1), + MaxFailedIndexes: pointer.Int32(1), + }, + }, + update: func(job *batch.Job) { + job.Spec.MaxFailedIndexes = pointer.Int32(2) + }, + }, "immutable pod template": { old: batch.Job{ ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, Spec: batch.JobSpec{ - Selector: validGeneratedSelector, - Template: validPodTemplateSpecForGenerated, + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + Completions: pointer.Int32(3), + CompletionMode: completionModePtr(batch.IndexedCompletion), }, }, update: func(job *batch.Job) { diff --git a/pkg/apis/batch/zz_generated.deepcopy.go b/pkg/apis/batch/zz_generated.deepcopy.go index 015128250e5..98e32b65370 100644 --- a/pkg/apis/batch/zz_generated.deepcopy.go +++ b/pkg/apis/batch/zz_generated.deepcopy.go @@ -267,6 +267,16 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) { *out = new(int32) **out = **in } + if in.BackoffLimitPerIndex != nil { + in, out := &in.BackoffLimitPerIndex, &out.BackoffLimitPerIndex + *out = new(int32) + **out = **in + } + if in.MaxFailedIndexes != nil { + in, out := &in.MaxFailedIndexes, &out.MaxFailedIndexes + *out = new(int32) + **out = **in + } if in.Selector != nil { in, out := &in.Selector, &out.Selector *out = new(v1.LabelSelector) @@ -329,6 +339,11 @@ func (in *JobStatus) DeepCopyInto(out *JobStatus) { *out = new(int32) **out = **in } + if in.FailedIndexes != nil { + in, out := &in.FailedIndexes, &out.FailedIndexes + *out = new(string) + **out = **in + } if in.UncountedTerminatedPods != nil { in, out := 
&in.UncountedTerminatedPods, &out.UncountedTerminatedPods *out = new(UncountedTerminatedPods) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 43ee081674c..65612fc279e 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -381,6 +381,13 @@ const ( // Causes kubelet to no longer create legacy IPTables rules IPTablesOwnershipCleanup featuregate.Feature = "IPTablesOwnershipCleanup" + // owner: @mimowo + // kep: https://kep.k8s.io/3850 + // alpha: v1.28 + // + // Allows users to specify counting of failed pods per index. + JobBackoffLimitPerIndex featuregate.Feature = "JobBackoffLimitPerIndex" + // owner: @ahg // beta: v1.23 // stable: v1.27 @@ -1019,6 +1026,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS IPTablesOwnershipCleanup: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30 + JobBackoffLimitPerIndex: {Default: false, PreRelease: featuregate.Alpha}, + JobMutableNodeSchedulingDirectives: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 JobPodFailurePolicy: {Default: true, PreRelease: featuregate.Beta}, diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index 011f96b093c..68fd2768464 100644 --- a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -14684,6 +14684,20 @@ func schema_k8sio_api_batch_v1_JobSpec(ref common.ReferenceCallback) common.Open Format: "int32", }, }, + "backoffLimitPerIndex": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "maxFailedIndexes": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + Type: []string{"integer"}, + Format: "int32", + }, + }, "selector": { SchemaProps: spec.SchemaProps{ Description: "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", @@ -14803,6 +14817,13 @@ func schema_k8sio_api_batch_v1_JobStatus(ref common.ReferenceCallback) common.Op Format: "", }, }, + "failedIndexes": { + SchemaProps: spec.SchemaProps{ + Description: "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. 
The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + Type: []string{"string"}, + Format: "", + }, + }, "uncountedTerminatedPods": { SchemaProps: spec.SchemaProps{ Description: "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", @@ -14977,11 +14998,11 @@ func schema_k8sio_api_batch_v1_PodFailurePolicyRule(ref common.ReferenceCallback Properties: map[string]spec.Schema{ "action": { SchemaProps: spec.SchemaProps{ - Description: "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.\n\nPossible enum values:\n - `\"Count\"` This is an action which might be taken on a pod failure - the pod failure is handled in the default way - the counter towards .backoffLimit, represented by the job's .status.failed field, is incremented.\n - `\"FailJob\"` This is an action which might be taken on a pod failure - mark the pod's job as Failed and terminate all running pods.\n - `\"Ignore\"` This is an action which might be taken on a pod failure - the counter towards .backoffLimit, represented by the job's .status.failed field, is not incremented and a replacement pod is created.", + Description: "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is alpha-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. 
Clients should react to an unknown action by skipping the rule.\n\nPossible enum values:\n - `\"Count\"` This is an action which might be taken on a pod failure - the pod failure is handled in the default way - the counter towards .backoffLimit, represented by the job's .status.failed field, is incremented.\n - `\"FailIndex\"` This is an action which might be taken on a pod failure - mark the Job's index as failed to avoid restarts within this index. This action can only be used when backoffLimitPerIndex is set.\n - `\"FailJob\"` This is an action which might be taken on a pod failure - mark the pod's job as Failed and terminate all running pods.\n - `\"Ignore\"` This is an action which might be taken on a pod failure - the counter towards .backoffLimit, represented by the job's .status.failed field, is not incremented and a replacement pod is created.", Default: "", Type: []string{"string"}, Format: "", - Enum: []interface{}{"Count", "FailJob", "Ignore"}, + Enum: []interface{}{"Count", "FailIndex", "FailJob", "Ignore"}, }, }, "onExitCodes": { diff --git a/pkg/registry/batch/job/strategy.go b/pkg/registry/batch/job/strategy.go index 918e21c3622..fef6fd29278 100644 --- a/pkg/registry/batch/job/strategy.go +++ b/pkg/registry/batch/job/strategy.go @@ -100,6 +100,23 @@ func (jobStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) { job.Spec.PodFailurePolicy = nil } + if !utilfeature.DefaultFeatureGate.Enabled(features.JobBackoffLimitPerIndex) { + job.Spec.BackoffLimitPerIndex = nil + job.Spec.MaxFailedIndexes = nil + if job.Spec.PodFailurePolicy != nil { + // We drop the FailIndex pod failure policy rules because + // JobBackoffLimitPerIndex is disabled. + index := 0 + for _, rule := range job.Spec.PodFailurePolicy.Rules { + if rule.Action != batch.PodFailurePolicyActionFailIndex { + job.Spec.PodFailurePolicy.Rules[index] = rule + index++ + } + } + job.Spec.PodFailurePolicy.Rules = job.Spec.PodFailurePolicy.Rules[:index] + } + } + pod.DropDisabledTemplateFields(&job.Spec.Template, nil) } @@ -113,6 +130,20 @@ func (jobStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object newJob.Spec.PodFailurePolicy = nil } + if !utilfeature.DefaultFeatureGate.Enabled(features.JobBackoffLimitPerIndex) { + if oldJob.Spec.BackoffLimitPerIndex == nil { + newJob.Spec.BackoffLimitPerIndex = nil + } + if oldJob.Spec.MaxFailedIndexes == nil { + newJob.Spec.MaxFailedIndexes = nil + } + // We keep pod failure policy rules with FailIndex actions (is any), + // since the pod failure policy is immutable. Note that, if the old job + // had BackoffLimitPerIndex set, the new Job will also have it, so the + // validation of the pod failure policy with FailIndex rules will + // continue to pass. + } + pod.DropDisabledTemplateFields(&newJob.Spec.Template, &oldJob.Spec.Template) // Any changes to the spec increment the generation number. 
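Reviewer note: to make the API surface added above concrete, here is a minimal client-side sketch (not part of this patch) of an Indexed Job that exercises backoffLimitPerIndex, maxFailedIndexes, and a FailIndex pod failure policy rule through the published batch/v1 types. The job name, container image, and exit code 42 are placeholders, and the sketch assumes a cluster with the JobBackoffLimitPerIndex feature gate enabled.

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/pointer"
)

// perIndexBackoffJob builds an Indexed Job where each index may be retried
// once before it is marked failed, the whole Job fails once more than two
// indexes have failed, and exit code 42 fails the index without retry.
func perIndexBackoffJob() *batchv1.Job {
	indexed := batchv1.IndexedCompletion
	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "per-index-backoff-example"}, // placeholder name
		Spec: batchv1.JobSpec{
			Completions:          pointer.Int32(10),
			Parallelism:          pointer.Int32(10),
			CompletionMode:       &indexed,
			BackoffLimitPerIndex: pointer.Int32(1), // retries are counted per index
			MaxFailedIndexes:     pointer.Int32(2), // more than 2 failed indexes fails the Job
			PodFailurePolicy: &batchv1.PodFailurePolicy{
				Rules: []batchv1.PodFailurePolicyRule{{
					Action: batchv1.PodFailurePolicyActionFailIndex,
					OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{
						Operator: batchv1.PodFailurePolicyOnExitCodesOpIn,
						Values:   []int32{42}, // placeholder "non-retriable" exit code
					},
				}},
			},
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					// restartPolicy must be Never when backoffLimitPerIndex is set.
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{{
						Name:  "worker",
						Image: "example.com/worker:latest", // placeholder image
					}},
				},
			},
		},
	}
}

When the feature gate is off, PrepareForCreate above drops these two fields and any FailIndex rules from the pod failure policy, which is what the strategy_test.go cases below assert.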
diff --git a/pkg/registry/batch/job/strategy_test.go b/pkg/registry/batch/job/strategy_test.go index 22c7bdee8ec..938181c4340 100644 --- a/pkg/registry/batch/job/strategy_test.go +++ b/pkg/registry/batch/job/strategy_test.go @@ -70,11 +70,72 @@ func TestJobStrategy_PrepareForUpdate(t *testing.T) { } cases := map[string]struct { - enableJobPodFailurePolicy bool - job batch.Job - updatedJob batch.Job - wantJob batch.Job + enableJobPodFailurePolicy bool + enableJobBackoffLimitPerIndex bool + job batch.Job + updatedJob batch.Job + wantJob batch.Job }{ + "update job with a new field; updated when JobBackoffLimitPerIndex enabled": { + enableJobBackoffLimitPerIndex: true, + job: batch.Job{ + ObjectMeta: getValidObjectMeta(0), + Spec: batch.JobSpec{ + Selector: validSelector, + Template: validPodTemplateSpec, + BackoffLimitPerIndex: nil, + MaxFailedIndexes: nil, + }, + }, + updatedJob: batch.Job{ + ObjectMeta: getValidObjectMeta(0), + Spec: batch.JobSpec{ + Selector: validSelector, + Template: validPodTemplateSpec, + BackoffLimitPerIndex: pointer.Int32(1), + MaxFailedIndexes: pointer.Int32(1), + }, + }, + wantJob: batch.Job{ + ObjectMeta: getValidObjectMeta(1), + Spec: batch.JobSpec{ + Selector: validSelector, + Template: validPodTemplateSpec, + BackoffLimitPerIndex: pointer.Int32(1), + MaxFailedIndexes: pointer.Int32(1), + }, + }, + }, + "update job with a new field; not updated when JobBackoffLimitPerIndex disabled": { + enableJobBackoffLimitPerIndex: false, + job: batch.Job{ + ObjectMeta: getValidObjectMeta(0), + Spec: batch.JobSpec{ + Selector: validSelector, + Template: validPodTemplateSpec, + BackoffLimitPerIndex: nil, + MaxFailedIndexes: nil, + }, + }, + updatedJob: batch.Job{ + ObjectMeta: getValidObjectMeta(0), + Spec: batch.JobSpec{ + Selector: validSelector, + Template: validPodTemplateSpec, + BackoffLimitPerIndex: pointer.Int32(1), + MaxFailedIndexes: pointer.Int32(1), + }, + }, + wantJob: batch.Job{ + ObjectMeta: getValidObjectMeta(0), + Spec: batch.JobSpec{ + Selector: validSelector, + Template: validPodTemplateSpec, + BackoffLimitPerIndex: nil, + MaxFailedIndexes: nil, + }, + }, + }, "update job with a new field; updated when JobPodFailurePolicy enabled": { enableJobPodFailurePolicy: true, job: batch.Job{ @@ -327,6 +388,7 @@ func TestJobStrategy_PrepareForUpdate(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodFailurePolicy, tc.enableJobPodFailurePolicy)() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobBackoffLimitPerIndex, tc.enableJobBackoffLimitPerIndex)() ctx := genericapirequest.NewDefaultContext() Strategy.PrepareForUpdate(ctx, &tc.updatedJob, &tc.job) @@ -357,10 +419,53 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { } cases := map[string]struct { - enableJobPodFailurePolicy bool - job batch.Job - wantJob batch.Job + enableJobPodFailurePolicy bool + enableJobBackoffLimitPerIndex bool + job batch.Job + wantJob batch.Job }{ + "create job with a new fields; JobBackoffLimitPerIndex enabled": { + enableJobBackoffLimitPerIndex: true, + job: batch.Job{ + ObjectMeta: getValidObjectMeta(0), + Spec: batch.JobSpec{ + Selector: validSelector, + Template: validPodTemplateSpec, + BackoffLimitPerIndex: pointer.Int32(1), + MaxFailedIndexes: pointer.Int32(1), + }, + }, + wantJob: batch.Job{ + ObjectMeta: getValidObjectMeta(1), + Spec: batch.JobSpec{ + Selector: validSelector, + Template: 
validPodTemplateSpec, + BackoffLimitPerIndex: pointer.Int32(1), + MaxFailedIndexes: pointer.Int32(1), + }, + }, + }, + "create job with a new fields; JobBackoffLimitPerIndex disabled": { + enableJobBackoffLimitPerIndex: false, + job: batch.Job{ + ObjectMeta: getValidObjectMeta(0), + Spec: batch.JobSpec{ + Selector: validSelector, + Template: validPodTemplateSpec, + BackoffLimitPerIndex: pointer.Int32(1), + MaxFailedIndexes: pointer.Int32(1), + }, + }, + wantJob: batch.Job{ + ObjectMeta: getValidObjectMeta(1), + Spec: batch.JobSpec{ + Selector: validSelector, + Template: validPodTemplateSpec, + BackoffLimitPerIndex: nil, + MaxFailedIndexes: nil, + }, + }, + }, "create job with a new field; JobPodFailurePolicy enabled": { enableJobPodFailurePolicy: true, job: batch.Job{ @@ -418,11 +523,107 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) { }, }, }, + "create job with pod failure policy using FailIndex action; JobPodFailurePolicy enabled, JobBackoffLimitPerIndex disabled": { + enableJobBackoffLimitPerIndex: false, + enableJobPodFailurePolicy: true, + job: batch.Job{ + ObjectMeta: getValidObjectMeta(0), + Spec: batch.JobSpec{ + Selector: validSelector, + Template: validPodTemplateSpec, + BackoffLimitPerIndex: pointer.Int32(1), + PodFailurePolicy: &batch.PodFailurePolicy{ + Rules: []batch.PodFailurePolicyRule{ + { + Action: batch.PodFailurePolicyActionFailIndex, + OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{ + Operator: batch.PodFailurePolicyOnExitCodesOpIn, + Values: []int32{1}, + }, + }, + }, + }, + }, + }, + wantJob: batch.Job{ + ObjectMeta: getValidObjectMeta(1), + Spec: batch.JobSpec{ + Selector: validSelector, + Template: validPodTemplateSpec, + PodFailurePolicy: &batch.PodFailurePolicy{ + Rules: []batch.PodFailurePolicyRule{}, + }, + }, + }, + }, + "create job with multiple pod failure policy rules, some using FailIndex action; JobPodFailurePolicy enabled, JobBackoffLimitPerIndex disabled": { + enableJobBackoffLimitPerIndex: false, + enableJobPodFailurePolicy: true, + job: batch.Job{ + ObjectMeta: getValidObjectMeta(0), + Spec: batch.JobSpec{ + Selector: validSelector, + Template: validPodTemplateSpec, + BackoffLimitPerIndex: pointer.Int32(1), + PodFailurePolicy: &batch.PodFailurePolicy{ + Rules: []batch.PodFailurePolicyRule{ + { + Action: batch.PodFailurePolicyActionFailJob, + OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{ + Operator: batch.PodFailurePolicyOnExitCodesOpIn, + Values: []int32{2}, + }, + }, + { + Action: batch.PodFailurePolicyActionFailIndex, + OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{ + Operator: batch.PodFailurePolicyOnExitCodesOpIn, + Values: []int32{1}, + }, + }, + { + Action: batch.PodFailurePolicyActionIgnore, + OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{ + Operator: batch.PodFailurePolicyOnExitCodesOpIn, + Values: []int32{13}, + }, + }, + }, + }, + }, + }, + wantJob: batch.Job{ + ObjectMeta: getValidObjectMeta(1), + Spec: batch.JobSpec{ + Selector: validSelector, + Template: validPodTemplateSpec, + PodFailurePolicy: &batch.PodFailurePolicy{ + Rules: []batch.PodFailurePolicyRule{ + { + Action: batch.PodFailurePolicyActionFailJob, + OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{ + Operator: batch.PodFailurePolicyOnExitCodesOpIn, + Values: []int32{2}, + }, + }, + { + Action: batch.PodFailurePolicyActionIgnore, + OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{ + Operator: batch.PodFailurePolicyOnExitCodesOpIn, + Values: []int32{13}, + }, + }, + }, + }, + }, + }, + }, } for name, 
tc := range cases { t.Run(name, func(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodFailurePolicy, tc.enableJobPodFailurePolicy)() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobBackoffLimitPerIndex, tc.enableJobBackoffLimitPerIndex)() ctx := genericapirequest.NewDefaultContext() Strategy.PrepareForCreate(ctx, &tc.job) @@ -469,11 +670,15 @@ func TestJobStrategy_ValidateUpdate(t *testing.T) { Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: api.TerminationMessageReadFile}}, }, } + validPodTemplateSpecNever := *validPodTemplateSpec.DeepCopy() + validPodTemplateSpecNever.Spec.RestartPolicy = api.RestartPolicyNever now := metav1.Now() cases := map[string]struct { - job *batch.Job - update func(*batch.Job) - wantErrs field.ErrorList + enableJobPodFailurePolicy bool + enableJobBackoffLimitPerIndex bool + job *batch.Job + update func(*batch.Job) + wantErrs field.ErrorList }{ "update parallelism": { job: &batch.Job{ @@ -707,9 +912,45 @@ func TestJobStrategy_ValidateUpdate(t *testing.T) { job.Annotations["hello"] = "world" }, }, + "old job is using FailIndex JobBackoffLimitPerIndex is disabled, but FailIndex was already used": { + enableJobPodFailurePolicy: true, + enableJobBackoffLimitPerIndex: false, + job: &batch.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "myjob", + Namespace: metav1.NamespaceDefault, + ResourceVersion: "0", + Annotations: map[string]string{"foo": "bar"}, + }, + Spec: batch.JobSpec{ + CompletionMode: completionModePtr(batch.IndexedCompletion), + Completions: pointer.Int32(2), + BackoffLimitPerIndex: pointer.Int32(1), + Selector: validSelector, + ManualSelector: pointer.Bool(true), + Template: validPodTemplateSpecNever, + PodFailurePolicy: &batch.PodFailurePolicy{ + Rules: []batch.PodFailurePolicyRule{ + { + Action: batch.PodFailurePolicyActionFailIndex, + OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{ + Operator: batch.PodFailurePolicyOnExitCodesOpIn, + Values: []int32{1}, + }, + }, + }, + }, + }, + }, + update: func(job *batch.Job) { + job.Annotations["hello"] = "world" + }, + }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodFailurePolicy, tc.enableJobPodFailurePolicy)() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobBackoffLimitPerIndex, tc.enableJobBackoffLimitPerIndex)() newJob := tc.job.DeepCopy() tc.update(newJob) errs := Strategy.ValidateUpdate(ctx, newJob, tc.job) @@ -965,15 +1206,19 @@ func TestJobStrategy_Validate(t *testing.T) { DNSPolicy: api.DNSClusterFirst, Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: api.TerminationMessageReadFile}}, } + validPodSpecNever := *validPodSpec.DeepCopy() + validPodSpecNever.RestartPolicy = api.RestartPolicyNever validObjectMeta := metav1.ObjectMeta{ Name: "myjob2", Namespace: metav1.NamespaceDefault, UID: theUID, } testcases := map[string]struct { - job *batch.Job - wantJob *batch.Job - wantWarningCount int32 + enableJobPodFailurePolicy bool + enableJobBackoffLimitPerIndex bool + job *batch.Job + wantJob *batch.Job + wantWarningCount int32 }{ "valid job with labels in pod template": { job: &batch.Job{ @@ -1145,12 +1390,178 @@ func TestJobStrategy_Validate(t *testing.T) { }, wantWarningCount: 1, 
}, + "FailIndex action; when JobBackoffLimitPerIndex is disabled - validation error": { + enableJobPodFailurePolicy: true, + enableJobBackoffLimitPerIndex: false, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Selector: validSelector, + ManualSelector: pointer.Bool(true), + Template: api.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: validSelector.MatchLabels, + }, + Spec: validPodSpecNever, + }, + PodFailurePolicy: &batch.PodFailurePolicy{ + Rules: []batch.PodFailurePolicyRule{ + { + Action: batch.PodFailurePolicyActionFailIndex, + OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{ + Operator: batch.PodFailurePolicyOnExitCodesOpIn, + Values: []int32{1}, + }, + }, + }, + }, + }, + }, + wantJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Selector: validSelector, + ManualSelector: pointer.Bool(true), + Template: api.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: validSelector.MatchLabels, + }, + Spec: validPodSpecNever, + }, + PodFailurePolicy: &batch.PodFailurePolicy{ + Rules: []batch.PodFailurePolicyRule{ + { + Action: batch.PodFailurePolicyActionFailIndex, + OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{ + Operator: batch.PodFailurePolicyOnExitCodesOpIn, + Values: []int32{1}, + }, + }, + }, + }, + }, + }, + wantWarningCount: 1, + }, + "FailIndex action; when JobBackoffLimitPerIndex is enabled, but not used - validation error": { + enableJobPodFailurePolicy: true, + enableJobBackoffLimitPerIndex: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Selector: validSelector, + ManualSelector: pointer.Bool(true), + Template: api.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: validSelector.MatchLabels, + }, + Spec: validPodSpecNever, + }, + PodFailurePolicy: &batch.PodFailurePolicy{ + Rules: []batch.PodFailurePolicyRule{ + { + Action: batch.PodFailurePolicyActionFailIndex, + OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{ + Operator: batch.PodFailurePolicyOnExitCodesOpIn, + Values: []int32{1}, + }, + }, + }, + }, + }, + }, + wantJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + Selector: validSelector, + ManualSelector: pointer.Bool(true), + Template: api.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: validSelector.MatchLabels, + }, + Spec: validPodSpecNever, + }, + PodFailurePolicy: &batch.PodFailurePolicy{ + Rules: []batch.PodFailurePolicyRule{ + { + Action: batch.PodFailurePolicyActionFailIndex, + OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{ + Operator: batch.PodFailurePolicyOnExitCodesOpIn, + Values: []int32{1}, + }, + }, + }, + }, + }, + }, + wantWarningCount: 1, + }, + "FailIndex action; when JobBackoffLimitPerIndex is enabled and used - no error": { + enableJobPodFailurePolicy: true, + enableJobBackoffLimitPerIndex: true, + job: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + CompletionMode: completionModePtr(batch.IndexedCompletion), + Completions: pointer.Int32(2), + BackoffLimitPerIndex: pointer.Int32(1), + Selector: validSelector, + ManualSelector: pointer.Bool(true), + Template: api.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: validSelector.MatchLabels, + }, + Spec: validPodSpecNever, + }, + PodFailurePolicy: &batch.PodFailurePolicy{ + Rules: []batch.PodFailurePolicyRule{ + { + Action: batch.PodFailurePolicyActionFailIndex, + OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{ + Operator: batch.PodFailurePolicyOnExitCodesOpIn, + Values: 
[]int32{1}, + }, + }, + }, + }, + }, + }, + wantJob: &batch.Job{ + ObjectMeta: validObjectMeta, + Spec: batch.JobSpec{ + CompletionMode: completionModePtr(batch.IndexedCompletion), + Completions: pointer.Int32(2), + BackoffLimitPerIndex: pointer.Int32(1), + Selector: validSelector, + ManualSelector: pointer.Bool(true), + Template: api.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: validSelector.MatchLabels, + }, + Spec: validPodSpecNever, + }, + PodFailurePolicy: &batch.PodFailurePolicy{ + Rules: []batch.PodFailurePolicyRule{ + { + Action: batch.PodFailurePolicyActionFailIndex, + OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{ + Operator: batch.PodFailurePolicyOnExitCodesOpIn, + Values: []int32{1}, + }, + }, + }, + }, + }, + }, + }, } for name, tc := range testcases { t.Run(name, func(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodFailurePolicy, tc.enableJobPodFailurePolicy)() + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobBackoffLimitPerIndex, tc.enableJobBackoffLimitPerIndex)() errs := Strategy.Validate(ctx, tc.job) if len(errs) != int(tc.wantWarningCount) { - t.Errorf("want warnings %d but got %d", tc.wantWarningCount, len(errs)) + t.Errorf("want warnings %d but got %d, errors: %v", tc.wantWarningCount, len(errs), errs) } if diff := cmp.Diff(tc.wantJob, tc.job); diff != "" { t.Errorf("Unexpected job (-want,+got):\n%s", diff) diff --git a/staging/src/k8s.io/api/batch/v1/generated.pb.go b/staging/src/k8s.io/api/batch/v1/generated.pb.go index feafc23c2bb..f24436194c1 100644 --- a/staging/src/k8s.io/api/batch/v1/generated.pb.go +++ b/staging/src/k8s.io/api/batch/v1/generated.pb.go @@ -495,113 +495,117 @@ func init() { } var fileDescriptor_3b52da57c93de713 = []byte{ - // 1696 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x73, 0xe3, 0x48, - 0x15, 0x8f, 0xe2, 0xd8, 0xb1, 0xdb, 0xc9, 0xc4, 0xd3, 0xb3, 0x33, 0x63, 0xc2, 0x96, 0x95, 0xd5, - 0xec, 0x6e, 0x65, 0xa9, 0x45, 0x66, 0xb2, 0x53, 0x2c, 0xff, 0x8b, 0x91, 0x87, 0x59, 0x26, 0x78, - 0x36, 0xa6, 0x9d, 0x40, 0xd5, 0xb2, 0x50, 0xc8, 0x52, 0xdb, 0xd1, 0x46, 0x56, 0x1b, 0x75, 0x2b, - 0xb5, 0xb9, 0x50, 0x54, 0xf1, 0x05, 0xe0, 0xc8, 0x17, 0xe0, 0xc8, 0x05, 0xce, 0x70, 0xa3, 0x72, - 0xdc, 0xe2, 0xb4, 0xc5, 0x41, 0xc5, 0x88, 0x0f, 0xc0, 0x3d, 0x5c, 0xa8, 0x6e, 0xb5, 0xf5, 0xcf, - 0x52, 0xc8, 0x6c, 0x15, 0x5b, 0xdc, 0xa2, 0xf7, 0x7e, 0xef, 0xd7, 0x4f, 0xfd, 0x9e, 0x7e, 0xef, - 0xc5, 0xe0, 0x5b, 0x67, 0x5f, 0xa3, 0xba, 0x43, 0xfa, 0x67, 0xc1, 0x04, 0xfb, 0x1e, 0x66, 0x98, - 0xf6, 0xcf, 0xb1, 0x67, 0x13, 0xbf, 0x2f, 0x1d, 0xe6, 0xc2, 0xe9, 0x4f, 0x4c, 0x66, 0x9d, 0xf6, - 0xcf, 0x1f, 0xf6, 0x67, 0xd8, 0xc3, 0xbe, 0xc9, 0xb0, 0xad, 0x2f, 0x7c, 0xc2, 0x08, 0xbc, 0x13, - 0x83, 0x74, 0x73, 0xe1, 0xe8, 0x02, 0xa4, 0x9f, 0x3f, 0xdc, 0xfd, 0xf2, 0xcc, 0x61, 0xa7, 0xc1, - 0x44, 0xb7, 0xc8, 0xbc, 0x3f, 0x23, 0x33, 0xd2, 0x17, 0xd8, 0x49, 0x30, 0x15, 0x4f, 0xe2, 0x41, - 0xfc, 0x15, 0x73, 0xec, 0x6a, 0x99, 0x83, 0x2c, 0xe2, 0xe3, 0x92, 0x73, 0x76, 0x1f, 0xa5, 0x98, - 0xb9, 0x69, 0x9d, 0x3a, 0x1e, 0xf6, 0x2f, 0xfa, 0x8b, 0xb3, 0x19, 0x37, 0xd0, 0xfe, 0x1c, 0x33, - 0xb3, 0x2c, 0xaa, 0x5f, 0x15, 0xe5, 0x07, 0x1e, 0x73, 0xe6, 0x78, 0x25, 0xe0, 0xab, 0xff, 0x2d, - 0x80, 0x5a, 0xa7, 0x78, 0x6e, 0x16, 0xe3, 0xb4, 0x7f, 0x2b, 0x60, 0x73, 0xe0, 0x13, 0xef, 0x90, - 0x4c, 0xe0, 0xcf, 0x41, 0x93, 0xe7, 0x63, 0x9b, 0xcc, 0xec, 0x2a, 0x7b, 0xca, 0x7e, 0xfb, 
0xe0, - 0x2b, 0x7a, 0x7a, 0x4b, 0x09, 0xad, 0xbe, 0x38, 0x9b, 0x71, 0x03, 0xd5, 0x39, 0x5a, 0x3f, 0x7f, - 0xa8, 0x1f, 0x4d, 0x3e, 0xc2, 0x16, 0x7b, 0x8e, 0x99, 0x69, 0xc0, 0xcb, 0x50, 0x5d, 0x8b, 0x42, - 0x15, 0xa4, 0x36, 0x94, 0xb0, 0x42, 0x03, 0x6c, 0xd0, 0x05, 0xb6, 0xba, 0xeb, 0x82, 0x7d, 0x4f, - 0x2f, 0xa9, 0x81, 0x2e, 0xb3, 0x19, 0x2f, 0xb0, 0x65, 0x6c, 0x49, 0xb6, 0x0d, 0xfe, 0x84, 0x44, - 0x2c, 0x3c, 0x04, 0x0d, 0xca, 0x4c, 0x16, 0xd0, 0x6e, 0x4d, 0xb0, 0x68, 0xd7, 0xb2, 0x08, 0xa4, - 0x71, 0x4b, 0xf2, 0x34, 0xe2, 0x67, 0x24, 0x19, 0xb4, 0x3f, 0x28, 0xa0, 0x2d, 0x91, 0x43, 0x87, - 0x32, 0xf8, 0xe1, 0xca, 0x0d, 0xe8, 0x37, 0xbb, 0x01, 0x1e, 0x2d, 0xde, 0xbf, 0x23, 0x4f, 0x6a, - 0x2e, 0x2d, 0x99, 0xb7, 0x7f, 0x0c, 0xea, 0x0e, 0xc3, 0x73, 0xda, 0x5d, 0xdf, 0xab, 0xed, 0xb7, - 0x0f, 0x5e, 0xbd, 0x2e, 0x71, 0x63, 0x5b, 0x12, 0xd5, 0x9f, 0xf1, 0x10, 0x14, 0x47, 0x6a, 0x7f, - 0xdb, 0x48, 0x12, 0xe6, 0x57, 0x02, 0xdf, 0x06, 0x4d, 0x5e, 0x58, 0x3b, 0x70, 0xb1, 0x48, 0xb8, - 0x95, 0x26, 0x30, 0x96, 0x76, 0x94, 0x20, 0xe0, 0x3e, 0x68, 0xf2, 0x5e, 0xf8, 0x80, 0x78, 0xb8, - 0xdb, 0x14, 0xe8, 0x2d, 0x8e, 0x3c, 0x96, 0x36, 0x94, 0x78, 0xe1, 0x09, 0xb8, 0x4f, 0x99, 0xe9, - 0x33, 0xc7, 0x9b, 0x3d, 0xc1, 0xa6, 0xed, 0x3a, 0x1e, 0x1e, 0x63, 0x8b, 0x78, 0x36, 0x15, 0xb5, - 0xab, 0x19, 0x5f, 0x8c, 0x42, 0xf5, 0xfe, 0xb8, 0x1c, 0x82, 0xaa, 0x62, 0xe1, 0x87, 0xe0, 0xb6, - 0x45, 0x3c, 0x2b, 0xf0, 0x7d, 0xec, 0x59, 0x17, 0x23, 0xe2, 0x3a, 0xd6, 0x85, 0x28, 0x63, 0xcb, - 0xd0, 0x65, 0xde, 0xb7, 0x07, 0x45, 0xc0, 0x55, 0x99, 0x11, 0xad, 0x12, 0xc1, 0x37, 0xc0, 0x26, - 0x0d, 0xe8, 0x02, 0x7b, 0x76, 0x77, 0x63, 0x4f, 0xd9, 0x6f, 0x1a, 0xed, 0x28, 0x54, 0x37, 0xc7, - 0xb1, 0x09, 0x2d, 0x7d, 0xf0, 0x27, 0xa0, 0xfd, 0x11, 0x99, 0x1c, 0xe3, 0xf9, 0xc2, 0x35, 0x19, - 0xee, 0xd6, 0x45, 0x9d, 0x5f, 0x2f, 0x2d, 0xc6, 0x61, 0x8a, 0x13, 0xfd, 0x78, 0x47, 0x26, 0xd9, - 0xce, 0x38, 0x50, 0x96, 0x0d, 0xfe, 0x0c, 0xec, 0xd2, 0xc0, 0xb2, 0x30, 0xa5, 0xd3, 0xc0, 0x3d, - 0x24, 0x13, 0xfa, 0x7d, 0x87, 0x32, 0xe2, 0x5f, 0x0c, 0x9d, 0xb9, 0xc3, 0xba, 0x8d, 0x3d, 0x65, - 0xbf, 0x6e, 0xf4, 0xa2, 0x50, 0xdd, 0x1d, 0x57, 0xa2, 0xd0, 0x35, 0x0c, 0x10, 0x81, 0x7b, 0x53, - 0xd3, 0x71, 0xb1, 0xbd, 0xc2, 0xbd, 0x29, 0xb8, 0x77, 0xa3, 0x50, 0xbd, 0xf7, 0xb4, 0x14, 0x81, - 0x2a, 0x22, 0xb5, 0x3f, 0xaf, 0x83, 0xed, 0xdc, 0xf7, 0x02, 0x7f, 0x00, 0x1a, 0xa6, 0xc5, 0x9c, - 0x73, 0xde, 0x54, 0xbc, 0x55, 0x1f, 0x64, 0x6f, 0x87, 0x2b, 0x5d, 0xfa, 0xd5, 0x23, 0x3c, 0xc5, - 0xbc, 0x08, 0x38, 0xfd, 0xc8, 0x1e, 0x8b, 0x50, 0x24, 0x29, 0xa0, 0x0b, 0x3a, 0xae, 0x49, 0xd9, - 0xb2, 0x1f, 0x79, 0xb7, 0x89, 0xfa, 0xb4, 0x0f, 0xbe, 0x74, 0xb3, 0x8f, 0x8b, 0x47, 0x18, 0xaf, - 0x44, 0xa1, 0xda, 0x19, 0x16, 0x78, 0xd0, 0x0a, 0x33, 0xf4, 0x01, 0x14, 0xb6, 0xe4, 0x0a, 0xc5, - 0x79, 0xf5, 0x97, 0x3e, 0xef, 0x5e, 0x14, 0xaa, 0x70, 0xb8, 0xc2, 0x84, 0x4a, 0xd8, 0xb5, 0x7f, - 0x29, 0xa0, 0xf6, 0xf9, 0x08, 0xe8, 0x77, 0x72, 0x02, 0xfa, 0x6a, 0x55, 0xd3, 0x56, 0x8a, 0xe7, - 0xd3, 0x82, 0x78, 0xf6, 0x2a, 0x19, 0xae, 0x17, 0xce, 0xbf, 0xd6, 0xc0, 0xd6, 0x21, 0x99, 0x0c, - 0x88, 0x67, 0x3b, 0xcc, 0x21, 0x1e, 0x7c, 0x04, 0x36, 0xd8, 0xc5, 0x62, 0x29, 0x42, 0x7b, 0xcb, - 0xa3, 0x8f, 0x2f, 0x16, 0xf8, 0x2a, 0x54, 0x3b, 0x59, 0x2c, 0xb7, 0x21, 0x81, 0x86, 0xc3, 0x24, - 0x9d, 0x75, 0x11, 0xf7, 0x28, 0x7f, 0xdc, 0x55, 0xa8, 0x96, 0x8c, 0x58, 0x3d, 0x61, 0xca, 0x27, - 0x05, 0x67, 0x60, 0x9b, 0x17, 0x67, 0xe4, 0x93, 0x49, 0xdc, 0x65, 0xb5, 0x97, 0xae, 0xfa, 0x5d, - 0x99, 0xc0, 0xf6, 0x30, 0x4b, 0x84, 0xf2, 0xbc, 0xf0, 0x3c, 0xee, 0xb1, 0x63, 0xdf, 0xf4, 0x68, - 0xfc, 0x4a, 0x9f, 
0xad, 0xa7, 0x77, 0xe5, 0x69, 0xa2, 0xcf, 0xf2, 0x6c, 0xa8, 0xe4, 0x04, 0xf8, - 0x26, 0x68, 0xf8, 0xd8, 0xa4, 0xc4, 0x13, 0xfd, 0xdc, 0x4a, 0xab, 0x83, 0x84, 0x15, 0x49, 0x2f, - 0x7c, 0x0b, 0x6c, 0xce, 0x31, 0xa5, 0xe6, 0x0c, 0x0b, 0xc5, 0x69, 0x19, 0x3b, 0x12, 0xb8, 0xf9, - 0x3c, 0x36, 0xa3, 0xa5, 0x5f, 0xfb, 0xbd, 0x02, 0x36, 0x3f, 0x9f, 0xe9, 0xf7, 0xed, 0xfc, 0xf4, - 0xeb, 0x56, 0x75, 0x5e, 0xc5, 0xe4, 0xfb, 0x5d, 0x43, 0x24, 0x2a, 0xa6, 0xde, 0x43, 0xd0, 0x5e, - 0x98, 0xbe, 0xe9, 0xba, 0xd8, 0x75, 0xe8, 0x5c, 0xe4, 0x5a, 0x37, 0x76, 0xb8, 0x2e, 0x8f, 0x52, - 0x33, 0xca, 0x62, 0x78, 0x88, 0x45, 0xe6, 0x0b, 0x17, 0xf3, 0xcb, 0x8c, 0xdb, 0x4d, 0x86, 0x0c, - 0x52, 0x33, 0xca, 0x62, 0xe0, 0x11, 0xb8, 0x1b, 0x2b, 0x58, 0x71, 0x02, 0xd6, 0xc4, 0x04, 0xfc, - 0x42, 0x14, 0xaa, 0x77, 0x1f, 0x97, 0x01, 0x50, 0x79, 0x1c, 0x9c, 0x81, 0xce, 0x82, 0xd8, 0x5c, - 0x9c, 0x03, 0x1f, 0xcb, 0xe1, 0xd7, 0x16, 0xf7, 0xfc, 0x46, 0xe9, 0x65, 0x8c, 0x0a, 0xe0, 0x58, - 0x03, 0x8b, 0x56, 0xb4, 0x42, 0x0a, 0x1f, 0x81, 0xad, 0x89, 0x69, 0x9d, 0x91, 0xe9, 0x34, 0x3b, - 0x1a, 0x3a, 0x51, 0xa8, 0x6e, 0x19, 0x19, 0x3b, 0xca, 0xa1, 0xe0, 0x4f, 0x41, 0x93, 0x62, 0x17, - 0x5b, 0x8c, 0xf8, 0xb2, 0x97, 0xdf, 0xb9, 0x61, 0xf9, 0xcd, 0x09, 0x76, 0xc7, 0x32, 0x34, 0x5e, - 0x29, 0x96, 0x4f, 0x28, 0xa1, 0x84, 0xdf, 0x00, 0xb7, 0xe6, 0xa6, 0x17, 0x98, 0x09, 0x52, 0x34, - 0x71, 0xd3, 0x80, 0x51, 0xa8, 0xde, 0x7a, 0x9e, 0xf3, 0xa0, 0x02, 0x12, 0xfe, 0x10, 0x34, 0xd9, - 0x72, 0x5e, 0x37, 0x44, 0x6a, 0xa5, 0x13, 0x69, 0x44, 0xec, 0xdc, 0xb8, 0x4e, 0xda, 0x31, 0x99, - 0xd5, 0x09, 0x0d, 0xdf, 0x70, 0x18, 0x73, 0x65, 0x69, 0x1e, 0x4f, 0x19, 0xf6, 0x9f, 0x3a, 0x9e, - 0x43, 0x4f, 0xb1, 0x2d, 0x56, 0xa3, 0x7a, 0xbc, 0xe1, 0x1c, 0x1f, 0x0f, 0xcb, 0x20, 0xa8, 0x2a, - 0x16, 0x0e, 0xc1, 0xad, 0xb4, 0x87, 0x9e, 0x13, 0x1b, 0x77, 0x5b, 0xe2, 0x0b, 0x7c, 0x9d, 0xbf, - 0xe5, 0x20, 0xe7, 0xb9, 0x5a, 0xb1, 0xa0, 0x42, 0x6c, 0x76, 0xa3, 0x01, 0xd5, 0x1b, 0x8d, 0xf6, - 0xdb, 0x3a, 0x68, 0xa5, 0xc3, 0xfb, 0x04, 0x00, 0x6b, 0xa9, 0x90, 0x54, 0x0e, 0xf0, 0xd7, 0xaa, - 0xbe, 0xb6, 0x44, 0x4b, 0xd3, 0xc1, 0x93, 0x98, 0x28, 0xca, 0x10, 0xc1, 0x1f, 0x83, 0x96, 0x58, - 0xeb, 0x84, 0xd6, 0xad, 0xbf, 0xb4, 0xd6, 0x6d, 0x47, 0xa1, 0xda, 0x1a, 0x2f, 0x09, 0x50, 0xca, - 0x05, 0xa7, 0xd9, 0x2b, 0xfb, 0x8c, 0xba, 0x0d, 0xf3, 0xd7, 0x2b, 0x8e, 0x28, 0xb0, 0x72, 0xf5, - 0x94, 0x4b, 0xcd, 0x86, 0x28, 0x70, 0xd5, 0xbe, 0xd2, 0x07, 0x2d, 0xb1, 0x80, 0x61, 0x1b, 0xdb, - 0xa2, 0x47, 0xeb, 0xc6, 0x6d, 0x09, 0x6d, 0x8d, 0x97, 0x0e, 0x94, 0x62, 0x38, 0x71, 0xbc, 0x59, - 0xc9, 0xfd, 0x2e, 0x21, 0x8e, 0xf7, 0x30, 0x24, 0xbd, 0xf0, 0x09, 0xe8, 0xc8, 0x94, 0xb0, 0xfd, - 0xcc, 0xb3, 0xf1, 0xc7, 0x98, 0x8a, 0x4f, 0xb3, 0x65, 0x74, 0x65, 0x44, 0x67, 0x50, 0xf0, 0xa3, - 0x95, 0x08, 0xf8, 0x6b, 0x05, 0xdc, 0x0f, 0x3c, 0x8b, 0x04, 0x1e, 0xc3, 0xf6, 0x31, 0xf6, 0xe7, - 0x8e, 0xc7, 0xff, 0x9f, 0x1b, 0x11, 0x9b, 0x8a, 0xce, 0x6d, 0x1f, 0xbc, 0x5d, 0x5a, 0xec, 0x93, - 0xf2, 0x98, 0xb8, 0xcf, 0x2b, 0x9c, 0xa8, 0xea, 0x24, 0xa8, 0x82, 0xba, 0x8f, 0x4d, 0xfb, 0x42, - 0xb4, 0x77, 0xdd, 0x68, 0x71, 0xbd, 0x46, 0xdc, 0x80, 0x62, 0xbb, 0xf6, 0x47, 0x05, 0xec, 0x14, - 0xd6, 0xe7, 0xff, 0xff, 0xfd, 0x48, 0x9b, 0x80, 0x15, 0x7d, 0x85, 0xef, 0x83, 0xba, 0x1f, 0xb8, - 0x78, 0xf9, 0x29, 0xbd, 0x75, 0x23, 0xad, 0x46, 0x81, 0x8b, 0xd3, 0x49, 0xc6, 0x9f, 0x28, 0x8a, - 0x69, 0xb4, 0xbf, 0x2b, 0xe0, 0xcd, 0x22, 0xfc, 0xc8, 0xfb, 0xde, 0xc7, 0x0e, 0x1b, 0x10, 0x1b, - 0x53, 0x84, 0x7f, 0x11, 0x38, 0x3e, 0x9e, 0x63, 0x8f, 0xc1, 0x77, 0xc1, 0xb6, 0x45, 0x3c, 0x66, - 0xf2, 0x6b, 0x79, 0xdf, 0x9c, 0x2f, 0xd7, 
0xab, 0xdb, 0x7c, 0x43, 0x19, 0x64, 0x1d, 0x28, 0x8f, - 0x83, 0x63, 0xd0, 0x24, 0x0b, 0xfe, 0x8f, 0x3e, 0xf1, 0xe5, 0x6a, 0xf5, 0xee, 0x52, 0x0b, 0x8f, - 0xa4, 0xfd, 0x2a, 0x54, 0x1f, 0x5c, 0x93, 0xc6, 0x12, 0x86, 0x12, 0x22, 0xa8, 0x81, 0xc6, 0xb9, - 0xe9, 0x06, 0x98, 0x4f, 0xc0, 0xda, 0x7e, 0xdd, 0x00, 0xbc, 0xc7, 0x7f, 0x24, 0x2c, 0x48, 0x7a, - 0xb4, 0xbf, 0x94, 0xbe, 0xdc, 0x88, 0xd8, 0xa9, 0xaa, 0x8c, 0x4c, 0xc6, 0xb0, 0xef, 0xc1, 0xf7, - 0x72, 0x2b, 0xe3, 0x3b, 0x85, 0x95, 0xf1, 0x41, 0xc9, 0xe2, 0x97, 0xa5, 0xf9, 0x5f, 0x6d, 0x91, - 0xda, 0xe5, 0x3a, 0x78, 0xa5, 0xac, 0x9a, 0xf0, 0xbb, 0xb1, 0x7e, 0x10, 0x4f, 0x66, 0xbc, 0x9f, - 0xd5, 0x0f, 0xe2, 0x5d, 0x85, 0xea, 0xbd, 0x62, 0x5c, 0xec, 0x41, 0x32, 0x0e, 0x7a, 0xa0, 0x4d, - 0xd2, 0x1b, 0x96, 0x4d, 0xfa, 0xcd, 0x1b, 0xf5, 0x53, 0x79, 0x83, 0xc4, 0x1b, 0x4c, 0xd6, 0x97, - 0x3d, 0x00, 0xfe, 0x12, 0xec, 0x90, 0xfc, 0xdd, 0x8b, 0xca, 0xdd, 0xfc, 0xcc, 0xb2, 0xba, 0x19, - 0xf7, 0xe5, 0x7b, 0xef, 0x14, 0xfc, 0xa8, 0x78, 0x98, 0xf6, 0x27, 0x05, 0x54, 0x29, 0x0b, 0x1c, - 0x65, 0x55, 0x96, 0x7f, 0x59, 0x2d, 0xe3, 0x20, 0xa7, 0xb0, 0x57, 0xa1, 0xfa, 0x5a, 0xd5, 0x8f, - 0x5a, 0xbc, 0xec, 0x54, 0x3f, 0x79, 0xf6, 0x24, 0x2b, 0xc3, 0xef, 0x25, 0x32, 0xbc, 0x2e, 0xe8, - 0xfa, 0xa9, 0x04, 0xdf, 0x8c, 0x4b, 0x86, 0x1b, 0x5f, 0xbf, 0x7c, 0xd1, 0x5b, 0xfb, 0xe4, 0x45, - 0x6f, 0xed, 0xd3, 0x17, 0xbd, 0xb5, 0x5f, 0x45, 0x3d, 0xe5, 0x32, 0xea, 0x29, 0x9f, 0x44, 0x3d, - 0xe5, 0xd3, 0xa8, 0xa7, 0xfc, 0x23, 0xea, 0x29, 0xbf, 0xf9, 0x67, 0x6f, 0xed, 0x83, 0x3b, 0x25, - 0xbf, 0x32, 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0xf2, 0x8e, 0x19, 0x59, 0x94, 0x14, 0x00, 0x00, + // 1752 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6f, 0x23, 0x49, + 0x15, 0x4f, 0x27, 0x71, 0x6c, 0x97, 0x93, 0x89, 0xa7, 0xe6, 0xcb, 0x84, 0x95, 0x3b, 0xeb, 0xd9, + 0x5d, 0x65, 0xd1, 0xd2, 0x66, 0xb2, 0x23, 0x96, 0x6f, 0xed, 0x74, 0x86, 0x59, 0x26, 0x38, 0x3b, + 0xa6, 0x9c, 0x01, 0x69, 0x59, 0x10, 0xe5, 0xee, 0xb2, 0xd3, 0x9b, 0x76, 0x97, 0xe9, 0xaa, 0x8e, + 0x26, 0x17, 0x84, 0xc4, 0x3f, 0xc0, 0x5f, 0x81, 0xc4, 0x85, 0x0b, 0x9c, 0xe1, 0x86, 0x46, 0x9c, + 0x56, 0x9c, 0x56, 0x1c, 0x5a, 0x4c, 0xf3, 0x07, 0x70, 0x0f, 0x17, 0x54, 0xd5, 0xe5, 0xfe, 0x72, + 0x77, 0xc8, 0xac, 0xc4, 0x68, 0x6f, 0xe9, 0xf7, 0x7e, 0xef, 0x57, 0xaf, 0xea, 0x7d, 0xc6, 0xe0, + 0x3b, 0xa7, 0xdf, 0x60, 0x86, 0x43, 0xfb, 0xa7, 0xc1, 0x98, 0xf8, 0x1e, 0xe1, 0x84, 0xf5, 0xcf, + 0x88, 0x67, 0x53, 0xbf, 0xaf, 0x14, 0x78, 0xee, 0xf4, 0xc7, 0x98, 0x5b, 0x27, 0xfd, 0xb3, 0x7b, + 0xfd, 0x29, 0xf1, 0x88, 0x8f, 0x39, 0xb1, 0x8d, 0xb9, 0x4f, 0x39, 0x85, 0x37, 0x62, 0x90, 0x81, + 0xe7, 0x8e, 0x21, 0x41, 0xc6, 0xd9, 0xbd, 0x9d, 0xaf, 0x4e, 0x1d, 0x7e, 0x12, 0x8c, 0x0d, 0x8b, + 0xce, 0xfa, 0x53, 0x3a, 0xa5, 0x7d, 0x89, 0x1d, 0x07, 0x13, 0xf9, 0x25, 0x3f, 0xe4, 0x5f, 0x31, + 0xc7, 0x4e, 0x2f, 0x73, 0x90, 0x45, 0x7d, 0x52, 0x72, 0xce, 0xce, 0xfd, 0x14, 0x33, 0xc3, 0xd6, + 0x89, 0xe3, 0x11, 0xff, 0xbc, 0x3f, 0x3f, 0x9d, 0x0a, 0x01, 0xeb, 0xcf, 0x08, 0xc7, 0x65, 0x56, + 0xfd, 0x2a, 0x2b, 0x3f, 0xf0, 0xb8, 0x33, 0x23, 0x4b, 0x06, 0x5f, 0xff, 0x5f, 0x06, 0xcc, 0x3a, + 0x21, 0x33, 0x5c, 0xb4, 0xeb, 0xfd, 0x47, 0x03, 0xf5, 0x03, 0x9f, 0x7a, 0x87, 0x74, 0x0c, 0x7f, + 0x01, 0x1a, 0xc2, 0x1f, 0x1b, 0x73, 0xdc, 0xd1, 0x76, 0xb5, 0xbd, 0xd6, 0xfe, 0xd7, 0x8c, 0xf4, + 0x95, 0x12, 0x5a, 0x63, 0x7e, 0x3a, 0x15, 0x02, 0x66, 0x08, 0xb4, 0x71, 0x76, 0xcf, 0x78, 0x32, + 0xfe, 0x84, 0x58, 0xfc, 0x88, 0x70, 0x6c, 0xc2, 0xe7, 0xa1, 0xbe, 0x12, 0x85, 0x3a, 0x48, 0x65, + 0x28, 0x61, 0x85, 
0x26, 0x58, 0x67, 0x73, 0x62, 0x75, 0x56, 0x25, 0xfb, 0xae, 0x51, 0x12, 0x03, + 0x43, 0x79, 0x33, 0x9a, 0x13, 0xcb, 0xdc, 0x54, 0x6c, 0xeb, 0xe2, 0x0b, 0x49, 0x5b, 0x78, 0x08, + 0x36, 0x18, 0xc7, 0x3c, 0x60, 0x9d, 0x35, 0xc9, 0xd2, 0xbb, 0x94, 0x45, 0x22, 0xcd, 0x6b, 0x8a, + 0x67, 0x23, 0xfe, 0x46, 0x8a, 0xa1, 0xf7, 0x07, 0x0d, 0xb4, 0x14, 0x72, 0xe0, 0x30, 0x0e, 0x3f, + 0x5e, 0x7a, 0x01, 0xe3, 0x6a, 0x2f, 0x20, 0xac, 0xe5, 0xfd, 0xdb, 0xea, 0xa4, 0xc6, 0x42, 0x92, + 0xb9, 0xfd, 0x03, 0x50, 0x73, 0x38, 0x99, 0xb1, 0xce, 0xea, 0xee, 0xda, 0x5e, 0x6b, 0xff, 0xb5, + 0xcb, 0x1c, 0x37, 0xb7, 0x14, 0x51, 0xed, 0xb1, 0x30, 0x41, 0xb1, 0x65, 0xef, 0xef, 0xeb, 0x89, + 0xc3, 0xe2, 0x49, 0xe0, 0x3b, 0xa0, 0x21, 0x02, 0x6b, 0x07, 0x2e, 0x91, 0x0e, 0x37, 0x53, 0x07, + 0x46, 0x4a, 0x8e, 0x12, 0x04, 0xdc, 0x03, 0x0d, 0x91, 0x0b, 0x1f, 0x51, 0x8f, 0x74, 0x1a, 0x12, + 0xbd, 0x29, 0x90, 0xc7, 0x4a, 0x86, 0x12, 0x2d, 0x7c, 0x0a, 0xee, 0x30, 0x8e, 0x7d, 0xee, 0x78, + 0xd3, 0x87, 0x04, 0xdb, 0xae, 0xe3, 0x91, 0x11, 0xb1, 0xa8, 0x67, 0x33, 0x19, 0xbb, 0x35, 0xf3, + 0xcb, 0x51, 0xa8, 0xdf, 0x19, 0x95, 0x43, 0x50, 0x95, 0x2d, 0xfc, 0x18, 0x5c, 0xb7, 0xa8, 0x67, + 0x05, 0xbe, 0x4f, 0x3c, 0xeb, 0x7c, 0x48, 0x5d, 0xc7, 0x3a, 0x97, 0x61, 0x6c, 0x9a, 0x86, 0xf2, + 0xfb, 0xfa, 0x41, 0x11, 0x70, 0x51, 0x26, 0x44, 0xcb, 0x44, 0xf0, 0x4d, 0x50, 0x67, 0x01, 0x9b, + 0x13, 0xcf, 0xee, 0xac, 0xef, 0x6a, 0x7b, 0x0d, 0xb3, 0x15, 0x85, 0x7a, 0x7d, 0x14, 0x8b, 0xd0, + 0x42, 0x07, 0x7f, 0x0a, 0x5a, 0x9f, 0xd0, 0xf1, 0x31, 0x99, 0xcd, 0x5d, 0xcc, 0x49, 0xa7, 0x26, + 0xe3, 0xfc, 0x46, 0x69, 0x30, 0x0e, 0x53, 0x9c, 0xcc, 0xc7, 0x1b, 0xca, 0xc9, 0x56, 0x46, 0x81, + 0xb2, 0x6c, 0xf0, 0xe7, 0x60, 0x87, 0x05, 0x96, 0x45, 0x18, 0x9b, 0x04, 0xee, 0x21, 0x1d, 0xb3, + 0x1f, 0x38, 0x8c, 0x53, 0xff, 0x7c, 0xe0, 0xcc, 0x1c, 0xde, 0xd9, 0xd8, 0xd5, 0xf6, 0x6a, 0x66, + 0x37, 0x0a, 0xf5, 0x9d, 0x51, 0x25, 0x0a, 0x5d, 0xc2, 0x00, 0x11, 0xb8, 0x3d, 0xc1, 0x8e, 0x4b, + 0xec, 0x25, 0xee, 0xba, 0xe4, 0xde, 0x89, 0x42, 0xfd, 0xf6, 0xa3, 0x52, 0x04, 0xaa, 0xb0, 0xec, + 0xfd, 0x79, 0x15, 0x6c, 0xe5, 0xea, 0x05, 0xfe, 0x10, 0x6c, 0x60, 0x8b, 0x3b, 0x67, 0x22, 0xa9, + 0x44, 0xaa, 0xde, 0xcd, 0xbe, 0x8e, 0xe8, 0x74, 0x69, 0xd5, 0x23, 0x32, 0x21, 0x22, 0x08, 0x24, + 0x2d, 0xb2, 0x07, 0xd2, 0x14, 0x29, 0x0a, 0xe8, 0x82, 0xb6, 0x8b, 0x19, 0x5f, 0xe4, 0xa3, 0xc8, + 0x36, 0x19, 0x9f, 0xd6, 0xfe, 0x57, 0xae, 0x56, 0x5c, 0xc2, 0xc2, 0xbc, 0x19, 0x85, 0x7a, 0x7b, + 0x50, 0xe0, 0x41, 0x4b, 0xcc, 0xd0, 0x07, 0x50, 0xca, 0x92, 0x27, 0x94, 0xe7, 0xd5, 0x5e, 0xfa, + 0xbc, 0xdb, 0x51, 0xa8, 0xc3, 0xc1, 0x12, 0x13, 0x2a, 0x61, 0xef, 0xfd, 0x5b, 0x03, 0x6b, 0xaf, + 0xa6, 0x81, 0x7e, 0x2f, 0xd7, 0x40, 0x5f, 0xab, 0x4a, 0xda, 0xca, 0xe6, 0xf9, 0xa8, 0xd0, 0x3c, + 0xbb, 0x95, 0x0c, 0x97, 0x37, 0xce, 0xbf, 0xae, 0x81, 0xcd, 0x43, 0x3a, 0x3e, 0xa0, 0x9e, 0xed, + 0x70, 0x87, 0x7a, 0xf0, 0x3e, 0x58, 0xe7, 0xe7, 0xf3, 0x45, 0x13, 0xda, 0x5d, 0x1c, 0x7d, 0x7c, + 0x3e, 0x27, 0x17, 0xa1, 0xde, 0xce, 0x62, 0x85, 0x0c, 0x49, 0x34, 0x1c, 0x24, 0xee, 0xac, 0x4a, + 0xbb, 0xfb, 0xf9, 0xe3, 0x2e, 0x42, 0xbd, 0x64, 0xc4, 0x1a, 0x09, 0x53, 0xde, 0x29, 0x38, 0x05, + 0x5b, 0x22, 0x38, 0x43, 0x9f, 0x8e, 0xe3, 0x2c, 0x5b, 0x7b, 0xe9, 0xa8, 0xdf, 0x52, 0x0e, 0x6c, + 0x0d, 0xb2, 0x44, 0x28, 0xcf, 0x0b, 0xcf, 0xe2, 0x1c, 0x3b, 0xf6, 0xb1, 0xc7, 0xe2, 0x2b, 0x7d, + 0xbe, 0x9c, 0xde, 0x51, 0xa7, 0xc9, 0x3c, 0xcb, 0xb3, 0xa1, 0x92, 0x13, 0xe0, 0x5b, 0x60, 0xc3, + 0x27, 0x98, 0x51, 0x4f, 0xe6, 0x73, 0x33, 0x8d, 0x0e, 0x92, 0x52, 0xa4, 0xb4, 0xf0, 0x6d, 0x50, + 0x9f, 0x11, 0xc6, 0xf0, 0x94, 0xc8, 0x8e, 
0xd3, 0x34, 0xb7, 0x15, 0xb0, 0x7e, 0x14, 0x8b, 0xd1, + 0x42, 0xdf, 0xfb, 0x9d, 0x06, 0xea, 0xaf, 0x66, 0xfa, 0x7d, 0x37, 0x3f, 0xfd, 0x3a, 0x55, 0x99, + 0x57, 0x31, 0xf9, 0x7e, 0x5f, 0x97, 0x8e, 0xca, 0xa9, 0x77, 0x0f, 0xb4, 0xe6, 0xd8, 0xc7, 0xae, + 0x4b, 0x5c, 0x87, 0xcd, 0xa4, 0xaf, 0x35, 0x73, 0x5b, 0xf4, 0xe5, 0x61, 0x2a, 0x46, 0x59, 0x8c, + 0x30, 0xb1, 0xe8, 0x6c, 0xee, 0x12, 0xf1, 0x98, 0x71, 0xba, 0x29, 0x93, 0x83, 0x54, 0x8c, 0xb2, + 0x18, 0xf8, 0x04, 0xdc, 0x8a, 0x3b, 0x58, 0x71, 0x02, 0xae, 0xc9, 0x09, 0xf8, 0xa5, 0x28, 0xd4, + 0x6f, 0x3d, 0x28, 0x03, 0xa0, 0x72, 0x3b, 0x38, 0x05, 0xed, 0x39, 0xb5, 0x45, 0x73, 0x0e, 0x7c, + 0xa2, 0x86, 0x5f, 0x4b, 0xbe, 0xf3, 0x9b, 0xa5, 0x8f, 0x31, 0x2c, 0x80, 0xe3, 0x1e, 0x58, 0x94, + 0xa2, 0x25, 0x52, 0x78, 0x1f, 0x6c, 0x8e, 0xb1, 0x75, 0x4a, 0x27, 0x93, 0xec, 0x68, 0x68, 0x47, + 0xa1, 0xbe, 0x69, 0x66, 0xe4, 0x28, 0x87, 0x82, 0x03, 0x70, 0x33, 0xfb, 0x3d, 0x24, 0xfe, 0x63, + 0xcf, 0x26, 0xcf, 0x3a, 0x9b, 0xd2, 0xba, 0x13, 0x85, 0xfa, 0x4d, 0xb3, 0x44, 0x8f, 0x4a, 0xad, + 0xe0, 0xfb, 0xa0, 0x3d, 0xc3, 0xcf, 0xe2, 0x49, 0x24, 0x25, 0x84, 0x75, 0xb6, 0x24, 0x93, 0xbc, + 0xc5, 0x51, 0x41, 0x87, 0x96, 0xd0, 0xf0, 0x67, 0xa0, 0xc1, 0x88, 0x4b, 0x2c, 0x4e, 0x7d, 0x55, + 0x5b, 0xef, 0x5e, 0x31, 0x1d, 0xf1, 0x98, 0xb8, 0x23, 0x65, 0x1a, 0xaf, 0x38, 0x8b, 0x2f, 0x94, + 0x50, 0xc2, 0x6f, 0x81, 0x6b, 0x33, 0xec, 0x05, 0x38, 0x41, 0xca, 0xa2, 0x6a, 0x98, 0x30, 0x0a, + 0xf5, 0x6b, 0x47, 0x39, 0x0d, 0x2a, 0x20, 0xe1, 0x8f, 0x40, 0x83, 0x2f, 0xf6, 0x87, 0x0d, 0xe9, + 0x5a, 0xe9, 0x84, 0x1c, 0x52, 0x3b, 0xb7, 0x3e, 0x24, 0xe5, 0x91, 0xec, 0x0e, 0x09, 0x8d, 0xd8, + 0xb8, 0x38, 0x77, 0x55, 0xaa, 0x3c, 0x98, 0x70, 0xe2, 0x3f, 0x72, 0x3c, 0x87, 0x9d, 0x10, 0x5b, + 0xae, 0x6a, 0xb5, 0x78, 0xe3, 0x3a, 0x3e, 0x1e, 0x94, 0x41, 0x50, 0x95, 0x2d, 0x1c, 0x80, 0x6b, + 0x69, 0x4e, 0x1f, 0x51, 0x9b, 0x74, 0x9a, 0xb2, 0x23, 0xbc, 0x21, 0x6e, 0x79, 0x90, 0xd3, 0x5c, + 0x2c, 0x49, 0x50, 0xc1, 0x36, 0xbb, 0x61, 0x81, 0xea, 0x0d, 0xab, 0xf7, 0xb7, 0x1a, 0x68, 0xa6, + 0xcb, 0xc4, 0x53, 0x00, 0xac, 0x45, 0xc7, 0x66, 0x6a, 0xa1, 0x78, 0xbd, 0xaa, 0xfa, 0x93, 0xde, + 0x9e, 0x0e, 0xc2, 0x44, 0xc4, 0x50, 0x86, 0x08, 0xfe, 0x04, 0x34, 0xe5, 0x9a, 0x29, 0x7b, 0xef, + 0xea, 0x4b, 0xf7, 0xde, 0xad, 0x28, 0xd4, 0x9b, 0xa3, 0x05, 0x01, 0x4a, 0xb9, 0xe0, 0x24, 0xfb, + 0x64, 0x9f, 0x73, 0x8e, 0xc0, 0xfc, 0xf3, 0xca, 0x23, 0x0a, 0xac, 0xa2, 0x9b, 0xab, 0x25, 0x6b, + 0x5d, 0x06, 0xb8, 0x6a, 0x7f, 0xea, 0x83, 0xa6, 0x5c, 0x08, 0x89, 0x4d, 0x6c, 0x99, 0xa3, 0x35, + 0xf3, 0xba, 0x82, 0x36, 0x47, 0x0b, 0x05, 0x4a, 0x31, 0x82, 0x38, 0xde, 0xf4, 0xd4, 0xbe, 0x99, + 0x10, 0xc7, 0xf5, 0x85, 0x94, 0x16, 0x3e, 0x04, 0x6d, 0xe5, 0x52, 0x5a, 0xa2, 0x75, 0x99, 0x1d, + 0x1d, 0x65, 0xd1, 0x3e, 0x28, 0xe8, 0xd1, 0x92, 0x05, 0x7c, 0x0f, 0x6c, 0x4d, 0x72, 0x55, 0x0e, + 0x24, 0xc5, 0x75, 0x31, 0x45, 0xf3, 0x25, 0x9e, 0xc7, 0xc1, 0xdf, 0x68, 0xe0, 0x4e, 0xe0, 0x59, + 0x34, 0xf0, 0x38, 0xb1, 0x8f, 0x89, 0x3f, 0x73, 0x3c, 0xf1, 0x8f, 0xe9, 0x90, 0xda, 0x4c, 0xa6, + 0x7c, 0x6b, 0xff, 0x9d, 0xd2, 0x2c, 0x79, 0x5a, 0x6e, 0x13, 0x17, 0x48, 0x85, 0x12, 0x55, 0x9d, + 0x04, 0x75, 0x50, 0xf3, 0x09, 0xb6, 0xcf, 0x65, 0x5d, 0xd4, 0xcc, 0xa6, 0x18, 0x3c, 0x48, 0x08, + 0x50, 0x2c, 0xef, 0xfd, 0x51, 0x03, 0xdb, 0x85, 0xff, 0x03, 0xbe, 0xf8, 0x8b, 0x5e, 0x6f, 0x0c, + 0x96, 0x06, 0x05, 0xfc, 0x10, 0xd4, 0xfc, 0xc0, 0x25, 0x8b, 0x1a, 0x7c, 0xfb, 0x4a, 0x43, 0x07, + 0x05, 0x2e, 0x49, 0x47, 0xb2, 0xf8, 0x62, 0x28, 0xa6, 0xe9, 0xfd, 0x43, 0x03, 0x6f, 0x15, 0xe1, + 0x4f, 0xbc, 0xef, 0x3f, 0x73, 0xf8, 0x01, 0xb5, 0x09, 0x43, 0xe4, 
0x97, 0x81, 0xe3, 0x93, 0x19, + 0xf1, 0xb8, 0x48, 0x12, 0x8b, 0x7a, 0x1c, 0x8b, 0x67, 0xf9, 0x10, 0xcf, 0x16, 0x7b, 0xa2, 0x4c, + 0x92, 0x83, 0xac, 0x02, 0xe5, 0x71, 0x70, 0x04, 0x1a, 0x74, 0x4e, 0x7c, 0x2c, 0xfa, 0x73, 0xbc, + 0x23, 0xbe, 0xb7, 0x68, 0xa2, 0x4f, 0x94, 0xfc, 0x22, 0xd4, 0xef, 0x5e, 0xe2, 0xc6, 0x02, 0x86, + 0x12, 0x22, 0xd8, 0x03, 0x1b, 0x67, 0xd8, 0x0d, 0x88, 0x18, 0xe5, 0x6b, 0x7b, 0x35, 0x13, 0x88, + 0xe2, 0xf8, 0xb1, 0x94, 0x20, 0xa5, 0xe9, 0xfd, 0xa5, 0xf4, 0x72, 0x43, 0x6a, 0xa7, 0xed, 0x68, + 0x88, 0x39, 0x27, 0xbe, 0x07, 0x3f, 0xc8, 0xed, 0xbe, 0xef, 0x16, 0x76, 0xdf, 0xbb, 0x25, 0x1b, + 0x6c, 0x96, 0xe6, 0xff, 0xb5, 0x0e, 0xf7, 0x9e, 0xaf, 0x82, 0x9b, 0x65, 0xd1, 0x84, 0xef, 0xc7, + 0x8d, 0x87, 0x7a, 0xca, 0xe3, 0xbd, 0x6c, 0xe3, 0xa1, 0xde, 0x45, 0xa8, 0xdf, 0x2e, 0xda, 0xc5, + 0x1a, 0xa4, 0xec, 0xa0, 0x07, 0x5a, 0x34, 0x7d, 0x61, 0x95, 0xa4, 0xdf, 0xbe, 0x52, 0x3e, 0x95, + 0x27, 0x48, 0xbc, 0x8a, 0x65, 0x75, 0xd9, 0x03, 0xe0, 0xaf, 0xc0, 0x36, 0xcd, 0xbf, 0xbd, 0x8c, + 0xdc, 0xd5, 0xcf, 0x2c, 0x8b, 0x9b, 0x79, 0x47, 0xdd, 0x7b, 0xbb, 0xa0, 0x47, 0xc5, 0xc3, 0x7a, + 0x7f, 0xd2, 0x40, 0x55, 0x67, 0x81, 0xc3, 0x6c, 0x7b, 0x16, 0x95, 0xd5, 0x34, 0xf7, 0x73, 0xad, + 0xf9, 0x22, 0xd4, 0x5f, 0xaf, 0xfa, 0x75, 0x4e, 0x84, 0x9d, 0x19, 0x4f, 0x1f, 0x3f, 0xcc, 0xf6, + 0xef, 0x0f, 0x92, 0xfe, 0xbd, 0x2a, 0xe9, 0xfa, 0x69, 0xef, 0xbe, 0x1a, 0x97, 0x32, 0x37, 0xbf, + 0xf9, 0xfc, 0x45, 0x77, 0xe5, 0xd3, 0x17, 0xdd, 0x95, 0xcf, 0x5e, 0x74, 0x57, 0x7e, 0x1d, 0x75, + 0xb5, 0xe7, 0x51, 0x57, 0xfb, 0x34, 0xea, 0x6a, 0x9f, 0x45, 0x5d, 0xed, 0x9f, 0x51, 0x57, 0xfb, + 0xed, 0xbf, 0xba, 0x2b, 0x1f, 0xdd, 0x28, 0xf9, 0xb9, 0xf4, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, + 0xff, 0x94, 0x48, 0xc1, 0x5d, 0x15, 0x00, 0x00, } func (m *CronJob) Marshal() (dAtA []byte, err error) { @@ -1023,6 +1027,16 @@ func (m *JobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.MaxFailedIndexes != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxFailedIndexes)) + i-- + dAtA[i] = 0x68 + } + if m.BackoffLimitPerIndex != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimitPerIndex)) + i-- + dAtA[i] = 0x60 + } if m.PodFailurePolicy != nil { { size, err := m.PodFailurePolicy.MarshalToSizedBuffer(dAtA[:i]) @@ -1132,6 +1146,13 @@ func (m *JobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.FailedIndexes != nil { + i -= len(*m.FailedIndexes) + copy(dAtA[i:], *m.FailedIndexes) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailedIndexes))) + i-- + dAtA[i] = 0x52 + } if m.Ready != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.Ready)) i-- @@ -1645,6 +1666,12 @@ func (m *JobSpec) Size() (n int) { l = m.PodFailurePolicy.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.BackoffLimitPerIndex != nil { + n += 1 + sovGenerated(uint64(*m.BackoffLimitPerIndex)) + } + if m.MaxFailedIndexes != nil { + n += 1 + sovGenerated(uint64(*m.MaxFailedIndexes)) + } return n } @@ -1680,6 +1707,10 @@ func (m *JobStatus) Size() (n int) { if m.Ready != nil { n += 1 + sovGenerated(uint64(*m.Ready)) } + if m.FailedIndexes != nil { + l = len(*m.FailedIndexes) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1913,6 +1944,8 @@ func (this *JobSpec) String() string { `CompletionMode:` + valueToStringGenerated(this.CompletionMode) + `,`, `Suspend:` + valueToStringGenerated(this.Suspend) + `,`, `PodFailurePolicy:` + strings.Replace(this.PodFailurePolicy.String(), "PodFailurePolicy", "PodFailurePolicy", 1) + `,`, + `BackoffLimitPerIndex:` + 
valueToStringGenerated(this.BackoffLimitPerIndex) + `,`, + `MaxFailedIndexes:` + valueToStringGenerated(this.MaxFailedIndexes) + `,`, `}`, }, "") return s @@ -1936,6 +1969,7 @@ func (this *JobStatus) String() string { `CompletedIndexes:` + fmt.Sprintf("%v", this.CompletedIndexes) + `,`, `UncountedTerminatedPods:` + strings.Replace(this.UncountedTerminatedPods.String(), "UncountedTerminatedPods", "UncountedTerminatedPods", 1) + `,`, `Ready:` + valueToStringGenerated(this.Ready) + `,`, + `FailedIndexes:` + valueToStringGenerated(this.FailedIndexes) + `,`, `}`, }, "") return s @@ -3527,6 +3561,46 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BackoffLimitPerIndex", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BackoffLimitPerIndex = &v + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxFailedIndexes", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxFailedIndexes = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3828,6 +3902,39 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } } m.Ready = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailedIndexes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FailedIndexes = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/staging/src/k8s.io/api/batch/v1/generated.proto b/staging/src/k8s.io/api/batch/v1/generated.proto index df4381c737f..defdf4d5ff2 100644 --- a/staging/src/k8s.io/api/batch/v1/generated.proto +++ b/staging/src/k8s.io/api/batch/v1/generated.proto @@ -223,6 +223,30 @@ message JobSpec { // +optional optional int32 backoffLimit = 7; + // Specifies the limit for the number of retries within an + // index before marking this index as failed. When enabled the number of + // failures per index is kept in the pod's + // batch.kubernetes.io/job-index-failure-count annotation. It can only + // be set when Job's completionMode=Indexed, and the Pod's restart + // policy is Never. The field is immutable. + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). + // +optional + optional int32 backoffLimitPerIndex = 12; + + // Specifies the maximal number of failed indexes before marking the Job as + // failed, when backoffLimitPerIndex is set. 
Once the number of failed + // indexes exceeds this number the entire Job is marked as Failed and its + // execution is terminated. When left as null the job continues execution of + // all of its indexes and is marked with the `Complete` Job condition. + // It can only be specified when backoffLimitPerIndex is set. + // It can be null or up to completions. It is required and must be + // less than or equal to 10^4 when is completions greater than 10^5. + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). + // +optional + optional int32 maxFailedIndexes = 13; + // A label query over pods that should match the pod count. // Normally, the system sets this field for you. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors @@ -345,6 +369,19 @@ message JobStatus { // +optional optional string completedIndexes = 7; + // FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. + // The indexes are represented in the text format analogous as for the + // `completedIndexes` field, ie. they are kept as decimal integers + // separated by commas. The numbers are listed in increasing order. Three or + // more consecutive numbers are compressed and represented by the first and + // last element of the series, separated by a hyphen. + // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are + // represented as "1,3-5,7". + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). + // +optional + optional string failedIndexes = 10; + // uncountedTerminatedPods holds the UIDs of Pods that have terminated but // the job controller hasn't yet accounted for in the status counters. // @@ -452,6 +489,10 @@ message PodFailurePolicyRule { // // - FailJob: indicates that the pod's job is marked as Failed and all // running pods are terminated. + // - FailIndex: indicates that the pod's index is marked as Failed and will + // not be restarted. + // This value is alpha-level. It can be used when the + // `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). // - Ignore: indicates that the counter towards the .backoffLimit is not // incremented and a replacement pod is created. // - Count: indicates that the pod is handled in the default way - the diff --git a/staging/src/k8s.io/api/batch/v1/types.go b/staging/src/k8s.io/api/batch/v1/types.go index 6e616390627..c34d2aebecb 100644 --- a/staging/src/k8s.io/api/batch/v1/types.go +++ b/staging/src/k8s.io/api/batch/v1/types.go @@ -50,6 +50,9 @@ const ( // ControllerUid is used to programatically get pods corresponding to a Job. // There is a corresponding label without the batch.kubernetes.io that we support for legacy reasons. ControllerUidLabel = labelPrefix + "controller-uid" + // Annotation indicating the number of failures for the index corresponding + // to the pod. + JobIndexFailureCountAnnotation = labelPrefix + "job-index-failure-count" ) // +genclient @@ -114,6 +117,11 @@ const ( // pod's job as Failed and terminate all running pods. PodFailurePolicyActionFailJob PodFailurePolicyAction = "FailJob" + // This is an action which might be taken on a pod failure - mark the + // Job's index as failed to avoid restarts within this index. This action + // can only be used when backoffLimitPerIndex is set. 
+ PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex" + // This is an action which might be taken on a pod failure - the counter towards // .backoffLimit, represented by the job's .status.failed field, is not // incremented and a replacement pod is created. @@ -191,6 +199,10 @@ type PodFailurePolicyRule struct { // // - FailJob: indicates that the pod's job is marked as Failed and all // running pods are terminated. + // - FailIndex: indicates that the pod's index is marked as Failed and will + // not be restarted. + // This value is alpha-level. It can be used when the + // `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). // - Ignore: indicates that the counter towards the .backoffLimit is not // incremented and a replacement pod is created. // - Count: indicates that the pod is handled in the default way - the @@ -267,6 +279,30 @@ type JobSpec struct { // +optional BackoffLimit *int32 `json:"backoffLimit,omitempty" protobuf:"varint,7,opt,name=backoffLimit"` + // Specifies the limit for the number of retries within an + // index before marking this index as failed. When enabled the number of + // failures per index is kept in the pod's + // batch.kubernetes.io/job-index-failure-count annotation. It can only + // be set when Job's completionMode=Indexed, and the Pod's restart + // policy is Never. The field is immutable. + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). + // +optional + BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty" protobuf:"varint,12,opt,name=backoffLimitPerIndex"` + + // Specifies the maximal number of failed indexes before marking the Job as + // failed, when backoffLimitPerIndex is set. Once the number of failed + // indexes exceeds this number the entire Job is marked as Failed and its + // execution is terminated. When left as null the job continues execution of + // all of its indexes and is marked with the `Complete` Job condition. + // It can only be specified when backoffLimitPerIndex is set. + // It can be null or up to completions. It is required and must be + // less than or equal to 10^4 when is completions greater than 10^5. + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). + // +optional + MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty" protobuf:"varint,13,opt,name=maxFailedIndexes"` + // TODO enabled it when https://github.com/kubernetes/kubernetes/issues/28486 has been fixed // Optional number of failed pods to retain. // +optional @@ -394,6 +430,19 @@ type JobStatus struct { // +optional CompletedIndexes string `json:"completedIndexes,omitempty" protobuf:"bytes,7,opt,name=completedIndexes"` + // FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. + // The indexes are represented in the text format analogous as for the + // `completedIndexes` field, ie. they are kept as decimal integers + // separated by commas. The numbers are listed in increasing order. Three or + // more consecutive numbers are compressed and represented by the first and + // last element of the series, separated by a hyphen. + // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are + // represented as "1,3-5,7". + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). 
+ // +optional + FailedIndexes *string `json:"failedIndexes,omitempty" protobuf:"bytes,10,opt,name=failedIndexes"` + // uncountedTerminatedPods holds the UIDs of Pods that have terminated but // the job controller hasn't yet accounted for in the status counters. // diff --git a/staging/src/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/batch/v1/types_swagger_doc_generated.go index f6f3141f189..5cefe7b10a4 100644 --- a/staging/src/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -117,6 +117,8 @@ var map_JobSpec = map[string]string{ "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", + "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. 
However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", "template": "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", @@ -138,6 +140,7 @@ var map_JobStatus = map[string]string{ "succeeded": "The number of pods which reached phase Succeeded.", "failed": "The number of pods which reached phase Failed.", "completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", + "failedIndexes": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", "uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", "ready": "The number of pods which have a Ready condition.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobReadyPods is enabled (enabled by default).", } @@ -188,7 +191,7 @@ func (PodFailurePolicyOnPodConditionsPattern) SwaggerDoc() map[string]string { var map_PodFailurePolicyRule = map[string]string{ "": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.", - "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. 
Clients should react to an unknown action by skipping the rule.", + "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is alpha-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", "onExitCodes": "Represents the requirement on the container exit codes.", "onPodConditions": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed.", } diff --git a/staging/src/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/batch/v1/zz_generated.deepcopy.go index 2a901e9d0f9..5527ab30be5 100644 --- a/staging/src/k8s.io/api/batch/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/batch/v1/zz_generated.deepcopy.go @@ -267,6 +267,16 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) { *out = new(int32) **out = **in } + if in.BackoffLimitPerIndex != nil { + in, out := &in.BackoffLimitPerIndex, &out.BackoffLimitPerIndex + *out = new(int32) + **out = **in + } + if in.MaxFailedIndexes != nil { + in, out := &in.MaxFailedIndexes, &out.MaxFailedIndexes + *out = new(int32) + **out = **in + } if in.Selector != nil { in, out := &in.Selector, &out.Selector *out = new(metav1.LabelSelector) @@ -324,6 +334,11 @@ func (in *JobStatus) DeepCopyInto(out *JobStatus) { in, out := &in.CompletionTime, &out.CompletionTime *out = (*in).DeepCopy() } + if in.FailedIndexes != nil { + in, out := &in.FailedIndexes, &out.FailedIndexes + *out = new(string) + **out = **in + } if in.UncountedTerminatedPods != nil { in, out := &in.UncountedTerminatedPods, &out.UncountedTerminatedPods *out = new(UncountedTerminatedPods) diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.json b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.json index a6d2970cc04..c0a3ea99057 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.json +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.json @@ -117,6 +117,8 @@ ] }, "backoffLimit": 7, + "backoffLimitPerIndex": 12, + "maxFailedIndexes": 13, "selector": { "matchLabels": { "matchLabelsKey": "matchLabelsValue" diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.pb b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.pb index 74a6a00d4da..da86af27649 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.pb and b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.yaml b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.yaml index a3368010de3..7b10842d191 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.CronJob.yaml @@ -71,9 +71,11 @@ spec: spec: activeDeadlineSeconds: 3 backoffLimit: 7 + 
backoffLimitPerIndex: 12 completionMode: completionModeValue completions: 2 manualSelector: true + maxFailedIndexes: 13 parallelism: 1 podFailurePolicy: rules: diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.json b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.json index 00e2587e0bd..7785b7bfc7d 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.json +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.json @@ -68,6 +68,8 @@ ] }, "backoffLimit": 7, + "backoffLimitPerIndex": 12, + "maxFailedIndexes": 13, "selector": { "matchLabels": { "matchLabelsKey": "matchLabelsValue" @@ -1731,6 +1733,7 @@ "succeeded": 5, "failed": 6, "completedIndexes": "completedIndexesValue", + "failedIndexes": "failedIndexesValue", "uncountedTerminatedPods": { "succeeded": [ "succeededValue" diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.pb b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.pb index 677ab73c0f1..ccc12d7a9eb 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.pb and b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.yaml b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.yaml index 5ab547fc858..b49a62cb30e 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.yaml @@ -35,9 +35,11 @@ metadata: spec: activeDeadlineSeconds: 3 backoffLimit: 7 + backoffLimitPerIndex: 12 completionMode: completionModeValue completions: 2 manualSelector: true + maxFailedIndexes: 13 parallelism: 1 podFailurePolicy: rules: @@ -1182,6 +1184,7 @@ status: status: statusValue type: typeValue failed: 6 + failedIndexes: failedIndexesValue ready: 9 startTime: "2002-01-01T01:01:01Z" succeeded: 5 diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.json b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.json index 534fea6d20a..d4c574489a1 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.json +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.json @@ -117,6 +117,8 @@ ] }, "backoffLimit": 7, + "backoffLimitPerIndex": 12, + "maxFailedIndexes": 13, "selector": { "matchLabels": { "matchLabelsKey": "matchLabelsValue" diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.pb b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.pb index 78cc3d78d93..c671fa67d59 100644 Binary files a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.pb and b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.yaml b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.yaml index d34c5f0b15c..3b0c0d96627 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/batch.v1beta1.CronJob.yaml @@ -71,9 +71,11 @@ spec: spec: activeDeadlineSeconds: 3 backoffLimit: 7 + backoffLimitPerIndex: 12 completionMode: completionModeValue completions: 2 manualSelector: true + maxFailedIndexes: 13 parallelism: 1 podFailurePolicy: rules: diff --git a/staging/src/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go b/staging/src/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go index 839d88b64ec..b94aa966887 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go @@ -32,6 +32,8 @@ type 
JobSpecApplyConfiguration struct { ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` PodFailurePolicy *PodFailurePolicyApplyConfiguration `json:"podFailurePolicy,omitempty"` BackoffLimit *int32 `json:"backoffLimit,omitempty"` + BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty"` + MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty"` Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` ManualSelector *bool `json:"manualSelector,omitempty"` Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` @@ -86,6 +88,22 @@ func (b *JobSpecApplyConfiguration) WithBackoffLimit(value int32) *JobSpecApplyC return b } +// WithBackoffLimitPerIndex sets the BackoffLimitPerIndex field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BackoffLimitPerIndex field is set to the value of the last call. +func (b *JobSpecApplyConfiguration) WithBackoffLimitPerIndex(value int32) *JobSpecApplyConfiguration { + b.BackoffLimitPerIndex = &value + return b +} + +// WithMaxFailedIndexes sets the MaxFailedIndexes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxFailedIndexes field is set to the value of the last call. +func (b *JobSpecApplyConfiguration) WithMaxFailedIndexes(value int32) *JobSpecApplyConfiguration { + b.MaxFailedIndexes = &value + return b +} + // WithSelector sets the Selector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Selector field is set to the value of the last call. diff --git a/staging/src/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go b/staging/src/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go index a36d5d0ae11..a9294e4de7b 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go @@ -32,6 +32,7 @@ type JobStatusApplyConfiguration struct { Succeeded *int32 `json:"succeeded,omitempty"` Failed *int32 `json:"failed,omitempty"` CompletedIndexes *string `json:"completedIndexes,omitempty"` + FailedIndexes *string `json:"failedIndexes,omitempty"` UncountedTerminatedPods *UncountedTerminatedPodsApplyConfiguration `json:"uncountedTerminatedPods,omitempty"` Ready *int32 `json:"ready,omitempty"` } @@ -103,6 +104,14 @@ func (b *JobStatusApplyConfiguration) WithCompletedIndexes(value string) *JobSta return b } +// WithFailedIndexes sets the FailedIndexes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FailedIndexes field is set to the value of the last call. +func (b *JobStatusApplyConfiguration) WithFailedIndexes(value string) *JobStatusApplyConfiguration { + b.FailedIndexes = &value + return b +} + // WithUncountedTerminatedPods sets the UncountedTerminatedPods field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UncountedTerminatedPods field is set to the value of the last call. 
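For reviewers who want to try the new apply-configuration builders end to end, here is a minimal usage sketch (not part of this patch). It assumes the JobBackoffLimitPerIndex feature gate is enabled on the target cluster; the job name, namespace, container image, and field manager are placeholders.

package example

import (
    "context"

    batchv1 "k8s.io/api/batch/v1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
    corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
    "k8s.io/client-go/kubernetes"
)

// applyPerIndexJob applies an Indexed Job whose spec uses the new
// WithBackoffLimitPerIndex and WithMaxFailedIndexes builders via server-side apply.
func applyPerIndexJob(ctx context.Context, cs kubernetes.Interface) error {
    job := batchv1ac.Job("per-index-demo", "default").
        WithSpec(batchv1ac.JobSpec().
            // Per-index retries require Indexed completion mode and restartPolicy=Never.
            WithCompletionMode(batchv1.IndexedCompletion).
            WithCompletions(10).
            WithParallelism(3).
            // At most 2 retries within each index before the index is marked failed.
            WithBackoffLimitPerIndex(2).
            // The whole Job fails once more than 4 indexes have failed.
            WithMaxFailedIndexes(4).
            WithTemplate(corev1ac.PodTemplateSpec().
                WithSpec(corev1ac.PodSpec().
                    WithRestartPolicy(corev1.RestartPolicyNever).
                    WithContainers(corev1ac.Container().
                        WithName("main").
                        WithImage("busybox")))))

    _, err := cs.BatchV1().Jobs("default").Apply(ctx, job, metav1.ApplyOptions{FieldManager: "example"})
    return err
}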
diff --git a/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go b/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go index 5b9998d2226..5ce4fbf3e2a 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -3353,6 +3353,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: backoffLimit type: scalar: numeric + - name: backoffLimitPerIndex + type: + scalar: numeric - name: completionMode type: scalar: string @@ -3362,6 +3365,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: manualSelector type: scalar: boolean + - name: maxFailedIndexes + type: + scalar: numeric - name: parallelism type: scalar: numeric @@ -3402,6 +3408,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: failed type: scalar: numeric + - name: failedIndexes + type: + scalar: string - name: ready type: scalar: numeric
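To close the loop on the API changes above, here is a hedged sketch (not part of this patch) of a Job object built from the k8s.io/api/batch/v1 types, combining backoffLimitPerIndex and maxFailedIndexes with a FailIndex pod failure policy rule. The exit code, names, and image are illustrative assumptions; as exercised by the strategy tests earlier in this diff, such an object is only valid with the alpha JobBackoffLimitPerIndex feature gate enabled, completionMode=Indexed, and a Never restart policy.

package example

import (
    batchv1 "k8s.io/api/batch/v1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/utils/pointer"
)

// perIndexJob returns a Job that retries each index at most twice, marks an
// index failed immediately on container exit code 42, and fails the whole Job
// once more than 4 indexes have failed.
func perIndexJob() *batchv1.Job {
    indexed := batchv1.IndexedCompletion
    return &batchv1.Job{
        ObjectMeta: metav1.ObjectMeta{Name: "per-index-demo", Namespace: metav1.NamespaceDefault},
        Spec: batchv1.JobSpec{
            CompletionMode: &indexed,
            Completions:    pointer.Int32(10),
            Parallelism:    pointer.Int32(3),
            // Per-index failure counts are tracked in the
            // batch.kubernetes.io/job-index-failure-count pod annotation.
            BackoffLimitPerIndex: pointer.Int32(2),
            MaxFailedIndexes:     pointer.Int32(4),
            PodFailurePolicy: &batchv1.PodFailurePolicy{
                Rules: []batchv1.PodFailurePolicyRule{{
                    // FailIndex fails this index right away instead of using up
                    // the remaining per-index retries.
                    Action: batchv1.PodFailurePolicyActionFailIndex,
                    OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{
                        Operator: batchv1.PodFailurePolicyOnExitCodesOpIn,
                        Values:   []int32{42},
                    },
                }},
            },
            Template: corev1.PodTemplateSpec{
                Spec: corev1.PodSpec{
                    RestartPolicy: corev1.RestartPolicyNever,
                    Containers:    []corev1.Container{{Name: "main", Image: "busybox"}},
                },
            },
        },
    }
}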