mirror of https://github.com/k3s-io/kubernetes.git
Fix for timeZone validation and strategy
commit dbb3906a09
parent f3b928a23d
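The change splits CronJob validation into separate create and update entry points and "ratchets" spec.timeZone: on update the field is only re-validated when its value actually changes, so an object that already stores a value the validator would now reject can still be updated or deleted. The sketch below illustrates just that decision. stringPtrEqual mirrors pointer.StringEqual from k8s.io/utils/pointer (the import added in the first hunk); shouldValidateTimeZone and main are illustrative names, not upstream code. What counts as a valid time zone is sketched separately after the validation hunks below.

package main

import "fmt"

// stringPtrEqual mirrors pointer.StringEqual from k8s.io/utils/pointer:
// two nil pointers are equal, nil vs non-nil are not, otherwise the values are compared.
func stringPtrEqual(a, b *string) bool {
    if a == nil || b == nil {
        return a == b
    }
    return *a == *b
}

// shouldValidateTimeZone captures the ratchet added to validateCronJobSpec:
// with no old spec (create) the field is always checked; on update it is
// re-checked only when the value changed.
func shouldValidateTimeZone(newTZ, oldTZ *string, haveOldSpec bool) bool {
    return !haveOldSpec || !stringPtrEqual(oldTZ, newTZ)
}

func main() {
    broken := "not-a-zone"
    fmt.Println(shouldValidateTimeZone(&broken, nil, false))    // true: create always validates
    fmt.Println(shouldValidateTimeZone(&broken, &broken, true)) // false: unchanged on update, tolerated
    fmt.Println(shouldValidateTimeZone(&broken, nil, true))     // true: changed on update, validated
}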
@@ -31,6 +31,7 @@ import (
 	"k8s.io/kubernetes/pkg/apis/batch"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
+	"k8s.io/utils/pointer"
 )

 // maxParallelismForIndexJob is the maximum parallelism that an Indexed Job

@@ -279,11 +280,11 @@ func ValidateJobStatusUpdate(status, oldStatus batch.JobStatus) field.ErrorList
 	return allErrs
 }

-// ValidateCronJob validates a CronJob and returns an ErrorList with any errors.
-func ValidateCronJob(cronJob *batch.CronJob, opts apivalidation.PodValidationOptions) field.ErrorList {
+// ValidateCronJobCreate validates a CronJob on creation and returns an ErrorList with any errors.
+func ValidateCronJobCreate(cronJob *batch.CronJob, opts apivalidation.PodValidationOptions) field.ErrorList {
 	// CronJobs and rcs have the same name validation
 	allErrs := apivalidation.ValidateObjectMeta(&cronJob.ObjectMeta, true, apivalidation.ValidateReplicationControllerName, field.NewPath("metadata"))
-	allErrs = append(allErrs, ValidateCronJobSpec(&cronJob.Spec, field.NewPath("spec"), opts)...)
+	allErrs = append(allErrs, validateCronJobSpec(&cronJob.Spec, nil, field.NewPath("spec"), opts)...)
 	if len(cronJob.ObjectMeta.Name) > apimachineryvalidation.DNS1035LabelMaxLength-11 {
 		// The cronjob controller appends a 11-character suffix to the cronjob (`-$TIMESTAMP`) when
 		// creating a job. The job name length limit is 63 characters.

@@ -297,14 +298,15 @@ func ValidateCronJob(cronJob *batch.CronJob, opts apivalidation.PodValidationOpt
 // ValidateCronJobUpdate validates an update to a CronJob and returns an ErrorList with any errors.
 func ValidateCronJobUpdate(job, oldJob *batch.CronJob, opts apivalidation.PodValidationOptions) field.ErrorList {
 	allErrs := apivalidation.ValidateObjectMetaUpdate(&job.ObjectMeta, &oldJob.ObjectMeta, field.NewPath("metadata"))
-	allErrs = append(allErrs, ValidateCronJobSpec(&job.Spec, field.NewPath("spec"), opts)...)
+	allErrs = append(allErrs, validateCronJobSpec(&job.Spec, &oldJob.Spec, field.NewPath("spec"), opts)...)
+
 	// skip the 52-character name validation limit on update validation
 	// to allow old cronjobs with names > 52 chars to be updated/deleted
 	return allErrs
 }

-// ValidateCronJobSpec validates a CronJobSpec and returns an ErrorList with any errors.
-func ValidateCronJobSpec(spec *batch.CronJobSpec, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList {
+// validateCronJobSpec validates a CronJobSpec and returns an ErrorList with any errors.
+func validateCronJobSpec(spec, oldSpec *batch.CronJobSpec, fldPath *field.Path, opts apivalidation.PodValidationOptions) field.ErrorList {
 	allErrs := field.ErrorList{}

 	if len(spec.Schedule) == 0 {

@@ -317,7 +319,10 @@ func ValidateCronJobSpec(spec *batch.CronJobSpec, fldPath *field.Path, opts apiv
 	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.StartingDeadlineSeconds), fldPath.Child("startingDeadlineSeconds"))...)
 	}

-	allErrs = append(allErrs, validateTimeZone(spec.TimeZone, fldPath.Child("timeZone"))...)
+	if oldSpec == nil || !pointer.StringEqual(oldSpec.TimeZone, spec.TimeZone) {
+		allErrs = append(allErrs, validateTimeZone(spec.TimeZone, fldPath.Child("timeZone"))...)
+	}
+
 	allErrs = append(allErrs, validateConcurrencyPolicy(&spec.ConcurrencyPolicy, fldPath.Child("concurrencyPolicy"))...)
 	allErrs = append(allErrs, ValidateJobTemplateSpec(&spec.JobTemplate, fldPath.Child("jobTemplate"), opts)...)
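validateTimeZone itself is not touched by this diff, so its body is not shown above. As a rough, non-authoritative sketch, assuming an unset pointer means "no time zone requested" and that validity means the name loads from the platform tz database via time.LoadLocation, the check amounts to:

package main

import (
    "fmt"
    "time"
)

// validateTimeZone is a stand-in for the helper called by validateCronJobSpec;
// the real implementation is not part of this diff. nil is treated as unset and
// any other value must be a loadable IANA time zone name.
func validateTimeZone(timeZone *string) error {
    if timeZone == nil {
        return nil
    }
    if _, err := time.LoadLocation(*timeZone); err != nil {
        return fmt.Errorf("spec.timeZone: invalid value %q: %v", *timeZone, err)
    }
    return nil
}

func main() {
    ny := "America/New_York"
    bad := "broken"
    fmt.Println(validateTimeZone(nil))  // <nil>
    fmt.Println(validateTimeZone(&ny))  // <nil>
    fmt.Println(validateTimeZone(&bad)) // error: unknown time zone
}

Read this way, the test fixtures below make sense: "America/New_York" and "America/Chicago" load, while "broken" and "still broken" do not.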
@@ -933,7 +933,7 @@ func TestValidateCronJob(t *testing.T) {
 		},
 	}
 	for k, v := range successCases {
-		if errs := ValidateCronJob(&v, corevalidation.PodValidationOptions{}); len(errs) != 0 {
+		if errs := ValidateCronJobCreate(&v, corevalidation.PodValidationOptions{}); len(errs) != 0 {
 			t.Errorf("expected success for %s: %v", k, errs)
 		}

@@ -1350,7 +1350,7 @@ func TestValidateCronJob(t *testing.T) {
 	}

 	for k, v := range errorCases {
-		errs := ValidateCronJob(&v, corevalidation.PodValidationOptions{})
+		errs := ValidateCronJobCreate(&v, corevalidation.PodValidationOptions{})
 		if len(errs) == 0 {
 			t.Errorf("expected failure for %s", k)
 		} else {

@@ -1363,9 +1363,14 @@ func TestValidateCronJob(t *testing.T) {

 		// Update validation should fail all failure cases other than the 52 character name limit
 		// copy to avoid polluting the testcase object, set a resourceVersion to allow validating update, and test a no-op update
-		v = *v.DeepCopy()
-		v.ResourceVersion = "1"
-		errs = ValidateCronJobUpdate(&v, &v, corevalidation.PodValidationOptions{})
+		oldSpec := *v.DeepCopy()
+		oldSpec.ResourceVersion = "1"
+		oldSpec.Spec.TimeZone = nil
+
+		newSpec := *v.DeepCopy()
+		newSpec.ResourceVersion = "2"
+
+		errs = ValidateCronJobUpdate(&newSpec, &oldSpec, corevalidation.PodValidationOptions{})
 		if len(errs) == 0 {
 			if k == "metadata.name: must be no more than 52 characters" {
 				continue

@@ -1381,6 +1386,208 @@ func TestValidateCronJob(t *testing.T) {
 	}
 }

+func TestValidateCronJobSpec(t *testing.T) {
+	validPodTemplateSpec := getValidPodTemplateSpecForGenerated(getValidGeneratedSelector())
+	validPodTemplateSpec.Labels = map[string]string{}
+
+	type testCase struct {
+		old       *batch.CronJobSpec
+		new       *batch.CronJobSpec
+		expectErr bool
+	}
+
+	cases := map[string]testCase{
+		"no validation because timeZone is nil for old and new": {
+			old: &batch.CronJobSpec{
+				Schedule:          "0 * * * *",
+				TimeZone:          nil,
+				ConcurrencyPolicy: batch.AllowConcurrent,
+				JobTemplate: batch.JobTemplateSpec{
+					Spec: batch.JobSpec{
+						Template: validPodTemplateSpec,
+					},
+				},
+			},
+			new: &batch.CronJobSpec{
+				Schedule:          "0 * * * *",
+				TimeZone:          nil,
+				ConcurrencyPolicy: batch.AllowConcurrent,
+				JobTemplate: batch.JobTemplateSpec{
+					Spec: batch.JobSpec{
+						Template: validPodTemplateSpec,
+					},
+				},
+			},
+		},
+		"check validation because timeZone is different for new": {
+			old: &batch.CronJobSpec{
+				Schedule:          "0 * * * *",
+				TimeZone:          nil,
+				ConcurrencyPolicy: batch.AllowConcurrent,
+				JobTemplate: batch.JobTemplateSpec{
+					Spec: batch.JobSpec{
+						Template: validPodTemplateSpec,
+					},
+				},
+			},
+			new: &batch.CronJobSpec{
+				Schedule:          "0 * * * *",
+				TimeZone:          pointer.String("America/New_York"),
+				ConcurrencyPolicy: batch.AllowConcurrent,
+				JobTemplate: batch.JobTemplateSpec{
+					Spec: batch.JobSpec{
+						Template: validPodTemplateSpec,
+					},
+				},
+			},
+		},
+		"check validation because timeZone is different for new and invalid": {
+			old: &batch.CronJobSpec{
+				Schedule:          "0 * * * *",
+				TimeZone:          nil,
+				ConcurrencyPolicy: batch.AllowConcurrent,
+				JobTemplate: batch.JobTemplateSpec{
+					Spec: batch.JobSpec{
+						Template: validPodTemplateSpec,
+					},
+				},
+			},
+			new: &batch.CronJobSpec{
+				Schedule:          "0 * * * *",
+				TimeZone:          pointer.String("broken"),
+				ConcurrencyPolicy: batch.AllowConcurrent,
+				JobTemplate: batch.JobTemplateSpec{
+					Spec: batch.JobSpec{
+						Template: validPodTemplateSpec,
+					},
+				},
+			},
+			expectErr: true,
+		},
+		"old timeZone and new timeZone are valid": {
+			old: &batch.CronJobSpec{
+				Schedule:          "0 * * * *",
+				TimeZone:          pointer.String("America/New_York"),
+				ConcurrencyPolicy: batch.AllowConcurrent,
+				JobTemplate: batch.JobTemplateSpec{
+					Spec: batch.JobSpec{
+						Template: validPodTemplateSpec,
+					},
+				},
+			},
+			new: &batch.CronJobSpec{
+				Schedule:          "0 * * * *",
+				TimeZone:          pointer.String("America/Chicago"),
+				ConcurrencyPolicy: batch.AllowConcurrent,
+				JobTemplate: batch.JobTemplateSpec{
+					Spec: batch.JobSpec{
+						Template: validPodTemplateSpec,
+					},
+				},
+			},
+		},
+		"old timeZone is valid, but new timeZone is invalid": {
+			old: &batch.CronJobSpec{
+				Schedule:          "0 * * * *",
+				TimeZone:          pointer.String("America/New_York"),
+				ConcurrencyPolicy: batch.AllowConcurrent,
+				JobTemplate: batch.JobTemplateSpec{
+					Spec: batch.JobSpec{
+						Template: validPodTemplateSpec,
+					},
+				},
+			},
+			new: &batch.CronJobSpec{
+				Schedule:          "0 * * * *",
+				TimeZone:          pointer.String("broken"),
+				ConcurrencyPolicy: batch.AllowConcurrent,
+				JobTemplate: batch.JobTemplateSpec{
+					Spec: batch.JobSpec{
+						Template: validPodTemplateSpec,
+					},
+				},
+			},
+			expectErr: true,
+		},
+		"old timeZone and new timeZone are invalid, but unchanged": {
+			old: &batch.CronJobSpec{
+				Schedule:          "0 * * * *",
+				TimeZone:          pointer.String("broken"),
+				ConcurrencyPolicy: batch.AllowConcurrent,
+				JobTemplate: batch.JobTemplateSpec{
+					Spec: batch.JobSpec{
+						Template: validPodTemplateSpec,
+					},
+				},
+			},
+			new: &batch.CronJobSpec{
+				Schedule:          "0 * * * *",
+				TimeZone:          pointer.String("broken"),
+				ConcurrencyPolicy: batch.AllowConcurrent,
+				JobTemplate: batch.JobTemplateSpec{
+					Spec: batch.JobSpec{
+						Template: validPodTemplateSpec,
+					},
+				},
+			},
+		},
+		"old timeZone and new timeZone are invalid, but different": {
+			old: &batch.CronJobSpec{
+				Schedule:          "0 * * * *",
+				TimeZone:          pointer.String("broken"),
+				ConcurrencyPolicy: batch.AllowConcurrent,
+				JobTemplate: batch.JobTemplateSpec{
+					Spec: batch.JobSpec{
+						Template: validPodTemplateSpec,
+					},
+				},
+			},
+			new: &batch.CronJobSpec{
+				Schedule:          "0 * * * *",
+				TimeZone:          pointer.String("still broken"),
+				ConcurrencyPolicy: batch.AllowConcurrent,
+				JobTemplate: batch.JobTemplateSpec{
+					Spec: batch.JobSpec{
+						Template: validPodTemplateSpec,
+					},
+				},
+			},
+			expectErr: true,
+		},
+		"old timeZone is invalid, but new timeZone is valid": {
+			old: &batch.CronJobSpec{
+				Schedule:          "0 * * * *",
+				TimeZone:          pointer.String("broken"),
+				ConcurrencyPolicy: batch.AllowConcurrent,
+				JobTemplate: batch.JobTemplateSpec{
+					Spec: batch.JobSpec{
+						Template: validPodTemplateSpec,
+					},
+				},
+			},
+			new: &batch.CronJobSpec{
+				Schedule:          "0 * * * *",
+				TimeZone:          pointer.String("America/New_York"),
+				ConcurrencyPolicy: batch.AllowConcurrent,
+				JobTemplate: batch.JobTemplateSpec{
+					Spec: batch.JobSpec{
+						Template: validPodTemplateSpec,
+					},
+				},
+			},
+		},
+	}
+
+	for k, v := range cases {
+		errs := validateCronJobSpec(v.new, v.old, field.NewPath("spec"), corevalidation.PodValidationOptions{})
+		if len(errs) > 0 && !v.expectErr {
+			t.Errorf("unexpected error for %s: %v", k, errs)
+		} else if len(errs) == 0 && v.expectErr {
+			t.Errorf("expected error for %s but got nil", k)
+		}
+	}
+}
+
 func completionModePtr(m batch.CompletionMode) *batch.CompletionMode {
 	return &m
 }
@@ -902,7 +902,7 @@ func TestControllerV2SyncCronJob(t *testing.T) {
 			tc := tc

 			t.Run(name, func(t *testing.T) {
-				defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.CronJobTimeZone, tc.enableTimeZone)
+				defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.CronJobTimeZone, tc.enableTimeZone)()

 				cj := cronJob()
 				cj.Spec.ConcurrencyPolicy = tc.concurrencyPolicy

@@ -1147,6 +1147,63 @@ func TestControllerV2UpdateCronJob(t *testing.T) {
 			},
 			expectedDelay: 1*time.Second + nextScheduleDelta,
 		},
+		{
+			name: "spec.timeZone not changed",
+			oldCronJob: &batchv1.CronJob{
+				Spec: batchv1.CronJobSpec{
+					TimeZone: &newYork,
+					JobTemplate: batchv1.JobTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels:      map[string]string{"a": "b"},
+							Annotations: map[string]string{"x": "y"},
+						},
+						Spec: jobSpec(),
+					},
+				},
+			},
+			newCronJob: &batchv1.CronJob{
+				Spec: batchv1.CronJobSpec{
+					TimeZone: &newYork,
+					JobTemplate: batchv1.JobTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels:      map[string]string{"a": "foo"},
+							Annotations: map[string]string{"x": "y"},
+						},
+						Spec: jobSpec(),
+					},
+				},
+			},
+			expectedDelay: 0 * time.Second,
+		},
+		{
+			name: "spec.timeZone changed",
+			oldCronJob: &batchv1.CronJob{
+				Spec: batchv1.CronJobSpec{
+					TimeZone: &newYork,
+					JobTemplate: batchv1.JobTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels:      map[string]string{"a": "b"},
+							Annotations: map[string]string{"x": "y"},
+						},
+						Spec: jobSpec(),
+					},
+				},
+			},
+			newCronJob: &batchv1.CronJob{
+				Spec: batchv1.CronJobSpec{
+					TimeZone: nil,
+					JobTemplate: batchv1.JobTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels:      map[string]string{"a": "foo"},
+							Annotations: map[string]string{"x": "y"},
+						},
+						Spec: jobSpec(),
+					},
+				},
+			},
+			expectedDelay: 0 * time.Second,
+		},

 		// TODO: Add more test cases for updating scheduling.
 	}
 	for _, tt := range tests {
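Aside from the new update cases, the first hunk in the controller test above adds a trailing () to the defer. featuregatetesting.SetFeatureGateDuringTest flips the gate immediately and returns a restore function; without the extra call, the flip itself is what gets deferred, so the gate is never set during the test body and the cleanup never runs. A generic illustration of the difference (setFlag and the flag variable are placeholders, not the kubernetes helpers):

package main

import "fmt"

var flag bool

// setFlag stands in for a SetFeatureGateDuringTest-style helper: it changes
// state now and returns a function that restores the previous value.
func setFlag(v bool) func() {
    old := flag
    flag = v
    return func() { flag = old }
}

func main() {
    flag = false
    func() {
        defer setFlag(true)                      // wrong: setFlag runs only when this function returns
        fmt.Println("inside, without ():", flag) // false: the flag never flipped for the body
    }()
    fmt.Println("after, without ():", flag) // true: flipped too late, restore discarded

    flag = false
    func() {
        defer setFlag(true)()                 // right: flips now, the restore is what gets deferred
        fmt.Println("inside, with ():", flag) // true
    }()
    fmt.Println("after, with ():", flag) // false: restored
}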
@@ -27,8 +27,10 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
+	"k8s.io/apiserver/pkg/features"
 	"k8s.io/apiserver/pkg/registry/rest"
 	"k8s.io/apiserver/pkg/storage/names"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/api/pod"
 	"k8s.io/kubernetes/pkg/apis/batch"

@@ -88,6 +90,10 @@ func (cronJobStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object)

 	cronJob.Generation = 1

+	if !utilfeature.DefaultFeatureGate.Enabled(features.CronJobTimeZone) {
+		cronJob.Spec.TimeZone = nil
+	}
+
 	pod.DropDisabledTemplateFields(&cronJob.Spec.JobTemplate.Spec.Template, nil)
 }

@@ -97,6 +103,10 @@ func (cronJobStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Ob
 	oldCronJob := old.(*batch.CronJob)
 	newCronJob.Status = oldCronJob.Status

+	if !utilfeature.DefaultFeatureGate.Enabled(features.CronJobTimeZone) && oldCronJob.Spec.TimeZone == nil {
+		newCronJob.Spec.TimeZone = nil
+	}
+
 	pod.DropDisabledTemplateFields(&newCronJob.Spec.JobTemplate.Spec.Template, &oldCronJob.Spec.JobTemplate.Spec.Template)

 	// Any changes to the spec increment the generation number.

@@ -110,7 +120,7 @@ func (cronJobStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Ob
 func (cronJobStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
 	cronJob := obj.(*batch.CronJob)
 	opts := pod.GetValidationOptionsFromPodTemplate(&cronJob.Spec.JobTemplate.Spec.Template, nil)
-	return validation.ValidateCronJob(cronJob, opts)
+	return validation.ValidateCronJobCreate(cronJob, opts)
 }

 // WarningsOnCreate returns warnings for the creation of the given object.
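The strategy hunks keep the API server from persisting the new field while its feature gate is off: PrepareForCreate always clears spec.timeZone when CronJobTimeZone is disabled, while PrepareForUpdate clears it only if the stored object was not already using it, so toggling the gate off does not silently wipe existing data. A condensed, self-contained restatement of that rule (the spec type, gate variable, and helper name are stand-ins, not the upstream strategy code):

package main

import "fmt"

// spec is a stand-in for batch.CronJobSpec with just the field of interest.
type spec struct{ TimeZone *string }

// cronJobTimeZoneEnabled stands in for
// utilfeature.DefaultFeatureGate.Enabled(features.CronJobTimeZone).
var cronJobTimeZoneEnabled = false

// dropDisabledTimeZoneField condenses the PrepareForCreate/PrepareForUpdate hunks:
// when the gate is off, clear timeZone on create (oldSpec == nil), and on update
// only if the old object was not already using the field.
func dropDisabledTimeZoneField(newSpec, oldSpec *spec) {
    if cronJobTimeZoneEnabled {
        return
    }
    if oldSpec != nil && oldSpec.TimeZone != nil {
        return // field already in use on the stored object; keep it
    }
    newSpec.TimeZone = nil
}

func main() {
    ny := "America/New_York"

    created := &spec{TimeZone: &ny}
    dropDisabledTimeZoneField(created, nil)
    fmt.Println("kept on create:", created.TimeZone != nil) // false: dropped while the gate is off

    updated := &spec{TimeZone: &ny}
    dropDisabledTimeZoneField(updated, &spec{TimeZone: &ny})
    fmt.Println("kept on update:", updated.TimeZone != nil) // true: existing use is preserved
}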