From 7cba9d9c92f9a4c5bdadbf28c8a21f3a2fc80d14 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Wed, 22 Feb 2017 12:55:58 +0100 Subject: [PATCH] Issue 37166: remove everything from batch/v2alpha1 that is not new --- pkg/apis/batch/v2alpha1/conversion.go | 49 +---- pkg/apis/batch/v2alpha1/defaults.go | 20 -- pkg/apis/batch/v2alpha1/defaults_test.go | 171 +++--------------- pkg/apis/batch/v2alpha1/register.go | 2 - pkg/apis/batch/v2alpha1/types.go | 149 +-------------- pkg/controller/cronjob/cronjob_controller.go | 28 +-- .../cronjob/cronjob_controller_test.go | 63 +++---- pkg/controller/cronjob/injection.go | 47 ++--- pkg/controller/cronjob/utils.go | 27 +-- pkg/controller/cronjob/utils_test.go | 49 ++--- pkg/registry/batch/rest/storage_batch.go | 5 - test/e2e/cronjob.go | 38 ++-- test/e2e/generated_clientset.go | 21 ++- 13 files changed, 169 insertions(+), 500 deletions(-) diff --git a/pkg/apis/batch/v2alpha1/conversion.go b/pkg/apis/batch/v2alpha1/conversion.go index cf71106edfe..2393fdca97f 100644 --- a/pkg/apis/batch/v2alpha1/conversion.go +++ b/pkg/apis/batch/v2alpha1/conversion.go @@ -19,22 +19,11 @@ package v2alpha1 import ( "fmt" - "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" - v1 "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/apis/batch" ) func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs( - Convert_batch_JobSpec_To_v2alpha1_JobSpec, - Convert_v2alpha1_JobSpec_To_batch_JobSpec, - ) - if err != nil { - return err - } - + var err error // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. for _, k := range []string{"Job", "JobTemplate", "CronJob"} { kind := k // don't close over range variables @@ -53,39 +42,3 @@ func addConversionFuncs(scheme *runtime.Scheme) error { } return nil } - -func Convert_batch_JobSpec_To_v2alpha1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - out.Selector = in.Selector - if in.ManualSelector != nil { - out.ManualSelector = new(bool) - *out.ManualSelector = *in.ManualSelector - } else { - out.ManualSelector = nil - } - - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_v2alpha1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - out.Selector = in.Selector - if in.ManualSelector != nil { - out.ManualSelector = new(bool) - *out.ManualSelector = *in.ManualSelector - } else { - out.ManualSelector = nil - } - - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} diff --git a/pkg/apis/batch/v2alpha1/defaults.go b/pkg/apis/batch/v2alpha1/defaults.go index 63cf624c2d1..6da07cc7d2c 100644 --- a/pkg/apis/batch/v2alpha1/defaults.go +++ b/pkg/apis/batch/v2alpha1/defaults.go @@ -23,30 +23,10 @@ import ( func addDefaultingFuncs(scheme *runtime.Scheme) error { RegisterDefaults(scheme) return scheme.AddDefaultingFuncs( - SetDefaults_Job, SetDefaults_CronJob, ) } -func SetDefaults_Job(obj *Job) { - // For a non-parallel job, you can leave both `.spec.completions` and - // `.spec.parallelism` unset. 
When both are unset, both are defaulted to 1. - if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil { - obj.Spec.Completions = new(int32) - *obj.Spec.Completions = 1 - obj.Spec.Parallelism = new(int32) - *obj.Spec.Parallelism = 1 - } - if obj.Spec.Parallelism == nil { - obj.Spec.Parallelism = new(int32) - *obj.Spec.Parallelism = 1 - } - labels := obj.Spec.Template.Labels - if labels != nil && len(obj.Labels) == 0 { - obj.Labels = labels - } -} - func SetDefaults_CronJob(obj *CronJob) { if obj.Spec.ConcurrencyPolicy == "" { obj.Spec.ConcurrencyPolicy = AllowConcurrent diff --git a/pkg/apis/batch/v2alpha1/defaults_test.go b/pkg/apis/batch/v2alpha1/defaults_test.go index b8994600071..c300a72f146 100644 --- a/pkg/apis/batch/v2alpha1/defaults_test.go +++ b/pkg/apis/batch/v2alpha1/defaults_test.go @@ -20,143 +20,40 @@ import ( "reflect" "testing" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/api" _ "k8s.io/kubernetes/pkg/api/install" - "k8s.io/kubernetes/pkg/api/v1" _ "k8s.io/kubernetes/pkg/apis/batch/install" . "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" ) -func TestSetDefaultJob(t *testing.T) { - defaultLabels := map[string]string{"default": "default"} +func TestSetDefaultCronJob(t *testing.T) { tests := map[string]struct { - original *Job - expected *Job - expectLabels bool + original *CronJob + expected *CronJob }{ - "both unspecified -> sets both to 1": { - original: &Job{ - Spec: JobSpec{ - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels}, - }, - }, - }, - expected: &Job{ - Spec: JobSpec{ - Completions: newInt32(1), - Parallelism: newInt32(1), - }, - }, - expectLabels: true, - }, - "both unspecified -> sets both to 1 and no default labels": { - original: &Job{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"mylabel": "myvalue"}, - }, - Spec: JobSpec{ - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels}, - }, - }, - }, - expected: &Job{ - Spec: JobSpec{ - Completions: newInt32(1), - Parallelism: newInt32(1), + "empty CronJob should default ConcurrencyPolicy and Suspend": { + original: &CronJob{}, + expected: &CronJob{ + Spec: CronJobSpec{ + ConcurrencyPolicy: AllowConcurrent, + Suspend: newBool(false), }, }, }, - "WQ: Parallelism explicitly 0 and completions unset -> no change": { - original: &Job{ - Spec: JobSpec{ - Parallelism: newInt32(0), - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels}, - }, + "nothing should be defaulted": { + original: &CronJob{ + Spec: CronJobSpec{ + ConcurrencyPolicy: ForbidConcurrent, + Suspend: newBool(true), }, }, - expected: &Job{ - Spec: JobSpec{ - Parallelism: newInt32(0), + expected: &CronJob{ + Spec: CronJobSpec{ + ConcurrencyPolicy: ForbidConcurrent, + Suspend: newBool(true), }, }, - expectLabels: true, - }, - "WQ: Parallelism explicitly 2 and completions unset -> no change": { - original: &Job{ - Spec: JobSpec{ - Parallelism: newInt32(2), - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels}, - }, - }, - }, - expected: &Job{ - Spec: JobSpec{ - Parallelism: newInt32(2), - }, - }, - expectLabels: true, - }, - "Completions explicitly 2 and parallelism unset -> parallelism is defaulted": { - original: &Job{ - Spec: JobSpec{ - Completions: newInt32(2), - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels}, - }, - }, - }, - expected: &Job{ - Spec: JobSpec{ - Completions: newInt32(2), - Parallelism: 
newInt32(1), - }, - }, - expectLabels: true, - }, - "Both set -> no change": { - original: &Job{ - Spec: JobSpec{ - Completions: newInt32(10), - Parallelism: newInt32(11), - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels}, - }, - }, - }, - expected: &Job{ - Spec: JobSpec{ - Completions: newInt32(10), - Parallelism: newInt32(11), - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels}, - }, - }, - }, - expectLabels: true, - }, - "Both set, flipped -> no change": { - original: &Job{ - Spec: JobSpec{ - Completions: newInt32(11), - Parallelism: newInt32(10), - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels}, - }, - }, - }, - expected: &Job{ - Spec: JobSpec{ - Completions: newInt32(11), - Parallelism: newInt32(10), - }, - }, - expectLabels: true, }, } @@ -164,35 +61,17 @@ func TestSetDefaultJob(t *testing.T) { original := test.original expected := test.expected obj2 := roundTrip(t, runtime.Object(original)) - actual, ok := obj2.(*Job) + actual, ok := obj2.(*CronJob) if !ok { t.Errorf("%s: unexpected object: %v", name, actual) t.FailNow() } - if (actual.Spec.Completions == nil) != (expected.Spec.Completions == nil) { - t.Errorf("%s: got different *completions than expected: %v %v", name, actual.Spec.Completions, expected.Spec.Completions) + if actual.Spec.ConcurrencyPolicy != expected.Spec.ConcurrencyPolicy { + t.Errorf("%s: got different concurrencyPolicy than expected: %v %v", name, actual.Spec.ConcurrencyPolicy, expected.Spec.ConcurrencyPolicy) } - if actual.Spec.Completions != nil && expected.Spec.Completions != nil { - if *actual.Spec.Completions != *expected.Spec.Completions { - t.Errorf("%s: got different completions than expected: %d %d", name, *actual.Spec.Completions, *expected.Spec.Completions) - } + if *actual.Spec.Suspend != *expected.Spec.Suspend { + t.Errorf("%s: got different suspend than expected: %v %v", name, *actual.Spec.Suspend, *expected.Spec.Suspend) } - if (actual.Spec.Parallelism == nil) != (expected.Spec.Parallelism == nil) { - t.Errorf("%s: got different *Parallelism than expected: %v %v", name, actual.Spec.Parallelism, expected.Spec.Parallelism) - } - if actual.Spec.Parallelism != nil && expected.Spec.Parallelism != nil { - if *actual.Spec.Parallelism != *expected.Spec.Parallelism { - t.Errorf("%s: got different parallelism than expected: %d %d", name, *actual.Spec.Parallelism, *expected.Spec.Parallelism) - } - } - if test.expectLabels != reflect.DeepEqual(actual.Labels, actual.Spec.Template.Labels) { - if test.expectLabels { - t.Errorf("%s: expected: %v, got: %v", name, actual.Spec.Template.Labels, actual.Labels) - } else { - t.Errorf("%s: unexpected equality: %v", name, actual.Labels) - } - } - } } @@ -216,8 +95,8 @@ func roundTrip(t *testing.T, obj runtime.Object) runtime.Object { return obj3 } -func newInt32(val int32) *int32 { - p := new(int32) +func newBool(val bool) *bool { + p := new(bool) *p = val return p } diff --git a/pkg/apis/batch/v2alpha1/register.go b/pkg/apis/batch/v2alpha1/register.go index e8e669ab6be..5286ca4a085 100644 --- a/pkg/apis/batch/v2alpha1/register.go +++ b/pkg/apis/batch/v2alpha1/register.go @@ -41,8 +41,6 @@ var ( // Adds the list of known types to api.Scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &Job{}, - &JobList{}, &JobTemplate{}, &CronJob{}, &CronJobList{}, diff --git a/pkg/apis/batch/v2alpha1/types.go b/pkg/apis/batch/v2alpha1/types.go index 5d111471f2d..3c1fdf22a3b 100644 --- a/pkg/apis/batch/v2alpha1/types.go +++ b/pkg/apis/batch/v2alpha1/types.go @@ -19,41 +19,9 @@ package v2alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api/v1" + batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1" ) -// +genclient=true - -// Job represents the configuration of a single job. -type Job struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - // +optional - Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// JobList is a collection of jobs. -type JobList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of Job. - Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // JobTemplate describes a template for creating copies of a predefined pod. type JobTemplate struct { metav1.TypeMeta `json:",inline"` @@ -78,120 +46,7 @@ type JobTemplateSpec struct { // Specification of the desired behavior of the job. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status // +optional - Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// JobSpec describes how the job execution will look like. -type JobSpec struct { - - // Parallelism specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. - // More info: http://kubernetes.io/docs/user-guide/jobs - // +optional - Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` - - // Completions specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. 
- // More info: http://kubernetes.io/docs/user-guide/jobs - // +optional - Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` - - // Optional duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - // +optional - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` - - // Selector is a label query over pods that should match the pod count. - // Normally, the system sets this field for you. - // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` - - // ManualSelector controls generation of pod labels and pod selectors. - // Leave `manualSelector` unset unless you are certain what you are doing. - // When false or unset, the system pick labels unique to this job - // and appends those labels to the pod template. When true, - // the user is responsible for picking unique labels and specifying - // the selector. Failure to pick a unique label may cause this - // and other jobs to not function correctly. However, You may see - // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` - // API. - // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md - // +optional - ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` - - // Template is the object that describes the pod that will be created when - // executing a job. - // More info: http://kubernetes.io/docs/user-guide/jobs - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` -} - -// JobStatus represents the current state of a Job. -type JobStatus struct { - - // Conditions represent the latest available observations of an object's current state. - // More info: http://kubernetes.io/docs/user-guide/jobs - // +optional - Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` - - // StartTime represents time when the job was acknowledged by the Job Manager. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - // +optional - StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` - - // CompletionTime represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - // +optional - CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` - - // Active is the number of actively running pods. - // +optional - Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` - - // Succeeded is the number of pods which reached Phase Succeeded. - // +optional - Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` - - // Failed is the number of pods which reached Phase Failed. - // +optional - Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` -} - -type JobConditionType string - -// These are valid conditions of a job. -const ( - // JobComplete means the job has completed its execution. 
- JobComplete JobConditionType = "Complete" - // JobFailed means the job has failed its execution. - JobFailed JobConditionType = "Failed" -) - -// JobCondition describes current state of a job. -type JobCondition struct { - // Type of job condition, Complete or Failed. - Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` - // Last time the condition was checked. - // +optional - LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` - // Last time the condition transit from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // (brief) reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` + Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } // +genclient=true diff --git a/pkg/controller/cronjob/cronjob_controller.go b/pkg/controller/cronjob/cronjob_controller.go index 9271f2242b0..ba61dfb9c28 100644 --- a/pkg/controller/cronjob/cronjob_controller.go +++ b/pkg/controller/cronjob/cronjob_controller.go @@ -47,7 +47,8 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" - batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" + batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1" + batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/util/metrics" ) @@ -108,7 +109,7 @@ func (jm *CronJobController) syncAll() { sjs := sjl.Items glog.V(4).Infof("Found %d cronjobs", len(sjs)) - jl, err := jm.kubeClient.BatchV2alpha1().Jobs(metav1.NamespaceAll).List(metav1.ListOptions{}) + jl, err := jm.kubeClient.BatchV1().Jobs(metav1.NamespaceAll).List(metav1.ListOptions{}) if err != nil { glog.Errorf("Error listing jobs") return @@ -126,20 +127,21 @@ func (jm *CronJobController) syncAll() { } // cleanupFinishedJobs cleanups finished jobs created by a CronJob -func cleanupFinishedJobs(sj *batch.CronJob, js []batch.Job, jc jobControlInterface, sjc sjControlInterface, pc podControlInterface, recorder record.EventRecorder) { +func cleanupFinishedJobs(sj *batchv2alpha1.CronJob, js []batchv1.Job, jc jobControlInterface, + sjc sjControlInterface, pc podControlInterface, recorder record.EventRecorder) { // If neither limits are active, there is no need to do anything. 
if sj.Spec.FailedJobsHistoryLimit == nil && sj.Spec.SuccessfulJobsHistoryLimit == nil { return } - failedJobs := []batch.Job{} - succesfulJobs := []batch.Job{} + failedJobs := []batchv1.Job{} + succesfulJobs := []batchv1.Job{} for _, job := range js { isFinished, finishedStatus := getFinishedStatus(&job) - if isFinished && finishedStatus == batch.JobComplete { + if isFinished && finishedStatus == batchv1.JobComplete { succesfulJobs = append(succesfulJobs, job) - } else if isFinished && finishedStatus == batch.JobFailed { + } else if isFinished && finishedStatus == batchv1.JobFailed { failedJobs = append(failedJobs, job) } } @@ -170,7 +172,8 @@ func cleanupFinishedJobs(sj *batch.CronJob, js []batch.Job, jc jobControlInterfa } // removeOldestJobs removes the oldest jobs from a list of jobs -func removeOldestJobs(sj *batch.CronJob, js []batch.Job, jc jobControlInterface, pc podControlInterface, maxJobs int32, recorder record.EventRecorder) { +func removeOldestJobs(sj *batchv2alpha1.CronJob, js []batchv1.Job, jc jobControlInterface, + pc podControlInterface, maxJobs int32, recorder record.EventRecorder) { numToDelete := len(js) - int(maxJobs) if numToDelete <= 0 { return @@ -190,7 +193,7 @@ func removeOldestJobs(sj *batch.CronJob, js []batch.Job, jc jobControlInterface, // All known jobs created by "sj" should be included in "js". // The current time is passed in to facilitate testing. // It has no receiver, to facilitate testing. -func syncOne(sj *batch.CronJob, js []batch.Job, now time.Time, jc jobControlInterface, sjc sjControlInterface, pc podControlInterface, recorder record.EventRecorder) { +func syncOne(sj *batchv2alpha1.CronJob, js []batchv1.Job, now time.Time, jc jobControlInterface, sjc sjControlInterface, pc podControlInterface, recorder record.EventRecorder) { nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name) childrenJobs := make(map[types.UID]bool) @@ -269,7 +272,7 @@ func syncOne(sj *batch.CronJob, js []batch.Job, now time.Time, jc jobControlInte // can see easily that there was a missed execution. return } - if sj.Spec.ConcurrencyPolicy == batch.ForbidConcurrent && len(sj.Status.Active) > 0 { + if sj.Spec.ConcurrencyPolicy == batchv2alpha1.ForbidConcurrent && len(sj.Status.Active) > 0 { // Regardless which source of information we use for the set of active jobs, // there is some risk that we won't see an active job when there is one. // (because we haven't seen the status update to the SJ or the created pod). 
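The cleanup path above decides which jobs to prune by reading terminal conditions off batch/v1 Jobs instead of the removed v2alpha1 copies. Below is a minimal, self-contained sketch of that classification step, assuming the same batchv1 and core v1 packages the controller imports; the isFinished helper name and the sample job are illustrative only and simply mirror the shape of the controller's getFinishedStatus helper, not the patch itself.

```go
package main

import (
	"fmt"

	batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

// isFinished reports whether a batch/v1 Job has reached a terminal state and,
// if so, which condition (Complete or Failed) ended it. This mirrors the shape
// of the controller's getFinishedStatus helper after the v2alpha1 removal.
func isFinished(j *batchv1.Job) (bool, batchv1.JobConditionType) {
	for _, c := range j.Status.Conditions {
		if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == v1.ConditionTrue {
			return true, c.Type
		}
	}
	return false, ""
}

func main() {
	// A hypothetical job that already carries a Complete condition.
	job := &batchv1.Job{}
	job.Status.Conditions = append(job.Status.Conditions,
		batchv1.JobCondition{Type: batchv1.JobComplete, Status: v1.ConditionTrue})

	if done, cond := isFinished(job); done {
		fmt.Printf("job finished with condition %q\n", cond) // prints: job finished with condition "Complete"
	}
}
```

Successful jobs would then be counted against SuccessfulJobsHistoryLimit and failed ones against FailedJobsHistoryLimit before the oldest are removed, as the hunks above show.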
@@ -282,7 +285,7 @@ func syncOne(sj *batch.CronJob, js []batch.Job, now time.Time, jc jobControlInte glog.V(4).Infof("Not starting job for %s because of prior execution still running and concurrency policy is Forbid", nameForLog) return } - if sj.Spec.ConcurrencyPolicy == batch.ReplaceConcurrent { + if sj.Spec.ConcurrencyPolicy == batchv2alpha1.ReplaceConcurrent { for _, j := range sj.Status.Active { // TODO: this should be replaced with server side job deletion // currently this mimics JobReaper from pkg/kubectl/stop.go @@ -338,7 +341,8 @@ func syncOne(sj *batch.CronJob, js []batch.Job, now time.Time, jc jobControlInte } // deleteJob reaps a job, deleting the job, the pobs and the reference in the active list -func deleteJob(sj *batch.CronJob, job *batch.Job, jc jobControlInterface, pc podControlInterface, recorder record.EventRecorder, reason string) bool { +func deleteJob(sj *batchv2alpha1.CronJob, job *batchv1.Job, jc jobControlInterface, + pc podControlInterface, recorder record.EventRecorder, reason string) bool { // TODO: this should be replaced with server side job deletion // currencontinuetly this mimics JobReaper from pkg/kubectl/stop.go nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name) diff --git a/pkg/controller/cronjob/cronjob_controller_test.go b/pkg/controller/cronjob/cronjob_controller_test.go index bcbe2b5f5aa..f3281c8cbf9 100644 --- a/pkg/controller/cronjob/cronjob_controller_test.go +++ b/pkg/controller/cronjob/cronjob_controller_test.go @@ -27,7 +27,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/api/v1" - batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" + batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1" + batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" ) // schedule is hourly on the hour @@ -92,8 +93,8 @@ func startTimeStringToTime(startTime string) time.Time { } // returns a cronJob with some fields filled in. 
-func cronJob() batch.CronJob { - return batch.CronJob{ +func cronJob() batchv2alpha1.CronJob { + return batchv2alpha1.CronJob{ ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", Namespace: "snazzycats", @@ -101,10 +102,10 @@ func cronJob() batch.CronJob { SelfLink: "/apis/batch/v2alpha1/namespaces/snazzycats/cronjobs/mycronjob", CreationTimestamp: metav1.Time{Time: justBeforeTheHour()}, }, - Spec: batch.CronJobSpec{ + Spec: batchv2alpha1.CronJobSpec{ Schedule: "* * * * ?", - ConcurrencyPolicy: batch.AllowConcurrent, - JobTemplate: batch.JobTemplateSpec{ + ConcurrencyPolicy: batchv2alpha1.AllowConcurrent, + JobTemplate: batchv2alpha1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "b"}, Annotations: map[string]string{"x": "y"}, @@ -115,9 +116,9 @@ func cronJob() batch.CronJob { } } -func jobSpec() batch.JobSpec { +func jobSpec() batchv1.JobSpec { one := int32(1) - return batch.JobSpec{ + return batchv1.JobSpec{ Parallelism: &one, Completions: &one, Template: v1.PodTemplateSpec{ @@ -135,8 +136,8 @@ func jobSpec() batch.JobSpec { } } -func newJob(UID string) batch.Job { - return batch.Job{ +func newJob(UID string) batchv1.Job { + return batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ UID: types.UID(UID), Name: "foobar", @@ -148,15 +149,15 @@ func newJob(UID string) batch.Job { } var ( - shortDead int64 = 10 - mediumDead int64 = 2 * 60 * 60 - longDead int64 = 1000000 - noDead int64 = -12345 - A batch.ConcurrencyPolicy = batch.AllowConcurrent - f batch.ConcurrencyPolicy = batch.ForbidConcurrent - R batch.ConcurrencyPolicy = batch.ReplaceConcurrent - T bool = true - F bool = false + shortDead int64 = 10 + mediumDead int64 = 2 * 60 * 60 + longDead int64 = 1000000 + noDead int64 = -12345 + A batchv2alpha1.ConcurrencyPolicy = batchv2alpha1.AllowConcurrent + f batchv2alpha1.ConcurrencyPolicy = batchv2alpha1.ForbidConcurrent + R batchv2alpha1.ConcurrencyPolicy = batchv2alpha1.ReplaceConcurrent + T bool = true + F bool = false ) func TestSyncOne_RunOrNot(t *testing.T) { @@ -175,7 +176,7 @@ func TestSyncOne_RunOrNot(t *testing.T) { testCases := map[string]struct { // sj spec - concurrencyPolicy batch.ConcurrencyPolicy + concurrencyPolicy batchv2alpha1.ConcurrencyPolicy suspend bool schedule string deadline int64 @@ -251,10 +252,10 @@ func TestSyncOne_RunOrNot(t *testing.T) { } var ( - job *batch.Job + job *batchv1.Job err error ) - js := []batch.Job{} + js := []batchv1.Job{} if tc.ranPreviously { sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeThePriorHour()} sj.Status.LastScheduleTime = &metav1.Time{Time: justAfterThePriorHour()} @@ -466,7 +467,7 @@ func TestCleanupFinishedJobs_DeleteOrNot(t *testing.T) { sj.Spec.FailedJobsHistoryLimit = tc.failedJobsHistoryLimit var ( - job *batch.Job + job *batchv1.Job err error ) @@ -481,7 +482,7 @@ func TestCleanupFinishedJobs_DeleteOrNot(t *testing.T) { } // Create jobs - js := []batch.Job{} + js := []batchv1.Job{} jobsToDelete := []string{} sj.Status.Active = []v1.ObjectReference{} @@ -495,13 +496,13 @@ func TestCleanupFinishedJobs_DeleteOrNot(t *testing.T) { job.Namespace = "" if spec.IsFinished { - var conditionType batch.JobConditionType + var conditionType batchv1.JobConditionType if spec.IsSuccessful { - conditionType = batch.JobComplete + conditionType = batchv1.JobComplete } else { - conditionType = batch.JobFailed + conditionType = batchv1.JobFailed } - condition := batch.JobCondition{Type: conditionType, Status: v1.ConditionTrue} + condition := batchv1.JobCondition{Type: conditionType, Status: v1.ConditionTrue} 
job.Status.Conditions = append(job.Status.Conditions, condition) if spec.IsStillInActiveList { @@ -563,13 +564,13 @@ func TestCleanupFinishedJobs_DeleteOrNot(t *testing.T) { // TestSyncOne_Status tests sj.UpdateStatus in syncOne func TestSyncOne_Status(t *testing.T) { finishedJob := newJob("1") - finishedJob.Status.Conditions = append(finishedJob.Status.Conditions, batch.JobCondition{Type: batch.JobComplete, Status: v1.ConditionTrue}) + finishedJob.Status.Conditions = append(finishedJob.Status.Conditions, batchv1.JobCondition{Type: batchv1.JobComplete, Status: v1.ConditionTrue}) unexpectedJob := newJob("2") missingJob := newJob("3") testCases := map[string]struct { // sj spec - concurrencyPolicy batch.ConcurrencyPolicy + concurrencyPolicy batchv2alpha1.ConcurrencyPolicy suspend bool schedule string deadline int64 @@ -654,7 +655,7 @@ func TestSyncOne_Status(t *testing.T) { } sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeTheHour()} } - jobs := []batch.Job{} + jobs := []batchv1.Job{} if tc.hasFinishedJob { ref, err := getRef(&finishedJob) if err != nil { diff --git a/pkg/controller/cronjob/injection.go b/pkg/controller/cronjob/injection.go index dcbd1ae8c6c..ba33de68d8d 100644 --- a/pkg/controller/cronjob/injection.go +++ b/pkg/controller/cronjob/injection.go @@ -24,14 +24,15 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/api/v1" - batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" + batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1" + batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" ) // sjControlInterface is an interface that knows how to update CronJob status // created as an interface to allow testing. type sjControlInterface interface { - UpdateStatus(sj *batch.CronJob) (*batch.CronJob, error) + UpdateStatus(sj *batchv2alpha1.CronJob) (*batchv2alpha1.CronJob, error) } // realSJControl is the default implementation of sjControlInterface. @@ -41,18 +42,18 @@ type realSJControl struct { var _ sjControlInterface = &realSJControl{} -func (c *realSJControl) UpdateStatus(sj *batch.CronJob) (*batch.CronJob, error) { +func (c *realSJControl) UpdateStatus(sj *batchv2alpha1.CronJob) (*batchv2alpha1.CronJob, error) { return c.KubeClient.BatchV2alpha1().CronJobs(sj.Namespace).UpdateStatus(sj) } // fakeSJControl is the default implementation of sjControlInterface. type fakeSJControl struct { - Updates []batch.CronJob + Updates []batchv2alpha1.CronJob } var _ sjControlInterface = &fakeSJControl{} -func (c *fakeSJControl) UpdateStatus(sj *batch.CronJob) (*batch.CronJob, error) { +func (c *fakeSJControl) UpdateStatus(sj *batchv2alpha1.CronJob) (*batchv2alpha1.CronJob, error) { c.Updates = append(c.Updates, *sj) return sj, nil } @@ -63,11 +64,11 @@ func (c *fakeSJControl) UpdateStatus(sj *batch.CronJob) (*batch.CronJob, error) // created as an interface to allow testing. type jobControlInterface interface { // GetJob retrieves a job - GetJob(namespace, name string) (*batch.Job, error) + GetJob(namespace, name string) (*batchv1.Job, error) // CreateJob creates new jobs according to the spec - CreateJob(namespace string, job *batch.Job) (*batch.Job, error) + CreateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) // UpdateJob updates a job - UpdateJob(namespace string, job *batch.Job) (*batch.Job, error) + UpdateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) // DeleteJob deletes the job identified by name. // TODO: delete by UID? 
DeleteJob(namespace string, name string) error @@ -81,7 +82,7 @@ type realJobControl struct { var _ jobControlInterface = &realJobControl{} -func copyLabels(template *batch.JobTemplateSpec) labels.Set { +func copyLabels(template *batchv2alpha1.JobTemplateSpec) labels.Set { l := make(labels.Set) for k, v := range template.Labels { l[k] = v @@ -89,7 +90,7 @@ func copyLabels(template *batch.JobTemplateSpec) labels.Set { return l } -func copyAnnotations(template *batch.JobTemplateSpec) labels.Set { +func copyAnnotations(template *batchv2alpha1.JobTemplateSpec) labels.Set { a := make(labels.Set) for k, v := range template.Annotations { a[k] = v @@ -97,33 +98,33 @@ func copyAnnotations(template *batch.JobTemplateSpec) labels.Set { return a } -func (r realJobControl) GetJob(namespace, name string) (*batch.Job, error) { - return r.KubeClient.BatchV2alpha1().Jobs(namespace).Get(name, metav1.GetOptions{}) +func (r realJobControl) GetJob(namespace, name string) (*batchv1.Job, error) { + return r.KubeClient.BatchV1().Jobs(namespace).Get(name, metav1.GetOptions{}) } -func (r realJobControl) UpdateJob(namespace string, job *batch.Job) (*batch.Job, error) { - return r.KubeClient.BatchV2alpha1().Jobs(namespace).Update(job) +func (r realJobControl) UpdateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) { + return r.KubeClient.BatchV1().Jobs(namespace).Update(job) } -func (r realJobControl) CreateJob(namespace string, job *batch.Job) (*batch.Job, error) { - return r.KubeClient.BatchV2alpha1().Jobs(namespace).Create(job) +func (r realJobControl) CreateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) { + return r.KubeClient.BatchV1().Jobs(namespace).Create(job) } func (r realJobControl) DeleteJob(namespace string, name string) error { - return r.KubeClient.BatchV2alpha1().Jobs(namespace).Delete(name, nil) + return r.KubeClient.BatchV1().Jobs(namespace).Delete(name, nil) } type fakeJobControl struct { sync.Mutex - Job *batch.Job - Jobs []batch.Job + Job *batchv1.Job + Jobs []batchv1.Job DeleteJobName []string Err error } var _ jobControlInterface = &fakeJobControl{} -func (f *fakeJobControl) CreateJob(namespace string, job *batch.Job) (*batch.Job, error) { +func (f *fakeJobControl) CreateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) { f.Lock() defer f.Unlock() if f.Err != nil { @@ -135,7 +136,7 @@ func (f *fakeJobControl) CreateJob(namespace string, job *batch.Job) (*batch.Job return job, nil } -func (f *fakeJobControl) GetJob(namespace, name string) (*batch.Job, error) { +func (f *fakeJobControl) GetJob(namespace, name string) (*batchv1.Job, error) { f.Lock() defer f.Unlock() if f.Err != nil { @@ -144,7 +145,7 @@ func (f *fakeJobControl) GetJob(namespace, name string) (*batch.Job, error) { return f.Job, nil } -func (f *fakeJobControl) UpdateJob(namespace string, job *batch.Job) (*batch.Job, error) { +func (f *fakeJobControl) UpdateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) { f.Lock() defer f.Unlock() if f.Err != nil { @@ -167,7 +168,7 @@ func (f *fakeJobControl) Clear() { f.Lock() defer f.Unlock() f.DeleteJobName = []string{} - f.Jobs = []batch.Job{} + f.Jobs = []batchv1.Job{} f.Err = nil } diff --git a/pkg/controller/cronjob/utils.go b/pkg/controller/cronjob/utils.go index 2fc73a666e5..d8e3b724948 100644 --- a/pkg/controller/cronjob/utils.go +++ b/pkg/controller/cronjob/utils.go @@ -30,12 +30,13 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" - batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" 
+ batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1" + batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" ) // Utilities for dealing with Jobs and CronJobs and time. -func inActiveList(sj batch.CronJob, uid types.UID) bool { +func inActiveList(sj batchv2alpha1.CronJob, uid types.UID) bool { for _, j := range sj.Status.Active { if j.UID == uid { return true @@ -44,7 +45,7 @@ func inActiveList(sj batch.CronJob, uid types.UID) bool { return false } -func deleteFromActiveList(sj *batch.CronJob, uid types.UID) { +func deleteFromActiveList(sj *batchv2alpha1.CronJob, uid types.UID) { if sj == nil { return } @@ -58,7 +59,7 @@ func deleteFromActiveList(sj *batch.CronJob, uid types.UID) { } // getParentUIDFromJob extracts UID of job's parent and whether it was found -func getParentUIDFromJob(j batch.Job) (types.UID, bool) { +func getParentUIDFromJob(j batchv1.Job) (types.UID, bool) { creatorRefJson, found := j.ObjectMeta.Annotations[v1.CreatedByAnnotation] if !found { glog.V(4).Infof("Job with no created-by annotation, name %s namespace %s", j.Name, j.Namespace) @@ -85,8 +86,8 @@ func getParentUIDFromJob(j batch.Job) (types.UID, bool) { // groupJobsByParent groups jobs into a map keyed by the job parent UID (e.g. scheduledJob). // It has no receiver, to facilitate testing. -func groupJobsByParent(sjs []batch.CronJob, js []batch.Job) map[types.UID][]batch.Job { - jobsBySj := make(map[types.UID][]batch.Job) +func groupJobsByParent(sjs []batchv2alpha1.CronJob, js []batchv1.Job) map[types.UID][]batchv1.Job { + jobsBySj := make(map[types.UID][]batchv1.Job) for _, job := range js { parentUID, found := getParentUIDFromJob(job) if !found { @@ -120,7 +121,7 @@ func getNextStartTimeAfter(schedule string, now time.Time) (time.Time, error) { // // If there are too many (>100) unstarted times, just give up and return an empty slice. // If there were missed times prior to the last known start time, then those are not returned. 
-func getRecentUnmetScheduleTimes(sj batch.CronJob, now time.Time) ([]time.Time, error) { +func getRecentUnmetScheduleTimes(sj batchv2alpha1.CronJob, now time.Time) ([]time.Time, error) { starts := []time.Time{} sched, err := cron.ParseStandard(sj.Spec.Schedule) if err != nil { @@ -181,7 +182,7 @@ func getRecentUnmetScheduleTimes(sj batch.CronJob, now time.Time) ([]time.Time, // XXX unit test this // getJobFromTemplate makes a Job from a CronJob -func getJobFromTemplate(sj *batch.CronJob, scheduledTime time.Time) (*batch.Job, error) { +func getJobFromTemplate(sj *batchv2alpha1.CronJob, scheduledTime time.Time) (*batchv1.Job, error) { // TODO: consider adding the following labels: // nominal-start-time=$RFC_3339_DATE_OF_INTENDED_START -- for user convenience // scheduled-job-name=$SJ_NAME -- for user convenience @@ -195,7 +196,7 @@ func getJobFromTemplate(sj *batch.CronJob, scheduledTime time.Time) (*batch.Job, // We want job names for a given nominal start time to have a deterministic name to avoid the same job being created twice name := fmt.Sprintf("%s-%d", sj.Name, getTimeHash(scheduledTime)) - job := &batch.Job{ + job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Labels: labels, Annotations: annotations, @@ -234,22 +235,22 @@ func makeCreatedByRefJson(object runtime.Object) (string, error) { return string(createdByRefJson), nil } -func getFinishedStatus(j *batch.Job) (bool, batch.JobConditionType) { +func getFinishedStatus(j *batchv1.Job) (bool, batchv1.JobConditionType) { for _, c := range j.Status.Conditions { - if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == v1.ConditionTrue { + if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == v1.ConditionTrue { return true, c.Type } } return false, "" } -func IsJobFinished(j *batch.Job) bool { +func IsJobFinished(j *batchv1.Job) bool { isFinished, _ := getFinishedStatus(j) return isFinished } // byJobStartTime sorts a list of jobs by start timestamp, using their names as a tie breaker. 
-type byJobStartTime []batch.Job +type byJobStartTime []batchv1.Job func (o byJobStartTime) Len() int { return len(o) } func (o byJobStartTime) Swap(i, j int) { o[i], o[j] = o[j], o[i] } diff --git a/pkg/controller/cronjob/utils_test.go b/pkg/controller/cronjob/utils_test.go index 4b84b198906..9f74b4de383 100644 --- a/pkg/controller/cronjob/utils_test.go +++ b/pkg/controller/cronjob/utils_test.go @@ -24,7 +24,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/api/v1" - batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" + batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1" + batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" ) func TestGetJobFromTemplate(t *testing.T) { @@ -34,22 +35,22 @@ func TestGetJobFromTemplate(t *testing.T) { var one int64 = 1 var no bool = false - sj := batch.CronJob{ + sj := batchv2alpha1.CronJob{ ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", Namespace: "snazzycats", UID: types.UID("1a2b3c"), SelfLink: "/apis/batch/v1/namespaces/snazzycats/jobs/mycronjob", }, - Spec: batch.CronJobSpec{ + Spec: batchv2alpha1.CronJobSpec{ Schedule: "* * * * ?", - ConcurrencyPolicy: batch.AllowConcurrent, - JobTemplate: batch.JobTemplateSpec{ + ConcurrencyPolicy: batchv2alpha1.AllowConcurrent, + JobTemplate: batchv2alpha1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "b"}, Annotations: map[string]string{"x": "y"}, }, - Spec: batch.JobSpec{ + Spec: batchv1.JobSpec{ ActiveDeadlineSeconds: &one, ManualSelector: &no, Template: v1.PodTemplateSpec{ @@ -69,7 +70,7 @@ func TestGetJobFromTemplate(t *testing.T) { }, } - var job *batch.Job + var job *batchv1.Job job, err := getJobFromTemplate(&sj, time.Time{}) if err != nil { t.Errorf("Did not expect error: %s", err) @@ -98,12 +99,12 @@ func TestGetJobFromTemplate(t *testing.T) { } func TestGetParentUIDFromJob(t *testing.T) { - j := &batch.Job{ + j := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "foobar", Namespace: metav1.NamespaceDefault, }, - Spec: batch.JobSpec{ + Spec: batchv1.JobSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"foo": "bar"}, }, @@ -120,9 +121,9 @@ func TestGetParentUIDFromJob(t *testing.T) { }, }, }, - Status: batch.JobStatus{ - Conditions: []batch.JobCondition{{ - Type: batch.JobComplete, + Status: batchv1.JobStatus{ + Conditions: []batchv1.JobCondition{{ + Type: batchv1.JobComplete, Status: v1.ConditionTrue, }}, }, @@ -162,8 +163,8 @@ func TestGroupJobsByParent(t *testing.T) { { // Case 1: There are no jobs and scheduledJobs - sjs := []batch.CronJob{} - js := []batch.Job{} + sjs := []batchv2alpha1.CronJob{} + js := []batchv1.Job{} jobsBySj := groupJobsByParent(sjs, js) if len(jobsBySj) != 0 { t.Errorf("Wrong number of items in map") @@ -172,10 +173,10 @@ func TestGroupJobsByParent(t *testing.T) { { // Case 2: there is one controller with no job. - sjs := []batch.CronJob{ + sjs := []batchv2alpha1.CronJob{ {ObjectMeta: metav1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}}, } - js := []batch.Job{} + js := []batchv1.Job{} jobsBySj := groupJobsByParent(sjs, js) if len(jobsBySj) != 0 { t.Errorf("Wrong number of items in map") @@ -184,10 +185,10 @@ func TestGroupJobsByParent(t *testing.T) { { // Case 3: there is one controller with one job it created. 
- sjs := []batch.CronJob{ + sjs := []batchv2alpha1.CronJob{ {ObjectMeta: metav1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}}, } - js := []batch.Job{ + js := []batchv1.Job{ {ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}}, } jobsBySj := groupJobsByParent(sjs, js) @@ -207,7 +208,7 @@ func TestGroupJobsByParent(t *testing.T) { { // Case 4: Two namespaces, one has two jobs from one controller, other has 3 jobs from two controllers. // There are also two jobs with no created-by annotation. - js := []batch.Job{ + js := []batchv1.Job{ {ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}}, {ObjectMeta: metav1.ObjectMeta{Name: "b", Namespace: "x", Annotations: createdBy2}}, {ObjectMeta: metav1.ObjectMeta{Name: "c", Namespace: "x", Annotations: createdBy1}}, @@ -216,7 +217,7 @@ func TestGroupJobsByParent(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "b", Namespace: "y", Annotations: createdBy3}}, {ObjectMeta: metav1.ObjectMeta{Name: "d", Namespace: "y", Annotations: noCreatedBy}}, } - sjs := []batch.CronJob{ + sjs := []batchv2alpha1.CronJob{ {ObjectMeta: metav1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}}, {ObjectMeta: metav1.ObjectMeta{Name: "f", Namespace: "x", UID: uid2}}, {ObjectMeta: metav1.ObjectMeta{Name: "g", Namespace: "y", UID: uid3}}, @@ -266,16 +267,16 @@ func TestGetRecentUnmetScheduleTimes(t *testing.T) { t.Errorf("test setup error: %v", err) } - sj := batch.CronJob{ + sj := batchv2alpha1.CronJob{ ObjectMeta: metav1.ObjectMeta{ Name: "mycronjob", Namespace: metav1.NamespaceDefault, UID: types.UID("1a2b3c"), }, - Spec: batch.CronJobSpec{ + Spec: batchv2alpha1.CronJobSpec{ Schedule: schedule, - ConcurrencyPolicy: batch.AllowConcurrent, - JobTemplate: batch.JobTemplateSpec{}, + ConcurrencyPolicy: batchv2alpha1.AllowConcurrent, + JobTemplate: batchv2alpha1.JobTemplateSpec{}, }, } { diff --git a/pkg/registry/batch/rest/storage_batch.go b/pkg/registry/batch/rest/storage_batch.go index c4d359284d4..d523b8ab087 100644 --- a/pkg/registry/batch/rest/storage_batch.go +++ b/pkg/registry/batch/rest/storage_batch.go @@ -67,11 +67,6 @@ func (p RESTStorageProvider) v2alpha1Storage(apiResourceConfigSource serverstora version := batchapiv2alpha1.SchemeGroupVersion storage := map[string]rest.Storage{} - if apiResourceConfigSource.ResourceEnabled(version.WithResource("jobs")) { - jobsStorage, jobsStatusStorage := jobstore.NewREST(restOptionsGetter) - storage["jobs"] = jobsStorage - storage["jobs/status"] = jobsStatusStorage - } if apiResourceConfigSource.ResourceEnabled(version.WithResource("cronjobs")) { cronJobsStorage, cronJobsStatusStorage := cronjobstore.NewREST(restOptionsGetter) storage["cronjobs"] = cronJobsStorage diff --git a/test/e2e/cronjob.go b/test/e2e/cronjob.go index ef2286175a2..ac7d098e10a 100644 --- a/test/e2e/cronjob.go +++ b/test/e2e/cronjob.go @@ -31,7 +31,7 @@ import ( "k8s.io/kubernetes/pkg/api/v1" batchinternal "k8s.io/kubernetes/pkg/apis/batch" batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1" - batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" + batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/controller/job" "k8s.io/kubernetes/pkg/kubectl" @@ -44,9 +44,9 @@ const ( ) var ( - CronJobGroupVersionResource = schema.GroupVersionResource{Group: batch.GroupName, Version: "v2alpha1", Resource: "cronjobs"} - ScheduledJobGroupVersionResource = schema.GroupVersionResource{Group: batch.GroupName, Version: "v2alpha1", 
Resource: "scheduledjobs"} - BatchV2Alpha1GroupVersion = schema.GroupVersion{Group: batch.GroupName, Version: "v2alpha1"} + CronJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "cronjobs"} + ScheduledJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "scheduledjobs"} + BatchV2Alpha1GroupVersion = schema.GroupVersion{Group: batchv2alpha1.GroupName, Version: "v2alpha1"} ) var _ = framework.KubeDescribe("CronJob", func() { @@ -64,7 +64,7 @@ var _ = framework.KubeDescribe("CronJob", func() { // multiple jobs running at once It("should schedule multiple jobs concurrently", func() { By("Creating a cronjob") - cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batch.AllowConcurrent, + cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv2alpha1.AllowConcurrent, sleepCommand, nil) cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) Expect(err).NotTo(HaveOccurred()) @@ -87,7 +87,7 @@ var _ = framework.KubeDescribe("CronJob", func() { // suspended should not schedule jobs It("should not schedule jobs when suspended [Slow]", func() { By("Creating a suspended cronjob") - cronJob := newTestCronJob("suspended", "*/1 * * * ?", batch.AllowConcurrent, + cronJob := newTestCronJob("suspended", "*/1 * * * ?", batchv2alpha1.AllowConcurrent, sleepCommand, nil) t := true cronJob.Spec.Suspend = &t @@ -111,7 +111,7 @@ var _ = framework.KubeDescribe("CronJob", func() { // only single active job is allowed for ForbidConcurrent It("should not schedule new jobs when ForbidConcurrent [Slow]", func() { By("Creating a ForbidConcurrent cronjob") - cronJob := newTestCronJob("forbid", "*/1 * * * ?", batch.ForbidConcurrent, + cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv2alpha1.ForbidConcurrent, sleepCommand, nil) cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) Expect(err).NotTo(HaveOccurred()) @@ -143,7 +143,7 @@ var _ = framework.KubeDescribe("CronJob", func() { // only single active job is allowed for ReplaceConcurrent It("should replace jobs when ReplaceConcurrent", func() { By("Creating a ReplaceConcurrent cronjob") - cronJob := newTestCronJob("replace", "*/1 * * * ?", batch.ReplaceConcurrent, + cronJob := newTestCronJob("replace", "*/1 * * * ?", batchv2alpha1.ReplaceConcurrent, sleepCommand, nil) cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) Expect(err).NotTo(HaveOccurred()) @@ -175,7 +175,7 @@ var _ = framework.KubeDescribe("CronJob", func() { // shouldn't give us unexpected warnings It("should not emit unexpected warnings", func() { By("Creating a cronjob") - cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batch.AllowConcurrent, + cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv2alpha1.AllowConcurrent, nil, nil) cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) Expect(err).NotTo(HaveOccurred()) @@ -198,7 +198,7 @@ var _ = framework.KubeDescribe("CronJob", func() { // deleted jobs should be removed from the active list It("should remove from active list jobs that have been deleted", func() { By("Creating a ForbidConcurrent cronjob") - cronJob := newTestCronJob("forbid", "*/1 * * * ?", batch.ForbidConcurrent, + cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv2alpha1.ForbidConcurrent, sleepCommand, nil) cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) Expect(err).NotTo(HaveOccurred()) @@ -242,7 +242,7 @@ var _ = 
framework.KubeDescribe("CronJob", func() { It("should delete successful finished jobs with limit of one successful job", func() { By("Creating a AllowConcurrent cronjob with custom history limits") successLimit := int32(1) - cronJob := newTestCronJob("concurrent-limit", "*/1 * * * ?", batch.AllowConcurrent, + cronJob := newTestCronJob("concurrent-limit", "*/1 * * * ?", batchv2alpha1.AllowConcurrent, successCommand, &successLimit) cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) Expect(err).NotTo(HaveOccurred()) @@ -278,19 +278,19 @@ var _ = framework.KubeDescribe("CronJob", func() { }) // newTestCronJob returns a cronjob which does one of several testing behaviors. -func newTestCronJob(name, schedule string, concurrencyPolicy batch.ConcurrencyPolicy, command []string, - successfulJobsHistoryLimit *int32) *batch.CronJob { +func newTestCronJob(name, schedule string, concurrencyPolicy batchv2alpha1.ConcurrencyPolicy, + command []string, successfulJobsHistoryLimit *int32) *batchv2alpha1.CronJob { parallelism := int32(1) completions := int32(1) - sj := &batch.CronJob{ + sj := &batchv2alpha1.CronJob{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, - Spec: batch.CronJobSpec{ + Spec: batchv2alpha1.CronJobSpec{ Schedule: schedule, ConcurrencyPolicy: concurrencyPolicy, - JobTemplate: batch.JobTemplateSpec{ - Spec: batch.JobSpec{ + JobTemplate: batchv2alpha1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ Parallelism: ¶llelism, Completions: &completions, Template: v1.PodTemplateSpec{ @@ -329,11 +329,11 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batch.ConcurrencyPo return sj } -func createCronJob(c clientset.Interface, ns string, cronJob *batch.CronJob) (*batch.CronJob, error) { +func createCronJob(c clientset.Interface, ns string, cronJob *batchv2alpha1.CronJob) (*batchv2alpha1.CronJob, error) { return c.BatchV2alpha1().CronJobs(ns).Create(cronJob) } -func getCronJob(c clientset.Interface, ns, name string) (*batch.CronJob, error) { +func getCronJob(c clientset.Interface, ns, name string) (*batchv2alpha1.CronJob, error) { return c.BatchV2alpha1().CronJobs(ns).Get(name, metav1.GetOptions{}) } diff --git a/test/e2e/generated_clientset.go b/test/e2e/generated_clientset.go index 687edf2d421..c060a717f68 100644 --- a/test/e2e/generated_clientset.go +++ b/test/e2e/generated_clientset.go @@ -29,7 +29,8 @@ import ( "k8s.io/apimachinery/pkg/watch" clientv1 "k8s.io/client-go/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" + batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1" + batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" "k8s.io/kubernetes/test/e2e/framework" . 
"github.com/onsi/ginkgo" @@ -189,21 +190,21 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() { }) }) -func newTestingCronJob(name string, value string) *v2alpha1.CronJob { +func newTestingCronJob(name string, value string) *batchv2alpha1.CronJob { parallelism := int32(1) completions := int32(1) - return &v2alpha1.CronJob{ + return &batchv2alpha1.CronJob{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{ "time": value, }, }, - Spec: v2alpha1.CronJobSpec{ + Spec: batchv2alpha1.CronJobSpec{ Schedule: "*/1 * * * ?", - ConcurrencyPolicy: v2alpha1.AllowConcurrent, - JobTemplate: v2alpha1.JobTemplateSpec{ - Spec: v2alpha1.JobSpec{ + ConcurrencyPolicy: batchv2alpha1.AllowConcurrent, + JobTemplate: batchv2alpha1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ Parallelism: ¶llelism, Completions: &completions, Template: v1.PodTemplateSpec{ @@ -244,9 +245,9 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() { groupList, err := f.ClientSet.Discovery().ServerGroups() framework.ExpectNoError(err) for _, group := range groupList.Groups { - if group.Name == v2alpha1.GroupName { + if group.Name == batchv2alpha1.GroupName { for _, version := range group.Versions { - if version.Version == v2alpha1.SchemeGroupVersion.Version { + if version.Version == batchv2alpha1.SchemeGroupVersion.Version { enabled = true break } @@ -254,7 +255,7 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() { } } if !enabled { - framework.Logf("%s is not enabled, test skipped", v2alpha1.SchemeGroupVersion) + framework.Logf("%s is not enabled, test skipped", batchv2alpha1.SchemeGroupVersion) return } cronJobClient := f.ClientSet.BatchV2alpha1().CronJobs(f.Namespace.Name)