Mirror of https://github.com/k3s-io/kubernetes.git, synced 2026-01-05 07:27:21 +00:00
Merge pull request #41890 from soltysh/issue37166
Automatic merge from submit-queue (batch tested with PRs 41890, 42593, 42633, 42626, 42609). Remove everything that is not new from batch/v2alpha1. Fixes #37166. @lavalamp you've asked for it. @erictune this is a prereq for moving CronJobs to beta. I initially planned to put it all in one PR, but after doing that I figured it would be easier to review separately. ptal @kubernetes/api-approvers @kubernetes/sig-api-machinery-pr-reviews
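The change is mechanical throughout the CronJob controller: the single batch alias for k8s.io/kubernetes/pkg/apis/batch/v2alpha1 is split into batchv1 (Job, JobCondition, JobComplete, JobFailed, which already exist in batch/v1) and batchv2alpha1 (CronJob and its spec types, which stay alpha), and the Job client calls move from BatchV2alpha1() to BatchV1(). Below is a minimal sketch of that aliasing pattern, assuming the package paths shown in the diff; isComplete and scheduleOf are hypothetical helpers for illustration only, not functions added by this PR.

// Sketch only, not an excerpt from the PR. Intended to build against the tree at this
// revision, where Job types live in batch/v1 and CronJob still lives in batch/v2alpha1.
package cronjob

import (
	"k8s.io/kubernetes/pkg/api/v1"
	batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"             // Job, JobCondition, JobComplete
	batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" // CronJob, CronJobSpec
)

// Before this PR both kinds came from one alias:
//   func syncOne(sj *batch.CronJob, js []batch.Job, ...)
// After it, CronJob keeps the v2alpha1 type while Job uses the v1 type:
//   func syncOne(sj *batchv2alpha1.CronJob, js []batchv1.Job, ...)

// isComplete (hypothetical) shows the batchv1 side of the split: Job conditions
// are read from the batch/v1 types.
func isComplete(j *batchv1.Job) bool {
	for _, c := range j.Status.Conditions {
		if c.Type == batchv1.JobComplete && c.Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}

// scheduleOf (hypothetical) shows the batchv2alpha1 side: CronJob fields keep
// their alpha-group types.
func scheduleOf(sj *batchv2alpha1.CronJob) string {
	return sj.Spec.Schedule
}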
@@ -20,6 +20,7 @@ go_library(
deps = [
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
+"//pkg/apis/batch/v1:go_default_library",
"//pkg/apis/batch/v2alpha1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/util/metrics:go_default_library",
@@ -50,6 +51,7 @@ go_test(
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
+"//pkg/apis/batch/v1:go_default_library",
"//pkg/apis/batch/v2alpha1:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/types",
@@ -47,7 +47,8 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
-batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
+batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
+batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/util/metrics"
)
@@ -108,7 +109,7 @@ func (jm *CronJobController) syncAll() {
sjs := sjl.Items
glog.V(4).Infof("Found %d cronjobs", len(sjs))

-jl, err := jm.kubeClient.BatchV2alpha1().Jobs(metav1.NamespaceAll).List(metav1.ListOptions{})
+jl, err := jm.kubeClient.BatchV1().Jobs(metav1.NamespaceAll).List(metav1.ListOptions{})
if err != nil {
glog.Errorf("Error listing jobs")
return
@@ -126,20 +127,21 @@ func (jm *CronJobController) syncAll() {
}

// cleanupFinishedJobs cleanups finished jobs created by a CronJob
-func cleanupFinishedJobs(sj *batch.CronJob, js []batch.Job, jc jobControlInterface, sjc sjControlInterface, pc podControlInterface, recorder record.EventRecorder) {
+func cleanupFinishedJobs(sj *batchv2alpha1.CronJob, js []batchv1.Job, jc jobControlInterface,
+sjc sjControlInterface, pc podControlInterface, recorder record.EventRecorder) {
// If neither limits are active, there is no need to do anything.
if sj.Spec.FailedJobsHistoryLimit == nil && sj.Spec.SuccessfulJobsHistoryLimit == nil {
return
}

-failedJobs := []batch.Job{}
-succesfulJobs := []batch.Job{}
+failedJobs := []batchv1.Job{}
+succesfulJobs := []batchv1.Job{}

for _, job := range js {
isFinished, finishedStatus := getFinishedStatus(&job)
-if isFinished && finishedStatus == batch.JobComplete {
+if isFinished && finishedStatus == batchv1.JobComplete {
succesfulJobs = append(succesfulJobs, job)
-} else if isFinished && finishedStatus == batch.JobFailed {
+} else if isFinished && finishedStatus == batchv1.JobFailed {
failedJobs = append(failedJobs, job)
}
}
@@ -170,7 +172,8 @@ func cleanupFinishedJobs(sj *batch.CronJob, js []batch.Job, jc jobControlInterfa
}

// removeOldestJobs removes the oldest jobs from a list of jobs
-func removeOldestJobs(sj *batch.CronJob, js []batch.Job, jc jobControlInterface, pc podControlInterface, maxJobs int32, recorder record.EventRecorder) {
+func removeOldestJobs(sj *batchv2alpha1.CronJob, js []batchv1.Job, jc jobControlInterface,
+pc podControlInterface, maxJobs int32, recorder record.EventRecorder) {
numToDelete := len(js) - int(maxJobs)
if numToDelete <= 0 {
return
@@ -190,7 +193,7 @@ func removeOldestJobs(sj *batch.CronJob, js []batch.Job, jc jobControlInterface,
// All known jobs created by "sj" should be included in "js".
// The current time is passed in to facilitate testing.
// It has no receiver, to facilitate testing.
-func syncOne(sj *batch.CronJob, js []batch.Job, now time.Time, jc jobControlInterface, sjc sjControlInterface, pc podControlInterface, recorder record.EventRecorder) {
+func syncOne(sj *batchv2alpha1.CronJob, js []batchv1.Job, now time.Time, jc jobControlInterface, sjc sjControlInterface, pc podControlInterface, recorder record.EventRecorder) {
nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name)

childrenJobs := make(map[types.UID]bool)
@@ -269,7 +272,7 @@ func syncOne(sj *batch.CronJob, js []batch.Job, now time.Time, jc jobControlInte
// can see easily that there was a missed execution.
return
}
-if sj.Spec.ConcurrencyPolicy == batch.ForbidConcurrent && len(sj.Status.Active) > 0 {
+if sj.Spec.ConcurrencyPolicy == batchv2alpha1.ForbidConcurrent && len(sj.Status.Active) > 0 {
// Regardless which source of information we use for the set of active jobs,
// there is some risk that we won't see an active job when there is one.
// (because we haven't seen the status update to the SJ or the created pod).
@@ -282,7 +285,7 @@ func syncOne(sj *batch.CronJob, js []batch.Job, now time.Time, jc jobControlInte
glog.V(4).Infof("Not starting job for %s because of prior execution still running and concurrency policy is Forbid", nameForLog)
return
}
-if sj.Spec.ConcurrencyPolicy == batch.ReplaceConcurrent {
+if sj.Spec.ConcurrencyPolicy == batchv2alpha1.ReplaceConcurrent {
for _, j := range sj.Status.Active {
// TODO: this should be replaced with server side job deletion
// currently this mimics JobReaper from pkg/kubectl/stop.go
@@ -338,7 +341,8 @@ func syncOne(sj *batch.CronJob, js []batch.Job, now time.Time, jc jobControlInte
}

// deleteJob reaps a job, deleting the job, the pobs and the reference in the active list
-func deleteJob(sj *batch.CronJob, job *batch.Job, jc jobControlInterface, pc podControlInterface, recorder record.EventRecorder, reason string) bool {
+func deleteJob(sj *batchv2alpha1.CronJob, job *batchv1.Job, jc jobControlInterface,
+pc podControlInterface, recorder record.EventRecorder, reason string) bool {
// TODO: this should be replaced with server side job deletion
// currencontinuetly this mimics JobReaper from pkg/kubectl/stop.go
nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name)
@@ -27,7 +27,8 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/v1"
-batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
+batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
+batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
)

// schedule is hourly on the hour
@@ -92,8 +93,8 @@ func startTimeStringToTime(startTime string) time.Time {
}

// returns a cronJob with some fields filled in.
-func cronJob() batch.CronJob {
-return batch.CronJob{
+func cronJob() batchv2alpha1.CronJob {
+return batchv2alpha1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: "mycronjob",
Namespace: "snazzycats",
@@ -101,10 +102,10 @@ func cronJob() batch.CronJob {
SelfLink: "/apis/batch/v2alpha1/namespaces/snazzycats/cronjobs/mycronjob",
CreationTimestamp: metav1.Time{Time: justBeforeTheHour()},
},
-Spec: batch.CronJobSpec{
+Spec: batchv2alpha1.CronJobSpec{
Schedule: "* * * * ?",
-ConcurrencyPolicy: batch.AllowConcurrent,
-JobTemplate: batch.JobTemplateSpec{
+ConcurrencyPolicy: batchv2alpha1.AllowConcurrent,
+JobTemplate: batchv2alpha1.JobTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"a": "b"},
Annotations: map[string]string{"x": "y"},
@@ -115,9 +116,9 @@ func cronJob() batch.CronJob {
}
}

-func jobSpec() batch.JobSpec {
+func jobSpec() batchv1.JobSpec {
one := int32(1)
-return batch.JobSpec{
+return batchv1.JobSpec{
Parallelism: &one,
Completions: &one,
Template: v1.PodTemplateSpec{
@@ -135,8 +136,8 @@ func jobSpec() batch.JobSpec {
}
}

-func newJob(UID string) batch.Job {
-return batch.Job{
+func newJob(UID string) batchv1.Job {
+return batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID(UID),
Name: "foobar",
@@ -148,15 +149,15 @@ func newJob(UID string) batch.Job {
}

var (
-shortDead int64 = 10
-mediumDead int64 = 2 * 60 * 60
-longDead int64 = 1000000
-noDead int64 = -12345
-A batch.ConcurrencyPolicy = batch.AllowConcurrent
-f batch.ConcurrencyPolicy = batch.ForbidConcurrent
-R batch.ConcurrencyPolicy = batch.ReplaceConcurrent
-T bool = true
-F bool = false
+shortDead int64 = 10
+mediumDead int64 = 2 * 60 * 60
+longDead int64 = 1000000
+noDead int64 = -12345
+A batchv2alpha1.ConcurrencyPolicy = batchv2alpha1.AllowConcurrent
+f batchv2alpha1.ConcurrencyPolicy = batchv2alpha1.ForbidConcurrent
+R batchv2alpha1.ConcurrencyPolicy = batchv2alpha1.ReplaceConcurrent
+T bool = true
+F bool = false
)

func TestSyncOne_RunOrNot(t *testing.T) {
@@ -175,7 +176,7 @@ func TestSyncOne_RunOrNot(t *testing.T) {

testCases := map[string]struct {
// sj spec
-concurrencyPolicy batch.ConcurrencyPolicy
+concurrencyPolicy batchv2alpha1.ConcurrencyPolicy
suspend bool
schedule string
deadline int64
@@ -251,10 +252,10 @@ func TestSyncOne_RunOrNot(t *testing.T) {
}

var (
-job *batch.Job
+job *batchv1.Job
err error
)
-js := []batch.Job{}
+js := []batchv1.Job{}
if tc.ranPreviously {
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeThePriorHour()}
sj.Status.LastScheduleTime = &metav1.Time{Time: justAfterThePriorHour()}
@@ -466,7 +467,7 @@ func TestCleanupFinishedJobs_DeleteOrNot(t *testing.T) {
sj.Spec.FailedJobsHistoryLimit = tc.failedJobsHistoryLimit

var (
-job *batch.Job
+job *batchv1.Job
err error
)

@@ -481,7 +482,7 @@ func TestCleanupFinishedJobs_DeleteOrNot(t *testing.T) {
}

// Create jobs
-js := []batch.Job{}
+js := []batchv1.Job{}
jobsToDelete := []string{}
sj.Status.Active = []v1.ObjectReference{}

@@ -495,13 +496,13 @@ func TestCleanupFinishedJobs_DeleteOrNot(t *testing.T) {
job.Namespace = ""

if spec.IsFinished {
-var conditionType batch.JobConditionType
+var conditionType batchv1.JobConditionType
if spec.IsSuccessful {
-conditionType = batch.JobComplete
+conditionType = batchv1.JobComplete
} else {
-conditionType = batch.JobFailed
+conditionType = batchv1.JobFailed
}
-condition := batch.JobCondition{Type: conditionType, Status: v1.ConditionTrue}
+condition := batchv1.JobCondition{Type: conditionType, Status: v1.ConditionTrue}
job.Status.Conditions = append(job.Status.Conditions, condition)

if spec.IsStillInActiveList {
@@ -563,13 +564,13 @@ func TestCleanupFinishedJobs_DeleteOrNot(t *testing.T) {
// TestSyncOne_Status tests sj.UpdateStatus in syncOne
func TestSyncOne_Status(t *testing.T) {
finishedJob := newJob("1")
-finishedJob.Status.Conditions = append(finishedJob.Status.Conditions, batch.JobCondition{Type: batch.JobComplete, Status: v1.ConditionTrue})
+finishedJob.Status.Conditions = append(finishedJob.Status.Conditions, batchv1.JobCondition{Type: batchv1.JobComplete, Status: v1.ConditionTrue})
unexpectedJob := newJob("2")
missingJob := newJob("3")

testCases := map[string]struct {
// sj spec
-concurrencyPolicy batch.ConcurrencyPolicy
+concurrencyPolicy batchv2alpha1.ConcurrencyPolicy
suspend bool
schedule string
deadline int64
@@ -654,7 +655,7 @@ func TestSyncOne_Status(t *testing.T) {
}
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeTheHour()}
}
-jobs := []batch.Job{}
+jobs := []batchv1.Job{}
if tc.hasFinishedJob {
ref, err := getRef(&finishedJob)
if err != nil {
@@ -24,14 +24,15 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/v1"
-batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
+batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
+batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// sjControlInterface is an interface that knows how to update CronJob status
// created as an interface to allow testing.
type sjControlInterface interface {
-UpdateStatus(sj *batch.CronJob) (*batch.CronJob, error)
+UpdateStatus(sj *batchv2alpha1.CronJob) (*batchv2alpha1.CronJob, error)
}

// realSJControl is the default implementation of sjControlInterface.
@@ -41,18 +42,18 @@ type realSJControl struct {

var _ sjControlInterface = &realSJControl{}

-func (c *realSJControl) UpdateStatus(sj *batch.CronJob) (*batch.CronJob, error) {
+func (c *realSJControl) UpdateStatus(sj *batchv2alpha1.CronJob) (*batchv2alpha1.CronJob, error) {
return c.KubeClient.BatchV2alpha1().CronJobs(sj.Namespace).UpdateStatus(sj)
}

// fakeSJControl is the default implementation of sjControlInterface.
type fakeSJControl struct {
-Updates []batch.CronJob
+Updates []batchv2alpha1.CronJob
}

var _ sjControlInterface = &fakeSJControl{}

-func (c *fakeSJControl) UpdateStatus(sj *batch.CronJob) (*batch.CronJob, error) {
+func (c *fakeSJControl) UpdateStatus(sj *batchv2alpha1.CronJob) (*batchv2alpha1.CronJob, error) {
c.Updates = append(c.Updates, *sj)
return sj, nil
}
@@ -63,11 +64,11 @@ func (c *fakeSJControl) UpdateStatus(sj *batch.CronJob) (*batch.CronJob, error)
// created as an interface to allow testing.
type jobControlInterface interface {
// GetJob retrieves a job
-GetJob(namespace, name string) (*batch.Job, error)
+GetJob(namespace, name string) (*batchv1.Job, error)
// CreateJob creates new jobs according to the spec
-CreateJob(namespace string, job *batch.Job) (*batch.Job, error)
+CreateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error)
// UpdateJob updates a job
-UpdateJob(namespace string, job *batch.Job) (*batch.Job, error)
+UpdateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error)
// DeleteJob deletes the job identified by name.
// TODO: delete by UID?
DeleteJob(namespace string, name string) error
@@ -81,7 +82,7 @@ type realJobControl struct {

var _ jobControlInterface = &realJobControl{}

-func copyLabels(template *batch.JobTemplateSpec) labels.Set {
+func copyLabels(template *batchv2alpha1.JobTemplateSpec) labels.Set {
l := make(labels.Set)
for k, v := range template.Labels {
l[k] = v
@@ -89,7 +90,7 @@ func copyLabels(template *batch.JobTemplateSpec) labels.Set {
return l
}

-func copyAnnotations(template *batch.JobTemplateSpec) labels.Set {
+func copyAnnotations(template *batchv2alpha1.JobTemplateSpec) labels.Set {
a := make(labels.Set)
for k, v := range template.Annotations {
a[k] = v
@@ -97,33 +98,33 @@ func copyAnnotations(template *batch.JobTemplateSpec) labels.Set {
return a
}

-func (r realJobControl) GetJob(namespace, name string) (*batch.Job, error) {
-return r.KubeClient.BatchV2alpha1().Jobs(namespace).Get(name, metav1.GetOptions{})
+func (r realJobControl) GetJob(namespace, name string) (*batchv1.Job, error) {
+return r.KubeClient.BatchV1().Jobs(namespace).Get(name, metav1.GetOptions{})
}

-func (r realJobControl) UpdateJob(namespace string, job *batch.Job) (*batch.Job, error) {
-return r.KubeClient.BatchV2alpha1().Jobs(namespace).Update(job)
+func (r realJobControl) UpdateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
+return r.KubeClient.BatchV1().Jobs(namespace).Update(job)
}

-func (r realJobControl) CreateJob(namespace string, job *batch.Job) (*batch.Job, error) {
-return r.KubeClient.BatchV2alpha1().Jobs(namespace).Create(job)
+func (r realJobControl) CreateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
+return r.KubeClient.BatchV1().Jobs(namespace).Create(job)
}

func (r realJobControl) DeleteJob(namespace string, name string) error {
-return r.KubeClient.BatchV2alpha1().Jobs(namespace).Delete(name, nil)
+return r.KubeClient.BatchV1().Jobs(namespace).Delete(name, nil)
}

type fakeJobControl struct {
sync.Mutex
-Job *batch.Job
-Jobs []batch.Job
+Job *batchv1.Job
+Jobs []batchv1.Job
DeleteJobName []string
Err error
}

var _ jobControlInterface = &fakeJobControl{}

-func (f *fakeJobControl) CreateJob(namespace string, job *batch.Job) (*batch.Job, error) {
+func (f *fakeJobControl) CreateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
f.Lock()
defer f.Unlock()
if f.Err != nil {
@@ -135,7 +136,7 @@ func (f *fakeJobControl) CreateJob(namespace string, job *batch.Job) (*batch.Job
return job, nil
}

-func (f *fakeJobControl) GetJob(namespace, name string) (*batch.Job, error) {
+func (f *fakeJobControl) GetJob(namespace, name string) (*batchv1.Job, error) {
f.Lock()
defer f.Unlock()
if f.Err != nil {
@@ -144,7 +145,7 @@ func (f *fakeJobControl) GetJob(namespace, name string) (*batch.Job, error) {
return f.Job, nil
}

-func (f *fakeJobControl) UpdateJob(namespace string, job *batch.Job) (*batch.Job, error) {
+func (f *fakeJobControl) UpdateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
f.Lock()
defer f.Unlock()
if f.Err != nil {
@@ -167,7 +168,7 @@ func (f *fakeJobControl) Clear() {
f.Lock()
defer f.Unlock()
f.DeleteJobName = []string{}
-f.Jobs = []batch.Job{}
+f.Jobs = []batchv1.Job{}
f.Err = nil
}
@@ -30,12 +30,13 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
-batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
+batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
+batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
)

// Utilities for dealing with Jobs and CronJobs and time.

-func inActiveList(sj batch.CronJob, uid types.UID) bool {
+func inActiveList(sj batchv2alpha1.CronJob, uid types.UID) bool {
for _, j := range sj.Status.Active {
if j.UID == uid {
return true
@@ -44,7 +45,7 @@ func inActiveList(sj batch.CronJob, uid types.UID) bool {
return false
}

-func deleteFromActiveList(sj *batch.CronJob, uid types.UID) {
+func deleteFromActiveList(sj *batchv2alpha1.CronJob, uid types.UID) {
if sj == nil {
return
}
@@ -58,7 +59,7 @@ func deleteFromActiveList(sj *batch.CronJob, uid types.UID) {
}

// getParentUIDFromJob extracts UID of job's parent and whether it was found
-func getParentUIDFromJob(j batch.Job) (types.UID, bool) {
+func getParentUIDFromJob(j batchv1.Job) (types.UID, bool) {
creatorRefJson, found := j.ObjectMeta.Annotations[v1.CreatedByAnnotation]
if !found {
glog.V(4).Infof("Job with no created-by annotation, name %s namespace %s", j.Name, j.Namespace)
@@ -85,8 +86,8 @@ func getParentUIDFromJob(j batch.Job) (types.UID, bool) {

// groupJobsByParent groups jobs into a map keyed by the job parent UID (e.g. scheduledJob).
// It has no receiver, to facilitate testing.
-func groupJobsByParent(sjs []batch.CronJob, js []batch.Job) map[types.UID][]batch.Job {
-jobsBySj := make(map[types.UID][]batch.Job)
+func groupJobsByParent(sjs []batchv2alpha1.CronJob, js []batchv1.Job) map[types.UID][]batchv1.Job {
+jobsBySj := make(map[types.UID][]batchv1.Job)
for _, job := range js {
parentUID, found := getParentUIDFromJob(job)
if !found {
@@ -120,7 +121,7 @@ func getNextStartTimeAfter(schedule string, now time.Time) (time.Time, error) {
//
// If there are too many (>100) unstarted times, just give up and return an empty slice.
// If there were missed times prior to the last known start time, then those are not returned.
-func getRecentUnmetScheduleTimes(sj batch.CronJob, now time.Time) ([]time.Time, error) {
+func getRecentUnmetScheduleTimes(sj batchv2alpha1.CronJob, now time.Time) ([]time.Time, error) {
starts := []time.Time{}
sched, err := cron.ParseStandard(sj.Spec.Schedule)
if err != nil {
@@ -181,7 +182,7 @@ func getRecentUnmetScheduleTimes(sj batch.CronJob, now time.Time) ([]time.Time,
// XXX unit test this

// getJobFromTemplate makes a Job from a CronJob
-func getJobFromTemplate(sj *batch.CronJob, scheduledTime time.Time) (*batch.Job, error) {
+func getJobFromTemplate(sj *batchv2alpha1.CronJob, scheduledTime time.Time) (*batchv1.Job, error) {
// TODO: consider adding the following labels:
// nominal-start-time=$RFC_3339_DATE_OF_INTENDED_START -- for user convenience
// scheduled-job-name=$SJ_NAME -- for user convenience
@@ -195,7 +196,7 @@ func getJobFromTemplate(sj *batch.CronJob, scheduledTime time.Time) (*batch.Job,
// We want job names for a given nominal start time to have a deterministic name to avoid the same job being created twice
name := fmt.Sprintf("%s-%d", sj.Name, getTimeHash(scheduledTime))

-job := &batch.Job{
+job := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
Annotations: annotations,
@@ -234,22 +235,22 @@ func makeCreatedByRefJson(object runtime.Object) (string, error) {
return string(createdByRefJson), nil
}

-func getFinishedStatus(j *batch.Job) (bool, batch.JobConditionType) {
+func getFinishedStatus(j *batchv1.Job) (bool, batchv1.JobConditionType) {
for _, c := range j.Status.Conditions {
-if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == v1.ConditionTrue {
+if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == v1.ConditionTrue {
return true, c.Type
}
}
return false, ""
}

-func IsJobFinished(j *batch.Job) bool {
+func IsJobFinished(j *batchv1.Job) bool {
isFinished, _ := getFinishedStatus(j)
return isFinished
}

// byJobStartTime sorts a list of jobs by start timestamp, using their names as a tie breaker.
-type byJobStartTime []batch.Job
+type byJobStartTime []batchv1.Job

func (o byJobStartTime) Len() int { return len(o) }
func (o byJobStartTime) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
@@ -24,7 +24,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/api/v1"
-batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
+batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
+batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
)

func TestGetJobFromTemplate(t *testing.T) {
@@ -34,22 +35,22 @@ func TestGetJobFromTemplate(t *testing.T) {
var one int64 = 1
var no bool = false

-sj := batch.CronJob{
+sj := batchv2alpha1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: "mycronjob",
Namespace: "snazzycats",
UID: types.UID("1a2b3c"),
SelfLink: "/apis/batch/v1/namespaces/snazzycats/jobs/mycronjob",
},
-Spec: batch.CronJobSpec{
+Spec: batchv2alpha1.CronJobSpec{
Schedule: "* * * * ?",
-ConcurrencyPolicy: batch.AllowConcurrent,
-JobTemplate: batch.JobTemplateSpec{
+ConcurrencyPolicy: batchv2alpha1.AllowConcurrent,
+JobTemplate: batchv2alpha1.JobTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"a": "b"},
Annotations: map[string]string{"x": "y"},
},
-Spec: batch.JobSpec{
+Spec: batchv1.JobSpec{
ActiveDeadlineSeconds: &one,
ManualSelector: &no,
Template: v1.PodTemplateSpec{
@@ -69,7 +70,7 @@ func TestGetJobFromTemplate(t *testing.T) {
},
}

-var job *batch.Job
+var job *batchv1.Job
job, err := getJobFromTemplate(&sj, time.Time{})
if err != nil {
t.Errorf("Did not expect error: %s", err)
@@ -98,12 +99,12 @@ func TestGetJobFromTemplate(t *testing.T) {
}

func TestGetParentUIDFromJob(t *testing.T) {
-j := &batch.Job{
+j := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "foobar",
Namespace: metav1.NamespaceDefault,
},
-Spec: batch.JobSpec{
+Spec: batchv1.JobSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
@@ -120,9 +121,9 @@ func TestGetParentUIDFromJob(t *testing.T) {
},
},
},
-Status: batch.JobStatus{
-Conditions: []batch.JobCondition{{
-Type: batch.JobComplete,
+Status: batchv1.JobStatus{
+Conditions: []batchv1.JobCondition{{
+Type: batchv1.JobComplete,
Status: v1.ConditionTrue,
}},
},
@@ -162,8 +163,8 @@ func TestGroupJobsByParent(t *testing.T) {

{
// Case 1: There are no jobs and scheduledJobs
-sjs := []batch.CronJob{}
-js := []batch.Job{}
+sjs := []batchv2alpha1.CronJob{}
+js := []batchv1.Job{}
jobsBySj := groupJobsByParent(sjs, js)
if len(jobsBySj) != 0 {
t.Errorf("Wrong number of items in map")
@@ -172,10 +173,10 @@ func TestGroupJobsByParent(t *testing.T) {

{
// Case 2: there is one controller with no job.
-sjs := []batch.CronJob{
+sjs := []batchv2alpha1.CronJob{
{ObjectMeta: metav1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
}
-js := []batch.Job{}
+js := []batchv1.Job{}
jobsBySj := groupJobsByParent(sjs, js)
if len(jobsBySj) != 0 {
t.Errorf("Wrong number of items in map")
@@ -184,10 +185,10 @@ func TestGroupJobsByParent(t *testing.T) {

{
// Case 3: there is one controller with one job it created.
-sjs := []batch.CronJob{
+sjs := []batchv2alpha1.CronJob{
{ObjectMeta: metav1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
}
-js := []batch.Job{
+js := []batchv1.Job{
{ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}},
}
jobsBySj := groupJobsByParent(sjs, js)
@@ -207,7 +208,7 @@ func TestGroupJobsByParent(t *testing.T) {
{
// Case 4: Two namespaces, one has two jobs from one controller, other has 3 jobs from two controllers.
// There are also two jobs with no created-by annotation.
-js := []batch.Job{
+js := []batchv1.Job{
{ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", Namespace: "x", Annotations: createdBy2}},
{ObjectMeta: metav1.ObjectMeta{Name: "c", Namespace: "x", Annotations: createdBy1}},
@@ -216,7 +217,7 @@ func TestGroupJobsByParent(t *testing.T) {
{ObjectMeta: metav1.ObjectMeta{Name: "b", Namespace: "y", Annotations: createdBy3}},
{ObjectMeta: metav1.ObjectMeta{Name: "d", Namespace: "y", Annotations: noCreatedBy}},
}
-sjs := []batch.CronJob{
+sjs := []batchv2alpha1.CronJob{
{ObjectMeta: metav1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
{ObjectMeta: metav1.ObjectMeta{Name: "f", Namespace: "x", UID: uid2}},
{ObjectMeta: metav1.ObjectMeta{Name: "g", Namespace: "y", UID: uid3}},
@@ -266,16 +267,16 @@ func TestGetRecentUnmetScheduleTimes(t *testing.T) {
t.Errorf("test setup error: %v", err)
}

-sj := batch.CronJob{
+sj := batchv2alpha1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: "mycronjob",
Namespace: metav1.NamespaceDefault,
UID: types.UID("1a2b3c"),
},
-Spec: batch.CronJobSpec{
+Spec: batchv2alpha1.CronJobSpec{
Schedule: schedule,
-ConcurrencyPolicy: batch.AllowConcurrent,
-JobTemplate: batch.JobTemplateSpec{},
+ConcurrencyPolicy: batchv2alpha1.AllowConcurrent,
+JobTemplate: batchv2alpha1.JobTemplateSpec{},
},
}
{