Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 10:51:29 +00:00

batch: add suspended job

Signed-off-by: Adhityaa Chandrasekar <adtac@google.com>

parent 97cd5bb7e2
commit a0844da8f7

api/openapi-spec/swagger.json (generated, 10 changes)
@@ -4369,7 +4369,7 @@
       "description": "JobSpec describes how the job execution will look like.",
       "properties": {
         "activeDeadlineSeconds": {
-          "description": "Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer",
+          "description": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.",
           "format": "int64",
           "type": "integer"
         },
@@ -4400,6 +4400,10 @@
           "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector",
           "description": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors"
         },
+        "suspend": {
+          "description": "Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false.",
+          "type": "boolean"
+        },
         "template": {
           "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec",
           "description": "Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/"
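
For orientation (not part of this commit), here is a minimal client-go sketch of what the new field enables: a Job that stays dormant until resumed. The clientset, namespace, Job name, and image are illustrative, and the cluster must have the alpha SuspendJob feature gate enabled for suspend: true to be accepted.

```go
package main

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/utils/pointer"
)

// createSuspendedJob creates a Job with spec.suspend=true; per the description
// above, the Job controller creates no Pods until suspend is flipped to false.
func createSuspendedJob(ctx context.Context, clientset kubernetes.Interface) error {
	job := &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		Spec: batchv1.JobSpec{
			Suspend: pointer.BoolPtr(true),
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{
						{Name: "main", Image: "busybox", Command: []string{"true"}},
					},
				},
			},
		},
	}
	_, err := clientset.BatchV1().Jobs("default").Create(ctx, job, metav1.CreateOptions{})
	return err
}
```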
@@ -4432,7 +4436,7 @@
           "description": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully."
         },
         "conditions": {
-          "description": "The latest available observations of an object's current state. When a job fails, one of the conditions will have type == \"Failed\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
+          "description": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
           "items": {
             "$ref": "#/definitions/io.k8s.api.batch.v1.JobCondition"
           },
@@ -4448,7 +4452,7 @@
         },
         "startTime": {
           "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time",
-          "description": "Represents time when the job was acknowledged by the job controller. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC."
+          "description": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC."
         },
         "succeeded": {
           "description": "The number of pods which reached phase Succeeded.",
pkg/apis/batch/fuzzer/fuzzer.go

@@ -20,14 +20,9 @@ import (
 	fuzz "github.com/google/gofuzz"
 	runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/kubernetes/pkg/apis/batch"
+	"k8s.io/utils/pointer"
 )
 
-func newBool(val bool) *bool {
-	p := new(bool)
-	*p = val
-	return p
-}
-
 // Funcs returns the fuzzer functions for the batch api group.
 var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
 	return []interface{}{
@@ -48,7 +43,7 @@ var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
 			j.Parallelism = &parallelism
 			j.BackoffLimit = &backoffLimit
 			if c.Rand.Int31()%2 == 0 {
-				j.ManualSelector = newBool(true)
+				j.ManualSelector = pointer.BoolPtr(true)
 			} else {
 				j.ManualSelector = nil
 			}
@@ -57,6 +52,9 @@ var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
 			} else {
 				j.CompletionMode = batch.IndexedCompletion
 			}
+			// We're fuzzing the internal JobSpec type, not the v1 type, so we don't
+			// need to fuzz the nil value.
+			j.Suspend = pointer.BoolPtr(c.RandBool())
 		},
 		func(sj *batch.CronJobSpec, c fuzz.Continue) {
 			c.FuzzNoCustom(sj)
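
The hunks above also swap the file's hand-rolled newBool helper for the k8s.io/utils/pointer package. A tiny standalone sketch of the helper being adopted:

```go
package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

func main() {
	// pointer.BoolPtr allocates a bool and returns its address, which is
	// exactly what the deleted newBool helper did by hand.
	b := pointer.BoolPtr(true)
	fmt.Println(*b) // true
}
```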
pkg/apis/batch/types.go

@@ -119,8 +119,11 @@ type JobSpec struct {
 	// +optional
 	Completions *int32
 
-	// Optional duration in seconds relative to the startTime that the job may be active
-	// before the system tries to terminate it; value must be positive integer
+	// Specifies the duration in seconds relative to the startTime that the job
+	// may be continuously active before the system tries to terminate it; value
+	// must be positive integer. If a Job is suspended (at creation or through an
+	// update), this timer will effectively be stopped and reset when the Job is
+	// resumed again.
 	// +optional
 	ActiveDeadlineSeconds *int64
 
@@ -187,19 +190,36 @@ type JobSpec struct {
 	// controller skips updates for the Job.
 	// +optional
 	CompletionMode CompletionMode
+
+	// Suspend specifies whether the Job controller should create Pods or not. If
+	// a Job is created with suspend set to true, no Pods are created by the Job
+	// controller. If a Job is suspended after creation (i.e. the flag goes from
+	// false to true), the Job controller will delete all active Pods associated
+	// with this Job. Users must design their workload to gracefully handle this.
+	// Suspending a Job will reset the StartTime field of the Job, effectively
+	// resetting the ActiveDeadlineSeconds timer too. This is an alpha field and
+	// requires the SuspendJob feature gate to be enabled; otherwise this field
+	// may not be set to true. Defaults to false.
+	// +optional
+	Suspend *bool
 }
 
 // JobStatus represents the current state of a Job.
 type JobStatus struct {
 
-	// The latest available observations of an object's current state.
-	// When a job fails, one of the conditions will have type == "Failed".
+	// The latest available observations of an object's current state. When a Job
+	// fails, one of the conditions will have type "Failed" and status true. When
+	// a Job is suspended, one of the conditions will have type "Suspended" and
+	// status true; when the Job is resumed, the status of this condition will
+	// become false. When a Job is completed, one of the conditions will have
+	// type "Complete" and status true.
 	// +optional
 	Conditions []JobCondition
 
-	// Represents time when the job was acknowledged by the job controller.
-	// It is not guaranteed to be set in happens-before order across separate operations.
-	// It is represented in RFC3339 form and is in UTC.
+	// Represents time when the job controller started processing a job. When a
+	// Job is created in the suspended state, this field is not set until the
+	// first time it is resumed. This field is reset every time a Job is resumed
+	// from suspension. It is represented in RFC3339 form and is in UTC.
 	// +optional
 	StartTime *metav1.Time
 
@@ -238,6 +258,8 @@ type JobConditionType string
 
 // These are valid conditions of a job.
 const (
+	// JobSuspended means the job has been suspended.
+	JobSuspended JobConditionType = "Suspended"
 	// JobComplete means the job has completed its execution.
 	JobComplete JobConditionType = "Complete"
 	// JobFailed means the job has failed its execution.
@@ -246,7 +268,7 @@ const (
 
 // JobCondition describes current state of a job.
 type JobCondition struct {
-	// Type of job condition, Complete or Failed.
+	// Type of job condition.
 	Type JobConditionType
 	// Status of the condition, one of True, False, Unknown.
 	Status api.ConditionStatus
@@ -319,7 +341,7 @@ type CronJobSpec struct {
 	ConcurrencyPolicy ConcurrencyPolicy
 
 	// This flag tells the controller to suspend subsequent executions, it does
 	// not apply to already started executions. Defaults to false.
 	// +optional
 	Suspend *bool
 
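
To see the new spec field in motion, a hedged client-go sketch (not part of this commit) of resuming a suspended Job; it assumes the same imports and clientset as the creation sketch earlier, and ns/name are placeholders:

```go
// resumeJob flips spec.suspend from true to false. Per the field comments
// above, the controller then starts creating Pods again and sets/resets
// status.startTime, which restarts the ActiveDeadlineSeconds timer.
func resumeJob(ctx context.Context, clientset kubernetes.Interface, ns, name string) error {
	job, err := clientset.BatchV1().Jobs(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	job.Spec.Suspend = pointer.BoolPtr(false)
	_, err = clientset.BatchV1().Jobs(ns).Update(ctx, job, metav1.UpdateOptions{})
	return err
}
```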
pkg/apis/batch/v1/defaults.go

@@ -46,6 +46,9 @@ func SetDefaults_Job(obj *batchv1.Job) {
 	if len(obj.Spec.CompletionMode) == 0 {
 		obj.Spec.CompletionMode = batchv1.NonIndexedCompletion
 	}
+	if obj.Spec.Suspend == nil {
+		obj.Spec.Suspend = utilpointer.BoolPtr(false)
+	}
 }
 
 func SetDefaults_CronJob(obj *batchv1.CronJob) {
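
A small illustration of the defaulting added here, written as a Go example in the style of the test file that follows (an assumption: it runs somewhere SetDefaults_Job is visible, e.g. via the dot-import the tests use; fmt and batchv1 imports are implied):

```go
func ExampleSetDefaults_Job_suspend() {
	// A v1 Job created without spec.suspend gets an explicit false after
	// defaulting, so consumers never see a nil Suspend on defaulted objects.
	job := &batchv1.Job{}
	SetDefaults_Job(job)
	fmt.Println(*job.Spec.Suspend)
	// Output: false
}
```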
pkg/apis/batch/v1/defaults_test.go

@@ -20,6 +20,7 @@ import (
 	"reflect"
 	"testing"
 
+	"github.com/google/go-cmp/cmp"
 	batchv1 "k8s.io/api/batch/v1"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -27,7 +28,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	_ "k8s.io/kubernetes/pkg/apis/batch/install"
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
-	utilpointer "k8s.io/utils/pointer"
+	"k8s.io/utils/pointer"
 
 	. "k8s.io/kubernetes/pkg/apis/batch/v1"
 )
@@ -49,15 +50,36 @@ func TestSetDefaultJob(t *testing.T) {
 			},
 			expected: &batchv1.Job{
 				Spec: batchv1.JobSpec{
-					Completions: utilpointer.Int32Ptr(1),
-					Parallelism: utilpointer.Int32Ptr(1),
-					BackoffLimit: utilpointer.Int32Ptr(6),
+					Completions: pointer.Int32Ptr(1),
+					Parallelism: pointer.Int32Ptr(1),
+					BackoffLimit: pointer.Int32Ptr(6),
 					CompletionMode: batchv1.NonIndexedCompletion,
+					Suspend: pointer.BoolPtr(false),
 				},
 			},
 			expectLabels: true,
 		},
-		"All unspecified -> all integers are defaulted and no default labels": {
+		"suspend set, everything else is defaulted": {
+			original: &batchv1.Job{
+				Spec: batchv1.JobSpec{
+					Suspend: pointer.BoolPtr(true),
+					Template: v1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels},
+					},
+				},
+			},
+			expected: &batchv1.Job{
+				Spec: batchv1.JobSpec{
+					Completions: pointer.Int32Ptr(1),
+					Parallelism: pointer.Int32Ptr(1),
+					BackoffLimit: pointer.Int32Ptr(6),
+					CompletionMode: batchv1.NonIndexedCompletion,
+					Suspend: pointer.BoolPtr(true),
+				},
+			},
+			expectLabels: true,
+		},
+		"All unspecified -> all pointers, CompletionMode are defaulted and no default labels": {
 			original: &batchv1.Job{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{"mylabel": "myvalue"},
@@ -70,17 +92,18 @@ func TestSetDefaultJob(t *testing.T) {
 			},
 			expected: &batchv1.Job{
 				Spec: batchv1.JobSpec{
-					Completions: utilpointer.Int32Ptr(1),
-					Parallelism: utilpointer.Int32Ptr(1),
-					BackoffLimit: utilpointer.Int32Ptr(6),
+					Completions: pointer.Int32Ptr(1),
+					Parallelism: pointer.Int32Ptr(1),
+					BackoffLimit: pointer.Int32Ptr(6),
 					CompletionMode: batchv1.NonIndexedCompletion,
+					Suspend: pointer.BoolPtr(false),
 				},
 			},
 		},
 		"WQ: Parallelism explicitly 0 and completions unset -> BackoffLimit is defaulted": {
 			original: &batchv1.Job{
 				Spec: batchv1.JobSpec{
-					Parallelism: utilpointer.Int32Ptr(0),
+					Parallelism: pointer.Int32Ptr(0),
 					Template: v1.PodTemplateSpec{
 						ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels},
 					},
@@ -88,9 +111,10 @@ func TestSetDefaultJob(t *testing.T) {
 			},
 			expected: &batchv1.Job{
 				Spec: batchv1.JobSpec{
-					Parallelism: utilpointer.Int32Ptr(0),
-					BackoffLimit: utilpointer.Int32Ptr(6),
+					Parallelism: pointer.Int32Ptr(0),
+					BackoffLimit: pointer.Int32Ptr(6),
 					CompletionMode: batchv1.NonIndexedCompletion,
+					Suspend: pointer.BoolPtr(false),
 				},
 			},
 			expectLabels: true,
@@ -98,7 +122,7 @@ func TestSetDefaultJob(t *testing.T) {
 		"WQ: Parallelism explicitly 2 and completions unset -> BackoffLimit is defaulted": {
 			original: &batchv1.Job{
 				Spec: batchv1.JobSpec{
-					Parallelism: utilpointer.Int32Ptr(2),
+					Parallelism: pointer.Int32Ptr(2),
 					Template: v1.PodTemplateSpec{
 						ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels},
 					},
@@ -106,9 +130,10 @@ func TestSetDefaultJob(t *testing.T) {
 			},
 			expected: &batchv1.Job{
 				Spec: batchv1.JobSpec{
-					Parallelism: utilpointer.Int32Ptr(2),
-					BackoffLimit: utilpointer.Int32Ptr(6),
+					Parallelism: pointer.Int32Ptr(2),
+					BackoffLimit: pointer.Int32Ptr(6),
 					CompletionMode: batchv1.NonIndexedCompletion,
+					Suspend: pointer.BoolPtr(false),
 				},
 			},
 			expectLabels: true,
@@ -116,7 +141,7 @@ func TestSetDefaultJob(t *testing.T) {
 		"Completions explicitly 2 and others unset -> parallelism and BackoffLimit are defaulted": {
 			original: &batchv1.Job{
 				Spec: batchv1.JobSpec{
-					Completions: utilpointer.Int32Ptr(2),
+					Completions: pointer.Int32Ptr(2),
 					Template: v1.PodTemplateSpec{
 						ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels},
 					},
@@ -124,10 +149,11 @@ func TestSetDefaultJob(t *testing.T) {
 			},
 			expected: &batchv1.Job{
 				Spec: batchv1.JobSpec{
-					Completions: utilpointer.Int32Ptr(2),
-					Parallelism: utilpointer.Int32Ptr(1),
-					BackoffLimit: utilpointer.Int32Ptr(6),
+					Completions: pointer.Int32Ptr(2),
+					Parallelism: pointer.Int32Ptr(1),
+					BackoffLimit: pointer.Int32Ptr(6),
 					CompletionMode: batchv1.NonIndexedCompletion,
+					Suspend: pointer.BoolPtr(false),
 				},
 			},
 			expectLabels: true,
@@ -135,7 +161,7 @@ func TestSetDefaultJob(t *testing.T) {
 		"BackoffLimit explicitly 5 and others unset -> parallelism and completions are defaulted": {
 			original: &batchv1.Job{
 				Spec: batchv1.JobSpec{
-					BackoffLimit: utilpointer.Int32Ptr(5),
+					BackoffLimit: pointer.Int32Ptr(5),
 					Template: v1.PodTemplateSpec{
 						ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels},
 					},
@@ -143,10 +169,11 @@ func TestSetDefaultJob(t *testing.T) {
 			},
 			expected: &batchv1.Job{
 				Spec: batchv1.JobSpec{
-					Completions: utilpointer.Int32Ptr(1),
-					Parallelism: utilpointer.Int32Ptr(1),
-					BackoffLimit: utilpointer.Int32Ptr(5),
+					Completions: pointer.Int32Ptr(1),
+					Parallelism: pointer.Int32Ptr(1),
+					BackoffLimit: pointer.Int32Ptr(5),
 					CompletionMode: batchv1.NonIndexedCompletion,
+					Suspend: pointer.BoolPtr(false),
 				},
 			},
 			expectLabels: true,
@@ -154,10 +181,11 @@ func TestSetDefaultJob(t *testing.T) {
 		"All set -> no change": {
 			original: &batchv1.Job{
 				Spec: batchv1.JobSpec{
-					Completions: utilpointer.Int32Ptr(8),
-					Parallelism: utilpointer.Int32Ptr(9),
-					BackoffLimit: utilpointer.Int32Ptr(10),
+					Completions: pointer.Int32Ptr(8),
+					Parallelism: pointer.Int32Ptr(9),
+					BackoffLimit: pointer.Int32Ptr(10),
 					CompletionMode: batchv1.NonIndexedCompletion,
+					Suspend: pointer.BoolPtr(true),
 					Template: v1.PodTemplateSpec{
 						ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels},
 					},
@@ -165,10 +193,11 @@ func TestSetDefaultJob(t *testing.T) {
 			},
 			expected: &batchv1.Job{
 				Spec: batchv1.JobSpec{
-					Completions: utilpointer.Int32Ptr(8),
-					Parallelism: utilpointer.Int32Ptr(9),
-					BackoffLimit: utilpointer.Int32Ptr(10),
+					Completions: pointer.Int32Ptr(8),
+					Parallelism: pointer.Int32Ptr(9),
+					BackoffLimit: pointer.Int32Ptr(10),
 					CompletionMode: batchv1.NonIndexedCompletion,
+					Suspend: pointer.BoolPtr(true),
 					Template: v1.PodTemplateSpec{
 						ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels},
 					},
@@ -179,10 +208,11 @@ func TestSetDefaultJob(t *testing.T) {
 		"All set, flipped -> no change": {
 			original: &batchv1.Job{
 				Spec: batchv1.JobSpec{
-					Completions: utilpointer.Int32Ptr(11),
-					Parallelism: utilpointer.Int32Ptr(10),
-					BackoffLimit: utilpointer.Int32Ptr(9),
+					Completions: pointer.Int32Ptr(11),
+					Parallelism: pointer.Int32Ptr(10),
+					BackoffLimit: pointer.Int32Ptr(9),
 					CompletionMode: batchv1.IndexedCompletion,
+					Suspend: pointer.BoolPtr(true),
 					Template: v1.PodTemplateSpec{
 						ObjectMeta: metav1.ObjectMeta{Labels: defaultLabels},
 					},
@@ -190,10 +220,11 @@ func TestSetDefaultJob(t *testing.T) {
 			},
 			expected: &batchv1.Job{
 				Spec: batchv1.JobSpec{
-					Completions: utilpointer.Int32Ptr(11),
-					Parallelism: utilpointer.Int32Ptr(10),
-					BackoffLimit: utilpointer.Int32Ptr(9),
+					Completions: pointer.Int32Ptr(11),
+					Parallelism: pointer.Int32Ptr(10),
+					BackoffLimit: pointer.Int32Ptr(9),
 					CompletionMode: batchv1.IndexedCompletion,
+					Suspend: pointer.BoolPtr(true),
 				},
 			},
 			expectLabels: true,
@@ -211,6 +242,9 @@ func TestSetDefaultJob(t *testing.T) {
 				t.Fatalf("Unexpected object: %v", actual)
 			}
 
+			if diff := cmp.Diff(expected.Spec.Suspend, actual.Spec.Suspend); diff != "" {
+				t.Errorf(".spec.suspend does not match; -want,+got:\n%s", diff)
+			}
 			validateDefaultInt32(t, "Completions", actual.Spec.Completions, expected.Spec.Completions)
 			validateDefaultInt32(t, "Parallelism", actual.Spec.Parallelism, expected.Spec.Parallelism)
 			validateDefaultInt32(t, "BackoffLimit", actual.Spec.BackoffLimit, expected.Spec.BackoffLimit)
@@ -271,8 +305,8 @@ func TestSetDefaultCronJob(t *testing.T) {
 				Spec: batchv1.CronJobSpec{
 					ConcurrencyPolicy: batchv1.AllowConcurrent,
 					Suspend: newBool(false),
-					SuccessfulJobsHistoryLimit: utilpointer.Int32Ptr(3),
-					FailedJobsHistoryLimit: utilpointer.Int32Ptr(1),
+					SuccessfulJobsHistoryLimit: pointer.Int32Ptr(3),
+					FailedJobsHistoryLimit: pointer.Int32Ptr(1),
 				},
 			},
 		},
@@ -281,16 +315,16 @@ func TestSetDefaultCronJob(t *testing.T) {
 				Spec: batchv1.CronJobSpec{
 					ConcurrencyPolicy: batchv1.ForbidConcurrent,
 					Suspend: newBool(true),
-					SuccessfulJobsHistoryLimit: utilpointer.Int32Ptr(5),
-					FailedJobsHistoryLimit: utilpointer.Int32Ptr(5),
+					SuccessfulJobsHistoryLimit: pointer.Int32Ptr(5),
+					FailedJobsHistoryLimit: pointer.Int32Ptr(5),
 				},
 			},
 			expected: &batchv1.CronJob{
 				Spec: batchv1.CronJobSpec{
 					ConcurrencyPolicy: batchv1.ForbidConcurrent,
 					Suspend: newBool(true),
-					SuccessfulJobsHistoryLimit: utilpointer.Int32Ptr(5),
-					FailedJobsHistoryLimit: utilpointer.Int32Ptr(5),
+					SuccessfulJobsHistoryLimit: pointer.Int32Ptr(5),
+					FailedJobsHistoryLimit: pointer.Int32Ptr(5),
 				},
 			},
 		},
pkg/apis/batch/v1/zz_generated.conversion.go (generated, 2 changes)

@@ -393,6 +393,7 @@ func autoConvert_v1_JobSpec_To_batch_JobSpec(in *v1.JobSpec, out *batch.JobSpec,
 	}
 	out.TTLSecondsAfterFinished = (*int32)(unsafe.Pointer(in.TTLSecondsAfterFinished))
 	out.CompletionMode = batch.CompletionMode(in.CompletionMode)
+	out.Suspend = (*bool)(unsafe.Pointer(in.Suspend))
 	return nil
 }
 
@@ -408,6 +409,7 @@ func autoConvert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *v1.JobSpec,
 	}
 	out.TTLSecondsAfterFinished = (*int32)(unsafe.Pointer(in.TTLSecondsAfterFinished))
 	out.CompletionMode = v1.CompletionMode(in.CompletionMode)
+	out.Suspend = (*bool)(unsafe.Pointer(in.Suspend))
 	return nil
 }
 
pkg/apis/batch/zz_generated.deepcopy.go (generated, 5 changes)

@@ -271,6 +271,11 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) {
 		*out = new(int32)
 		**out = **in
 	}
+	if in.Suspend != nil {
+		in, out := &in.Suspend, &out.Suspend
+		*out = new(bool)
+		**out = **in
+	}
 	return
 }
 
|
@ -487,9 +487,9 @@ func (jm *Controller) syncJob(key string) (bool, error) {
|
|||||||
activePods := controller.FilterActivePods(pods)
|
activePods := controller.FilterActivePods(pods)
|
||||||
active := int32(len(activePods))
|
active := int32(len(activePods))
|
||||||
succeeded, failed := getStatus(&job, pods)
|
succeeded, failed := getStatus(&job, pods)
|
||||||
conditions := len(job.Status.Conditions)
|
// Job first start. Set StartTime and start the ActiveDeadlineSeconds timer
|
||||||
// job first start
|
// only if the job is not in the suspended state.
|
||||||
if job.Status.StartTime == nil {
|
if job.Status.StartTime == nil && !jobSuspended(&job) {
|
||||||
now := metav1.Now()
|
now := metav1.Now()
|
||||||
job.Status.StartTime = &now
|
job.Status.StartTime = &now
|
||||||
// enqueue a sync to check if job past ActiveDeadlineSeconds
|
// enqueue a sync to check if job past ActiveDeadlineSeconds
|
||||||
@ -524,6 +524,8 @@ func (jm *Controller) syncJob(key string) (bool, error) {
|
|||||||
failureMessage = "Job was active longer than specified deadline"
|
failureMessage = "Job was active longer than specified deadline"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
jobConditionsChanged := false
|
||||||
|
manageJobCalled := false
|
||||||
if jobFailed {
|
if jobFailed {
|
||||||
// TODO(#28486): Account for pod failures in status once we can track
|
// TODO(#28486): Account for pod failures in status once we can track
|
||||||
// completions without lingering pods.
|
// completions without lingering pods.
|
||||||
@ -532,11 +534,13 @@ func (jm *Controller) syncJob(key string) (bool, error) {
|
|||||||
// update status values accordingly
|
// update status values accordingly
|
||||||
failed += active
|
failed += active
|
||||||
active = 0
|
active = 0
|
||||||
job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, failureReason, failureMessage))
|
job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, v1.ConditionTrue, failureReason, failureMessage))
|
||||||
|
jobConditionsChanged = true
|
||||||
jm.recorder.Event(&job, v1.EventTypeWarning, failureReason, failureMessage)
|
jm.recorder.Event(&job, v1.EventTypeWarning, failureReason, failureMessage)
|
||||||
} else {
|
} else {
|
||||||
if jobNeedsSync && job.DeletionTimestamp == nil {
|
if jobNeedsSync && job.DeletionTimestamp == nil {
|
||||||
active, manageJobErr = jm.manageJob(&job, activePods, succeeded, pods)
|
active, manageJobErr = jm.manageJob(&job, activePods, succeeded, pods)
|
||||||
|
manageJobCalled = true
|
||||||
}
|
}
|
||||||
completions := succeeded
|
completions := succeeded
|
||||||
complete := false
|
complete := false
|
||||||
@ -566,10 +570,40 @@ func (jm *Controller) syncJob(key string) (bool, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if complete {
|
if complete {
|
||||||
job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobComplete, "", ""))
|
job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobComplete, v1.ConditionTrue, "", ""))
|
||||||
|
jobConditionsChanged = true
|
||||||
now := metav1.Now()
|
now := metav1.Now()
|
||||||
job.Status.CompletionTime = &now
|
job.Status.CompletionTime = &now
|
||||||
jm.recorder.Event(&job, v1.EventTypeNormal, "Completed", "Job completed")
|
jm.recorder.Event(&job, v1.EventTypeNormal, "Completed", "Job completed")
|
||||||
|
} else if utilfeature.DefaultFeatureGate.Enabled(features.SuspendJob) && manageJobCalled {
|
||||||
|
// Update the conditions / emit events only if manageJob was called in
|
||||||
|
// this syncJob. Otherwise wait for the right syncJob call to make
|
||||||
|
// updates.
|
||||||
|
if job.Spec.Suspend != nil && *job.Spec.Suspend {
|
||||||
|
// Job can be in the suspended state only if it is NOT completed.
|
||||||
|
var isUpdated bool
|
||||||
|
job.Status.Conditions, isUpdated = ensureJobConditionStatus(job.Status.Conditions, batch.JobSuspended, v1.ConditionTrue, "JobSuspended", "Job suspended")
|
||||||
|
if isUpdated {
|
||||||
|
jobConditionsChanged = true
|
||||||
|
jm.recorder.Event(&job, v1.EventTypeNormal, "Suspended", "Job suspended")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Job not suspended.
|
||||||
|
var isUpdated bool
|
||||||
|
job.Status.Conditions, isUpdated = ensureJobConditionStatus(job.Status.Conditions, batch.JobSuspended, v1.ConditionFalse, "JobResumed", "Job resumed")
|
||||||
|
if isUpdated {
|
||||||
|
jobConditionsChanged = true
|
||||||
|
jm.recorder.Event(&job, v1.EventTypeNormal, "Resumed", "Job resumed")
|
||||||
|
// Resumed jobs will always reset StartTime to current time. This is
|
||||||
|
// done because the ActiveDeadlineSeconds timer shouldn't go off
|
||||||
|
// whilst the Job is still suspended and resetting StartTime is
|
||||||
|
// consistent with resuming a Job created in the suspended state.
|
||||||
|
// (ActiveDeadlineSeconds is interpreted as the number of seconds a
|
||||||
|
// Job is continuously active.)
|
||||||
|
now := metav1.Now()
|
||||||
|
job.Status.StartTime = &now
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -583,7 +617,7 @@ func (jm *Controller) syncJob(key string) (bool, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// no need to update the job if the status hasn't changed since last time
|
// no need to update the job if the status hasn't changed since last time
|
||||||
if job.Status.Active != active || job.Status.Succeeded != succeeded || job.Status.Failed != failed || len(job.Status.Conditions) != conditions {
|
if job.Status.Active != active || job.Status.Succeeded != succeeded || job.Status.Failed != failed || jobConditionsChanged {
|
||||||
job.Status.Active = active
|
job.Status.Active = active
|
||||||
job.Status.Succeeded = succeeded
|
job.Status.Succeeded = succeeded
|
||||||
job.Status.Failed = failed
|
job.Status.Failed = failed
|
||||||
@ -660,9 +694,11 @@ func pastBackoffLimitOnFailure(job *batch.Job, pods []*v1.Pod) bool {
|
|||||||
return result >= *job.Spec.BackoffLimit
|
return result >= *job.Spec.BackoffLimit
|
||||||
}
|
}
|
||||||
|
|
||||||
// pastActiveDeadline checks if job has ActiveDeadlineSeconds field set and if it is exceeded.
|
// pastActiveDeadline checks if job has ActiveDeadlineSeconds field set and if
|
||||||
|
// it is exceeded. If the job is currently suspended, the function will always
|
||||||
|
// return false.
|
||||||
func pastActiveDeadline(job *batch.Job) bool {
|
func pastActiveDeadline(job *batch.Job) bool {
|
||||||
if job.Spec.ActiveDeadlineSeconds == nil || job.Status.StartTime == nil {
|
if job.Spec.ActiveDeadlineSeconds == nil || job.Status.StartTime == nil || jobSuspended(job) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
now := metav1.Now()
|
now := metav1.Now()
|
||||||
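
Suspension aside, the deadline check is plain duration arithmetic. A self-contained sketch (the helper name is illustrative, not from this commit):

```go
package main

import (
	"fmt"
	"time"
)

// pastDeadline reports whether a Job that started at `start` has exceeded an
// ActiveDeadlineSeconds of `deadlineSeconds`. The controller additionally
// short-circuits to false when the Job is suspended, so a suspended Job can
// never trip the deadline.
func pastDeadline(start time.Time, deadlineSeconds int64) bool {
	duration := time.Since(start)
	allowed := time.Duration(deadlineSeconds) * time.Second
	return duration >= allowed
}

func main() {
	start := time.Now().Add(-15 * time.Second)
	fmt.Println(pastDeadline(start, 10)) // true: 15s active > 10s allowed
	fmt.Println(pastDeadline(start, 30)) // false: still within the deadline
}
```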
@@ -672,10 +708,10 @@ func pastActiveDeadline(job *batch.Job) bool {
 	return duration >= allowedDuration
 }
 
-func newCondition(conditionType batch.JobConditionType, reason, message string) batch.JobCondition {
+func newCondition(conditionType batch.JobConditionType, status v1.ConditionStatus, reason, message string) batch.JobCondition {
 	return batch.JobCondition{
 		Type: conditionType,
-		Status: v1.ConditionTrue,
+		Status: status,
 		LastProbeTime: metav1.Now(),
 		LastTransitionTime: metav1.Now(),
 		Reason: reason,
@@ -690,6 +726,12 @@ func getStatus(job *batch.Job, pods []*v1.Pod) (succeeded, failed int32) {
 	return
 }
 
+// jobSuspended returns whether a Job is suspended while taking the feature
+// gate into account.
+func jobSuspended(job *batch.Job) bool {
+	return utilfeature.DefaultFeatureGate.Enabled(features.SuspendJob) && job.Spec.Suspend != nil && *job.Spec.Suspend
+}
+
 // manageJob is the core method responsible for managing the number of running
 // pods according to what is specified in the job.Spec.
 // Does NOT modify <activePods>.
@@ -702,6 +744,15 @@ func (jm *Controller) manageJob(job *batch.Job, activePods []*v1.Pod, succeeded
 		return 0, nil
 	}
 
+	if jobSuspended(job) {
+		klog.V(4).InfoS("Deleting all active pods in suspended job", "job", klog.KObj(job), "active", active)
+		podsToDelete := activePodsForRemoval(job, activePods, int(active))
+		jm.expectations.ExpectDeletions(jobKey, len(podsToDelete))
+		removed, err := jm.deleteJobPods(job, jobKey, podsToDelete)
+		active -= removed
+		return active, err
+	}
+
 	rmAtLeast := active - parallelism
 	if rmAtLeast < 0 {
 		rmAtLeast = 0
@@ -709,7 +760,7 @@ func (jm *Controller) manageJob(job *batch.Job, activePods []*v1.Pod, succeeded
 	podsToDelete := activePodsForRemoval(job, activePods, int(rmAtLeast))
 	if len(podsToDelete) > 0 {
 		jm.expectations.ExpectDeletions(jobKey, len(podsToDelete))
-		klog.V(4).InfoS("Too many pods running for job", "job", klog.KObj(job), "deleted", len(podsToDelete), "target", parallelism)
+		klog.V(4).InfoS("Too many pods running for job", "job", klog.KObj(job), "deleted", rmAtLeast, "target", parallelism)
 		removed, err := jm.deleteJobPods(job, jobKey, podsToDelete)
 		active -= removed
 		if err != nil {
@@ -910,3 +961,29 @@ func errorFromChannel(errCh <-chan error) error {
 	}
 	return nil
 }
+
+// ensureJobConditionStatus appends or updates an existing job condition of the
+// given type with the given status value. Note that this function will not
+// append to the conditions list if the new condition's status is false
+// (because going from nothing to false is meaningless); it can, however,
+// update the status condition to false. The function returns a bool to let the
+// caller know if the list was changed (either appended or updated).
+func ensureJobConditionStatus(list []batch.JobCondition, cType batch.JobConditionType, status v1.ConditionStatus, reason, message string) ([]batch.JobCondition, bool) {
+	for i := range list {
+		if list[i].Type == cType {
+			if list[i].Status != status || list[i].Reason != reason || list[i].Message != message {
+				list[i].Status = status
+				list[i].LastTransitionTime = metav1.Now()
+				list[i].Reason = reason
+				list[i].Message = message
+				return list, true
+			}
+			return list, false
+		}
+	}
+	// A condition with that type doesn't exist in the list.
+	if status != v1.ConditionFalse {
+		return append(list, newCondition(cType, status, reason, message)), true
+	}
+	return list, false
+}
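
To make the append-or-update rule concrete, a hedged walkthrough of how the helper behaves across a suspend/resume cycle (illustrative function, assuming it runs inside the job controller package with fmt imported):

```go
func demoSuspendedConditionFlow() {
	var conds []batch.JobCondition

	// Resume before any suspension: nothing exists to flip, and a new
	// False condition is never appended, so the list stays empty.
	conds, changed := ensureJobConditionStatus(conds, batch.JobSuspended, v1.ConditionFalse, "JobResumed", "Job resumed")
	fmt.Println(changed, len(conds)) // false 0

	// Suspend: a Suspended=True condition is appended.
	conds, changed = ensureJobConditionStatus(conds, batch.JobSuspended, v1.ConditionTrue, "JobSuspended", "Job suspended")
	fmt.Println(changed, len(conds)) // true 1

	// Resume: the existing condition is updated in place to Status=False.
	conds, changed = ensureJobConditionStatus(conds, batch.JobSuspended, v1.ConditionFalse, "JobResumed", "Job resumed")
	fmt.Println(changed, len(conds)) // true 1
}
```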
pkg/controller/job/job_controller_test.go

@@ -47,6 +47,7 @@ import (
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/testutil"
 	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/utils/pointer"
 )
 
 var alwaysReady = func() bool { return true }
@@ -156,6 +157,7 @@ func setPodsStatusesWithIndexes(podIndexer cache.Indexer, job *batch.Job, status
 func TestControllerSyncJob(t *testing.T) {
 	jobConditionComplete := batch.JobComplete
 	jobConditionFailed := batch.JobFailed
+	jobConditionSuspended := batch.JobSuspended
 
 	testCases := map[string]struct {
 		// job setup
@@ -165,15 +167,18 @@ func TestControllerSyncJob(t *testing.T) {
 		deleting bool
 		podLimit int
 		completionMode batch.CompletionMode
+		wasSuspended bool
+		suspend bool
 
 		// pod setup
 		podControllerError error
 		jobKeyForget bool
 		pendingPods int32
 		activePods int32
 		succeededPods int32
 		failedPods int32
 		podsWithIndexes []indexPhase
+		fakeExpectationAtCreation int32 // negative: ExpectDeletions, positive: ExpectCreations
 
 		// expectations
 		expectedCreations int32
@@ -183,11 +188,13 @@ func TestControllerSyncJob(t *testing.T) {
 		expectedCompletedIdxs string
 		expectedFailed int32
 		expectedCondition *batch.JobConditionType
+		expectedConditionStatus v1.ConditionStatus
 		expectedConditionReason string
 		expectedCreatedIndexes sets.Int
 
 		// features
 		indexedJobEnabled bool
+		suspendJobEnabled bool
 	}{
 		"job start": {
 			parallelism: 2,
@@ -334,24 +341,26 @@ func TestControllerSyncJob(t *testing.T) {
 			expectedSucceeded: 1,
 		},
 		"WQ job all finished": {
 			parallelism: 2,
 			completions: -1,
 			backoffLimit: 6,
 			jobKeyForget: true,
 			succeededPods: 2,
 			expectedSucceeded: 2,
 			expectedCondition: &jobConditionComplete,
+			expectedConditionStatus: v1.ConditionTrue,
 		},
 		"WQ job all finished despite one failure": {
 			parallelism: 2,
 			completions: -1,
 			backoffLimit: 6,
 			jobKeyForget: true,
 			succeededPods: 1,
 			failedPods: 1,
 			expectedSucceeded: 1,
 			expectedFailed: 1,
 			expectedCondition: &jobConditionComplete,
+			expectedConditionStatus: v1.ConditionTrue,
 		},
 		"more active pods than completions": {
 			parallelism: 2,
@@ -401,6 +410,7 @@ func TestControllerSyncJob(t *testing.T) {
 			failedPods: 1,
 			expectedFailed: 1,
 			expectedCondition: &jobConditionFailed,
+			expectedConditionStatus: v1.ConditionTrue,
 			expectedConditionReason: "BackoffLimitExceeded",
 		},
 		"indexed job start": {
@@ -510,11 +520,78 @@ func TestControllerSyncJob(t *testing.T) {
 			// No status updates.
 			indexedJobEnabled: false,
 		},
+		"suspending a job with satisfied expectations": {
+			// Suspended Job should delete active pods when expectations are
+			// satisfied.
+			suspendJobEnabled: true,
+			suspend: true,
+			parallelism: 2,
+			activePods: 2, // parallelism == active, expectations satisfied
+			completions: 4,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			expectedCreations: 0,
+			expectedDeletions: 2,
+			expectedActive: 0,
+			expectedCondition: &jobConditionSuspended,
+			expectedConditionStatus: v1.ConditionTrue,
+			expectedConditionReason: "JobSuspended",
+		},
+		"suspending a job with unsatisfied expectations": {
+			// Unlike the previous test, we expect the controller to NOT suspend the
+			// Job in the syncJob call because the controller will wait for
+			// expectations to be satisfied first. The next syncJob call (not tested
+			// here) will be the same as the previous test.
+			suspendJobEnabled: true,
+			suspend: true,
+			parallelism: 2,
+			activePods: 3, // active > parallelism, expectations unsatisfied
+			fakeExpectationAtCreation: -1, // the controller is expecting a deletion
+			completions: 4,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			expectedCreations: 0,
+			expectedDeletions: 0,
+			expectedActive: 3,
+		},
+		"resuming a suspended job": {
+			suspendJobEnabled: true,
+			wasSuspended: true,
+			suspend: false,
+			parallelism: 2,
+			completions: 4,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			expectedCreations: 2,
+			expectedDeletions: 0,
+			expectedActive: 2,
+			expectedCondition: &jobConditionSuspended,
+			expectedConditionStatus: v1.ConditionFalse,
+			expectedConditionReason: "JobResumed",
+		},
+		"suspending a deleted job": {
+			// We would normally expect the active pods to be deleted (see a few test
+			// cases above), but since this job is being deleted, we don't expect
+			// anything changed here from before the job was suspended. The
+			// JobSuspended condition is also missing.
+			suspendJobEnabled: true,
+			suspend: true,
+			deleting: true,
+			parallelism: 2,
+			activePods: 2, // parallelism == active, expectations satisfied
+			completions: 4,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			expectedCreations: 0,
+			expectedDeletions: 0,
+			expectedActive: 2,
+		},
 	}
 
 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
 			defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.IndexedJob, tc.indexedJobEnabled)()
+			defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.SuspendJob, tc.suspendJobEnabled)()
 
 			// job manager setup
 			clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
@@ -526,6 +603,19 @@ func TestControllerSyncJob(t *testing.T) {
 
 			// job & pods setup
 			job := newJob(tc.parallelism, tc.completions, tc.backoffLimit, tc.completionMode)
+			job.Spec.Suspend = pointer.BoolPtr(tc.suspend)
+			key, err := controller.KeyFunc(job)
+			if err != nil {
+				t.Errorf("Unexpected error getting job key: %v", err)
+			}
+			if tc.fakeExpectationAtCreation < 0 {
+				manager.expectations.ExpectDeletions(key, int(-tc.fakeExpectationAtCreation))
+			} else if tc.fakeExpectationAtCreation > 0 {
+				manager.expectations.ExpectCreations(key, int(tc.fakeExpectationAtCreation))
+			}
+			if tc.wasSuspended {
+				job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobSuspended, v1.ConditionTrue, "JobSuspended", "Job suspended"))
+			}
 			if tc.deleting {
 				now := metav1.Now()
 				job.DeletionTimestamp = &now
@@ -608,13 +698,19 @@ func TestControllerSyncJob(t *testing.T) {
 			if actual.Status.Failed != tc.expectedFailed {
 				t.Errorf("Unexpected number of failed pods. Expected %d, saw %d\n", tc.expectedFailed, actual.Status.Failed)
 			}
-			if actual.Status.StartTime == nil && tc.indexedJobEnabled {
+			if actual.Status.StartTime != nil && tc.suspend {
+				t.Error("Unexpected .status.startTime not nil when suspend is true")
+			}
+			if actual.Status.StartTime == nil && tc.indexedJobEnabled && !tc.suspend {
 				t.Error("Missing .status.startTime")
 			}
 			// validate conditions
-			if tc.expectedCondition != nil && !getCondition(actual, *tc.expectedCondition, tc.expectedConditionReason) {
+			if tc.expectedCondition != nil && !getCondition(actual, *tc.expectedCondition, tc.expectedConditionStatus, tc.expectedConditionReason) {
 				t.Errorf("Expected completion condition. Got %#v", actual.Status.Conditions)
 			}
+			if tc.expectedCondition == nil && tc.suspend && len(actual.Status.Conditions) != 0 {
+				t.Errorf("Unexpected conditions %v", actual.Status.Conditions)
+			}
 			// validate slow start
 			expectedLimit := 0
 			for pass := uint8(0); expectedLimit <= tc.podLimit; pass++ {
@@ -652,6 +748,7 @@ func TestSyncJobPastDeadline(t *testing.T) {
 		activeDeadlineSeconds int64
 		startTime int64
 		backoffLimit int32
+		suspend bool
 
 		// pod setup
 		activePods int32
@@ -664,7 +761,11 @@ func TestSyncJobPastDeadline(t *testing.T) {
 		expectedActive int32
 		expectedSucceeded int32
 		expectedFailed int32
+		expectedCondition batch.JobConditionType
 		expectedConditionReason string
+
+		// features
+		suspendJobEnabled bool
 	}{
 		"activeDeadlineSeconds less than single pod execution": {
 			parallelism: 1,
@@ -676,6 +777,7 @@ func TestSyncJobPastDeadline(t *testing.T) {
 			expectedForGetKey: true,
 			expectedDeletions: 1,
 			expectedFailed: 1,
+			expectedCondition: batch.JobFailed,
 			expectedConditionReason: "DeadlineExceeded",
 		},
 		"activeDeadlineSeconds bigger than single pod execution": {
@@ -690,6 +792,7 @@ func TestSyncJobPastDeadline(t *testing.T) {
 			expectedDeletions: 1,
 			expectedSucceeded: 1,
 			expectedFailed: 1,
+			expectedCondition: batch.JobFailed,
 			expectedConditionReason: "DeadlineExceeded",
 		},
 		"activeDeadlineSeconds times-out before any pod starts": {
@@ -699,6 +802,7 @@ func TestSyncJobPastDeadline(t *testing.T) {
 			startTime: 10,
 			backoffLimit: 6,
 			expectedForGetKey: true,
+			expectedCondition: batch.JobFailed,
 			expectedConditionReason: "DeadlineExceeded",
 		},
 		"activeDeadlineSeconds with backofflimit reach": {
@@ -709,12 +813,27 @@ func TestSyncJobPastDeadline(t *testing.T) {
 			failedPods: 1,
 			expectedForGetKey: true,
 			expectedFailed: 1,
+			expectedCondition: batch.JobFailed,
 			expectedConditionReason: "BackoffLimitExceeded",
 		},
+		"activeDeadlineSeconds is not triggered when Job is suspended": {
+			suspendJobEnabled: true,
+			suspend: true,
+			parallelism: 1,
+			completions: 2,
+			activeDeadlineSeconds: 10,
+			startTime: 15,
+			backoffLimit: 6,
+			expectedForGetKey: true,
+			expectedCondition: batch.JobSuspended,
+			expectedConditionReason: "JobSuspended",
+		},
 	}
 
 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
+			defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.SuspendJob, tc.suspendJobEnabled)()
 
 			// job manager setup
 			clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
 			manager, sharedInformerFactory := newControllerFromClient(clientSet, controller.NoResyncPeriodFunc)
@@ -731,6 +850,7 @@ func TestSyncJobPastDeadline(t *testing.T) {
 			// job & pods setup
 			job := newJob(tc.parallelism, tc.completions, tc.backoffLimit, batch.NonIndexedCompletion)
 			job.Spec.ActiveDeadlineSeconds = &tc.activeDeadlineSeconds
+			job.Spec.Suspend = pointer.BoolPtr(tc.suspend)
 			start := metav1.Unix(metav1.Now().Time.Unix()-tc.startTime, 0)
 			job.Status.StartTime = &start
 			sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
@@ -766,16 +886,16 @@ func TestSyncJobPastDeadline(t *testing.T) {
 				t.Error("Missing .status.startTime")
 			}
 			// validate conditions
-			if !getCondition(actual, batch.JobFailed, tc.expectedConditionReason) {
+			if !getCondition(actual, tc.expectedCondition, v1.ConditionTrue, tc.expectedConditionReason) {
 				t.Errorf("Expected fail condition. Got %#v", actual.Status.Conditions)
 			}
 		})
 	}
 }
 
-func getCondition(job *batch.Job, condition batch.JobConditionType, reason string) bool {
+func getCondition(job *batch.Job, condition batch.JobConditionType, status v1.ConditionStatus, reason string) bool {
 	for _, v := range job.Status.Conditions {
-		if v.Type == condition && v.Status == v1.ConditionTrue && v.Reason == reason {
+		if v.Type == condition && v.Status == status && v.Reason == reason {
 			return true
 		}
 	}
@@ -800,7 +920,7 @@ func TestSyncPastDeadlineJobFinished(t *testing.T) {
 	job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
 	start := metav1.Unix(metav1.Now().Time.Unix()-15, 0)
 	job.Status.StartTime = &start
-	job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
+	job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, v1.ConditionTrue, "DeadlineExceeded", "Job was active longer than specified deadline"))
 	sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
 	forget, err := manager.syncJob(testutil.GetKey(job, t))
 	if err != nil {
@@ -829,7 +949,7 @@ func TestSyncJobComplete(t *testing.T) {
 	manager.jobStoreSynced = alwaysReady
 
 	job := newJob(1, 1, 6, batch.NonIndexedCompletion)
-	job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobComplete, "", ""))
+	job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobComplete, v1.ConditionTrue, "", ""))
|
||||||
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
|
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
|
||||||
forget, err := manager.syncJob(testutil.GetKey(job, t))
|
forget, err := manager.syncJob(testutil.GetKey(job, t))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -1572,7 +1692,7 @@ func TestJobBackoffReset(t *testing.T) {
|
|||||||
if retries != 0 {
|
if retries != 0 {
|
||||||
t.Errorf("%s: expected exactly 0 retries, got %d", name, retries)
|
t.Errorf("%s: expected exactly 0 retries, got %d", name, retries)
|
||||||
}
|
}
|
||||||
if getCondition(actual, batch.JobFailed, "BackoffLimitExceeded") {
|
if getCondition(actual, batch.JobFailed, v1.ConditionTrue, "BackoffLimitExceeded") {
|
||||||
t.Errorf("%s: unexpected job failure", name)
|
t.Errorf("%s: unexpected job failure", name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1760,7 +1880,7 @@ func TestJobBackoffForOnFailure(t *testing.T) {
|
|||||||
t.Errorf("unexpected number of failed pods. Expected %d, saw %d\n", tc.expectedFailed, actual.Status.Failed)
|
t.Errorf("unexpected number of failed pods. Expected %d, saw %d\n", tc.expectedFailed, actual.Status.Failed)
|
||||||
}
|
}
|
||||||
// validate conditions
|
// validate conditions
|
||||||
if tc.expectedCondition != nil && !getCondition(actual, *tc.expectedCondition, tc.expectedConditionReason) {
|
if tc.expectedCondition != nil && !getCondition(actual, *tc.expectedCondition, v1.ConditionTrue, tc.expectedConditionReason) {
|
||||||
t.Errorf("expected completion condition. Got %#v", actual.Status.Conditions)
|
t.Errorf("expected completion condition. Got %#v", actual.Status.Conditions)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@ -1864,13 +1984,99 @@ func TestJobBackoffOnRestartPolicyNever(t *testing.T) {
|
|||||||
t.Errorf("unexpected number of failed pods. Expected %d, saw %d\n", tc.expectedFailed, actual.Status.Failed)
|
t.Errorf("unexpected number of failed pods. Expected %d, saw %d\n", tc.expectedFailed, actual.Status.Failed)
|
||||||
}
|
}
|
||||||
// validate conditions
|
// validate conditions
|
||||||
if tc.expectedCondition != nil && !getCondition(actual, *tc.expectedCondition, tc.expectedConditionReason) {
|
if tc.expectedCondition != nil && !getCondition(actual, *tc.expectedCondition, v1.ConditionTrue, tc.expectedConditionReason) {
|
||||||
t.Errorf("expected completion condition. Got %#v", actual.Status.Conditions)
|
t.Errorf("expected completion condition. Got %#v", actual.Status.Conditions)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestEnsureJobConditions(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
haveList []batch.JobCondition
|
||||||
|
wantType batch.JobConditionType
|
||||||
|
wantStatus v1.ConditionStatus
|
||||||
|
wantReason string
|
||||||
|
expectList []batch.JobCondition
|
||||||
|
expectUpdate bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "append true condition",
|
||||||
|
haveList: []batch.JobCondition{},
|
||||||
|
wantType: batch.JobSuspended,
|
||||||
|
wantStatus: v1.ConditionTrue,
|
||||||
|
wantReason: "foo",
|
||||||
|
expectList: []batch.JobCondition{newCondition(batch.JobSuspended, v1.ConditionTrue, "foo", "")},
|
||||||
|
expectUpdate: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "append false condition",
|
||||||
|
haveList: []batch.JobCondition{},
|
||||||
|
wantType: batch.JobSuspended,
|
||||||
|
wantStatus: v1.ConditionFalse,
|
||||||
|
wantReason: "foo",
|
||||||
|
expectList: []batch.JobCondition{},
|
||||||
|
expectUpdate: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "update true condition reason",
|
||||||
|
haveList: []batch.JobCondition{newCondition(batch.JobSuspended, v1.ConditionTrue, "foo", "")},
|
||||||
|
wantType: batch.JobSuspended,
|
||||||
|
wantStatus: v1.ConditionTrue,
|
||||||
|
wantReason: "bar",
|
||||||
|
expectList: []batch.JobCondition{newCondition(batch.JobSuspended, v1.ConditionTrue, "bar", "")},
|
||||||
|
expectUpdate: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "update true condition status",
|
||||||
|
haveList: []batch.JobCondition{newCondition(batch.JobSuspended, v1.ConditionTrue, "foo", "")},
|
||||||
|
wantType: batch.JobSuspended,
|
||||||
|
wantStatus: v1.ConditionFalse,
|
||||||
|
wantReason: "foo",
|
||||||
|
expectList: []batch.JobCondition{newCondition(batch.JobSuspended, v1.ConditionFalse, "foo", "")},
|
||||||
|
expectUpdate: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "update false condition status",
|
||||||
|
haveList: []batch.JobCondition{newCondition(batch.JobSuspended, v1.ConditionFalse, "foo", "")},
|
||||||
|
wantType: batch.JobSuspended,
|
||||||
|
wantStatus: v1.ConditionTrue,
|
||||||
|
wantReason: "foo",
|
||||||
|
expectList: []batch.JobCondition{newCondition(batch.JobSuspended, v1.ConditionTrue, "foo", "")},
|
||||||
|
expectUpdate: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "condition already exists",
|
||||||
|
haveList: []batch.JobCondition{newCondition(batch.JobSuspended, v1.ConditionTrue, "foo", "")},
|
||||||
|
wantType: batch.JobSuspended,
|
||||||
|
wantStatus: v1.ConditionTrue,
|
||||||
|
wantReason: "foo",
|
||||||
|
expectList: []batch.JobCondition{newCondition(batch.JobSuspended, v1.ConditionTrue, "foo", "")},
|
||||||
|
expectUpdate: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
gotList, isUpdated := ensureJobConditionStatus(tc.haveList, tc.wantType, tc.wantStatus, tc.wantReason, "")
|
||||||
|
if isUpdated != tc.expectUpdate {
|
||||||
|
t.Errorf("Got isUpdated=%v, want %v", isUpdated, tc.expectUpdate)
|
||||||
|
}
|
||||||
|
if len(gotList) != len(tc.expectList) {
|
||||||
|
t.Errorf("got a list of length %d, want %d", len(gotList), len(tc.expectList))
|
||||||
|
}
|
||||||
|
for i := range gotList {
|
||||||
|
// Make timestamps the same before comparing the two lists.
|
||||||
|
gotList[i].LastProbeTime = tc.expectList[i].LastProbeTime
|
||||||
|
gotList[i].LastTransitionTime = tc.expectList[i].LastTransitionTime
|
||||||
|
}
|
||||||
|
if diff := cmp.Diff(tc.expectList, gotList); diff != "" {
|
||||||
|
t.Errorf("Unexpected JobCondition list: (-want,+got):\n%s", diff)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func checkJobCompletionEnvVariable(t *testing.T, spec *v1.PodSpec) {
|
func checkJobCompletionEnvVariable(t *testing.T, spec *v1.PodSpec) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
want := []v1.EnvVar{
|
want := []v1.EnvVar{
|
||||||
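[Editor's note: TestEnsureJobConditions above pins down the contract of the new ensureJobConditionStatus helper without showing its body. The following is a minimal, self-contained Go sketch of that contract, inferred from the test expectations only; the stand-in types and the function body are not copied from the commit.]

package main

import "fmt"

// Local stand-ins for the batch/core API types so the sketch runs on its own.
type ConditionStatus string

const (
	ConditionTrue  ConditionStatus = "True"
	ConditionFalse ConditionStatus = "False"
)

type JobConditionType string

type JobCondition struct {
	Type    JobConditionType
	Status  ConditionStatus
	Reason  string
	Message string
}

// ensureJobConditionStatus mirrors the behavior the tests above expect:
// an existing condition of the given type is updated in place when its
// status, reason, or message differs; a missing condition is appended only
// when it would be true (the "append false condition" case is a no-op);
// the second return value reports whether anything changed.
func ensureJobConditionStatus(list []JobCondition, cType JobConditionType, status ConditionStatus, reason, message string) ([]JobCondition, bool) {
	for i := range list {
		if list[i].Type == cType {
			if list[i].Status != status || list[i].Reason != reason || list[i].Message != message {
				list[i].Status = status
				list[i].Reason = reason
				list[i].Message = message
				return list, true
			}
			return list, false
		}
	}
	if status == ConditionTrue {
		return append(list, JobCondition{Type: cType, Status: status, Reason: reason, Message: message}), true
	}
	return list, false
}

func main() {
	list, updated := ensureJobConditionStatus(nil, "Suspended", ConditionTrue, "JobSuspended", "Job suspended")
	fmt.Println(updated, list) // true [{Suspended True JobSuspended Job suspended}]
}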

@@ -716,6 +716,12 @@ const (
 	//
 	// Enables node-local routing for Service internal traffic
 	ServiceInternalTrafficPolicy featuregate.Feature = "ServiceInternalTrafficPolicy"
+
+	// owner: @adtac
+	// alpha: v1.21
+	//
+	// Allows jobs to be created in the suspended state.
+	SuspendJob featuregate.Feature = "SuspendJob"
 )

 func init() {
@@ -824,6 +830,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 	LogarithmicScaleDown:         {Default: false, PreRelease: featuregate.Alpha},
 	IngressClassNamespacedParams: {Default: false, PreRelease: featuregate.Alpha},
 	ServiceInternalTrafficPolicy: {Default: false, PreRelease: featuregate.Alpha},
+	SuspendJob:                   {Default: false, PreRelease: featuregate.Alpha},

 	// inherited features from generic apiserver, relisted here to get a conflict if it is changed
 	// unintentionally on either side:
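[Editor's note: SuspendJob is registered as Alpha, so it defaults to off and operators opt in with --feature-gates=SuspendJob=true. The gate is consulted at runtime with the pattern below; the Enabled call and the features.SuspendJob constant are real, the main wrapper is only illustrative.]

package main

import (
	"fmt"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

func main() {
	// Prints "false" on a stock build, since alpha gates default to off.
	fmt.Println(utilfeature.DefaultFeatureGate.Enabled(features.SuspendJob))
}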

@@ -39,6 +39,7 @@ import (
 	"k8s.io/kubernetes/pkg/apis/batch"
 	"k8s.io/kubernetes/pkg/apis/batch/validation"
 	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/utils/pointer"
 )

 // jobStrategy implements verification logic for Replication Controllers.
@@ -84,6 +85,10 @@ func (jobStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
 		job.Spec.CompletionMode = batch.NonIndexedCompletion
 	}

+	if !utilfeature.DefaultFeatureGate.Enabled(features.SuspendJob) {
+		job.Spec.Suspend = pointer.BoolPtr(false)
+	}
+
 	pod.DropDisabledTemplateFields(&job.Spec.Template, nil)
 }

@@ -101,6 +106,16 @@ func (jobStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object
 		newJob.Spec.CompletionMode = batch.NonIndexedCompletion
 	}

+	if !utilfeature.DefaultFeatureGate.Enabled(features.SuspendJob) {
+		// There are 3 possible values (nil, true, false) for each flag, so 9
+		// combinations. We want to disallow everything except true->false and
+		// true->nil when the feature gate is disabled. Or, basically allow this
+		// only when oldJob is true.
+		if oldJob.Spec.Suspend == nil || !*oldJob.Spec.Suspend {
+			newJob.Spec.Suspend = oldJob.Spec.Suspend
+		}
+	}
+
 	pod.DropDisabledTemplateFields(&newJob.Spec.Template, &oldJob.Spec.Template)
 }

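[Editor's note: the nine suspend transitions mentioned in the comment above can be spelled out. This self-contained sketch reproduces the gate-disabled rule from PrepareForUpdate; prepareSuspendForUpdate is a hypothetical stand-in for the snippet above, not a function in the commit. The update survives only when the old value was true, so true->false and true->nil remain possible even with the gate off, letting users un-suspend a Job after the gate is rolled back.]

package main

import "fmt"

// prepareSuspendForUpdate returns the value .spec.suspend ends up with after
// PrepareForUpdate when the SuspendJob gate is disabled: if the old value is
// nil or false, the incoming update is dropped and the old value is kept.
func prepareSuspendForUpdate(oldV, newV *bool) *bool {
	if oldV == nil || !*oldV {
		return oldV
	}
	return newV
}

func b(v bool) *bool { return &v }

func str(p *bool) string {
	if p == nil {
		return "nil"
	}
	return fmt.Sprint(*p)
}

func main() {
	// Enumerate all 9 (old, new) combinations from the comment above.
	vals := []*bool{nil, b(false), b(true)}
	for _, o := range vals {
		for _, n := range vals {
			fmt.Printf("old=%-5s new=%-5s -> kept=%s\n", str(o), str(n), str(prepareSuspendForUpdate(o, n)))
		}
	}
}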

@@ -31,20 +31,14 @@ import (
 	_ "k8s.io/kubernetes/pkg/apis/batch/install"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/utils/pointer"
 )

-func newBool(a bool) *bool {
-	return &a
-}
-
-func newInt32(i int32) *int32 {
-	return &i
-}
-
 func TestJobStrategy(t *testing.T) {
 	cases := map[string]struct {
 		ttlEnabled        bool
 		indexedJobEnabled bool
+		suspendJobEnabled bool
 	}{
 		"features disabled": {},
 		"ttl enabled": {
@@ -53,11 +47,15 @@ func TestJobStrategy(t *testing.T) {
 		"indexed job enabled": {
 			indexedJobEnabled: true,
 		},
+		"suspend job enabled": {
+			suspendJobEnabled: true,
+		},
 	}
 	for name, tc := range cases {
 		t.Run(name, func(t *testing.T) {
 			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TTLAfterFinished, tc.ttlEnabled)()
 			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.IndexedJob, tc.indexedJobEnabled)()
+			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SuspendJob, tc.suspendJobEnabled)()
 			testJobStrategy(t)
 		})
 	}
@@ -66,6 +64,7 @@ func TestJobStrategy(t *testing.T) {
 func testJobStrategy(t *testing.T) {
 	ttlEnabled := utilfeature.DefaultFeatureGate.Enabled(features.TTLAfterFinished)
 	indexedJobEnabled := utilfeature.DefaultFeatureGate.Enabled(features.IndexedJob)
+	suspendJobEnabled := utilfeature.DefaultFeatureGate.Enabled(features.SuspendJob)
 	ctx := genericapirequest.NewDefaultContext()
 	if !Strategy.NamespaceScoped() {
 		t.Errorf("Job must be namespace scoped")
@@ -95,10 +94,11 @@ func testJobStrategy(t *testing.T) {
 		Spec: batch.JobSpec{
 			Selector:       validSelector,
 			Template:       validPodTemplateSpec,
-			ManualSelector: newBool(true),
-			Completions:    newInt32(2),
+			ManualSelector: pointer.BoolPtr(true),
+			Completions:    pointer.Int32Ptr(2),
 			// Set gated values.
-			TTLSecondsAfterFinished: newInt32(0),
+			Suspend:                 pointer.BoolPtr(true),
+			TTLSecondsAfterFinished: pointer.Int32Ptr(0),
 			CompletionMode:          batch.IndexedCompletion,
 		},
 		Status: batch.JobStatus{
@@ -120,15 +120,18 @@ func testJobStrategy(t *testing.T) {
 	if indexedJobEnabled != (job.Spec.CompletionMode != batch.NonIndexedCompletion) {
 		t.Errorf("Job should allow setting .spec.completionMode=Indexed only when %v feature is enabled", features.IndexedJob)
 	}
+	if !suspendJobEnabled && *job.Spec.Suspend {
+		t.Errorf("[SuspendJob=%v] .spec.suspend should be set to true", suspendJobEnabled)
+	}

 	parallelism := int32(10)
 	updatedJob := &batch.Job{
 		ObjectMeta: metav1.ObjectMeta{Name: "bar", ResourceVersion: "4"},
 		Spec: batch.JobSpec{
 			Parallelism: &parallelism,
-			Completions: newInt32(2),
+			Completions: pointer.Int32Ptr(2),
 			// Update gated features.
-			TTLSecondsAfterFinished: newInt32(1),
+			TTLSecondsAfterFinished: pointer.Int32Ptr(1),
 			CompletionMode:          batch.IndexedCompletion, // No change because field is immutable.
 		},
 		Status: batch.JobStatus{
@@ -151,17 +154,28 @@ func testJobStrategy(t *testing.T) {
 	}

 	// Existing gated fields should be preserved
-	job.Spec.TTLSecondsAfterFinished = newInt32(1)
+	job.Spec.TTLSecondsAfterFinished = pointer.Int32Ptr(1)
 	job.Spec.CompletionMode = batch.IndexedCompletion
-	updatedJob.Spec.TTLSecondsAfterFinished = newInt32(2)
+	updatedJob.Spec.TTLSecondsAfterFinished = pointer.Int32Ptr(2)
 	updatedJob.Spec.CompletionMode = batch.IndexedCompletion
+	// Test updating suspend false->true and nil->true when the feature gate is
+	// disabled. We don't care about other combinations.
+	job.Spec.Suspend, updatedJob.Spec.Suspend = pointer.BoolPtr(false), pointer.BoolPtr(true)
 	Strategy.PrepareForUpdate(ctx, updatedJob, job)
 	if job.Spec.TTLSecondsAfterFinished == nil || updatedJob.Spec.TTLSecondsAfterFinished == nil {
-		t.Errorf("existing TTLSecondsAfterFinished should be preserved")
+		t.Errorf("existing .spec.ttlSecondsAfterFinished should be preserved")
 	}
 	if job.Spec.CompletionMode == "" || updatedJob.Spec.CompletionMode == "" {
 		t.Errorf("existing completionMode should be preserved")
 	}
+	if !suspendJobEnabled && *updatedJob.Spec.Suspend {
+		t.Errorf("[SuspendJob=%v] .spec.suspend should not be updated from false to true", suspendJobEnabled)
+	}
+	job.Spec.Suspend, updatedJob.Spec.Suspend = nil, pointer.BoolPtr(true)
+	Strategy.PrepareForUpdate(ctx, updatedJob, job)
+	if !suspendJobEnabled && updatedJob.Spec.Suspend != nil {
+		t.Errorf("[SuspendJob=%v] .spec.suspend should not be updated from nil to non-nil", suspendJobEnabled)
+	}

 	// Make sure we correctly implement the interface.
 	// Otherwise a typo could silently change the default.
193
staging/src/k8s.io/api/batch/v1/generated.pb.go
generated
@@ -344,89 +344,89 @@ func init() {
 }

 var fileDescriptor_3b52da57c93de713 = []byte{
-	// 1301 bytes of a gzipped FileDescriptorProto
+	// 1307 bytes of a gzipped FileDescriptorProto
	// (regenerated gzipped descriptor bytes omitted; the old and new byte
	// tables differ only because the descriptor was regenerated to include
	// the new suspend field)
 }

 func (m *CronJob) Marshal() (dAtA []byte, err error) {
@@ -841,6 +841,16 @@ func (m *JobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.Suspend != nil {
+		i--
+		if *m.Suspend {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x50
+	}
 	i -= len(m.CompletionMode)
 	copy(dAtA[i:], m.CompletionMode)
 	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CompletionMode)))
@@ -1202,6 +1212,9 @@ func (m *JobSpec) Size() (n int) {
 	}
 	l = len(m.CompletionMode)
 	n += 1 + l + sovGenerated(uint64(l))
+	if m.Suspend != nil {
+		n += 2
+	}
 	return n
 }

@@ -1370,6 +1383,7 @@ func (this *JobSpec) String() string {
 		`BackoffLimit:` + valueToStringGenerated(this.BackoffLimit) + `,`,
 		`TTLSecondsAfterFinished:` + valueToStringGenerated(this.TTLSecondsAfterFinished) + `,`,
 		`CompletionMode:` + fmt.Sprintf("%v", this.CompletionMode) + `,`,
+		`Suspend:` + valueToStringGenerated(this.Suspend) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2825,6 +2839,27 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error {
 			}
 			m.CompletionMode = CompletionMode(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Suspend = &b
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
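[Editor's note: the 0x50 written by the generated marshaller above is the protobuf key for the new field: field number 10 combined with wire type 0 (varint) gives (10<<3)|0 = 0x50, and the bool value is a single varint byte, which is also why Size() adds a flat 2 bytes when Suspend is set. A one-line check:]

package main

import "fmt"

func main() {
	const fieldNumber, wireTypeVarint = 10, 0
	// Protobuf key byte = (field number << 3) | wire type.
	key := byte(fieldNumber<<3 | wireTypeVarint)
	fmt.Printf("0x%X\n", key) // 0x50, the tag byte the marshaller writes before the bool
}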

@@ -184,8 +184,11 @@ message JobSpec {
   // +optional
   optional int32 completions = 2;

-  // Specifies the duration in seconds relative to the startTime that the job may be active
-  // before the system tries to terminate it; value must be positive integer
+  // Specifies the duration in seconds relative to the startTime that the job
+  // may be continuously active before the system tries to terminate it; value
+  // must be positive integer. If a Job is suspended (at creation or through an
+  // update), this timer will effectively be stopped and reset when the Job is
+  // resumed again.
   // +optional
   optional int64 activeDeadlineSeconds = 3;

@@ -250,12 +253,28 @@ message JobSpec {
   // controller skips updates for the Job.
   // +optional
   optional string completionMode = 9;
+
+  // Suspend specifies whether the Job controller should create Pods or not. If
+  // a Job is created with suspend set to true, no Pods are created by the Job
+  // controller. If a Job is suspended after creation (i.e. the flag goes from
+  // false to true), the Job controller will delete all active Pods associated
+  // with this Job. Users must design their workload to gracefully handle this.
+  // Suspending a Job will reset the StartTime field of the Job, effectively
+  // resetting the ActiveDeadlineSeconds timer too. This is an alpha field and
+  // requires the SuspendJob feature gate to be enabled; otherwise this field
+  // may not be set to true. Defaults to false.
+  // +optional
+  optional bool suspend = 10;
 }

 // JobStatus represents the current state of a Job.
 message JobStatus {
-  // The latest available observations of an object's current state.
-  // When a job fails, one of the conditions will have type == "Failed".
+  // The latest available observations of an object's current state. When a Job
+  // fails, one of the conditions will have type "Failed" and status true. When
+  // a Job is suspended, one of the conditions will have type "Suspended" and
+  // status true; when the Job is resumed, the status of this condition will
+  // become false. When a Job is completed, one of the conditions will have
+  // type "Complete" and status true.
   // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
   // +optional
   // +patchMergeKey=type
@@ -263,9 +282,10 @@ message JobStatus {
   // +listType=atomic
   repeated JobCondition conditions = 1;

-  // Represents time when the job was acknowledged by the job controller.
-  // It is not guaranteed to be set in happens-before order across separate operations.
-  // It is represented in RFC3339 form and is in UTC.
+  // Represents time when the job controller started processing a job. When a
+  // Job is created in the suspended state, this field is not set until the
+  // first time it is resumed. This field is reset every time a Job is resumed
+  // from suspension. It is represented in RFC3339 form and is in UTC.
   // +optional
   optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2;

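[Editor's note: given the condition lifecycle documented above, a client can tell whether a Job is currently suspended by scanning status.conditions. A small sketch against the staged k8s.io/api/batch/v1 types; the isSuspended helper name is ours, while JobSuspended is the constant this commit adds.]

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// isSuspended reports whether the Job currently carries a Suspended
// condition with status True, per the doc comment above.
func isSuspended(status batchv1.JobStatus) bool {
	for _, c := range status.Conditions {
		if c.Type == batchv1.JobSuspended && c.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	s := batchv1.JobStatus{Conditions: []batchv1.JobCondition{{Type: batchv1.JobSuspended, Status: corev1.ConditionTrue}}}
	fmt.Println(isSuspended(s)) // true
}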

@@ -95,8 +95,11 @@ type JobSpec struct {
 	// +optional
 	Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"`

-	// Specifies the duration in seconds relative to the startTime that the job may be active
-	// before the system tries to terminate it; value must be positive integer
+	// Specifies the duration in seconds relative to the startTime that the job
+	// may be continuously active before the system tries to terminate it; value
+	// must be positive integer. If a Job is suspended (at creation or through an
+	// update), this timer will effectively be stopped and reset when the Job is
+	// resumed again.
 	// +optional
 	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"`

@@ -166,12 +169,28 @@ type JobSpec struct {
 	// controller skips updates for the Job.
 	// +optional
 	CompletionMode CompletionMode `json:"completionMode,omitempty" protobuf:"bytes,9,opt,name=completionMode,casttype=CompletionMode"`

+	// Suspend specifies whether the Job controller should create Pods or not. If
+	// a Job is created with suspend set to true, no Pods are created by the Job
+	// controller. If a Job is suspended after creation (i.e. the flag goes from
+	// false to true), the Job controller will delete all active Pods associated
+	// with this Job. Users must design their workload to gracefully handle this.
+	// Suspending a Job will reset the StartTime field of the Job, effectively
+	// resetting the ActiveDeadlineSeconds timer too. This is an alpha field and
+	// requires the SuspendJob feature gate to be enabled; otherwise this field
+	// may not be set to true. Defaults to false.
+	// +optional
+	Suspend *bool `json:"suspend,omitempty" protobuf:"varint,10,opt,name=suspend"`
 }

 // JobStatus represents the current state of a Job.
 type JobStatus struct {
-	// The latest available observations of an object's current state.
-	// When a job fails, one of the conditions will have type == "Failed".
+	// The latest available observations of an object's current state. When a Job
+	// fails, one of the conditions will have type "Failed" and status true. When
+	// a Job is suspended, one of the conditions will have type "Suspended" and
+	// status true; when the Job is resumed, the status of this condition will
+	// become false. When a Job is completed, one of the conditions will have
+	// type "Complete" and status true.
 	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
 	// +optional
 	// +patchMergeKey=type
@@ -179,9 +198,10 @@ type JobStatus struct {
 	// +listType=atomic
 	Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`

-	// Represents time when the job was acknowledged by the job controller.
-	// It is not guaranteed to be set in happens-before order across separate operations.
-	// It is represented in RFC3339 form and is in UTC.
+	// Represents time when the job controller started processing a job. When a
+	// Job is created in the suspended state, this field is not set until the
+	// first time it is resumed. This field is reset every time a Job is resumed
+	// from suspension. It is represented in RFC3339 form and is in UTC.
 	// +optional
 	StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"`

@@ -219,6 +239,8 @@ type JobConditionType string

 // These are valid conditions of a job.
 const (
+	// JobSuspended means the job has been suspended.
+	JobSuspended JobConditionType = "Suspended"
 	// JobComplete means the job has completed its execution.
 	JobComplete JobConditionType = "Complete"
 	// JobFailed means the job has failed its execution.
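[Editor's note: putting the new field together, with the SuspendJob gate enabled on the apiserver a Job built like the sketch below is accepted but starts no Pods until .spec.suspend is flipped to false. This is illustrative client code, not part of the commit; pointer.BoolPtr is the same helper the commit itself uses.]

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/pointer"
)

func main() {
	// A Job created in the suspended state; resume it later by patching
	// .spec.suspend to false, which also (re)sets .status.startTime.
	job := batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		Spec: batchv1.JobSpec{
			Suspend: pointer.BoolPtr(true),
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers:    []corev1.Container{{Name: "main", Image: "busybox"}},
				},
			},
		},
	}
	fmt.Println(*job.Spec.Suspend) // true
}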

@@ -113,13 +113,14 @@ var map_JobSpec = map[string]string{
 	"":            "JobSpec describes how the job execution will look like.",
 	"parallelism": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
 	"completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
-	"activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer",
+	"activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.",
 	"backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6",
 	"selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
 	"manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector",
 	"template": "Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
 	"ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.",
 	"completionMode": "CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.alpha.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\n\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.",
+	"suspend": "Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false.",
 }

 func (JobSpec) SwaggerDoc() map[string]string {
@@ -128,8 +129,8 @@ func (JobSpec) SwaggerDoc() map[string]string {

 var map_JobStatus = map[string]string{
 	"": "JobStatus represents the current state of a Job.",
-	"conditions": "The latest available observations of an object's current state. When a job fails, one of the conditions will have type == \"Failed\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
+	"conditions": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
|
||||||
"startTime": "Represents time when the job was acknowledged by the job controller. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.",
|
"startTime": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.",
|
||||||
"completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully.",
|
"completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully.",
|
||||||
"active": "The number of actively running pods.",
|
"active": "The number of actively running pods.",
|
||||||
"succeeded": "The number of pods which reached phase Succeeded.",
|
"succeeded": "The number of pods which reached phase Succeeded.",
|
||||||
|
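The suspend semantics documented above can be exercised end to end with client-go. A minimal sketch, assuming a reachable cluster, the SuspendJob feature gate enabled on the API server, and an illustrative Job named "example" (none of this code is part of the commit itself):

package main

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/utils/pointer"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	job := &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
		Spec: batchv1.JobSpec{
			// While Suspend is true, the Job controller creates no Pods.
			Suspend: pointer.BoolPtr(true),
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{{
						Name:    "work",
						Image:   "busybox",
						Command: []string{"true"},
					}},
				},
			},
		},
	}

	created, err := client.BatchV1().Jobs("default").Create(context.TODO(), job, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}

	// Resume: flipping Suspend to false lets the controller create Pods and
	// sets status.startTime, (re)starting the activeDeadlineSeconds timer.
	created.Spec.Suspend = pointer.BoolPtr(false)
	if _, err := client.BatchV1().Jobs("default").Update(context.TODO(), created, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}
}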
@@ -271,6 +271,11 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) {
 		*out = new(int32)
 		**out = **in
 	}
+	if in.Suspend != nil {
+		in, out := &in.Suspend, &out.Suspend
+		*out = new(bool)
+		**out = **in
+	}
 	return
 }
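The generated deep-copy allocates a fresh bool rather than copying the pointer. A standalone sketch with a stand-in Spec type (not the generated Kubernetes code) showing why: copying the pointer itself would alias the original, so mutating the "copy" would also mutate the source.

package main

import "fmt"

type Spec struct{ Suspend *bool }

func (in *Spec) DeepCopyInto(out *Spec) {
	if in.Suspend != nil {
		out.Suspend = new(bool) // fresh allocation, no aliasing
		*out.Suspend = *in.Suspend
	}
}

func main() {
	t := true
	src := Spec{Suspend: &t}
	var dst Spec
	src.DeepCopyInto(&dst)
	*dst.Suspend = false
	fmt.Println(*src.Suspend, *dst.Suspend) // true false: the copies are independent
}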
@@ -1569,11 +1569,12 @@
           "setHostnameAsFQDN": false
         }
       },
-      "ttlSecondsAfterFinished": -1285029915
+      "ttlSecondsAfterFinished": -1285029915,
+      "suspend": true
     }
   },
-  "successfulJobsHistoryLimit": -1887637570,
-  "failedJobsHistoryLimit": 1755548633
+  "successfulJobsHistoryLimit": 1729066291,
+  "failedJobsHistoryLimit": -908823020
 },
 "status": {
   "active": [
@@ -1581,7 +1582,7 @@
       "kind": "503",
       "namespace": "504",
       "name": "505",
-      "uid": "犓`ɜɅco\\穜T睭憲Ħ焵i,ŋŨN",
+      "uid": "`",
       "apiVersion": "506",
       "resourceVersion": "507",
       "fieldPath": "508"
Binary file not shown.
@@ -31,7 +31,7 @@ metadata:
   uid: "7"
 spec:
   concurrencyPolicy: Hr鯹)晿<o,c鮽ort昍řČ扷5Ɨ
-  failedJobsHistoryLimit: 1755548633
+  failedJobsHistoryLimit: -908823020
   jobTemplate:
     metadata:
       annotations:
@@ -74,6 +74,7 @@ spec:
         operator: DoesNotExist
       matchLabels:
         2_kS91.e5K-_e63_-_3-n-_-__3u-.__P__.7U-Uo_4_-D7r__.am6-4_WE-_T: cd-2.-__E_Sv__26KX_R_.-.Nth._--S_4DAm
+    suspend: true
     template:
       metadata:
         annotations:
@@ -1073,7 +1074,7 @@ spec:
       ttlSecondsAfterFinished: -1285029915
   schedule: "19"
   startingDeadlineSeconds: -2555947251840004808
-  successfulJobsHistoryLimit: -1887637570
+  successfulJobsHistoryLimit: 1729066291
   suspend: true
 status:
   active:
@@ -1083,4 +1084,4 @@ status:
     name: "505"
     namespace: "504"
     resourceVersion: "507"
-    uid: 犓`ɜɅco\穜T睭憲Ħ焵i,ŋŨN
+    uid: '`'
@@ -1528,22 +1528,23 @@
       }
     },
     "ttlSecondsAfterFinished": -1812920817,
-    "completionMode": "ʅ朁遐»"
+    "completionMode": "ʅ朁遐»",
+    "suspend": false
   },
   "status": {
     "conditions": [
       {
-        "type": "癸ƥf豯烠砖#囹J,R譏K譕ơ",
-        "status": "噓涫祲ŗȨ",
-        "lastProbeTime": "2108-10-11T06:42:59Z",
-        "lastTransitionTime": "2845-10-01T19:47:44Z",
+        "type": "ƥf豯烠砖#囹J",
+        "status": "ʝ3",
+        "lastProbeTime": "2358-11-16T05:24:38Z",
+        "lastTransitionTime": "2305-06-19T03:46:44Z",
         "reason": "488",
        "message": "489"
       }
     ],
-    "active": -1576445541,
-    "succeeded": 416561398,
-    "failed": -291702642,
+    "active": -1842630179,
+    "succeeded": 1984090670,
+    "failed": -1949686005,
     "completedIndexes": "490"
   }
 }
BIN staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.pb (vendored)
Binary file not shown.
@@ -44,6 +44,7 @@ spec:
       - 3_bQw.-dG6c-.x
     matchLabels:
       hjT9s-j41-0-6p-JFHn7y-74.-0MUORQQ.N4: 3L.u
+  suspend: false
   template:
     metadata:
       annotations:
@@ -1040,14 +1041,14 @@ spec:
         volumePath: "101"
   ttlSecondsAfterFinished: -1812920817
 status:
-  active: -1576445541
+  active: -1842630179
   completedIndexes: "490"
   conditions:
-  - lastProbeTime: "2108-10-11T06:42:59Z"
-    lastTransitionTime: "2845-10-01T19:47:44Z"
+  - lastProbeTime: "2358-11-16T05:24:38Z"
+    lastTransitionTime: "2305-06-19T03:46:44Z"
     message: "489"
     reason: "488"
-    status: 噓涫祲ŗȨ
-    type: 癸ƥf豯烠砖#囹J,R譏K譕ơ
-  failed: -291702642
-  succeeded: 416561398
+    status: ʝ3
+    type: ƥf豯烠砖#囹J
+  failed: -1949686005
+  succeeded: 1984090670
@@ -1569,11 +1569,12 @@
           "setHostnameAsFQDN": false
         }
       },
-      "ttlSecondsAfterFinished": -1285029915
+      "ttlSecondsAfterFinished": -1285029915,
+      "suspend": true
     }
   },
-  "successfulJobsHistoryLimit": -1887637570,
-  "failedJobsHistoryLimit": 1755548633
+  "successfulJobsHistoryLimit": 1729066291,
+  "failedJobsHistoryLimit": -908823020
 },
 "status": {
   "active": [
@@ -1581,7 +1582,7 @@
       "kind": "503",
       "namespace": "504",
       "name": "505",
-      "uid": "犓`ɜɅco\\穜T睭憲Ħ焵i,ŋŨN",
+      "uid": "`",
       "apiVersion": "506",
       "resourceVersion": "507",
       "fieldPath": "508"
Binary file not shown.
@@ -31,7 +31,7 @@ metadata:
   uid: "7"
 spec:
   concurrencyPolicy: Hr鯹)晿<o,c鮽ort昍řČ扷5Ɨ
-  failedJobsHistoryLimit: 1755548633
+  failedJobsHistoryLimit: -908823020
   jobTemplate:
     metadata:
       annotations:
@@ -74,6 +74,7 @@ spec:
        operator: DoesNotExist
       matchLabels:
         2_kS91.e5K-_e63_-_3-n-_-__3u-.__P__.7U-Uo_4_-D7r__.am6-4_WE-_T: cd-2.-__E_Sv__26KX_R_.-.Nth._--S_4DAm
+    suspend: true
     template:
       metadata:
         annotations:
@@ -1073,7 +1074,7 @@ spec:
       ttlSecondsAfterFinished: -1285029915
   schedule: "19"
   startingDeadlineSeconds: -2555947251840004808
-  successfulJobsHistoryLimit: -1887637570
+  successfulJobsHistoryLimit: 1729066291
   suspend: true
 status:
   active:
@@ -1083,4 +1084,4 @@ status:
     name: "505"
     namespace: "504"
     resourceVersion: "507"
-    uid: 犓`ɜɅco\穜T睭憲Ħ焵i,ŋŨN
+    uid: '`'
@@ -1578,7 +1578,8 @@
       }
     },
     "ttlSecondsAfterFinished": 1315299341,
-    "completionMode": "ſZɐYɋsx羳ıȦj"
+    "completionMode": "ſZɐYɋsx羳ıȦj",
+    "suspend": false
     }
   }
 }
Binary file not shown.
@@ -74,6 +74,7 @@ template:
       - Ou1.m_.5AW-_S-.3g.7_2fNc5-_.-RX8
     matchLabels:
       g5i9/l-Y._.-444: c2_kS91.e5K-_e63_-_3-n-_-__3u-.__P__.7U-Uo_4_-D7r__.am64
+  suspend: false
   template:
     metadata:
       annotations:
@@ -36,6 +36,7 @@ type JobSpecApplyConfiguration struct {
 	Template                *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
 	TTLSecondsAfterFinished *int32                                    `json:"ttlSecondsAfterFinished,omitempty"`
 	CompletionMode          *batchv1.CompletionMode                   `json:"completionMode,omitempty"`
+	Suspend                 *bool                                     `json:"suspend,omitempty"`
 }

 // JobSpecApplyConfiguration constructs an declarative configuration of the JobSpec type for use with
@@ -115,3 +116,11 @@ func (b *JobSpecApplyConfiguration) WithCompletionMode(value batchv1.CompletionM
 	b.CompletionMode = &value
 	return b
 }
+
+// WithSuspend sets the Suspend field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Suspend field is set to the value of the last call.
+func (b *JobSpecApplyConfiguration) WithSuspend(value bool) *JobSpecApplyConfiguration {
+	b.Suspend = &value
+	return b
+}
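The new WithSuspend setter slots into client-go's server-side apply flow. A sketch, assuming a client-go release that ships these apply configurations, plus an illustrative field manager name "suspend-demo" (nothing in this block is from the commit): suspend an existing Job while owning only the fields you list.

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// Only the fields named here are owned and patched by this field manager;
	// everything else on the Job is left to its current owners.
	job := batchv1ac.Job("example", "default").
		WithSpec(batchv1ac.JobSpec().WithSuspend(true))

	if _, err := client.BatchV1().Jobs("default").Apply(
		context.TODO(), job, metav1.ApplyOptions{FieldManager: "suspend-demo"},
	); err != nil {
		panic(err)
	}
}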
@@ -35,6 +35,7 @@ import (
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
+	"k8s.io/utils/pointer"

 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
@@ -69,6 +70,85 @@ var _ = SIGDescribe("Job", func() {
 		framework.ExpectEqual(successes, completions, "expected %d successful job pods, but got %d", completions, successes)
 	})

+	// Requires the alpha level feature gate SuspendJob. This e2e test will not
+	// pass without the following flag being passed to kubetest:
+	// --test_args="--feature-gates=SuspendJob=true"
+	ginkgo.It("[Feature:SuspendJob] should not create pods when created in suspend state", func() {
+		ginkgo.By("Creating a job with suspend=true")
+		job := e2ejob.NewTestJob("succeed", "suspend-true-to-false", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
+		job.Spec.Suspend = pointer.BoolPtr(true)
+		job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
+		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
+
+		ginkgo.By("Ensuring pods aren't created for job")
+		framework.ExpectEqual(wait.Poll(framework.Poll, wait.ForeverTestTimeout, func() (bool, error) {
+			pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
+			if err != nil {
+				return false, err
+			}
+			return len(pods.Items) > 0, nil
+		}), wait.ErrWaitTimeout)
+
+		ginkgo.By("Checking Job status to observe Suspended state")
+		job, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
+		framework.ExpectNoError(err, "failed to retrieve latest job object")
+		exists := false
+		for _, c := range job.Status.Conditions {
+			if c.Type == batchv1.JobSuspended {
+				exists = true
+				break
+			}
+		}
+		framework.ExpectEqual(exists, true)
+
+		ginkgo.By("Updating the job with suspend=false")
+		job.Spec.Suspend = pointer.BoolPtr(false)
+		job, err = e2ejob.UpdateJob(f.ClientSet, f.Namespace.Name, job)
+		framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name)
+
+		ginkgo.By("Waiting for job to complete")
+		err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
+		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
+	})
+
+	// Requires the alpha level feature gate SuspendJob. This e2e test will not
+	// pass without the following flag being passed to kubetest:
+	// --test_args="--feature-gates=SuspendJob=true"
+	ginkgo.It("[Feature:SuspendJob] should delete pods when suspended", func() {
+		ginkgo.By("Creating a job with suspend=false")
+		job := e2ejob.NewTestJob("notTerminate", "suspend-false-to-true", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
+		job.Spec.Suspend = pointer.BoolPtr(false)
+		job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
+		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
+
+		ginkgo.By("Ensuring pods equal to parallelism count are attached to the job")
+		err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
+		framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name)
+
+		ginkgo.By("Updating the job with suspend=true")
+		job, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
+		framework.ExpectNoError(err, "failed to retrieve latest job object")
+		job.Spec.Suspend = pointer.BoolPtr(true)
+		job, err = e2ejob.UpdateJob(f.ClientSet, f.Namespace.Name, job)
+		framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name)
+
+		ginkgo.By("Ensuring pods are deleted")
+		err = e2ejob.WaitForAllJobPodsGone(f.ClientSet, f.Namespace.Name, job.Name)
+		framework.ExpectNoError(err, "failed to ensure pods are deleted after suspend=true")
+
+		ginkgo.By("Checking Job status to observe Suspended state")
+		job, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
+		framework.ExpectNoError(err, "failed to retrieve latest job object")
+		exists := false
+		for _, c := range job.Status.Conditions {
+			if c.Type == batchv1.JobSuspended {
+				exists = true
+				break
+			}
+		}
+		framework.ExpectEqual(exists, true)
+	})
+
 	/*
 	  Testcase: Ensure Pods of an Indexed Job get a unique index.
 	  Description: Create an Indexed Job, wait for completion, capture the output of the pods and verify that they contain the completion index.
@@ -42,3 +42,9 @@ func GetJobPods(c clientset.Interface, ns, jobName string) (*v1.PodList, error)
 func CreateJob(c clientset.Interface, ns string, job *batchv1.Job) (*batchv1.Job, error) {
 	return c.BatchV1().Jobs(ns).Create(context.TODO(), job, metav1.CreateOptions{})
 }
+
+// UpdateJob uses c to update a job in namespace ns. If the returned error is
+// nil, the returned Job is valid and has been updated.
+func UpdateJob(c clientset.Interface, ns string, job *batchv1.Job) (*batchv1.Job, error) {
+	return c.BatchV1().Jobs(ns).Update(context.TODO(), job, metav1.UpdateOptions{})
+}
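UpdateJob issues a single bare Update, which fails with a conflict if the Job's resourceVersion moved between read and write. A sketch of a conflict-safe wrapper a caller might layer on top, with an assumed helper name SetJobSuspend (not part of this commit):

package jobutil

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
	"k8s.io/utils/pointer"
)

// SetJobSuspend re-reads the Job on each attempt and retries the write
// whenever the API server reports a resourceVersion conflict.
func SetJobSuspend(c clientset.Interface, ns, name string, suspend bool) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		job, err := c.BatchV1().Jobs(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		job.Spec.Suspend = pointer.BoolPtr(suspend)
		_, err = c.BatchV1().Jobs(ns).Update(context.TODO(), job, metav1.UpdateOptions{})
		return err
	})
}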