Merge pull request #40932 from peay/cronjob-max-finished-jobs

Automatic merge from submit-queue (batch tested with PRs 40932, 41896, 41815, 41309, 41628)

Modify CronJob API to add job history limits, cleanup jobs in controller

**What this PR does / why we need it**:
As discussed in #34710, this adds two limits to `CronJobSpec` that control how many finished jobs created by a CronJob are kept.
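
For illustration only, here is a minimal Go sketch of setting the two new fields on the `v2alpha1` API type. The field names (`SuccessfulJobsHistoryLimit`, `FailedJobsHistoryLimit`) come from this PR; the import paths, schedule, and the `int32Ptr` helper are assumptions, not part of the change:

```go
// Sketch (not part of this PR): a CronJob that keeps at most 3 successful
// and 1 failed Job. Only the two *JobsHistoryLimit fields are new here.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
)

// int32Ptr is a small helper; the limits are pointers so that an explicit
// zero can be distinguished from "not specified".
func int32Ptr(i int32) *int32 { return &i }

func newCronJobWithHistoryLimits() *batchv2alpha1.CronJob {
	return &batchv2alpha1.CronJob{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		Spec: batchv2alpha1.CronJobSpec{
			Schedule:                   "*/1 * * * ?",
			ConcurrencyPolicy:          batchv2alpha1.AllowConcurrent,
			SuccessfulJobsHistoryLimit: int32Ptr(3), // keep the 3 most recent successful Jobs
			FailedJobsHistoryLimit:     int32Ptr(1), // keep only the most recent failed Job
			JobTemplate:                batchv2alpha1.JobTemplateSpec{ /* job spec elided */ },
		},
	}
}
```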

**Which issue this PR fixes**: fixes #34710

**Special notes for your reviewer**:

cc @soltysh, please have a look and let me know what you think -- I'll then add end to end testing and update the doc in a separate commit. What is the timeline to get this into 1.6?

The plan:

- [x] API changes
  - [x] Changing versioned APIs
    - [x] `types.go`
    - [x] `defaults.go` (nothing to do)
    - [x] `conversion.go` (nothing to do?)
    - [x] `conversion_test.go` (nothing to do?)
  - [x] Changing the internal structure
    - [x] `types.go`
    - [x] `validation.go`
    - [x] `validation_test.go`
  - [x] Edit version conversions
    - [x] Edit (nothing to do?)
    - [x] Run `hack/update-codegen.sh`
  - [x] Generate protobuf objects
    - [x] Run `hack/update-generated-protobuf.sh`
  - [x] Generate json (un)marshaling code
    - [x] Run `hack/update-codecgen.sh`
  - [x] Update fuzzer
- [x] Actual logic
- [x] Unit tests
- [x] End to end tests
- [x] Documentation changes and API specs update in separate commit


**Release note**:

```release-note
Add configurable limits to CronJob resource to specify how many successful and failed jobs are preserved.
```
Kubernetes Submit Queue 2017-02-26 08:09:54 -08:00 committed by GitHub
commit 80e6492f03
18 changed files with 965 additions and 208 deletions


@ -40985,6 +40985,11 @@
"description": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.",
"type": "string"
},
"failedJobsHistoryLimit": {
"description": "The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.",
"type": "integer",
"format": "int32"
},
"jobTemplate": {
"description": "JobTemplate is the object that describes the job that will be created when executing a CronJob.",
"$ref": "#/definitions/io.k8s.kubernetes.pkg.apis.batch.v2alpha1.JobTemplateSpec"
@ -40998,6 +41003,11 @@
"type": "integer",
"format": "int64"
},
"successfulJobsHistoryLimit": {
"description": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.",
"type": "integer",
"format": "int32"
},
"suspend": {
"description": "Suspend flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.",
"type": "boolean"


@ -542,6 +542,14 @@ func batchFuncs(t apitesting.TestingCommon) []interface{} {
sds := int64(c.RandUint64())
sj.StartingDeadlineSeconds = &sds
sj.Schedule = c.RandString()
if hasSuccessLimit := c.RandBool(); hasSuccessLimit {
successfulJobsHistoryLimit := int32(c.Rand.Int31())
sj.SuccessfulJobsHistoryLimit = &successfulJobsHistoryLimit
}
if hasFailedLimit := c.RandBool(); hasFailedLimit {
failedJobsHistoryLimit := int32(c.Rand.Int31())
sj.FailedJobsHistoryLimit = &failedJobsHistoryLimit
}
},
func(cp *batch.ConcurrencyPolicy, c fuzz.Continue) {
policies := []batch.ConcurrencyPolicy{batch.AllowConcurrent, batch.ForbidConcurrent, batch.ReplaceConcurrent}


@ -244,6 +244,16 @@ type CronJobSpec struct {
// JobTemplate is the object that describes the job that will be created when
// executing a CronJob.
JobTemplate JobTemplateSpec
// The number of successful finished jobs to retain.
// This is a pointer to distinguish between explicit zero and not specified.
// +optional
SuccessfulJobsHistoryLimit *int32
// The number of failed finished jobs to retain.
// This is a pointer to distinguish between explicit zero and not specified.
// +optional
FailedJobsHistoryLimit *int32
}
// ConcurrencyPolicy describes how the job will be handled.


@ -244,6 +244,16 @@ func (m *CronJobSpec) MarshalTo(data []byte) (int, error) {
return 0, err
}
i += n5
if m.SuccessfulJobsHistoryLimit != nil {
data[i] = 0x30
i++
i = encodeVarintGenerated(data, i, uint64(*m.SuccessfulJobsHistoryLimit))
}
if m.FailedJobsHistoryLimit != nil {
data[i] = 0x38
i++
i = encodeVarintGenerated(data, i, uint64(*m.FailedJobsHistoryLimit))
}
return i, nil
}
@ -673,6 +683,12 @@ func (m *CronJobSpec) Size() (n int) {
}
l = m.JobTemplate.Size()
n += 1 + l + sovGenerated(uint64(l))
if m.SuccessfulJobsHistoryLimit != nil {
n += 1 + sovGenerated(uint64(*m.SuccessfulJobsHistoryLimit))
}
if m.FailedJobsHistoryLimit != nil {
n += 1 + sovGenerated(uint64(*m.FailedJobsHistoryLimit))
}
return n
}
@ -849,6 +865,8 @@ func (this *CronJobSpec) String() string {
`ConcurrencyPolicy:` + fmt.Sprintf("%v", this.ConcurrencyPolicy) + `,`,
`Suspend:` + valueToStringGenerated(this.Suspend) + `,`,
`JobTemplate:` + strings.Replace(strings.Replace(this.JobTemplate.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`,
`SuccessfulJobsHistoryLimit:` + valueToStringGenerated(this.SuccessfulJobsHistoryLimit) + `,`,
`FailedJobsHistoryLimit:` + valueToStringGenerated(this.FailedJobsHistoryLimit) + `,`,
`}`,
}, "")
return s
@ -1371,6 +1389,46 @@ func (m *CronJobSpec) Unmarshal(data []byte) error {
return err
}
iNdEx = postIndex
case 6:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulJobsHistoryLimit", wireType)
}
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
v |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.SuccessfulJobsHistoryLimit = &v
case 7:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field FailedJobsHistoryLimit", wireType)
}
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
v |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.FailedJobsHistoryLimit = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(data[iNdEx:])
@ -2707,78 +2765,82 @@ var (
)
var fileDescriptorGenerated = []byte{
// 1162 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x56, 0x4b, 0x6f, 0x23, 0x45,
0x10, 0xce, 0xd8, 0x89, 0x1f, 0xed, 0xcd, 0xab, 0x21, 0x5a, 0x13, 0x24, 0x3b, 0xb2, 0x04, 0xca,
0xae, 0x76, 0x67, 0x88, 0x37, 0x5a, 0x96, 0x3d, 0x20, 0xed, 0x04, 0x21, 0x11, 0x25, 0xda, 0xa8,
0x9d, 0x65, 0x11, 0x04, 0x69, 0xdb, 0xe3, 0x8a, 0x3d, 0x9b, 0x79, 0x31, 0xdd, 0xb6, 0xc8, 0x8d,
0x33, 0x27, 0xee, 0xfc, 0x00, 0xfe, 0x02, 0x42, 0x1c, 0x39, 0x84, 0x5b, 0x0e, 0x1c, 0xe0, 0x62,
0x91, 0xe1, 0x5f, 0xe4, 0x84, 0xa6, 0xdd, 0xf3, 0xf0, 0x2b, 0x1b, 0x07, 0x29, 0x12, 0xb7, 0xe9,
0xea, 0xfa, 0xbe, 0xae, 0xae, 0xfa, 0xba, 0x6a, 0xd0, 0x47, 0x27, 0x4f, 0x98, 0x6a, 0xba, 0xda,
0x49, 0xb7, 0x09, 0xbe, 0x03, 0x1c, 0x98, 0xe6, 0x9d, 0xb4, 0x35, 0xea, 0x99, 0x4c, 0x6b, 0x52,
0x6e, 0x74, 0xb4, 0x5e, 0x9d, 0x5a, 0x5e, 0x87, 0x6e, 0x69, 0x6d, 0x70, 0xc0, 0xa7, 0x1c, 0x5a,
0xaa, 0xe7, 0xbb, 0xdc, 0xc5, 0xf7, 0x06, 0x50, 0x35, 0x81, 0xaa, 0xde, 0x49, 0x5b, 0x0d, 0xa1,
0xaa, 0x80, 0xaa, 0x11, 0x74, 0xfd, 0x61, 0xdb, 0xe4, 0x9d, 0x6e, 0x53, 0x35, 0x5c, 0x5b, 0x6b,
0xbb, 0x6d, 0x57, 0x13, 0x0c, 0xcd, 0xee, 0xb1, 0x58, 0x89, 0x85, 0xf8, 0x1a, 0x30, 0xaf, 0x6f,
0xcb, 0xa0, 0xa8, 0x67, 0xda, 0xd4, 0xe8, 0x98, 0x0e, 0xf8, 0xa7, 0x49, 0x58, 0x36, 0x70, 0xaa,
0xf5, 0xc6, 0xe2, 0x59, 0xd7, 0xa6, 0xa1, 0xfc, 0xae, 0xc3, 0x4d, 0x1b, 0xc6, 0x00, 0x8f, 0xdf,
0x04, 0x60, 0x46, 0x07, 0x6c, 0x3a, 0x86, 0x7b, 0x34, 0x0d, 0xd7, 0xe5, 0xa6, 0xa5, 0x99, 0x0e,
0x67, 0xdc, 0x1f, 0x03, 0xa5, 0xee, 0xc4, 0xc0, 0xef, 0x81, 0x9f, 0x5c, 0x08, 0xbe, 0xa5, 0xb6,
0x67, 0xc1, 0xa4, 0x3b, 0x3d, 0x98, 0x5a, 0x9e, 0x09, 0xde, 0xb5, 0x9f, 0x32, 0x28, 0xbf, 0xe3,
0xbb, 0xce, 0xae, 0xdb, 0xc4, 0xaf, 0x50, 0x21, 0x4c, 0x54, 0x8b, 0x72, 0x5a, 0x56, 0x36, 0x94,
0xcd, 0x52, 0xfd, 0x03, 0x55, 0x16, 0x2c, 0x1d, 0x77, 0x52, 0xb2, 0xd0, 0x5b, 0xed, 0x6d, 0xa9,
0xcf, 0x9b, 0xaf, 0xc1, 0xe0, 0xfb, 0xc0, 0xa9, 0x8e, 0xcf, 0xfa, 0xd5, 0xb9, 0xa0, 0x5f, 0x45,
0x89, 0x8d, 0xc4, 0xac, 0xf8, 0x0b, 0x34, 0xcf, 0x3c, 0x30, 0xca, 0x19, 0xc1, 0xfe, 0x58, 0xbd,
0xb6, 0x1c, 0x54, 0x19, 0x63, 0xc3, 0x03, 0x43, 0xbf, 0x23, 0xcf, 0x98, 0x0f, 0x57, 0x44, 0x30,
0xe2, 0x57, 0x28, 0xc7, 0x38, 0xe5, 0x5d, 0x56, 0xce, 0x0a, 0xee, 0x27, 0x37, 0xe0, 0x16, 0x78,
0x7d, 0x49, 0xb2, 0xe7, 0x06, 0x6b, 0x22, 0x79, 0x6b, 0xbf, 0x29, 0xa8, 0x24, 0x3d, 0xf7, 0x4c,
0xc6, 0xf1, 0xd1, 0x58, 0xb6, 0xd4, 0xeb, 0x65, 0x2b, 0x44, 0x8b, 0x5c, 0xad, 0xc8, 0x93, 0x0a,
0x91, 0x25, 0x95, 0xa9, 0x97, 0x68, 0xc1, 0xe4, 0x60, 0xb3, 0x72, 0x66, 0x23, 0xbb, 0x59, 0xaa,
0xd7, 0x67, 0xbf, 0x8e, 0xbe, 0x28, 0xe9, 0x17, 0x3e, 0x0b, 0x89, 0xc8, 0x80, 0xaf, 0xf6, 0x7d,
0x36, 0xbe, 0x46, 0x98, 0x3e, 0xfc, 0x00, 0x15, 0x42, 0xcd, 0xb6, 0xba, 0x16, 0x88, 0x6b, 0x14,
0x93, 0xb0, 0x1a, 0xd2, 0x4e, 0x62, 0x0f, 0xfc, 0x02, 0xdd, 0x65, 0x9c, 0xfa, 0xdc, 0x74, 0xda,
0x9f, 0x00, 0x6d, 0x59, 0xa6, 0x03, 0x0d, 0x30, 0x5c, 0xa7, 0xc5, 0x44, 0x4d, 0xb3, 0xfa, 0xbb,
0x41, 0xbf, 0x7a, 0xb7, 0x31, 0xd9, 0x85, 0x4c, 0xc3, 0xe2, 0x23, 0xb4, 0x6a, 0xb8, 0x8e, 0xd1,
0xf5, 0x7d, 0x70, 0x8c, 0xd3, 0x03, 0xd7, 0x32, 0x8d, 0x53, 0x51, 0xc8, 0xa2, 0xae, 0xca, 0x68,
0x56, 0x77, 0x46, 0x1d, 0x2e, 0x27, 0x19, 0xc9, 0x38, 0x11, 0x7e, 0x0f, 0xe5, 0x59, 0x97, 0x79,
0xe0, 0xb4, 0xca, 0xf3, 0x1b, 0xca, 0x66, 0x41, 0x2f, 0x05, 0xfd, 0x6a, 0xbe, 0x31, 0x30, 0x91,
0x68, 0x0f, 0x7f, 0x83, 0x4a, 0xaf, 0xdd, 0xe6, 0x21, 0xd8, 0x9e, 0x45, 0x39, 0x94, 0x17, 0x44,
0x4d, 0x9f, 0xce, 0x90, 0xf8, 0xdd, 0x04, 0x2d, 0x74, 0xfa, 0x96, 0x0c, 0xbd, 0x94, 0xda, 0x20,
0xe9, 0x33, 0x6a, 0x7f, 0x28, 0x68, 0x71, 0x48, 0x7d, 0xf8, 0x05, 0xca, 0x51, 0x83, 0x9b, 0xbd,
0xb0, 0x18, 0x61, 0xe1, 0x1f, 0x4e, 0x3f, 0x3f, 0x79, 0x79, 0x04, 0x8e, 0x21, 0xbc, 0x30, 0x24,
0xe2, 0x7d, 0x26, 0x48, 0x88, 0x24, 0xc3, 0x16, 0x5a, 0xb1, 0x28, 0xe3, 0x51, 0x45, 0x0f, 0x4d,
0x1b, 0x44, 0x2e, 0x4a, 0xf5, 0xfb, 0xd7, 0x13, 0x6d, 0x88, 0xd0, 0xdf, 0x0e, 0xfa, 0xd5, 0x95,
0xbd, 0x11, 0x1e, 0x32, 0xc6, 0x5c, 0xfb, 0x31, 0x83, 0xb2, 0xb7, 0xd3, 0x50, 0x0e, 0x87, 0x1a,
0x4a, 0x7d, 0xb6, 0x62, 0x4d, 0x6d, 0x26, 0x47, 0x23, 0xcd, 0x64, 0x7b, 0x46, 0xde, 0xab, 0x1b,
0xc9, 0x79, 0x16, 0xdd, 0xd9, 0x75, 0x9b, 0x3b, 0xae, 0xd3, 0x32, 0xb9, 0xe9, 0x3a, 0x78, 0x1b,
0xcd, 0xf3, 0x53, 0x2f, 0x7a, 0x7e, 0x1b, 0x51, 0x40, 0x87, 0xa7, 0x1e, 0x5c, 0xf6, 0xab, 0x2b,
0x69, 0xdf, 0xd0, 0x46, 0x84, 0x37, 0xfe, 0x3c, 0x0e, 0x32, 0x23, 0x70, 0x1f, 0x0f, 0x1f, 0x77,
0xd9, 0xaf, 0x5e, 0x39, 0x09, 0xd4, 0x98, 0x73, 0x38, 0x3c, 0xdc, 0x46, 0x8b, 0x61, 0x41, 0x0f,
0x7c, 0xb7, 0x39, 0xd0, 0x49, 0x76, 0x66, 0x9d, 0xac, 0xc9, 0x50, 0x16, 0xf7, 0xd2, 0x44, 0x64,
0x98, 0x17, 0xf7, 0x10, 0x0e, 0x0d, 0x87, 0x3e, 0x75, 0xd8, 0xe0, 0x72, 0x37, 0x53, 0xe5, 0xba,
0x3c, 0x0d, 0xef, 0x8d, 0xb1, 0x91, 0x09, 0x27, 0xe0, 0xf7, 0x51, 0xce, 0x07, 0xca, 0x5c, 0x47,
0x3c, 0xf1, 0x62, 0x52, 0x27, 0x22, 0xac, 0x44, 0xee, 0xe2, 0x7b, 0x28, 0x6f, 0x03, 0x63, 0xb4,
0x0d, 0xe5, 0x9c, 0x70, 0x5c, 0x96, 0x8e, 0xf9, 0xfd, 0x81, 0x99, 0x44, 0xfb, 0xb5, 0x5f, 0x15,
0x94, 0xbf, 0x9d, 0xb9, 0xd0, 0x18, 0x9e, 0x0b, 0xea, 0x6c, 0xca, 0x9c, 0x32, 0x13, 0x7e, 0xce,
0x8a, 0xf0, 0xc5, 0x3c, 0xd8, 0x42, 0x25, 0x8f, 0xfa, 0xd4, 0xb2, 0xc0, 0x32, 0x99, 0x2d, 0x6e,
0xb0, 0xa0, 0x2f, 0x87, 0x5d, 0xec, 0x20, 0x31, 0x93, 0xb4, 0x4f, 0x08, 0x31, 0xdc, 0xf0, 0x77,
0x24, 0x4c, 0xf1, 0x40, 0x8e, 0x12, 0xb2, 0x93, 0x98, 0x49, 0xda, 0x07, 0x3f, 0x47, 0x6b, 0x83,
0xce, 0x34, 0x3a, 0x45, 0xb2, 0x62, 0x8a, 0xbc, 0x13, 0xf4, 0xab, 0x6b, 0xcf, 0x26, 0x39, 0x90,
0xc9, 0x38, 0xfc, 0x35, 0x2a, 0x30, 0xb0, 0xc0, 0xe0, 0xae, 0x2f, 0x25, 0xf4, 0xe8, 0x9a, 0x59,
0xa7, 0x4d, 0xb0, 0x1a, 0x12, 0xaa, 0xdf, 0x11, 0x73, 0x4f, 0xae, 0x48, 0x4c, 0x89, 0x9f, 0xa2,
0x25, 0x9b, 0x3a, 0x5d, 0x1a, 0x7b, 0x0a, 0xed, 0x14, 0x74, 0x1c, 0xf4, 0xab, 0x4b, 0xfb, 0x43,
0x3b, 0x64, 0xc4, 0x13, 0x7f, 0x85, 0x0a, 0x3c, 0x1a, 0x2a, 0x39, 0x11, 0xda, 0x1b, 0x9a, 0xfa,
0x81, 0xdb, 0x1a, 0x9a, 0x23, 0xb1, 0x1e, 0xe2, 0x21, 0x12, 0x13, 0xd6, 0x7e, 0xc9, 0xa2, 0x62,
0x32, 0x3d, 0x4e, 0x10, 0x32, 0xa2, 0x67, 0xcd, 0xe4, 0x04, 0xf9, 0x70, 0x36, 0x89, 0xc4, 0x6d,
0x21, 0xe9, 0xbc, 0xb1, 0x89, 0x91, 0x14, 0x3d, 0x7e, 0x89, 0x8a, 0x62, 0x9e, 0x8b, 0x67, 0x9b,
0x99, 0xf9, 0xd9, 0x2e, 0x06, 0xfd, 0x6a, 0xb1, 0x11, 0x11, 0x90, 0x84, 0x0b, 0x1f, 0xa3, 0xa5,
0x44, 0x2b, 0x37, 0x6c, 0x41, 0xa2, 0x30, 0x3b, 0x43, 0x2c, 0x64, 0x84, 0x35, 0x6c, 0x04, 0x72,
0xd6, 0xce, 0x0b, 0xc9, 0x4e, 0x1b, 0x9e, 0x1a, 0x2a, 0xb2, 0xae, 0x61, 0x00, 0xb4, 0xa0, 0x25,
0xea, 0xbe, 0xa0, 0xaf, 0x4a, 0xd7, 0x62, 0x23, 0xda, 0x20, 0x89, 0x4f, 0x48, 0x7c, 0x4c, 0x4d,
0x0b, 0x5a, 0xa2, 0xde, 0x29, 0xe2, 0x4f, 0x85, 0x95, 0xc8, 0xdd, 0xda, 0x5f, 0x0a, 0x4a, 0xff,
0x1b, 0xdc, 0xc2, 0xbc, 0xec, 0xa4, 0xb4, 0x98, 0xf9, 0xcf, 0x3f, 0x38, 0x57, 0x09, 0xf3, 0x77,
0x05, 0x2d, 0x8f, 0xf8, 0xff, 0x5f, 0xff, 0x07, 0xf4, 0xfb, 0x67, 0x17, 0x95, 0xb9, 0xf3, 0x8b,
0xca, 0xdc, 0x9f, 0x17, 0x95, 0xb9, 0xef, 0x82, 0x8a, 0x72, 0x16, 0x54, 0x94, 0xf3, 0xa0, 0xa2,
0xfc, 0x1d, 0x54, 0x94, 0x1f, 0xfe, 0xa9, 0xcc, 0x7d, 0x59, 0x88, 0x78, 0xfe, 0x0d, 0x00, 0x00,
0xff, 0xff, 0xef, 0x59, 0xca, 0xdd, 0x1e, 0x0f, 0x00, 0x00,
// 1224 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x56, 0xcd, 0x6e, 0x1c, 0x45,
0x10, 0xf6, 0xec, 0xff, 0xf6, 0xc6, 0x8e, 0xd3, 0x90, 0x64, 0x59, 0xa4, 0x1d, 0x6b, 0x25, 0x90,
0x13, 0x25, 0x33, 0x64, 0x13, 0x85, 0x90, 0x03, 0x52, 0xc6, 0x08, 0x81, 0xe5, 0x28, 0x56, 0xaf,
0x43, 0x10, 0x04, 0x94, 0xde, 0xd9, 0xf6, 0xee, 0xc4, 0xf3, 0xc7, 0x74, 0xcf, 0x8a, 0xbd, 0xf1,
0x08, 0xdc, 0x79, 0x00, 0x5e, 0x01, 0x21, 0xc4, 0x89, 0x43, 0xb8, 0xe5, 0xc0, 0x01, 0x2e, 0x23,
0x32, 0xbc, 0x85, 0x4f, 0x68, 0x7a, 0x7a, 0x7e, 0xf6, 0xcf, 0xf1, 0x1a, 0xc9, 0x12, 0xb7, 0x99,
0xea, 0xfa, 0xbe, 0xae, 0xae, 0xfa, 0xba, 0xaa, 0xc1, 0x07, 0x47, 0xf7, 0xa8, 0x62, 0x38, 0xea,
0x91, 0xdf, 0x27, 0x9e, 0x4d, 0x18, 0xa1, 0xaa, 0x7b, 0x34, 0x54, 0xb1, 0x6b, 0x50, 0xb5, 0x8f,
0x99, 0x3e, 0x52, 0xc7, 0x5d, 0x6c, 0xba, 0x23, 0x7c, 0x4b, 0x1d, 0x12, 0x9b, 0x78, 0x98, 0x91,
0x81, 0xe2, 0x7a, 0x0e, 0x73, 0xe0, 0xb5, 0x18, 0xaa, 0x64, 0x50, 0xc5, 0x3d, 0x1a, 0x2a, 0x11,
0x54, 0xe1, 0x50, 0x25, 0x81, 0xb6, 0x6e, 0x0e, 0x0d, 0x36, 0xf2, 0xfb, 0x8a, 0xee, 0x58, 0xea,
0xd0, 0x19, 0x3a, 0x2a, 0x67, 0xe8, 0xfb, 0x87, 0xfc, 0x8f, 0xff, 0xf0, 0xaf, 0x98, 0xb9, 0x75,
0x47, 0x04, 0x85, 0x5d, 0xc3, 0xc2, 0xfa, 0xc8, 0xb0, 0x89, 0x37, 0xc9, 0xc2, 0xb2, 0x08, 0xc3,
0xea, 0x78, 0x2e, 0x9e, 0x96, 0xba, 0x0c, 0xe5, 0xf9, 0x36, 0x33, 0x2c, 0x32, 0x07, 0xb8, 0xfb,
0x3a, 0x00, 0xd5, 0x47, 0xc4, 0xc2, 0x73, 0xb8, 0xdb, 0xcb, 0x70, 0x3e, 0x33, 0x4c, 0xd5, 0xb0,
0x19, 0x65, 0xde, 0x1c, 0x28, 0x77, 0x26, 0x4a, 0xbc, 0x31, 0xf1, 0xb2, 0x03, 0x91, 0x6f, 0xb1,
0xe5, 0x9a, 0x64, 0xd1, 0x99, 0x6e, 0x2c, 0x2d, 0xcf, 0x02, 0xef, 0xce, 0x8f, 0x05, 0x50, 0xdd,
0xf1, 0x1c, 0x7b, 0xd7, 0xe9, 0xc3, 0x67, 0xa0, 0x16, 0x25, 0x6a, 0x80, 0x19, 0x6e, 0x4a, 0x5b,
0xd2, 0x76, 0xa3, 0xfb, 0x9e, 0x22, 0x0a, 0x96, 0x8f, 0x3b, 0x2b, 0x59, 0xe4, 0xad, 0x8c, 0x6f,
0x29, 0x8f, 0xfa, 0xcf, 0x89, 0xce, 0x1e, 0x12, 0x86, 0x35, 0xf8, 0x22, 0x90, 0xd7, 0xc2, 0x40,
0x06, 0x99, 0x0d, 0xa5, 0xac, 0xf0, 0x73, 0x50, 0xa2, 0x2e, 0xd1, 0x9b, 0x05, 0xce, 0x7e, 0x57,
0x39, 0xb5, 0x1c, 0x14, 0x11, 0x63, 0xcf, 0x25, 0xba, 0x76, 0x41, 0xec, 0x51, 0x8a, 0xfe, 0x10,
0x67, 0x84, 0xcf, 0x40, 0x85, 0x32, 0xcc, 0x7c, 0xda, 0x2c, 0x72, 0xee, 0x7b, 0x67, 0xe0, 0xe6,
0x78, 0x6d, 0x43, 0xb0, 0x57, 0xe2, 0x7f, 0x24, 0x78, 0x3b, 0xbf, 0x49, 0xa0, 0x21, 0x3c, 0xf7,
0x0c, 0xca, 0xe0, 0xd3, 0xb9, 0x6c, 0x29, 0xa7, 0xcb, 0x56, 0x84, 0xe6, 0xb9, 0xda, 0x14, 0x3b,
0xd5, 0x12, 0x4b, 0x2e, 0x53, 0x4f, 0x40, 0xd9, 0x60, 0xc4, 0xa2, 0xcd, 0xc2, 0x56, 0x71, 0xbb,
0xd1, 0xed, 0xae, 0x7e, 0x1c, 0x6d, 0x5d, 0xd0, 0x97, 0x3f, 0x8d, 0x88, 0x50, 0xcc, 0xd7, 0xf9,
0xb5, 0x94, 0x1e, 0x23, 0x4a, 0x1f, 0xbc, 0x01, 0x6a, 0x91, 0x66, 0x07, 0xbe, 0x49, 0xf8, 0x31,
0xea, 0x59, 0x58, 0x3d, 0x61, 0x47, 0xa9, 0x07, 0x7c, 0x0c, 0xae, 0x52, 0x86, 0x3d, 0x66, 0xd8,
0xc3, 0x8f, 0x08, 0x1e, 0x98, 0x86, 0x4d, 0x7a, 0x44, 0x77, 0xec, 0x01, 0xe5, 0x35, 0x2d, 0x6a,
0x6f, 0x87, 0x81, 0x7c, 0xb5, 0xb7, 0xd8, 0x05, 0x2d, 0xc3, 0xc2, 0xa7, 0xe0, 0x92, 0xee, 0xd8,
0xba, 0xef, 0x79, 0xc4, 0xd6, 0x27, 0xfb, 0x8e, 0x69, 0xe8, 0x13, 0x5e, 0xc8, 0xba, 0xa6, 0x88,
0x68, 0x2e, 0xed, 0xcc, 0x3a, 0x1c, 0x2f, 0x32, 0xa2, 0x79, 0x22, 0xf8, 0x0e, 0xa8, 0x52, 0x9f,
0xba, 0xc4, 0x1e, 0x34, 0x4b, 0x5b, 0xd2, 0x76, 0x4d, 0x6b, 0x84, 0x81, 0x5c, 0xed, 0xc5, 0x26,
0x94, 0xac, 0xc1, 0x6f, 0x40, 0xe3, 0xb9, 0xd3, 0x3f, 0x20, 0x96, 0x6b, 0x62, 0x46, 0x9a, 0x65,
0x5e, 0xd3, 0xfb, 0x2b, 0x24, 0x7e, 0x37, 0x43, 0x73, 0x9d, 0xbe, 0x21, 0x42, 0x6f, 0xe4, 0x16,
0x50, 0x7e, 0x0f, 0xf8, 0x35, 0x68, 0x51, 0x5f, 0xd7, 0x09, 0xa5, 0x87, 0xbe, 0xb9, 0xeb, 0xf4,
0xe9, 0x27, 0x06, 0x65, 0x8e, 0x37, 0xd9, 0x33, 0x2c, 0x83, 0x35, 0x2b, 0x5b, 0xd2, 0x76, 0x59,
0x6b, 0x87, 0x81, 0xdc, 0xea, 0x2d, 0xf5, 0x42, 0x27, 0x30, 0x40, 0x04, 0xae, 0x1c, 0x62, 0xc3,
0x24, 0x83, 0x39, 0xee, 0x2a, 0xe7, 0x6e, 0x85, 0x81, 0x7c, 0xe5, 0xe3, 0x85, 0x1e, 0x68, 0x09,
0xb2, 0xf3, 0x87, 0x04, 0xd6, 0xa7, 0x6e, 0x0c, 0x7c, 0x0c, 0x2a, 0x58, 0x67, 0xc6, 0x38, 0x12,
0x50, 0x24, 0xd6, 0x9b, 0xcb, 0x73, 0x96, 0x75, 0x0b, 0x44, 0x0e, 0x49, 0x54, 0x24, 0x92, 0x5d,
0xb8, 0x07, 0x9c, 0x04, 0x09, 0x32, 0x68, 0x82, 0x4d, 0x13, 0x53, 0x96, 0xa8, 0xf0, 0xc0, 0xb0,
0x08, 0xaf, 0x5f, 0xa3, 0x7b, 0xfd, 0x74, 0x17, 0x2d, 0x42, 0x68, 0x6f, 0x86, 0x81, 0xbc, 0xb9,
0x37, 0xc3, 0x83, 0xe6, 0x98, 0x3b, 0x3f, 0x14, 0x40, 0xf1, 0x7c, 0x9a, 0xe0, 0xc1, 0x54, 0x13,
0xec, 0xae, 0x26, 0xb0, 0xa5, 0x0d, 0xf0, 0xe9, 0x4c, 0x03, 0xbc, 0xb3, 0x22, 0xef, 0xc9, 0xcd,
0xef, 0x65, 0x11, 0x5c, 0xd8, 0x75, 0xfa, 0x3b, 0x8e, 0x3d, 0x30, 0x98, 0xe1, 0xd8, 0xf0, 0x0e,
0x28, 0xb1, 0x89, 0x9b, 0xb4, 0x8c, 0xad, 0x24, 0xa0, 0x83, 0x89, 0x4b, 0x8e, 0x03, 0x79, 0x33,
0xef, 0x1b, 0xd9, 0x10, 0xf7, 0x86, 0x9f, 0xa5, 0x41, 0x16, 0x38, 0xee, 0xc3, 0xe9, 0xed, 0x8e,
0x03, 0xf9, 0xc4, 0xe9, 0xa5, 0xa4, 0x9c, 0xd3, 0xe1, 0xc1, 0x21, 0x58, 0x8f, 0x0a, 0xba, 0xef,
0x39, 0xfd, 0x58, 0x27, 0xc5, 0x95, 0x75, 0x72, 0x59, 0x84, 0xb2, 0xbe, 0x97, 0x27, 0x42, 0xd3,
0xbc, 0x70, 0x0c, 0x60, 0x64, 0x38, 0xf0, 0xb0, 0x4d, 0xe3, 0xc3, 0x9d, 0x4d, 0x95, 0x2d, 0xb1,
0x1b, 0xdc, 0x9b, 0x63, 0x43, 0x0b, 0x76, 0x80, 0xef, 0x82, 0x8a, 0x47, 0x30, 0x75, 0x6c, 0xde,
0x96, 0xea, 0x59, 0x9d, 0x10, 0xb7, 0x22, 0xb1, 0x0a, 0xaf, 0x81, 0xaa, 0x45, 0x28, 0xc5, 0x43,
0xc2, 0xbb, 0x47, 0x5d, 0xbb, 0x28, 0x1c, 0xab, 0x0f, 0x63, 0x33, 0x4a, 0xd6, 0x3b, 0xbf, 0x48,
0xa0, 0x7a, 0x3e, 0xb3, 0xac, 0x37, 0x3d, 0xcb, 0x94, 0xd5, 0x94, 0xb9, 0x64, 0x8e, 0xfd, 0x54,
0xe4, 0xe1, 0xf3, 0x19, 0x76, 0x0b, 0x34, 0x5c, 0xec, 0x61, 0xd3, 0x24, 0xa6, 0x41, 0x2d, 0x7e,
0x82, 0xb2, 0x76, 0x31, 0xea, 0xbc, 0xfb, 0x99, 0x19, 0xe5, 0x7d, 0x22, 0x88, 0xee, 0x44, 0x4f,
0xa8, 0x28, 0xc5, 0xb1, 0x1c, 0x05, 0x64, 0x27, 0x33, 0xa3, 0xbc, 0x0f, 0x7c, 0x04, 0x2e, 0xc7,
0x9d, 0x69, 0x76, 0xf2, 0x15, 0xf9, 0xe4, 0x7b, 0x2b, 0x0c, 0xe4, 0xcb, 0x0f, 0x16, 0x39, 0xa0,
0xc5, 0x38, 0xf8, 0x15, 0xa8, 0x51, 0x62, 0x12, 0x9d, 0x39, 0x9e, 0x90, 0xd0, 0xed, 0x53, 0x66,
0x1d, 0xf7, 0x89, 0xd9, 0x13, 0x50, 0xed, 0x02, 0x9f, 0xd5, 0xe2, 0x0f, 0xa5, 0x94, 0xf0, 0x3e,
0xd8, 0xb0, 0xb0, 0xed, 0xe3, 0xd4, 0x93, 0x6b, 0xa7, 0xa6, 0xc1, 0x30, 0x90, 0x37, 0x1e, 0x4e,
0xad, 0xa0, 0x19, 0x4f, 0xf8, 0x25, 0xa8, 0xb1, 0x64, 0x10, 0x56, 0x78, 0x68, 0xaf, 0x69, 0xea,
0xfb, 0xce, 0x60, 0x6a, 0xf6, 0xa5, 0x7a, 0x48, 0x07, 0x5f, 0x4a, 0xd8, 0xf9, 0xb9, 0x08, 0xea,
0xd9, 0xf4, 0x38, 0x02, 0x40, 0x4f, 0xae, 0x35, 0x15, 0x13, 0xe4, 0xfd, 0xd5, 0x24, 0x92, 0xb6,
0x85, 0xac, 0xf3, 0xa6, 0x26, 0x8a, 0x72, 0xf4, 0xf0, 0x09, 0xa8, 0xf3, 0x37, 0x08, 0xbf, 0xb6,
0x85, 0x95, 0xaf, 0xed, 0x7a, 0x18, 0xc8, 0xf5, 0x5e, 0x42, 0x80, 0x32, 0x2e, 0x78, 0x08, 0x36,
0x32, 0xad, 0x9c, 0xb1, 0x05, 0xf1, 0xc2, 0xec, 0x4c, 0xb1, 0xa0, 0x19, 0xd6, 0xa8, 0x11, 0x88,
0x59, 0x5b, 0xe2, 0x92, 0x5d, 0x36, 0x3c, 0x55, 0x50, 0xe7, 0xef, 0x02, 0x32, 0x20, 0x03, 0x5e,
0xf7, 0xb2, 0x76, 0x49, 0xb8, 0xd6, 0x7b, 0xc9, 0x02, 0xca, 0x7c, 0x22, 0xe2, 0x78, 0xe0, 0x8b,
0x67, 0x47, 0x4a, 0x1c, 0x3f, 0x0f, 0x90, 0x58, 0xed, 0xfc, 0x25, 0x81, 0xfc, 0x7b, 0xe6, 0x1c,
0xe6, 0xe5, 0x28, 0xa7, 0xc5, 0xc2, 0x7f, 0x7e, 0x94, 0x9d, 0x24, 0xcc, 0xdf, 0x25, 0x70, 0x71,
0xc6, 0xff, 0xff, 0xfa, 0x1e, 0xd0, 0xae, 0xbf, 0x78, 0xd5, 0x5e, 0x7b, 0xf9, 0xaa, 0xbd, 0xf6,
0xe7, 0xab, 0xf6, 0xda, 0x77, 0x61, 0x5b, 0x7a, 0x11, 0xb6, 0xa5, 0x97, 0x61, 0x5b, 0xfa, 0x3b,
0x6c, 0x4b, 0xdf, 0xff, 0xd3, 0x5e, 0xfb, 0xa2, 0x96, 0xf0, 0xfc, 0x1b, 0x00, 0x00, 0xff, 0xff,
0x0b, 0x15, 0xd8, 0x21, 0xd2, 0x0f, 0x00, 0x00,
}


@ -82,6 +82,16 @@ message CronJobSpec {
// JobTemplate is the object that describes the job that will be created when
// executing a CronJob.
optional JobTemplateSpec jobTemplate = 5;
// The number of successful finished jobs to retain.
// This is a pointer to distinguish between explicit zero and not specified.
// +optional
optional int32 successfulJobsHistoryLimit = 6;
// The number of failed finished jobs to retain.
// This is a pointer to distinguish between explicit zero and not specified.
// +optional
optional int32 failedJobsHistoryLimit = 7;
}
// CronJobStatus represents the current state of a cron job.


@ -3793,15 +3793,17 @@ func (x *CronJobSpec) CodecEncodeSelf(e *codec1978.Encoder) {
} else {
yysep2 := !z.EncBinary()
yy2arr2 := z.EncBasicHandle().StructToArray
var yyq2 [5]bool
var yyq2 [7]bool
_, _, _ = yysep2, yyq2, yy2arr2
const yyr2 bool = false
yyq2[1] = x.StartingDeadlineSeconds != nil
yyq2[2] = x.ConcurrencyPolicy != ""
yyq2[3] = x.Suspend != nil
yyq2[5] = x.SuccessfulJobsHistoryLimit != nil
yyq2[6] = x.FailedJobsHistoryLimit != nil
var yynn2 int
if yyr2 || yy2arr2 {
r.EncodeArrayStart(5)
r.EncodeArrayStart(7)
} else {
yynn2 = 2
for _, b := range yyq2 {
@ -3927,6 +3929,76 @@ func (x *CronJobSpec) CodecEncodeSelf(e *codec1978.Encoder) {
yy22 := &x.JobTemplate
yy22.CodecEncodeSelf(e)
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[5] {
if x.SuccessfulJobsHistoryLimit == nil {
r.EncodeNil()
} else {
yy25 := *x.SuccessfulJobsHistoryLimit
yym26 := z.EncBinary()
_ = yym26
if false {
} else {
r.EncodeInt(int64(yy25))
}
}
} else {
r.EncodeNil()
}
} else {
if yyq2[5] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("successfulJobsHistoryLimit"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.SuccessfulJobsHistoryLimit == nil {
r.EncodeNil()
} else {
yy27 := *x.SuccessfulJobsHistoryLimit
yym28 := z.EncBinary()
_ = yym28
if false {
} else {
r.EncodeInt(int64(yy27))
}
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[6] {
if x.FailedJobsHistoryLimit == nil {
r.EncodeNil()
} else {
yy30 := *x.FailedJobsHistoryLimit
yym31 := z.EncBinary()
_ = yym31
if false {
} else {
r.EncodeInt(int64(yy30))
}
}
} else {
r.EncodeNil()
}
} else {
if yyq2[6] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("failedJobsHistoryLimit"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.FailedJobsHistoryLimit == nil {
r.EncodeNil()
} else {
yy32 := *x.FailedJobsHistoryLimit
yym33 := z.EncBinary()
_ = yym33
if false {
} else {
r.EncodeInt(int64(yy32))
}
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
@ -4046,6 +4118,38 @@ func (x *CronJobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
yyv11 := &x.JobTemplate
yyv11.CodecDecodeSelf(d)
}
case "successfulJobsHistoryLimit":
if r.TryDecodeAsNil() {
if x.SuccessfulJobsHistoryLimit != nil {
x.SuccessfulJobsHistoryLimit = nil
}
} else {
if x.SuccessfulJobsHistoryLimit == nil {
x.SuccessfulJobsHistoryLimit = new(int32)
}
yym13 := z.DecBinary()
_ = yym13
if false {
} else {
*((*int32)(x.SuccessfulJobsHistoryLimit)) = int32(r.DecodeInt(32))
}
}
case "failedJobsHistoryLimit":
if r.TryDecodeAsNil() {
if x.FailedJobsHistoryLimit != nil {
x.FailedJobsHistoryLimit = nil
}
} else {
if x.FailedJobsHistoryLimit == nil {
x.FailedJobsHistoryLimit = new(int32)
}
yym15 := z.DecBinary()
_ = yym15
if false {
} else {
*((*int32)(x.FailedJobsHistoryLimit)) = int32(r.DecodeInt(32))
}
}
default:
z.DecStructFieldNotFound(-1, yys3)
} // end switch yys3
@ -4057,16 +4161,16 @@ func (x *CronJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj12 int
var yyb12 bool
var yyhl12 bool = l >= 0
yyj12++
if yyhl12 {
yyb12 = yyj12 > l
var yyj16 int
var yyb16 bool
var yyhl16 bool = l >= 0
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
} else {
yyb12 = r.CheckBreak()
yyb16 = r.CheckBreak()
}
if yyb12 {
if yyb16 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -4074,21 +4178,21 @@ func (x *CronJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.Schedule = ""
} else {
yyv13 := &x.Schedule
yym14 := z.DecBinary()
_ = yym14
yyv17 := &x.Schedule
yym18 := z.DecBinary()
_ = yym18
if false {
} else {
*((*string)(yyv13)) = r.DecodeString()
*((*string)(yyv17)) = r.DecodeString()
}
}
yyj12++
if yyhl12 {
yyb12 = yyj12 > l
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
} else {
yyb12 = r.CheckBreak()
yyb16 = r.CheckBreak()
}
if yyb12 {
if yyb16 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -4101,20 +4205,20 @@ func (x *CronJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if x.StartingDeadlineSeconds == nil {
x.StartingDeadlineSeconds = new(int64)
}
yym16 := z.DecBinary()
_ = yym16
yym20 := z.DecBinary()
_ = yym20
if false {
} else {
*((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64))
}
}
yyj12++
if yyhl12 {
yyb12 = yyj12 > l
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
} else {
yyb12 = r.CheckBreak()
yyb16 = r.CheckBreak()
}
if yyb12 {
if yyb16 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -4122,16 +4226,16 @@ func (x *CronJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.ConcurrencyPolicy = ""
} else {
yyv17 := &x.ConcurrencyPolicy
yyv17.CodecDecodeSelf(d)
yyv21 := &x.ConcurrencyPolicy
yyv21.CodecDecodeSelf(d)
}
yyj12++
if yyhl12 {
yyb12 = yyj12 > l
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
} else {
yyb12 = r.CheckBreak()
yyb16 = r.CheckBreak()
}
if yyb12 {
if yyb16 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -4144,20 +4248,20 @@ func (x *CronJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if x.Suspend == nil {
x.Suspend = new(bool)
}
yym19 := z.DecBinary()
_ = yym19
yym23 := z.DecBinary()
_ = yym23
if false {
} else {
*((*bool)(x.Suspend)) = r.DecodeBool()
}
}
yyj12++
if yyhl12 {
yyb12 = yyj12 > l
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
} else {
yyb12 = r.CheckBreak()
yyb16 = r.CheckBreak()
}
if yyb12 {
if yyb16 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -4165,21 +4269,73 @@ func (x *CronJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
if r.TryDecodeAsNil() {
x.JobTemplate = JobTemplateSpec{}
} else {
yyv20 := &x.JobTemplate
yyv20.CodecDecodeSelf(d)
yyv24 := &x.JobTemplate
yyv24.CodecDecodeSelf(d)
}
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
} else {
yyb16 = r.CheckBreak()
}
if yyb16 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
if x.SuccessfulJobsHistoryLimit != nil {
x.SuccessfulJobsHistoryLimit = nil
}
} else {
if x.SuccessfulJobsHistoryLimit == nil {
x.SuccessfulJobsHistoryLimit = new(int32)
}
yym26 := z.DecBinary()
_ = yym26
if false {
} else {
*((*int32)(x.SuccessfulJobsHistoryLimit)) = int32(r.DecodeInt(32))
}
}
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
} else {
yyb16 = r.CheckBreak()
}
if yyb16 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
if x.FailedJobsHistoryLimit != nil {
x.FailedJobsHistoryLimit = nil
}
} else {
if x.FailedJobsHistoryLimit == nil {
x.FailedJobsHistoryLimit = new(int32)
}
yym28 := z.DecBinary()
_ = yym28
if false {
} else {
*((*int32)(x.FailedJobsHistoryLimit)) = int32(r.DecodeInt(32))
}
}
for {
yyj12++
if yyhl12 {
yyb12 = yyj12 > l
yyj16++
if yyhl16 {
yyb16 = yyj16 > l
} else {
yyb12 = r.CheckBreak()
yyb16 = r.CheckBreak()
}
if yyb12 {
if yyb16 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj12-1, "")
z.DecStructFieldNotFound(yyj16-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}
@ -4772,7 +4928,7 @@ func (x codecSelfer1234) decSliceCronJob(v *[]CronJob, d *codec1978.Decoder) {
yyrg1 := len(yyv1) > 0
yyv21 := yyv1
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1128)
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1144)
if yyrt1 {
if yyrl1 <= cap(yyv1) {
yyv1 = yyv1[:yyrl1]


@ -250,6 +250,16 @@ type CronJobSpec struct {
// JobTemplate is the object that describes the job that will be created when
// executing a CronJob.
JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"`
// The number of successful finished jobs to retain.
// This is a pointer to distinguish between explicit zero and not specified.
// +optional
SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty" protobuf:"varint,6,opt,name=successfulJobsHistoryLimit"`
// The number of failed finished jobs to retain.
// This is a pointer to distinguish between explicit zero and not specified.
// +optional
FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty" protobuf:"varint,7,opt,name=failedJobsHistoryLimit"`
}
// ConcurrencyPolicy describes how the job will be handled.


@ -49,12 +49,14 @@ func (CronJobList) SwaggerDoc() map[string]string {
}
var map_CronJobSpec = map[string]string{
"": "CronJobSpec describes how the job execution will look like and when it will actually run.",
"schedule": "Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.",
"startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.",
"concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.",
"suspend": "Suspend flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.",
"jobTemplate": "JobTemplate is the object that describes the job that will be created when executing a CronJob.",
"": "CronJobSpec describes how the job execution will look like and when it will actually run.",
"schedule": "Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.",
"startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.",
"concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.",
"suspend": "Suspend flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.",
"jobTemplate": "JobTemplate is the object that describes the job that will be created when executing a CronJob.",
"successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.",
"failedJobsHistoryLimit": "The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.",
}
func (CronJobSpec) SwaggerDoc() map[string]string {


@ -141,6 +141,8 @@ func autoConvert_v2alpha1_CronJobSpec_To_batch_CronJobSpec(in *CronJobSpec, out
if err := Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil {
return err
}
out.SuccessfulJobsHistoryLimit = (*int32)(unsafe.Pointer(in.SuccessfulJobsHistoryLimit))
out.FailedJobsHistoryLimit = (*int32)(unsafe.Pointer(in.FailedJobsHistoryLimit))
return nil
}
@ -156,6 +158,8 @@ func autoConvert_batch_CronJobSpec_To_v2alpha1_CronJobSpec(in *batch.CronJobSpec
if err := Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil {
return err
}
out.SuccessfulJobsHistoryLimit = (*int32)(unsafe.Pointer(in.SuccessfulJobsHistoryLimit))
out.FailedJobsHistoryLimit = (*int32)(unsafe.Pointer(in.FailedJobsHistoryLimit))
return nil
}


@ -106,6 +106,16 @@ func DeepCopy_v2alpha1_CronJobSpec(in interface{}, out interface{}, c *conversio
if err := DeepCopy_v2alpha1_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, c); err != nil {
return err
}
if in.SuccessfulJobsHistoryLimit != nil {
in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit
*out = new(int32)
**out = **in
}
if in.FailedJobsHistoryLimit != nil {
in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit
*out = new(int32)
**out = **in
}
return nil
}
}


@ -179,6 +179,15 @@ func ValidateCronJobSpec(spec *batch.CronJobSpec, fldPath *field.Path) field.Err
allErrs = append(allErrs, validateConcurrencyPolicy(&spec.ConcurrencyPolicy, fldPath.Child("concurrencyPolicy"))...)
allErrs = append(allErrs, ValidateJobTemplateSpec(&spec.JobTemplate, fldPath.Child("jobTemplate"))...)
if spec.SuccessfulJobsHistoryLimit != nil {
// zero is a valid SuccessfulJobsHistoryLimit
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.SuccessfulJobsHistoryLimit), fldPath.Child("successfulJobsHistoryLimit"))...)
}
if spec.FailedJobsHistoryLimit != nil {
// zero is a valid FailedJobsHistoryLimit
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.FailedJobsHistoryLimit), fldPath.Child("failedJobsHistoryLimit"))...)
}
return allErrs
}


@ -402,6 +402,40 @@ func TestValidateCronJob(t *testing.T) {
},
},
},
"spec.successfulJobsHistoryLimit: must be greater than or equal to 0": {
ObjectMeta: metav1.ObjectMeta{
Name: "mycronjob",
Namespace: metav1.NamespaceDefault,
UID: types.UID("1a2b3c"),
},
Spec: batch.CronJobSpec{
Schedule: "* * * * ?",
ConcurrencyPolicy: batch.AllowConcurrent,
SuccessfulJobsHistoryLimit: &negative,
JobTemplate: batch.JobTemplateSpec{
Spec: batch.JobSpec{
Template: validPodTemplateSpec,
},
},
},
},
"spec.failedJobsHistoryLimit: must be greater than or equal to 0": {
ObjectMeta: metav1.ObjectMeta{
Name: "mycronjob",
Namespace: metav1.NamespaceDefault,
UID: types.UID("1a2b3c"),
},
Spec: batch.CronJobSpec{
Schedule: "* * * * ?",
ConcurrencyPolicy: batch.AllowConcurrent,
FailedJobsHistoryLimit: &negative,
JobTemplate: batch.JobTemplateSpec{
Spec: batch.JobSpec{
Template: validPodTemplateSpec,
},
},
},
},
"spec.concurrencyPolicy: Required value": {
ObjectMeta: metav1.ObjectMeta{
Name: "mycronjob",


@ -106,6 +106,16 @@ func DeepCopy_batch_CronJobSpec(in interface{}, out interface{}, c *conversion.C
if err := DeepCopy_batch_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, c); err != nil {
return err
}
if in.SuccessfulJobsHistoryLimit != nil {
in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit
*out = new(int32)
**out = **in
}
if in.FailedJobsHistoryLimit != nil {
in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit
*out = new(int32)
**out = **in
}
return nil
}
}


@ -30,6 +30,7 @@ Just periodically list jobs and SJs, and then reconcile them.
import (
"fmt"
"sort"
"time"
"github.com/golang/glog"
@ -92,13 +93,13 @@ func (jm *CronJobController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.Infof("Starting CronJob Manager")
// Check things every 10 seconds.
go wait.Until(jm.SyncAll, 10*time.Second, stopCh)
go wait.Until(jm.syncAll, 10*time.Second, stopCh)
<-stopCh
glog.Infof("Shutting down CronJob Manager")
}
// SyncAll lists all the CronJobs and Jobs and reconciles them.
func (jm *CronJobController) SyncAll() {
// syncAll lists all the CronJobs and Jobs and reconciles them.
func (jm *CronJobController) syncAll() {
sjl, err := jm.kubeClient.BatchV2alpha1().CronJobs(metav1.NamespaceAll).List(metav1.ListOptions{})
if err != nil {
glog.Errorf("Error listing cronjobs: %v", err)
@ -119,24 +120,86 @@ func (jm *CronJobController) SyncAll() {
glog.V(4).Infof("Found %d groups", len(jobsBySj))
for _, sj := range sjs {
SyncOne(sj, jobsBySj[sj.UID], time.Now(), jm.jobControl, jm.sjControl, jm.podControl, jm.recorder)
syncOne(&sj, jobsBySj[sj.UID], time.Now(), jm.jobControl, jm.sjControl, jm.podControl, jm.recorder)
cleanupFinishedJobs(&sj, jobsBySj[sj.UID], jm.jobControl, jm.sjControl, jm.podControl, jm.recorder)
}
}
// SyncOne reconciles a CronJob with a list of any Jobs that it created.
// cleanupFinishedJobs cleans up finished jobs created by a CronJob
func cleanupFinishedJobs(sj *batch.CronJob, js []batch.Job, jc jobControlInterface, sjc sjControlInterface, pc podControlInterface, recorder record.EventRecorder) {
// If neither limit is active, there is no need to do anything.
if sj.Spec.FailedJobsHistoryLimit == nil && sj.Spec.SuccessfulJobsHistoryLimit == nil {
return
}
failedJobs := []batch.Job{}
successfulJobs := []batch.Job{}
for _, job := range js {
isFinished, finishedStatus := getFinishedStatus(&job)
if isFinished && finishedStatus == batch.JobComplete {
successfulJobs = append(successfulJobs, job)
} else if isFinished && finishedStatus == batch.JobFailed {
failedJobs = append(failedJobs, job)
}
}
if sj.Spec.SuccessfulJobsHistoryLimit != nil {
removeOldestJobs(sj,
successfulJobs,
jc,
pc,
*sj.Spec.SuccessfulJobsHistoryLimit,
recorder)
}
if sj.Spec.FailedJobsHistoryLimit != nil {
removeOldestJobs(sj,
failedJobs,
jc,
pc,
*sj.Spec.FailedJobsHistoryLimit,
recorder)
}
// Update the CronJob, in case jobs were removed from the list.
if _, err := sjc.UpdateStatus(sj); err != nil {
nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name)
glog.Infof("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err)
}
}
// removeOldestJobs removes the oldest jobs from a list of jobs
func removeOldestJobs(sj *batch.CronJob, js []batch.Job, jc jobControlInterface, pc podControlInterface, maxJobs int32, recorder record.EventRecorder) {
numToDelete := len(js) - int(maxJobs)
if numToDelete <= 0 {
return
}
nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name)
glog.V(4).Infof("Cleaning up %d/%d jobs from %s", numToDelete, len(js), nameForLog)
sort.Sort(byJobStartTime(js))
for i := 0; i < numToDelete; i++ {
glog.V(4).Infof("Removing job %s from %s", js[i].Name, nameForLog)
deleteJob(sj, &js[i], jc, pc, recorder, "history limit reached")
}
}
// syncOne reconciles a CronJob with a list of any Jobs that it created.
// All known jobs created by "sj" should be included in "js".
// The current time is passed in to facilitate testing.
// It has no receiver, to facilitate testing.
func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInterface, sjc sjControlInterface, pc podControlInterface, recorder record.EventRecorder) {
func syncOne(sj *batch.CronJob, js []batch.Job, now time.Time, jc jobControlInterface, sjc sjControlInterface, pc podControlInterface, recorder record.EventRecorder) {
nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name)
childrenJobs := make(map[types.UID]bool)
for i := range js {
j := js[i]
childrenJobs[j.ObjectMeta.UID] = true
found := inActiveList(sj, j.ObjectMeta.UID)
found := inActiveList(*sj, j.ObjectMeta.UID)
if !found && !IsJobFinished(&j) {
recorder.Eventf(&sj, v1.EventTypeWarning, "UnexpectedJob", "Saw a job that the controller did not create or forgot: %v", j.Name)
recorder.Eventf(sj, v1.EventTypeWarning, "UnexpectedJob", "Saw a job that the controller did not create or forgot: %v", j.Name)
// We found an unfinished job that has us as the parent, but it is not in our Active list.
// This could happen if we crashed right after creating the Job and before updating the status,
// or if our jobs list is newer than our sj status after a relist, or if someone intentionally created
@ -148,9 +211,9 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
// in the same namespace "adopt" that job. ReplicaSets and their Pods work the same way.
// TBS: how to update sj.Status.LastScheduleTime if the adopted job is newer than any we knew about?
} else if found && IsJobFinished(&j) {
deleteFromActiveList(&sj, j.ObjectMeta.UID)
deleteFromActiveList(sj, j.ObjectMeta.UID)
// TODO: event to call out failure vs success.
recorder.Eventf(&sj, v1.EventTypeNormal, "SawCompletedJob", "Saw completed job: %v", j.Name)
recorder.Eventf(sj, v1.EventTypeNormal, "SawCompletedJob", "Saw completed job: %v", j.Name)
}
}
@ -159,25 +222,25 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
// job running.
for _, j := range sj.Status.Active {
if found := childrenJobs[j.UID]; !found {
recorder.Eventf(&sj, v1.EventTypeNormal, "MissingJob", "Active job went missing: %v", j.Name)
deleteFromActiveList(&sj, j.UID)
recorder.Eventf(sj, v1.EventTypeNormal, "MissingJob", "Active job went missing: %v", j.Name)
deleteFromActiveList(sj, j.UID)
}
}
updatedSJ, err := sjc.UpdateStatus(&sj)
updatedSJ, err := sjc.UpdateStatus(sj)
if err != nil {
glog.Errorf("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err)
return
}
sj = *updatedSJ
*sj = *updatedSJ
if sj.Spec.Suspend != nil && *sj.Spec.Suspend {
glog.V(4).Infof("Not starting job for %s because it is suspended", nameForLog)
return
}
times, err := getRecentUnmetScheduleTimes(sj, now)
times, err := getRecentUnmetScheduleTimes(*sj, now)
if err != nil {
recorder.Eventf(&sj, v1.EventTypeWarning, "FailedNeedsStart", "Cannot determine if job needs to be started: %v", err)
recorder.Eventf(sj, v1.EventTypeWarning, "FailedNeedsStart", "Cannot determine if job needs to be started: %v", err)
glog.Errorf("Cannot determine if %s needs to be started: %v", nameForLog, err)
}
// TODO: handle multiple unmet start times, from oldest to newest, updating status as needed.
@ -224,73 +287,37 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
// TODO: this should be replaced with server side job deletion
// currently this mimics JobReaper from pkg/kubectl/stop.go
glog.V(4).Infof("Deleting job %s of %s that was still running at next scheduled start time", j.Name, nameForLog)
job, err := jc.GetJob(j.Namespace, j.Name)
if err != nil {
recorder.Eventf(&sj, v1.EventTypeWarning, "FailedGet", "Get job: %v", err)
recorder.Eventf(sj, v1.EventTypeWarning, "FailedGet", "Get job: %v", err)
return
}
// scale job down to 0
if *job.Spec.Parallelism != 0 {
zero := int32(0)
job.Spec.Parallelism = &zero
job, err = jc.UpdateJob(job.Namespace, job)
if err != nil {
recorder.Eventf(&sj, v1.EventTypeWarning, "FailedUpdate", "Update job: %v", err)
return
}
}
// remove all pods...
selector, _ := metav1.LabelSelectorAsSelector(job.Spec.Selector)
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := pc.ListPods(job.Namespace, options)
if err != nil {
recorder.Eventf(&sj, v1.EventTypeWarning, "FailedList", "List job-pods: %v", err)
}
errList := []error{}
for _, pod := range podList.Items {
glog.V(2).Infof("CronJob controller is deleting Pod %v/%v", pod.Namespace, pod.Name)
if err := pc.DeletePod(pod.Namespace, pod.Name); err != nil {
// ignores the error when the pod isn't found
if !errors.IsNotFound(err) {
errList = append(errList, err)
}
}
}
if len(errList) != 0 {
recorder.Eventf(&sj, v1.EventTypeWarning, "FailedDelete", "Deleted job-pods: %v", utilerrors.NewAggregate(errList))
if !deleteJob(sj, job, jc, pc, recorder, "") {
return
}
// ... the job itself...
if err := jc.DeleteJob(job.Namespace, job.Name); err != nil {
recorder.Eventf(&sj, v1.EventTypeWarning, "FailedDelete", "Deleted job: %v", err)
glog.Errorf("Error deleting job %s from %s: %v", job.Name, nameForLog, err)
return
}
// ... and its reference from active list
deleteFromActiveList(&sj, job.ObjectMeta.UID)
recorder.Eventf(&sj, v1.EventTypeNormal, "SuccessfulDelete", "Deleted job %v", j.Name)
}
}
jobReq, err := getJobFromTemplate(&sj, scheduledTime)
jobReq, err := getJobFromTemplate(sj, scheduledTime)
if err != nil {
glog.Errorf("Unable to make Job from template in %s: %v", nameForLog, err)
return
}
jobResp, err := jc.CreateJob(sj.Namespace, jobReq)
if err != nil {
recorder.Eventf(&sj, v1.EventTypeWarning, "FailedCreate", "Error creating job: %v", err)
recorder.Eventf(sj, v1.EventTypeWarning, "FailedCreate", "Error creating job: %v", err)
return
}
glog.V(4).Infof("Created Job %s for %s", jobResp.Name, nameForLog)
recorder.Eventf(&sj, v1.EventTypeNormal, "SuccessfulCreate", "Created job %v", jobResp.Name)
recorder.Eventf(sj, v1.EventTypeNormal, "SuccessfulCreate", "Created job %v", jobResp.Name)
// ------------------------------------------------------------------ //
// If this process restarts at this point (after posting a job, but
// before updating the status), then we might try to start the job on
// the next time. Actually, if we relist the SJs and Jobs on the next
// iteration of SyncAll, we might not see our own status update, and
// iteration of syncAll, we might not see our own status update, and
// then post one again. So, we need to use the job name as a lock to
// prevent us from making the job twice (name the job with hash of its
// scheduled time).
@ -303,13 +330,64 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
sj.Status.Active = append(sj.Status.Active, *ref)
}
sj.Status.LastScheduleTime = &metav1.Time{Time: scheduledTime}
if _, err := sjc.UpdateStatus(&sj); err != nil {
if _, err := sjc.UpdateStatus(sj); err != nil {
glog.Infof("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err)
}
return
}
// deleteJob reaps a job, deleting the job, the pods and the reference in the active list
func deleteJob(sj *batch.CronJob, job *batch.Job, jc jobControlInterface, pc podControlInterface, recorder record.EventRecorder, reason string) bool {
// TODO: this should be replaced with server side job deletion
// currently this mimics JobReaper from pkg/kubectl/stop.go
nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name)
var err error
// scale job down to 0
if *job.Spec.Parallelism != 0 {
zero := int32(0)
job.Spec.Parallelism = &zero
job, err = jc.UpdateJob(job.Namespace, job)
if err != nil {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedUpdate", "Update job: %v", err)
return false
}
}
// remove all pods...
selector, _ := metav1.LabelSelectorAsSelector(job.Spec.Selector)
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := pc.ListPods(job.Namespace, options)
if err != nil {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedList", "List job-pods: %v", err)
}
errList := []error{}
for _, pod := range podList.Items {
glog.V(2).Infof("CronJob controller is deleting Pod %v/%v", pod.Namespace, pod.Name)
if err := pc.DeletePod(pod.Namespace, pod.Name); err != nil {
// ignores the error when the pod isn't found
if !errors.IsNotFound(err) {
errList = append(errList, err)
}
}
}
if len(errList) != 0 {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedDelete", "Deleted job-pods: %v", utilerrors.NewAggregate(errList))
return false
}
// ... the job itself...
if err := jc.DeleteJob(job.Namespace, job.Name); err != nil {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedDelete", "Deleted job: %v", err)
glog.Errorf("Error deleting job %s from %s: %v", job.Name, nameForLog, err)
return false
}
// ... and its reference from active list
deleteFromActiveList(sj, job.ObjectMeta.UID)
recorder.Eventf(sj, v1.EventTypeNormal, "SuccessfulDelete", "Deleted job %v", job.Name)
return true
}
func getRef(object runtime.Object) (*v1.ObjectReference, error) {
return v1.GetReference(api.Scheme, object)
}


@ -17,6 +17,8 @@ limitations under the License.
package cronjob
import (
"sort"
"strconv"
"strings"
"testing"
"time"
@ -81,6 +83,14 @@ func justAfterThePriorHour() time.Time {
return T1
}
func startTimeStringToTime(startTime string) time.Time {
T1, err := time.Parse(time.RFC3339, startTime)
if err != nil {
panic("test setup error")
}
return T1
}
// returns a cronJob with some fields filled in.
func cronJob() batch.CronJob {
return batch.CronJob{
@ -270,7 +280,7 @@ func TestSyncOne_RunOrNot(t *testing.T) {
pc := &fakePodControl{}
recorder := record.NewFakeRecorder(10)
SyncOne(sj, js, tc.now, jc, sjc, pc, recorder)
syncOne(&sj, js, tc.now, jc, sjc, pc, recorder)
expectedCreates := 0
if tc.expectCreate {
expectedCreates = 1
@ -320,10 +330,237 @@ func TestSyncOne_RunOrNot(t *testing.T) {
}
}
type CleanupJobSpec struct {
StartTime string
IsFinished bool
IsSuccessful bool
ExpectDelete bool
IsStillInActiveList bool // only when IsFinished is set
}
func TestCleanupFinishedJobs_DeleteOrNot(t *testing.T) {
limitThree := int32(3)
limitTwo := int32(2)
limitOne := int32(1)
limitZero := int32(0)
// Starting times are assumed to be sorted by increasing start time
// in all the test cases
testCases := map[string]struct {
jobSpecs []CleanupJobSpec
now time.Time
successfulJobsHistoryLimit *int32
failedJobsHistoryLimit *int32
expectActive int
}{
"success. job limit reached": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, T, F},
{"2016-05-19T05:00:00Z", T, T, T, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", F, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), &limitTwo, &limitOne, 1},
"success. jobs not processed by Sync yet": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, T, F},
{"2016-05-19T05:00:00Z", T, T, T, T},
{"2016-05-19T06:00:00Z", T, T, F, T},
{"2016-05-19T07:00:00Z", T, T, F, T},
{"2016-05-19T08:00:00Z", F, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, T},
}, justBeforeTheHour(), &limitTwo, &limitOne, 4},
"failed job limit reached": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, F, T, F},
{"2016-05-19T05:00:00Z", T, F, T, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", T, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), &limitTwo, &limitTwo, 0},
"success. job limit set to zero": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, T, F},
{"2016-05-19T05:00:00Z", T, F, T, F},
{"2016-05-19T06:00:00Z", T, T, T, F},
{"2016-05-19T07:00:00Z", T, T, T, F},
{"2016-05-19T08:00:00Z", F, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), &limitZero, &limitOne, 1},
"failed job limit set to zero": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, F, F},
{"2016-05-19T05:00:00Z", T, F, T, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", F, F, F, F},
{"2016-05-19T09:00:00Z", T, F, T, F},
}, justBeforeTheHour(), &limitThree, &limitZero, 1},
"no limits reached": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, F, F},
{"2016-05-19T05:00:00Z", T, F, F, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", T, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), &limitThree, &limitThree, 0},
// This test case should trigger the short-circuit
"limits disabled": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, F, F},
{"2016-05-19T05:00:00Z", T, F, F, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", T, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), nil, nil, 0},
"success limit disabled": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, F, F},
{"2016-05-19T05:00:00Z", T, F, F, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", T, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), nil, &limitThree, 0},
"failure limit disabled": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, F, F},
{"2016-05-19T05:00:00Z", T, F, F, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", T, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), &limitThree, nil, 0},
"no limits reached because still active": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", F, F, F, F},
{"2016-05-19T05:00:00Z", F, F, F, F},
{"2016-05-19T06:00:00Z", F, F, F, F},
{"2016-05-19T07:00:00Z", F, F, F, F},
{"2016-05-19T08:00:00Z", F, F, F, F},
{"2016-05-19T09:00:00Z", F, F, F, F},
}, justBeforeTheHour(), &limitZero, &limitZero, 6},
}
for name, tc := range testCases {
sj := cronJob()
suspend := false
sj.Spec.ConcurrencyPolicy = f
sj.Spec.Suspend = &suspend
sj.Spec.Schedule = onTheHour
sj.Spec.SuccessfulJobsHistoryLimit = tc.successfulJobsHistoryLimit
sj.Spec.FailedJobsHistoryLimit = tc.failedJobsHistoryLimit
var (
job *batch.Job
err error
)
// Set consistent timestamps for the CronJob
if len(tc.jobSpecs) != 0 {
firstTime := startTimeStringToTime(tc.jobSpecs[0].StartTime)
lastTime := startTimeStringToTime(tc.jobSpecs[len(tc.jobSpecs)-1].StartTime)
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: firstTime}
sj.Status.LastScheduleTime = &metav1.Time{Time: lastTime}
} else {
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeTheHour()}
}
// Create jobs
js := []batch.Job{}
jobsToDelete := []string{}
sj.Status.Active = []v1.ObjectReference{}
for i, spec := range tc.jobSpecs {
job, err = getJobFromTemplate(&sj, startTimeStringToTime(spec.StartTime))
if err != nil {
t.Fatalf("%s: unexpected error creating a job from template: %v", name, err)
}
job.UID = types.UID(strconv.Itoa(i))
job.Namespace = ""
if spec.IsFinished {
var conditionType batch.JobConditionType
if spec.IsSuccessful {
conditionType = batch.JobComplete
} else {
conditionType = batch.JobFailed
}
condition := batch.JobCondition{Type: conditionType, Status: v1.ConditionTrue}
job.Status.Conditions = append(job.Status.Conditions, condition)
if spec.IsStillInActiveList {
sj.Status.Active = append(sj.Status.Active, v1.ObjectReference{UID: job.UID})
}
} else {
if spec.IsSuccessful || spec.IsStillInActiveList {
t.Errorf("%s: test setup error: this case makes no sense", name)
}
sj.Status.Active = append(sj.Status.Active, v1.ObjectReference{UID: job.UID})
}
js = append(js, *job)
if spec.ExpectDelete {
jobsToDelete = append(jobsToDelete, job.Name)
}
}
jc := &fakeJobControl{Job: job}
pc := &fakePodControl{}
sjc := &fakeSJControl{}
recorder := record.NewFakeRecorder(10)
cleanupFinishedJobs(&sj, js, jc, sjc, pc, recorder)
// Check we have actually deleted the correct jobs
if len(jc.DeleteJobName) != len(jobsToDelete) {
t.Errorf("%s: expected %d job deleted, actually %d", name, len(jobsToDelete), len(jc.DeleteJobName))
} else {
sort.Strings(jobsToDelete)
sort.Strings(jc.DeleteJobName)
for i, expectedJobName := range jobsToDelete {
if expectedJobName != jc.DeleteJobName[i] {
t.Errorf("%s: expected job %s deleted, actually %v -- %v vs %v", name, expectedJobName, jc.DeleteJobName[i], jc.DeleteJobName, jobsToDelete)
}
}
}
// Check for events
expectedEvents := len(jobsToDelete)
if len(recorder.Events) != expectedEvents {
t.Errorf("%s: expected %d event, actually %v", name, expectedEvents, len(recorder.Events))
}
// Check for jobs still in active list
numActive := 0
if len(sjc.Updates) != 0 {
numActive = len(sjc.Updates[len(sjc.Updates)-1].Status.Active)
}
if tc.expectActive != numActive {
t.Errorf("%s: expected Active size %d, got %d", name, tc.expectActive, numActive)
}
}
}
// TODO: simulation where the controller randomly doesn't run, and randomly has errors starting jobs or deleting jobs,
// but over time, all jobs run as expected (assuming Allow and no deadline).
// TestSyncOne_Status tests sj.UpdateStatus in SyncOne
// TestSyncOne_Status tests sj.UpdateStatus in syncOne
func TestSyncOne_Status(t *testing.T) {
finishedJob := newJob("1")
finishedJob.Status.Conditions = append(finishedJob.Status.Conditions, batch.JobCondition{Type: batch.JobComplete, Status: v1.ConditionTrue})
@ -443,7 +680,7 @@ func TestSyncOne_Status(t *testing.T) {
recorder := record.NewFakeRecorder(10)
// Run the code
SyncOne(sj, jobs, tc.now, jc, sjc, pc, recorder)
syncOne(&sj, jobs, tc.now, jc, sjc, pc, recorder)
// Status update happens once when ranging through job list, and another one if create jobs.
expectUpdates := 1


@ -234,11 +234,34 @@ func makeCreatedByRefJson(object runtime.Object) (string, error) {
return string(createdByRefJson), nil
}
func IsJobFinished(j *batch.Job) bool {
func getFinishedStatus(j *batch.Job) (bool, batch.JobConditionType) {
for _, c := range j.Status.Conditions {
if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == v1.ConditionTrue {
return true
return true, c.Type
}
}
return false
return false, ""
}
func IsJobFinished(j *batch.Job) bool {
isFinished, _ := getFinishedStatus(j)
return isFinished
}
// byJobStartTime sorts a list of jobs by start timestamp, using their names as a tie breaker.
type byJobStartTime []batch.Job
func (o byJobStartTime) Len() int { return len(o) }
func (o byJobStartTime) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o byJobStartTime) Less(i, j int) bool {
// Jobs with a nil StartTime sort last.
if o[j].Status.StartTime == nil {
return o[i].Status.StartTime != nil
}
// Guard o[i] as well so a nil StartTime is never dereferenced below.
if o[i].Status.StartTime == nil {
return false
}
// Equal start times fall back to name order so the sort is deterministic.
if (*o[i].Status.StartTime).Equal(*o[j].Status.StartTime) {
return o[i].Name < o[j].Name
}
return (*o[i].Status.StartTime).Before(*o[j].Status.StartTime)
}
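As a quick, hedged illustration of the ordering this comparator produces (the job names and timestamps below are made up): jobs with equal start times fall back to name order, and jobs with a nil `StartTime` sort last.

```go
// Sketch: expected ordering under byJobStartTime (hypothetical data).
// assumes: import ("sort"; "time"; metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"; batch "k8s.io/kubernetes/pkg/apis/batch")
t0 := metav1.NewTime(time.Date(2017, 2, 1, 10, 0, 0, 0, time.UTC))
jobs := []batch.Job{
	{ObjectMeta: metav1.ObjectMeta{Name: "b"}, Status: batch.JobStatus{StartTime: &t0}},
	{ObjectMeta: metav1.ObjectMeta{Name: "a"}, Status: batch.JobStatus{StartTime: &t0}},
	{ObjectMeta: metav1.ObjectMeta{Name: "c"}}, // nil StartTime sorts last
}
sort.Sort(byJobStartTime(jobs))
// resulting order: "a", "b" (name tie-break), then "c"
```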

View File

@ -16226,6 +16226,20 @@ func GetOpenAPIDefinitions(ref openapi.ReferenceCallback) map[string]openapi.Ope
Ref: ref("k8s.io/kubernetes/pkg/apis/batch/v2alpha1.JobTemplateSpec"),
},
},
"successfulJobsHistoryLimit": {
SchemaProps: spec.SchemaProps{
Description: "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.",
Type: []string{"integer"},
Format: "int32",
},
},
"failedJobsHistoryLimit": {
SchemaProps: spec.SchemaProps{
Description: "The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.",
Type: []string{"integer"},
Format: "int32",
},
},
},
Required: []string{"schedule", "jobTemplate"},
},
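These schema entries reflect that both limits are `*int32` on the Go side, so an explicit `0` ("retain none") can be told apart from "not specified". Below is a minimal sketch of how the new fields might be set on a v2alpha1 spec; the package alias `batchv2alpha1` and the values are illustrative, not taken from this PR.

```go
// assumes: import batchv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
keepSuccessful := int32(3) // keep the three most recent successful jobs
keepFailed := int32(0)     // explicit zero: delete failed jobs as soon as they finish
spec := batchv2alpha1.CronJobSpec{
	Schedule:                   "*/1 * * * ?",
	JobTemplate:                batchv2alpha1.JobTemplateSpec{}, // fill in the real pod template here
	SuccessfulJobsHistoryLimit: &keepSuccessful,
	FailedJobsHistoryLimit:     &keepFailed,
	// A nil limit means "not specified": that category's history is left uncleaned.
}
_ = spec
```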

View File

@ -52,6 +52,11 @@ var (
var _ = framework.KubeDescribe("CronJob", func() {
f := framework.NewDefaultGroupVersionFramework("cronjob", BatchV2Alpha1GroupVersion)
sleepCommand := []string{"sleep", "300"}
// Pod will complete instantly
successCommand := []string{"/bin/true"}
BeforeEach(func() {
framework.SkipIfMissingResource(f.ClientPool, CronJobGroupVersionResource, f.Namespace.Name)
})
@ -59,7 +64,8 @@ var _ = framework.KubeDescribe("CronJob", func() {
// multiple jobs running at once
It("should schedule multiple jobs concurrently", func() {
By("Creating a cronjob")
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batch.AllowConcurrent, true)
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batch.AllowConcurrent,
sleepCommand, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
Expect(err).NotTo(HaveOccurred())
@ -70,7 +76,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
By("Ensuring at least two running jobs exists by listing jobs explicitly")
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
activeJobs := filterActiveJobs(jobs)
activeJobs, _ := filterActiveJobs(jobs)
Expect(len(activeJobs) >= 2).To(BeTrue())
By("Removing cronjob")
@ -81,7 +87,8 @@ var _ = framework.KubeDescribe("CronJob", func() {
// suspended should not schedule jobs
It("should not schedule jobs when suspended [Slow]", func() {
By("Creating a suspended cronjob")
cronJob := newTestCronJob("suspended", "*/1 * * * ?", batch.AllowConcurrent, true)
cronJob := newTestCronJob("suspended", "*/1 * * * ?", batch.AllowConcurrent,
sleepCommand, nil)
cronJob.Spec.Suspend = newBool(true)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
Expect(err).NotTo(HaveOccurred())
@ -103,7 +110,8 @@ var _ = framework.KubeDescribe("CronJob", func() {
// only a single active job is allowed for ForbidConcurrent
It("should not schedule new jobs when ForbidConcurrent [Slow]", func() {
By("Creating a ForbidConcurrent cronjob")
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batch.ForbidConcurrent, true)
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batch.ForbidConcurrent,
sleepCommand, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
Expect(err).NotTo(HaveOccurred())
@ -119,7 +127,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
By("Ensuring exaclty one running job exists by listing jobs explicitly")
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
activeJobs := filterActiveJobs(jobs)
activeJobs, _ := filterActiveJobs(jobs)
Expect(activeJobs).To(HaveLen(1))
By("Ensuring no more jobs are scheduled")
@ -134,7 +142,8 @@ var _ = framework.KubeDescribe("CronJob", func() {
// only a single active job is allowed for ReplaceConcurrent
It("should replace jobs when ReplaceConcurrent", func() {
By("Creating a ReplaceConcurrent cronjob")
cronJob := newTestCronJob("replace", "*/1 * * * ?", batch.ReplaceConcurrent, true)
cronJob := newTestCronJob("replace", "*/1 * * * ?", batch.ReplaceConcurrent,
sleepCommand, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
Expect(err).NotTo(HaveOccurred())
@ -150,7 +159,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
By("Ensuring exaclty one running job exists by listing jobs explicitly")
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
activeJobs := filterActiveJobs(jobs)
activeJobs, _ := filterActiveJobs(jobs)
Expect(activeJobs).To(HaveLen(1))
By("Ensuring the job is replaced with a new one")
@ -165,7 +174,8 @@ var _ = framework.KubeDescribe("CronJob", func() {
// shouldn't give us unexpected warnings
It("should not emit unexpected warnings", func() {
By("Creating a cronjob")
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batch.AllowConcurrent, false)
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batch.AllowConcurrent,
nil, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
Expect(err).NotTo(HaveOccurred())
@ -187,7 +197,8 @@ var _ = framework.KubeDescribe("CronJob", func() {
// deleted jobs should be removed from the active list
It("should remove from active list jobs that have been deleted", func() {
By("Creating a ForbidConcurrent cronjob")
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batch.ForbidConcurrent, true)
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batch.ForbidConcurrent,
sleepCommand, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
Expect(err).NotTo(HaveOccurred())
@ -225,10 +236,49 @@ var _ = framework.KubeDescribe("CronJob", func() {
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
Expect(err).NotTo(HaveOccurred())
})
// cleanup of successful finished jobs, with limit of one successful job
It("should delete successful finished jobs with limit of one successful job", func() {
By("Creating a AllowConcurrent cronjob with custom history limits")
successLimit := int32(1)
cronJob := newTestCronJob("concurrent-limit", "*/1 * * * ?", batch.AllowConcurrent,
successCommand, &successLimit)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
Expect(err).NotTo(HaveOccurred())
// Job is going to complete instantly: do not check for an active job
// as we are most likely to miss it
By("Ensuring a finished job exists")
err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name)
Expect(err).NotTo(HaveOccurred())
By("Ensuring a finished job exists by listing jobs explicitly")
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
_, finishedJobs := filterActiveJobs(jobs)
Expect(finishedJobs).To(HaveLen(1))
// Job should get deleted when the next job finishes the next minute
By("Ensuring this job does not exist anymore")
err = waitForJobNotExist(f.ClientSet, f.Namespace.Name, finishedJobs[0])
Expect(err).NotTo(HaveOccurred())
By("Ensuring there is 1 finished job by listing jobs explicitly")
jobs, err = f.ClientSet.Batch().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
_, finishedJobs = filterActiveJobs(jobs)
Expect(finishedJobs).To(HaveLen(1))
By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
Expect(err).NotTo(HaveOccurred())
})
})
// newTestCronJob returns a CronJob configured for one of several testing behaviors.
func newTestCronJob(name, schedule string, concurrencyPolicy batch.ConcurrencyPolicy, sleep bool) *batch.CronJob {
func newTestCronJob(name, schedule string, concurrencyPolicy batch.ConcurrencyPolicy, command []string,
successfulJobsHistoryLimit *int32) *batch.CronJob {
parallelism := int32(1)
completions := int32(1)
sj := &batch.CronJob{
@ -271,8 +321,9 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batch.ConcurrencyPo
},
},
}
if sleep {
sj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Command = []string{"sleep", "300"}
sj.Spec.SuccessfulJobsHistoryLimit = successfulJobsHistoryLimit
if command != nil {
sj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Command = command
}
return sj
}
@ -319,6 +370,23 @@ func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty boo
})
}
// Wait for a job to not exist by listing jobs explicitly.
func waitForJobNotExist(c clientset.Interface, ns string, targetJob *batchv1.Job) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
jobs, err := c.Batch().Jobs(ns).List(metav1.ListOptions{})
if err != nil {
return false, err
}
_, finishedJobs := filterActiveJobs(jobs)
for _, job := range finishedJobs {
if targetJob.Namespace == job.Namespace && targetJob.Name == job.Name {
return false, nil
}
}
return true, nil
})
}
// Wait for a job to be replaced with a new one.
func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
@ -383,11 +451,13 @@ func checkNoEventWithReason(c clientset.Interface, ns, cronJobName string, reaso
return nil
}
func filterActiveJobs(jobs *batchv1.JobList) (active []*batchv1.Job) {
func filterActiveJobs(jobs *batchv1.JobList) (active []*batchv1.Job, finished []*batchv1.Job) {
for i := range jobs.Items {
j := jobs.Items[i]
if !job.IsJobFinished(&j) {
active = append(active, &j)
} else {
finished = append(finished, &j)
}
}
return