Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-02 16:29:21 +00:00)

Merge pull request #120998 from kannon92/job-ptr-update

convert pointer to ptr for job controller

Commit e314ec2c98
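
The conversion is mechanical: every typed helper from the deprecated k8s.io/utils/pointer package is swapped for its generic counterpart in k8s.io/utils/ptr. A minimal, self-contained sketch of the mapping (variable names here are illustrative, not taken from the patch):

```go
package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// pointer.Int32(2) -> ptr.To[int32](2); the explicit type argument is
	// needed for an untyped constant, which would otherwise infer int.
	completions := ptr.To[int32](2)

	// With an already-typed value the type argument can be inferred:
	// pointer.Bool(tc.suspend) -> ptr.To(tc.suspend).
	suspend := false
	suspendPtr := ptr.To(suspend)

	// pointer.Int64Deref(p, def) -> ptr.Deref(p, def), for any element type.
	var grace *int64
	fmt.Println(ptr.Deref(grace, 0)) // 0

	// pointer.Int32Equal(a, b) -> ptr.Equal(a, b): true when both are nil
	// or both point at equal values.
	fmt.Println(ptr.Equal(completions, ptr.To[int32](2))) // true
	fmt.Println(*suspendPtr)                              // false
}
```

Both shapes of ptr.To appear in the diff below: an explicit type argument (ptr.To[int32](2)) where the argument is an untyped constant, and plain ptr.To(v) where the argument already carries the right type.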
@@ -26,7 +26,7 @@ import (
 	"k8s.io/klog/v2"
 	apipod "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/utils/clock"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )
 
 type backoffRecord struct {
@@ -207,7 +207,7 @@ func getFinishTimeFromPodReadyFalseCondition(p *v1.Pod) *time.Time {
 
 func getFinishTimeFromDeletionTimestamp(p *v1.Pod) *time.Time {
 	if p.DeletionTimestamp != nil {
-		finishTime := p.DeletionTimestamp.Time.Add(-time.Duration(pointer.Int64Deref(p.DeletionGracePeriodSeconds, 0)) * time.Second)
+		finishTime := p.DeletionTimestamp.Time.Add(-time.Duration(ptr.Deref(p.DeletionGracePeriodSeconds, 0)) * time.Second)
 		return &finishTime
 	}
 	return nil
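
Worth noting on the hunk above: a single generic ptr.Deref replaces every typed *Deref helper, because the element type is inferred from the pointer argument. A local re-implementation (an illustration matching the documented behavior of k8s.io/utils/ptr.Deref, not the controller's code) makes that visible:

```go
package main

import (
	"fmt"
	"time"
)

// Deref mirrors the documented behavior of k8s.io/utils/ptr.Deref:
// return *p when p is non-nil, otherwise the supplied default.
func Deref[T any](p *T, def T) T {
	if p != nil {
		return *p
	}
	return def
}

func main() {
	var graceSeconds *int64             // e.g. pod.DeletionGracePeriodSeconds
	var remaining *time.Duration        // e.g. minRemainingTimePerIndex
	fmt.Println(Deref(graceSeconds, 0)) // T inferred as int64, prints 0
	fmt.Println(Deref(remaining, 0))    // T inferred as time.Duration, prints 0s
}
```

The same one-liner therefore serves both the int64 dereference above and the time.Duration dereference later in job_controller.go, where pointer.DurationDeref is retired.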
@@ -25,7 +25,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/klog/v2/ktesting"
 	clocktesting "k8s.io/utils/clock/testing"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )
 
 func TestNewBackoffRecord(t *testing.T) {
@@ -287,7 +287,7 @@ func TestGetFinishedTime(t *testing.T) {
 				},
 				ObjectMeta: metav1.ObjectMeta{
 					DeletionTimestamp: &metav1.Time{Time: defaultTestTime},
-					DeletionGracePeriodSeconds: pointer.Int64(30),
+					DeletionGracePeriodSeconds: ptr.To[int64](30),
 				},
 			},
 			wantFinishTime: defaultTestTimeMinus30s,
@@ -31,7 +31,7 @@ import (
 	"k8s.io/klog/v2/ktesting"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/features"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )
 
 const noIndex = "-"
@@ -209,7 +209,7 @@ func TestCalculateSucceededIndexes(t *testing.T) {
 					CompletedIndexes: tc.prevSucceeded,
 				},
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(tc.completions),
+					Completions: ptr.To(tc.completions),
 				},
 			}
 			pods := hollowPodsWithIndexPhase(tc.pods)
@@ -238,8 +238,8 @@ func TestIsIndexFailed(t *testing.T) {
 		"failed pod exceeding backoffLimitPerIndex, when backoffLimitPerIndex=0": {
 			job: batch.Job{
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					BackoffLimitPerIndex: pointer.Int32(0),
+					Completions: ptr.To[int32](2),
+					BackoffLimitPerIndex: ptr.To[int32](0),
 				},
 			},
 			pod: buildPod().indexFailureCount("0").phase(v1.PodFailed).index("0").trackingFinalizer().Pod,
@@ -248,8 +248,8 @@ func TestIsIndexFailed(t *testing.T) {
 		"failed pod exceeding backoffLimitPerIndex, when backoffLimitPerIndex=1": {
 			job: batch.Job{
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					BackoffLimitPerIndex: pointer.Int32(1),
+					Completions: ptr.To[int32](2),
+					BackoffLimitPerIndex: ptr.To[int32](1),
 				},
 			},
 			pod: buildPod().indexFailureCount("1").phase(v1.PodFailed).index("1").trackingFinalizer().Pod,
@@ -259,8 +259,8 @@ func TestIsIndexFailed(t *testing.T) {
 			enableJobPodFailurePolicy: true,
 			job: batch.Job{
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					BackoffLimitPerIndex: pointer.Int32(1),
+					Completions: ptr.To[int32](2),
+					BackoffLimitPerIndex: ptr.To[int32](1),
 					PodFailurePolicy: &batch.PodFailurePolicy{
 						Rules: []batch.PodFailurePolicyRule{
 							{
@@ -292,8 +292,8 @@ func TestIsIndexFailed(t *testing.T) {
 			enableJobPodFailurePolicy: false,
 			job: batch.Job{
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					BackoffLimitPerIndex: pointer.Int32(1),
+					Completions: ptr.To[int32](2),
+					BackoffLimitPerIndex: ptr.To[int32](1),
 					PodFailurePolicy: &batch.PodFailurePolicy{
 						Rules: []batch.PodFailurePolicyRule{
 							{
@@ -346,8 +346,8 @@ func TestCalculateFailedIndexes(t *testing.T) {
 		"one new index failed": {
 			job: batch.Job{
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					BackoffLimitPerIndex: pointer.Int32(1),
+					Completions: ptr.To[int32](2),
+					BackoffLimitPerIndex: ptr.To[int32](1),
 				},
 			},
 			pods: []*v1.Pod{
@@ -359,8 +359,8 @@ func TestCalculateFailedIndexes(t *testing.T) {
 		"pod without finalizer is ignored": {
 			job: batch.Job{
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					BackoffLimitPerIndex: pointer.Int32(0),
+					Completions: ptr.To[int32](2),
+					BackoffLimitPerIndex: ptr.To[int32](0),
 				},
 			},
 			pods: []*v1.Pod{
@@ -371,8 +371,8 @@ func TestCalculateFailedIndexes(t *testing.T) {
 		"pod outside completions is ignored": {
 			job: batch.Job{
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					BackoffLimitPerIndex: pointer.Int32(0),
+					Completions: ptr.To[int32](2),
+					BackoffLimitPerIndex: ptr.To[int32](0),
 				},
 			},
 			pods: []*v1.Pod{
@@ -383,11 +383,11 @@ func TestCalculateFailedIndexes(t *testing.T) {
 		"extend the failed indexes": {
 			job: batch.Job{
 				Status: batch.JobStatus{
-					FailedIndexes: pointer.String("0"),
+					FailedIndexes: ptr.To("0"),
 				},
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					BackoffLimitPerIndex: pointer.Int32(0),
+					Completions: ptr.To[int32](2),
+					BackoffLimitPerIndex: ptr.To[int32](0),
 				},
 			},
 			pods: []*v1.Pod{
@@ -398,11 +398,11 @@ func TestCalculateFailedIndexes(t *testing.T) {
 		"prev failed indexes empty": {
 			job: batch.Job{
 				Status: batch.JobStatus{
-					FailedIndexes: pointer.String(""),
+					FailedIndexes: ptr.To(""),
 				},
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					BackoffLimitPerIndex: pointer.Int32(0),
+					Completions: ptr.To[int32](2),
+					BackoffLimitPerIndex: ptr.To[int32](0),
 				},
 			},
 			pods: []*v1.Pod{
@@ -413,11 +413,11 @@ func TestCalculateFailedIndexes(t *testing.T) {
 		"prev failed indexes outside the completions": {
 			job: batch.Job{
 				Status: batch.JobStatus{
-					FailedIndexes: pointer.String("9"),
+					FailedIndexes: ptr.To("9"),
 				},
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					BackoffLimitPerIndex: pointer.Int32(0),
+					Completions: ptr.To[int32](2),
+					BackoffLimitPerIndex: ptr.To[int32](0),
 				},
 			},
 			pods: []*v1.Pod{
@@ -449,8 +449,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) {
 		"failed pods are kept corresponding to non-failed indexes are kept": {
 			job: batch.Job{
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(3),
-					BackoffLimitPerIndex: pointer.Int32(1),
+					Completions: ptr.To[int32](3),
+					BackoffLimitPerIndex: ptr.To[int32](1),
 				},
 			},
 			pods: []*v1.Pod{
@@ -463,8 +463,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) {
 		"failed pod without finalizer; the pod's deletion is not delayed as it already started": {
 			job: batch.Job{
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					BackoffLimitPerIndex: pointer.Int32(0),
+					Completions: ptr.To[int32](2),
+					BackoffLimitPerIndex: ptr.To[int32](0),
 				},
 			},
 			pods: []*v1.Pod{
@@ -475,8 +475,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) {
 		"failed pod with expected finalizer removal; the pod's deletion is not delayed as it already started": {
 			job: batch.Job{
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					BackoffLimitPerIndex: pointer.Int32(0),
+					Completions: ptr.To[int32](2),
+					BackoffLimitPerIndex: ptr.To[int32](0),
 				},
 			},
 			pods: []*v1.Pod{
@@ -488,8 +488,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) {
 		"failed pod with index outside of completions; the pod's deletion is not delayed": {
 			job: batch.Job{
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					BackoffLimitPerIndex: pointer.Int32(0),
+					Completions: ptr.To[int32](2),
+					BackoffLimitPerIndex: ptr.To[int32](0),
 				},
 			},
 			pods: []*v1.Pod{
@@ -500,8 +500,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) {
 		"failed pod for active index; the pod's deletion is not delayed as it is already replaced": {
 			job: batch.Job{
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					BackoffLimitPerIndex: pointer.Int32(1),
+					Completions: ptr.To[int32](2),
+					BackoffLimitPerIndex: ptr.To[int32](1),
 				},
 			},
 			pods: []*v1.Pod{
@@ -513,8 +513,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) {
 		"failed pod for succeeded index; the pod's deletion is not delayed as it is already replaced": {
 			job: batch.Job{
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					BackoffLimitPerIndex: pointer.Int32(1),
+					Completions: ptr.To[int32](2),
+					BackoffLimitPerIndex: ptr.To[int32](1),
 				},
 			},
 			pods: []*v1.Pod{
@@ -526,8 +526,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) {
 		"multiple failed pods for index with different failure count; only the pod with highest failure count is kept": {
 			job: batch.Job{
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					BackoffLimitPerIndex: pointer.Int32(4),
+					Completions: ptr.To[int32](2),
+					BackoffLimitPerIndex: ptr.To[int32](4),
 				},
 			},
 			pods: []*v1.Pod{
@@ -540,8 +540,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) {
 		"multiple failed pods for index with different finish times; only the last failed pod is kept": {
 			job: batch.Job{
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					BackoffLimitPerIndex: pointer.Int32(4),
+					Completions: ptr.To[int32](2),
+					BackoffLimitPerIndex: ptr.To[int32](4),
 				},
 			},
 			pods: []*v1.Pod{
@@ -53,7 +53,7 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/utils/clock"
 	"k8s.io/utils/integer"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )
 
 // controllerKind contains the schema.GroupVersionKind for this controller type.
@@ -783,7 +783,7 @@ func (jm *Controller) syncJob(ctx context.Context, key string) (rErr error) {
 	}
 	var terminating *int32
 	if feature.DefaultFeatureGate.Enabled(features.JobPodReplacementPolicy) {
-		terminating = pointer.Int32(controller.CountTerminatingPods(pods))
+		terminating = ptr.To(controller.CountTerminatingPods(pods))
 	}
 	jobCtx := &syncJobCtx{
 		job: &job,
@@ -799,7 +799,7 @@ func (jm *Controller) syncJob(ctx context.Context, key string) (rErr error) {
 	failed := job.Status.Failed + int32(nonIgnoredFailedPodsCount(jobCtx, newFailedPods)) + int32(len(jobCtx.uncounted.failed))
 	var ready *int32
 	if feature.DefaultFeatureGate.Enabled(features.JobReadyPods) {
-		ready = pointer.Int32(countReadyPods(jobCtx.activePods))
+		ready = ptr.To(countReadyPods(jobCtx.activePods))
 	}
 
 	// Job first start. Set StartTime only if the job is not in the suspended state.
@@ -918,11 +918,11 @@ func (jm *Controller) syncJob(ctx context.Context, key string) (rErr error) {
 		}
 	}
 
-	needsStatusUpdate := suspendCondChanged || active != job.Status.Active || !pointer.Int32Equal(ready, job.Status.Ready)
+	needsStatusUpdate := suspendCondChanged || active != job.Status.Active || !ptr.Equal(ready, job.Status.Ready)
 	job.Status.Active = active
 	job.Status.Ready = ready
 	job.Status.Terminating = jobCtx.terminating
-	needsStatusUpdate = needsStatusUpdate || !pointer.Int32Equal(job.Status.Terminating, jobCtx.terminating)
+	needsStatusUpdate = needsStatusUpdate || !ptr.Equal(job.Status.Terminating, jobCtx.terminating)
 	err = jm.trackJobStatusAndRemoveFinalizers(ctx, jobCtx, needsStatusUpdate)
 	if err != nil {
 		return fmt.Errorf("tracking status: %w", err)
@@ -1106,9 +1106,9 @@ func (jm *Controller) trackJobStatusAndRemoveFinalizers(ctx context.Context, job
 		jobCtx.job.Status.CompletedIndexes = succeededIndexesStr
 		var failedIndexesStr *string
 		if jobCtx.failedIndexes != nil {
-			failedIndexesStr = pointer.String(jobCtx.failedIndexes.String())
+			failedIndexesStr = ptr.To(jobCtx.failedIndexes.String())
 		}
-		if !pointer.StringEqual(jobCtx.job.Status.FailedIndexes, failedIndexesStr) {
+		if !ptr.Equal(jobCtx.job.Status.FailedIndexes, failedIndexesStr) {
 			jobCtx.job.Status.FailedIndexes = failedIndexesStr
 			needsFlush = true
 		}
@@ -1642,7 +1642,7 @@ func (jm *Controller) getPodCreationInfoForIndependentIndexes(logger klog.Logger
 	if len(indexesToAddNow) > 0 {
 		return indexesToAddNow, 0
 	}
-	return indexesToAddNow, pointer.DurationDeref(minRemainingTimePerIndex, 0)
+	return indexesToAddNow, ptr.Deref(minRemainingTimePerIndex, 0)
 }
 
 // activePodsForRemoval returns Pods that should be removed because there
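
The needsStatusUpdate checks above lean on ptr.Equal being nil-aware, just as pointer.Int32Equal and pointer.StringEqual were, but one generic function now covers both the *int32 and *string comparisons. A re-implementation for illustration (matching the documented semantics of k8s.io/utils/ptr.Equal, not copied from the library):

```go
package main

import "fmt"

// Equal reports whether both pointers are nil, or both are non-nil and
// point at equal values — the nil-aware comparison the controller needs
// when deciding whether Ready/Terminating/FailedIndexes actually changed.
func Equal[T comparable](a, b *T) bool {
	if (a == nil) != (b == nil) {
		return false
	}
	if a == nil {
		return true
	}
	return *a == *b
}

func main() {
	two := int32(2)
	fmt.Println(Equal[int32](nil, nil)) // true: no change between two unset fields
	fmt.Println(Equal(&two, &two))      // true: same value
	fmt.Println(Equal(&two, nil))       // false: set vs. unset triggers an update
}
```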
@@ -59,7 +59,7 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/utils/clock"
 	clocktesting "k8s.io/utils/clock/testing"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )
 
 var realClock = &clock.RealClock{}
@@ -326,7 +326,7 @@ func TestControllerSyncJob(t *testing.T) {
 			activePods: 3,
 			readyPods: 2,
 			expectedActive: 3,
-			expectedReady: pointer.Int32(2),
+			expectedReady: ptr.To[int32](2),
 			jobReadyPodsEnabled: true,
 		},
 		"WQ job: correct # of pods": {
@@ -356,7 +356,7 @@ func TestControllerSyncJob(t *testing.T) {
 			podReplacementPolicy: podReplacementPolicy(batch.Failed),
 			jobPodReplacementPolicy: true,
 			terminatingPods: 1,
-			expectedTerminating: pointer.Int32(1),
+			expectedTerminating: ptr.To[int32](1),
 			expectedPodPatches: 2,
 			expectedDeletions: 1,
 			expectedFailed: 1,
@@ -370,7 +370,7 @@ func TestControllerSyncJob(t *testing.T) {
 			podReplacementPolicy: podReplacementPolicy(batch.TerminatingOrFailed),
 			jobPodReplacementPolicy: true,
 			terminatingPods: 1,
-			expectedTerminating: pointer.Int32(1),
+			expectedTerminating: ptr.To[int32](1),
 			expectedActive: 1,
 			expectedPodPatches: 2,
 			expectedFailed: 2,
@@ -637,7 +637,7 @@ func TestControllerSyncJob(t *testing.T) {
 			podReplacementPolicy: podReplacementPolicy(batch.Failed),
 			jobPodReplacementPolicy: true,
 			terminatingPods: 1,
-			expectedTerminating: pointer.Int32(1),
+			expectedTerminating: ptr.To[int32](1),
 		},
 		"indexed job with some pods deleted, podReplacementPolicy TerminatingOrFailed": {
 			parallelism: 2,
@@ -650,7 +650,7 @@ func TestControllerSyncJob(t *testing.T) {
 			podReplacementPolicy: podReplacementPolicy(batch.TerminatingOrFailed),
 			jobPodReplacementPolicy: true,
 			terminatingPods: 1,
-			expectedTerminating: pointer.Int32(1),
+			expectedTerminating: ptr.To[int32](1),
 			expectedPodPatches: 1,
 		},
 		"indexed job completed": {
@@ -887,7 +887,7 @@ func TestControllerSyncJob(t *testing.T) {
 
 			// job & pods setup
 			job := newJob(tc.parallelism, tc.completions, tc.backoffLimit, tc.completionMode)
-			job.Spec.Suspend = pointer.Bool(tc.suspend)
+			job.Spec.Suspend = ptr.To(tc.suspend)
 			if tc.jobPodReplacementPolicy {
 				job.Spec.PodReplacementPolicy = tc.podReplacementPolicy
 			}
@@ -1482,7 +1482,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 			job: batch.Job{
 				Spec: batch.JobSpec{
 					CompletionMode: &indexedCompletion,
-					Completions: pointer.Int32(6),
+					Completions: ptr.To[int32](6),
 				},
 				Status: batch.JobStatus{
 					Active: 1,
@@ -1510,8 +1510,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 			job: batch.Job{
 				Spec: batch.JobSpec{
 					CompletionMode: &indexedCompletion,
-					Completions: pointer.Int32(2),
-					Parallelism: pointer.Int32(2),
+					Completions: ptr.To[int32](2),
+					Parallelism: ptr.To[int32](2),
 				},
 				Status: batch.JobStatus{
 					Active: 2,
@@ -1537,8 +1537,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 			job: batch.Job{
 				Spec: batch.JobSpec{
 					CompletionMode: &indexedCompletion,
-					Completions: pointer.Int32(2),
-					Parallelism: pointer.Int32(2),
+					Completions: ptr.To[int32](2),
+					Parallelism: ptr.To[int32](2),
 				},
 				Status: batch.JobStatus{
 					Active: 2,
@@ -1565,7 +1565,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 			job: batch.Job{
 				Spec: batch.JobSpec{
 					CompletionMode: &indexedCompletion,
-					Completions: pointer.Int32(6),
+					Completions: ptr.To[int32](6),
 				},
 				Status: batch.JobStatus{
 					Active: 1,
@@ -1598,7 +1598,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 			job: batch.Job{
 				Spec: batch.JobSpec{
 					CompletionMode: &indexedCompletion,
-					Completions: pointer.Int32(7),
+					Completions: ptr.To[int32](7),
 				},
 				Status: batch.JobStatus{
 					Failed: 2,
@@ -1680,7 +1680,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 			job: batch.Job{
 				Spec: batch.JobSpec{
 					CompletionMode: &indexedCompletion,
-					Completions: pointer.Int32(501),
+					Completions: ptr.To[int32](501),
 				},
 			},
 			pods: func() []*v1.Pod {
@@ -1703,8 +1703,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 		"pod flips from failed to succeeded": {
 			job: batch.Job{
 				Spec: batch.JobSpec{
-					Completions: pointer.Int32(2),
-					Parallelism: pointer.Int32(2),
+					Completions: ptr.To[int32](2),
+					Parallelism: ptr.To[int32](2),
 				},
 				Status: batch.JobStatus{
 					UncountedTerminatedPods: &batch.UncountedTerminatedPods{
@@ -1732,8 +1732,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 			job: batch.Job{
 				Spec: batch.JobSpec{
 					CompletionMode: &indexedCompletion,
-					Completions: pointer.Int32(6),
-					BackoffLimitPerIndex: pointer.Int32(1),
+					Completions: ptr.To[int32](6),
+					BackoffLimitPerIndex: ptr.To[int32](1),
 				},
 			},
 			pods: []*v1.Pod{
@@ -1742,7 +1742,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 			wantStatusUpdates: []batch.JobStatus{
 				{
 					UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
-					FailedIndexes: pointer.String(""),
+					FailedIndexes: ptr.To(""),
 				},
 			},
 		},
@@ -1751,8 +1751,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 			job: batch.Job{
 				Spec: batch.JobSpec{
 					CompletionMode: &indexedCompletion,
-					Completions: pointer.Int32(6),
-					BackoffLimitPerIndex: pointer.Int32(1),
+					Completions: ptr.To[int32](6),
+					BackoffLimitPerIndex: ptr.To[int32](1),
 				},
 				Status: batch.JobStatus{
 					Active: 1,
@@ -1769,13 +1769,13 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 					UncountedTerminatedPods: &batch.UncountedTerminatedPods{
 						Failed: []types.UID{"a1"},
 					},
-					FailedIndexes: pointer.String(""),
+					FailedIndexes: ptr.To(""),
 				},
 				{
 					Active: 1,
 					Failed: 1,
 					UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
-					FailedIndexes: pointer.String(""),
+					FailedIndexes: ptr.To(""),
 				},
 			},
 			wantFailedPodsMetric: 1,
@@ -1785,8 +1785,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 			job: batch.Job{
 				Spec: batch.JobSpec{
 					CompletionMode: &indexedCompletion,
-					Completions: pointer.Int32(6),
-					BackoffLimitPerIndex: pointer.Int32(1),
+					Completions: ptr.To[int32](6),
+					BackoffLimitPerIndex: ptr.To[int32](1),
 				},
 			},
 			pods: []*v1.Pod{
@@ -1795,14 +1795,14 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 			wantRmFinalizers: 1,
 			wantStatusUpdates: []batch.JobStatus{
 				{
-					FailedIndexes: pointer.String("1"),
+					FailedIndexes: ptr.To("1"),
 					UncountedTerminatedPods: &batch.UncountedTerminatedPods{
 						Failed: []types.UID{"a"},
 					},
 				},
 				{
 					Failed: 1,
-					FailedIndexes: pointer.String("1"),
+					FailedIndexes: ptr.To("1"),
 					UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
 				},
 			},
@@ -1974,7 +1974,7 @@ func TestSyncJobPastDeadline(t *testing.T) {
 			// job & pods setup
 			job := newJob(tc.parallelism, tc.completions, tc.backoffLimit, batch.NonIndexedCompletion)
 			job.Spec.ActiveDeadlineSeconds = &tc.activeDeadlineSeconds
-			job.Spec.Suspend = pointer.Bool(tc.suspend)
+			job.Spec.Suspend = ptr.To(tc.suspend)
 			start := metav1.Unix(metav1.Now().Time.Unix()-tc.startTime, 0)
 			job.Status.StartTime = &start
 			sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
@@ -2071,7 +2071,7 @@ func TestPastDeadlineJobFinished(t *testing.T) {
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
 			job := newJobWithName(tc.jobName, 1, 1, 6, batch.NonIndexedCompletion)
-			job.Spec.ActiveDeadlineSeconds = pointer.Int64(1)
+			job.Spec.ActiveDeadlineSeconds = ptr.To[int64](1)
 			if tc.setStartTime {
 				start := metav1.NewTime(fakeClock.Now())
 				job.Status.StartTime = &start
@@ -2129,7 +2129,7 @@ func TestSingleJobFailedCondition(t *testing.T) {
 	}
 
 	job := newJob(1, 1, 6, batch.NonIndexedCompletion)
-	job.Spec.ActiveDeadlineSeconds = pointer.Int64(10)
+	job.Spec.ActiveDeadlineSeconds = ptr.To[int64](10)
 	start := metav1.Unix(metav1.Now().Time.Unix()-15, 0)
 	job.Status.StartTime = &start
 	job.Status.Conditions = append(job.Status.Conditions, *newCondition(batch.JobFailed, v1.ConditionFalse, "DeadlineExceeded", "Job was active longer than specified deadline", realClock.Now()))
@@ -2266,15 +2266,15 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(6),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](6),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: []batch.PodFailurePolicyRule{
 						{
 							Action: batch.PodFailurePolicyActionIgnore,
 							OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{
-								ContainerName: pointer.String("main-container"),
+								ContainerName: ptr.To("main-container"),
 								Operator: batch.PodFailurePolicyOnExitCodesOpIn,
 								Values: []int32{1, 2, 3},
 							},
@@ -2282,7 +2282,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 						{
 							Action: batch.PodFailurePolicyActionFailJob,
 							OnExitCodes: &batch.PodFailurePolicyOnExitCodesRequirement{
-								ContainerName: pointer.String("main-container"),
+								ContainerName: ptr.To("main-container"),
 								Operator: batch.PodFailurePolicyOnExitCodesOpIn,
 								Values: []int32{5, 6, 7},
 							},
@@ -2330,9 +2330,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(6),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](6),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: onExitCodeRules,
 				},
@@ -2368,9 +2368,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(6),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](6),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: onExitCodeRules,
 				},
@@ -2413,9 +2413,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(6),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](6),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: onExitCodeRules,
 				},
@@ -2468,9 +2468,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(6),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](6),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: onExitCodeRules,
 				},
@@ -2523,9 +2523,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(6),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](6),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: onExitCodeRules,
 				},
@@ -2562,9 +2562,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(2),
-				Completions: pointer.Int32(2),
-				BackoffLimit: pointer.Int32(6),
+				Parallelism: ptr.To[int32](2),
+				Completions: ptr.To[int32](2),
+				BackoffLimit: ptr.To[int32](6),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: onExitCodeRules,
 				},
@@ -2613,9 +2613,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 				Selector: validSelector,
 				Template: validTemplate,
 				CompletionMode: &indexedCompletionMode,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(6),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](6),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: onExitCodeRules,
 				},
@@ -2658,9 +2658,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(6),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](6),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: []batch.PodFailurePolicyRule{
 						{
@@ -2711,9 +2711,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(6),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](6),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: []batch.PodFailurePolicyRule{
 						{
@@ -2758,9 +2758,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(6),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](6),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: onExitCodeRules,
 				},
@@ -2813,9 +2813,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(0),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](0),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: onExitCodeRules,
 				},
@@ -2859,9 +2859,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(0),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](0),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: onExitCodeRules,
 				},
@@ -2896,9 +2896,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(0),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](0),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: onExitCodeRules,
 				},
@@ -2940,9 +2940,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(6),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](6),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: []batch.PodFailurePolicyRule{
 						{
@@ -2993,9 +2993,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(6),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](6),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: []batch.PodFailurePolicyRule{
 						{
@@ -3059,9 +3059,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(0),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](0),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: []batch.PodFailurePolicyRule{
 						{
@@ -3103,9 +3103,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(0),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](0),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: []batch.PodFailurePolicyRule{
 						{
@@ -3150,9 +3150,9 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			Spec: batch.JobSpec{
 				Selector: validSelector,
 				Template: validTemplate,
-				Parallelism: pointer.Int32(1),
-				Completions: pointer.Int32(1),
-				BackoffLimit: pointer.Int32(6),
+				Parallelism: ptr.To[int32](1),
+				Completions: ptr.To[int32](1),
+				BackoffLimit: ptr.To[int32](6),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: []batch.PodFailurePolicyRule{
 						{
@@ -3199,10 +3199,10 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			TypeMeta: metav1.TypeMeta{Kind: "Job"},
 			ObjectMeta: validObjectMeta,
 			Spec: batch.JobSpec{
-				Parallelism: pointer.Int32(1),
+				Parallelism: ptr.To[int32](1),
 				Selector: validSelector,
 				Template: validTemplate,
-				BackoffLimit: pointer.Int32(0),
+				BackoffLimit: ptr.To[int32](0),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: []batch.PodFailurePolicyRule{
 						{
@@ -3233,10 +3233,10 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			TypeMeta: metav1.TypeMeta{Kind: "Job"},
 			ObjectMeta: validObjectMeta,
 			Spec: batch.JobSpec{
-				Parallelism: pointer.Int32(1),
+				Parallelism: ptr.To[int32](1),
 				Selector: validSelector,
 				Template: validTemplate,
-				BackoffLimit: pointer.Int32(0),
+				BackoffLimit: ptr.To[int32](0),
 				PodFailurePolicy: &batch.PodFailurePolicy{
 					Rules: []batch.PodFailurePolicyRule{
 						{
@@ -3325,8 +3325,8 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			if actual.Status.Failed != tc.wantStatusFailed {
 				t.Errorf("unexpected number of failed pods. Expected %d, saw %d\n", tc.wantStatusFailed, actual.Status.Failed)
 			}
-			if pointer.Int32Deref(actual.Status.Terminating, 0) != pointer.Int32Deref(tc.wantStatusTerminating, 0) {
-				t.Errorf("unexpected number of terminating pods. Expected %d, saw %d\n", pointer.Int32Deref(tc.wantStatusTerminating, 0), pointer.Int32Deref(actual.Status.Terminating, 0))
+			if ptr.Deref(actual.Status.Terminating, 0) != ptr.Deref(tc.wantStatusTerminating, 0) {
+				t.Errorf("unexpected number of terminating pods. Expected %d, saw %d\n", ptr.Deref(tc.wantStatusTerminating, 0), ptr.Deref(actual.Status.Terminating, 0))
 			}
 		})
 	}
@ -3371,11 +3371,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
|
|||||||
Spec: batch.JobSpec{
|
Spec: batch.JobSpec{
|
||||||
Selector: validSelector,
|
Selector: validSelector,
|
||||||
Template: validTemplate,
|
Template: validTemplate,
|
||||||
Parallelism: pointer.Int32(2),
|
Parallelism: ptr.To[int32](2),
|
||||||
Completions: pointer.Int32(2),
|
Completions: ptr.To[int32](2),
|
||||||
BackoffLimit: pointer.Int32(math.MaxInt32),
|
BackoffLimit: ptr.To[int32](math.MaxInt32),
|
||||||
CompletionMode: completionModePtr(batch.IndexedCompletion),
|
CompletionMode: completionModePtr(batch.IndexedCompletion),
|
||||||
BackoffLimitPerIndex: pointer.Int32(1),
|
BackoffLimitPerIndex: ptr.To[int32](1),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
pods: []v1.Pod{
|
pods: []v1.Pod{
|
||||||
@ -3387,7 +3387,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
|
|||||||
Failed: 1,
|
Failed: 1,
|
||||||
Succeeded: 2,
|
Succeeded: 2,
|
||||||
CompletedIndexes: "0,1",
|
CompletedIndexes: "0,1",
|
||||||
FailedIndexes: pointer.String(""),
|
FailedIndexes: ptr.To(""),
|
||||||
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
|
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
|
||||||
Conditions: []batch.JobCondition{
|
Conditions: []batch.JobCondition{
|
||||||
{
|
{
|
||||||
@ -3405,11 +3405,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
|
|||||||
Spec: batch.JobSpec{
|
Spec: batch.JobSpec{
|
||||||
Selector: validSelector,
|
Selector: validSelector,
|
||||||
Template: validTemplate,
|
Template: validTemplate,
|
||||||
Parallelism: pointer.Int32(2),
|
Parallelism: ptr.To[int32](2),
|
||||||
Completions: pointer.Int32(2),
|
Completions: ptr.To[int32](2),
|
||||||
BackoffLimit: pointer.Int32(math.MaxInt32),
|
BackoffLimit: ptr.To[int32](math.MaxInt32),
|
||||||
CompletionMode: completionModePtr(batch.IndexedCompletion),
|
CompletionMode: completionModePtr(batch.IndexedCompletion),
|
||||||
BackoffLimitPerIndex: pointer.Int32(1),
|
BackoffLimitPerIndex: ptr.To[int32](1),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
pods: []v1.Pod{
|
pods: []v1.Pod{
|
||||||
@ -3418,7 +3418,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
|
|||||||
wantStatus: batch.JobStatus{
|
wantStatus: batch.JobStatus{
|
||||||
Active: 2,
|
Active: 2,
|
||||||
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
|
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
|
||||||
FailedIndexes: pointer.String(""),
|
FailedIndexes: ptr.To(""),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"single failed pod replaced already": {
|
"single failed pod replaced already": {
|
||||||
@ -3429,11 +3429,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
|
|||||||
Spec: batch.JobSpec{
|
Spec: batch.JobSpec{
|
||||||
Selector: validSelector,
|
Selector: validSelector,
|
||||||
Template: validTemplate,
|
Template: validTemplate,
|
||||||
Parallelism: pointer.Int32(2),
|
Parallelism: ptr.To[int32](2),
|
||||||
Completions: pointer.Int32(2),
|
Completions: ptr.To[int32](2),
|
||||||
BackoffLimit: pointer.Int32(math.MaxInt32),
|
BackoffLimit: ptr.To[int32](math.MaxInt32),
|
||||||
CompletionMode: completionModePtr(batch.IndexedCompletion),
|
CompletionMode: completionModePtr(batch.IndexedCompletion),
|
||||||
BackoffLimitPerIndex: pointer.Int32(1),
|
BackoffLimitPerIndex: ptr.To[int32](1),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
pods: []v1.Pod{
|
pods: []v1.Pod{
|
||||||
@ -3444,7 +3444,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
|
|||||||
Active: 2,
|
Active: 2,
|
||||||
Failed: 1,
|
Failed: 1,
|
||||||
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
|
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
|
||||||
FailedIndexes: pointer.String(""),
|
FailedIndexes: ptr.To(""),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"single failed index due to exceeding the backoff limit per index, the job continues": {
|
"single failed index due to exceeding the backoff limit per index, the job continues": {
|
||||||
@ -3455,11 +3455,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
|
|||||||
Spec: batch.JobSpec{
|
Spec: batch.JobSpec{
|
||||||
Selector: validSelector,
|
Selector: validSelector,
|
||||||
Template: validTemplate,
|
Template: validTemplate,
|
||||||
Parallelism: pointer.Int32(2),
|
Parallelism: ptr.To[int32](2),
|
||||||
Completions: pointer.Int32(2),
|
Completions: ptr.To[int32](2),
|
||||||
BackoffLimit: pointer.Int32(math.MaxInt32),
|
BackoffLimit: ptr.To[int32](math.MaxInt32),
|
||||||
CompletionMode: completionModePtr(batch.IndexedCompletion),
|
CompletionMode: completionModePtr(batch.IndexedCompletion),
|
||||||
BackoffLimitPerIndex: pointer.Int32(1),
|
BackoffLimitPerIndex: ptr.To[int32](1),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
pods: []v1.Pod{
|
pods: []v1.Pod{
|
||||||
@ -3468,7 +3468,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
|
|||||||
wantStatus: batch.JobStatus{
|
wantStatus: batch.JobStatus{
|
||||||
Active: 1,
|
Active: 1,
|
||||||
Failed: 1,
|
Failed: 1,
|
||||||
FailedIndexes: pointer.String("0"),
|
FailedIndexes: ptr.To("0"),
|
||||||
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
|
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -3481,11 +3481,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
|
|||||||
Spec: batch.JobSpec{
|
Spec: batch.JobSpec{
|
||||||
Selector: validSelector,
|
Selector: validSelector,
|
||||||
Template: validTemplate,
|
Template: validTemplate,
|
||||||
Parallelism: pointer.Int32(2),
|
Parallelism: ptr.To[int32](2),
|
||||||
Completions: pointer.Int32(2),
|
Completions: ptr.To[int32](2),
|
||||||
BackoffLimit: pointer.Int32(math.MaxInt32),
|
BackoffLimit: ptr.To[int32](math.MaxInt32),
|
||||||
CompletionMode: completionModePtr(batch.IndexedCompletion),
|
CompletionMode: completionModePtr(batch.IndexedCompletion),
|
||||||
BackoffLimitPerIndex: pointer.Int32(1),
|
BackoffLimitPerIndex: ptr.To[int32](1),
|
||||||
PodFailurePolicy: &batch.PodFailurePolicy{
|
PodFailurePolicy: &batch.PodFailurePolicy{
|
||||||
Rules: []batch.PodFailurePolicyRule{
|
Rules: []batch.PodFailurePolicyRule{
|
||||||
{
|
{
|
||||||
@ -3516,7 +3516,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
|
|||||||
wantStatus: batch.JobStatus{
|
wantStatus: batch.JobStatus{
|
||||||
Active: 1,
|
Active: 1,
|
||||||
Failed: 1,
|
Failed: 1,
|
||||||
FailedIndexes: pointer.String("0"),
|
FailedIndexes: ptr.To("0"),
|
||||||
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
|
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -3529,11 +3529,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
|
|||||||
Spec: batch.JobSpec{
|
Spec: batch.JobSpec{
|
||||||
Selector: validSelector,
|
Selector: validSelector,
|
||||||
Template: validTemplate,
|
Template: validTemplate,
|
||||||
Parallelism: pointer.Int32(2),
|
Parallelism: ptr.To[int32](2),
|
||||||
Completions: pointer.Int32(2),
|
Completions: ptr.To[int32](2),
|
||||||
BackoffLimit: pointer.Int32(6),
|
BackoffLimit: ptr.To[int32](6),
|
||||||
CompletionMode: completionModePtr(batch.IndexedCompletion),
|
CompletionMode: completionModePtr(batch.IndexedCompletion),
|
||||||
BackoffLimitPerIndex: pointer.Int32(1),
|
BackoffLimitPerIndex: ptr.To[int32](1),
|
||||||
PodFailurePolicy: &batch.PodFailurePolicy{
|
PodFailurePolicy: &batch.PodFailurePolicy{
|
||||||
Rules: []batch.PodFailurePolicyRule{
|
Rules: []batch.PodFailurePolicyRule{
|
||||||
{
|
{
|
||||||
@ -3565,7 +3565,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
|
|||||||
wantStatus: batch.JobStatus{
|
wantStatus: batch.JobStatus{
|
||||||
Active: 0,
|
Active: 0,
|
||||||
Failed: 1,
|
Failed: 1,
|
||||||
FailedIndexes: pointer.String(""),
|
FailedIndexes: ptr.To(""),
|
||||||
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
|
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
|
||||||
Conditions: []batch.JobCondition{
|
Conditions: []batch.JobCondition{
|
||||||
{
|
{
|
||||||
@@ -3592,11 +3592,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
 				Spec: batch.JobSpec{
 					Selector:             validSelector,
 					Template:             validTemplate,
-					Parallelism:          pointer.Int32(2),
-					Completions:          pointer.Int32(2),
-					BackoffLimit:         pointer.Int32(6),
+					Parallelism:          ptr.To[int32](2),
+					Completions:          ptr.To[int32](2),
+					BackoffLimit:         ptr.To[int32](6),
 					CompletionMode:       completionModePtr(batch.IndexedCompletion),
-					BackoffLimitPerIndex: pointer.Int32(1),
+					BackoffLimitPerIndex: ptr.To[int32](1),
 					PodFailurePolicy: &batch.PodFailurePolicy{
 						Rules: []batch.PodFailurePolicyRule{
 							{
@@ -3628,7 +3628,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
 			wantStatus: batch.JobStatus{
 				Active:                  2,
 				Failed:                  0,
-				FailedIndexes:           pointer.String(""),
+				FailedIndexes:           ptr.To(""),
 				UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
 			},
 		},
@@ -3640,11 +3640,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
 				Spec: batch.JobSpec{
 					Selector:             validSelector,
 					Template:             validTemplate,
-					Parallelism:          pointer.Int32(2),
-					Completions:          pointer.Int32(2),
-					BackoffLimit:         pointer.Int32(1),
+					Parallelism:          ptr.To[int32](2),
+					Completions:          ptr.To[int32](2),
+					BackoffLimit:         ptr.To[int32](1),
 					CompletionMode:       completionModePtr(batch.IndexedCompletion),
-					BackoffLimitPerIndex: pointer.Int32(1),
+					BackoffLimitPerIndex: ptr.To[int32](1),
 				},
 			},
 			pods: []v1.Pod{
@@ -3654,7 +3654,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
 			wantStatus: batch.JobStatus{
 				Failed:                  2,
 				Succeeded:               0,
-				FailedIndexes:           pointer.String(""),
+				FailedIndexes:           ptr.To(""),
 				UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
 				Conditions: []batch.JobCondition{
 					{
@@ -3674,11 +3674,11 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
 				Spec: batch.JobSpec{
 					Selector:             validSelector,
 					Template:             validTemplate,
-					Parallelism:          pointer.Int32(2),
-					Completions:          pointer.Int32(2),
-					BackoffLimit:         pointer.Int32(math.MaxInt32),
+					Parallelism:          ptr.To[int32](2),
+					Completions:          ptr.To[int32](2),
+					BackoffLimit:         ptr.To[int32](math.MaxInt32),
 					CompletionMode:       completionModePtr(batch.IndexedCompletion),
-					BackoffLimitPerIndex: pointer.Int32(1),
+					BackoffLimitPerIndex: ptr.To[int32](1),
 				},
 			},
 			pods: []v1.Pod{
@@ -3688,7 +3688,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
 			wantStatus: batch.JobStatus{
 				Failed:                  1,
 				Succeeded:               1,
-				FailedIndexes:           pointer.String("0"),
+				FailedIndexes:           ptr.To("0"),
 				CompletedIndexes:        "1",
 				UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
 				Conditions: []batch.JobCondition{
@@ -3709,12 +3709,12 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
 				Spec: batch.JobSpec{
 					Selector:             validSelector,
 					Template:             validTemplate,
-					Parallelism:          pointer.Int32(4),
-					Completions:          pointer.Int32(4),
-					BackoffLimit:         pointer.Int32(math.MaxInt32),
+					Parallelism:          ptr.To[int32](4),
+					Completions:          ptr.To[int32](4),
+					BackoffLimit:         ptr.To[int32](math.MaxInt32),
 					CompletionMode:       completionModePtr(batch.IndexedCompletion),
-					BackoffLimitPerIndex: pointer.Int32(1),
-					MaxFailedIndexes:     pointer.Int32(1),
+					BackoffLimitPerIndex: ptr.To[int32](1),
+					MaxFailedIndexes:     ptr.To[int32](1),
 				},
 			},
 			pods: []v1.Pod{
@@ -3726,7 +3726,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
 			wantStatus: batch.JobStatus{
 				Failed:                  3,
 				Succeeded:               1,
-				FailedIndexes:           pointer.String("0,2"),
+				FailedIndexes:           ptr.To("0,2"),
 				CompletedIndexes:        "1",
 				UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
 				Conditions: []batch.JobCondition{
@@ -3747,14 +3747,14 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
 				Spec: batch.JobSpec{
 					Selector:             validSelector,
 					Template:             validTemplate,
-					Parallelism:          pointer.Int32(3),
-					Completions:          pointer.Int32(3),
-					BackoffLimit:         pointer.Int32(math.MaxInt32),
+					Parallelism:          ptr.To[int32](3),
+					Completions:          ptr.To[int32](3),
+					BackoffLimit:         ptr.To[int32](math.MaxInt32),
 					CompletionMode:       completionModePtr(batch.IndexedCompletion),
-					BackoffLimitPerIndex: pointer.Int32(1),
+					BackoffLimitPerIndex: ptr.To[int32](1),
 				},
 				Status: batch.JobStatus{
-					FailedIndexes:    pointer.String("0"),
+					FailedIndexes:    ptr.To("0"),
 					CompletedIndexes: "1",
 				},
 			},
@@ -3871,7 +3871,7 @@ func TestUpdateJobRequeue(t *testing.T) {
 		"spec update": {
 			oldJob: newJob(1, 1, 1, batch.IndexedCompletion),
 			updateFn: func(job *batch.Job) {
-				job.Spec.Suspend = pointer.Bool(false)
+				job.Spec.Suspend = ptr.To(false)
 				job.Generation++
 			},
 			wantRequeuedImmediately: true,
@@ -4861,7 +4861,7 @@ func TestJobBackoffForOnFailure(t *testing.T) {
 			// job & pods setup
 			job := newJob(tc.parallelism, tc.completions, tc.backoffLimit, batch.NonIndexedCompletion)
 			job.Spec.Template.Spec.RestartPolicy = v1.RestartPolicyOnFailure
-			job.Spec.Suspend = pointer.Bool(tc.suspend)
+			job.Spec.Suspend = ptr.To(tc.suspend)
 			sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
 			podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
 			for i, pod := range newPodList(len(tc.restartCounts), tc.podPhase, job) {
@@ -27,7 +27,7 @@ import (
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
 	"k8s.io/kubernetes/pkg/features"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )
 
 func TestMatchPodFailurePolicy(t *testing.T) {
@@ -83,7 +83,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 					},
 				},
 			},
-			wantJobFailureMessage: pointer.String("Container main-container for pod default/mypod failed with exit code 2 matching FailJob rule at index 1"),
+			wantJobFailureMessage: ptr.To("Container main-container for pod default/mypod failed with exit code 2 matching FailJob rule at index 1"),
 			wantCountFailed:       true,
 			wantAction:            &failJob,
 		},
@@ -161,7 +161,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 					},
 				},
 			},
-			wantJobFailureMessage: pointer.String("Container main-container for pod default/mypod failed with exit code 2 matching FailJob rule at index 1"),
+			wantJobFailureMessage: ptr.To("Container main-container for pod default/mypod failed with exit code 2 matching FailJob rule at index 1"),
 			wantCountFailed:       true,
 			wantAction:            &failJob,
 		},
@@ -244,7 +244,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 					},
 				},
 			},
-			wantJobFailureMessage: pointer.String("Container main-container for pod default/mypod failed with exit code 2 matching FailJob rule at index 0"),
+			wantJobFailureMessage: ptr.To("Container main-container for pod default/mypod failed with exit code 2 matching FailJob rule at index 0"),
 			wantCountFailed:       true,
 			wantAction:            &failJob,
 		},
@@ -395,7 +395,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 					},
 				},
 			},
-			wantJobFailureMessage: pointer.String("Container main-container for pod default/mypod failed with exit code 1 matching FailJob rule at index 0"),
+			wantJobFailureMessage: ptr.To("Container main-container for pod default/mypod failed with exit code 1 matching FailJob rule at index 0"),
 			wantCountFailed:       true,
 			wantAction:            &failJob,
 		},
@@ -434,7 +434,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 					},
 				},
 			},
-			wantJobFailureMessage: pointer.String("Container main-container for pod default/mypod failed with exit code 6 matching FailJob rule at index 1"),
+			wantJobFailureMessage: ptr.To("Container main-container for pod default/mypod failed with exit code 6 matching FailJob rule at index 1"),
 			wantCountFailed:       true,
 			wantAction:            &failJob,
 		},
@@ -736,7 +736,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
 					},
 				},
 			},
-			wantJobFailureMessage: pointer.String("Pod default/mypod has condition DisruptionTarget matching FailJob rule at index 0"),
+			wantJobFailureMessage: ptr.To("Pod default/mypod has condition DisruptionTarget matching FailJob rule at index 0"),
 			wantCountFailed:       true,
 			wantAction:            &failJob,
 		},
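For readers unfamiliar with the helper change this PR applies throughout, here is a minimal, hypothetical sketch (not part of the PR's diff) of how the generic k8s.io/utils/ptr helpers replace the type-specific ones from the deprecated k8s.io/utils/pointer package:

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// pointer.Int32(2) becomes ptr.To[int32](2); the explicit type
	// parameter keeps the result typed as *int32 instead of *int.
	completions := ptr.To[int32](2)

	// pointer.String("0") becomes ptr.To("0"); here type inference
	// suffices, so no type parameter is needed.
	failedIndexes := ptr.To("0")

	// pointer.Int64Deref(p, 0) becomes ptr.Deref(p, 0), which returns
	// *p, or the given default when p is nil.
	var grace *int64
	fmt.Println(*completions, *failedIndexes, ptr.Deref(grace, 0))
}

One generic ptr.To replaces the whole family of pointer.Int32/pointer.String/pointer.Bool constructors, which is why the diff above is almost entirely mechanical.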