mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-03 17:30:00 +00:00)

Make sync Job test tables more readable

And use t.Run to improve the debugging experience.

Change-Id: Ia91adbfe9c419cc640abe0efe287f5b9ab715e87
parent 6a4a0cdb91
commit dbf9e3b2d3
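The commit converts two positional test tables to keyed struct literals and wraps each case in t.Run. For context, here is a minimal, self-contained sketch of that pattern (illustrative names, not code from this repository): keyed fields let a case list only the values it cares about, with everything else defaulting to its zero value, and t.Run turns each map entry into a named subtest, so a failure reports the case name and a single case can be rerun with `go test -run 'TestAdd/both_positive'`.

package job_test

import "testing"

// Hypothetical example, not part of the commit: the table-driven
// pattern this change applies, in miniature.
func TestAdd(t *testing.T) {
    testCases := map[string]struct {
        a, b int
        want int
    }{
        // Keyed literals read as documentation; omitted fields are zero.
        "both positive": {a: 2, b: 3, want: 5},
        "zero values":   {want: 0},
    }
    for name, tc := range testCases {
        // t.Run registers a named subtest per case, so failures print
        // e.g. "TestAdd/both_positive" instead of a bare table index.
        t.Run(name, func(t *testing.T) {
            if got := tc.a + tc.b; got != tc.want {
                t.Errorf("Add(%d, %d) = %d, want %d", tc.a, tc.b, got, tc.want)
            }
        })
    }
}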
@@ -163,206 +163,307 @@ func TestControllerSyncJob(t *testing.T) {
 		expectedConditionReason string
 	}{
 		"job start": {
-			2, 5, 6, false, 0,
-			nil, true, 0, 0, 0, 0,
-			2, 0, 2, 0, 0, nil, "",
+			parallelism: 2,
+			completions: 5,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			expectedCreations: 2,
+			expectedActive: 2,
 		},
 		"WQ job start": {
-			2, -1, 6, false, 0,
-			nil, true, 0, 0, 0, 0,
-			2, 0, 2, 0, 0, nil, "",
+			parallelism: 2,
+			completions: -1,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			expectedCreations: 2,
+			expectedActive: 2,
 		},
 		"pending pods": {
-			2, 5, 6, false, 0,
-			nil, true, 2, 0, 0, 0,
-			0, 0, 2, 0, 0, nil, "",
+			parallelism: 2,
+			completions: 5,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			pendingPods: 2,
+			expectedActive: 2,
 		},
 		"correct # of pods": {
-			2, 5, 6, false, 0,
-			nil, true, 0, 2, 0, 0,
-			0, 0, 2, 0, 0, nil, "",
+			parallelism: 2,
+			completions: 5,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			activePods: 2,
+			expectedActive: 2,
 		},
 		"WQ job: correct # of pods": {
-			2, -1, 6, false, 0,
-			nil, true, 0, 2, 0, 0,
-			0, 0, 2, 0, 0, nil, "",
+			parallelism: 2,
+			completions: -1,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			activePods: 2,
+			expectedActive: 2,
 		},
 		"too few active pods": {
-			2, 5, 6, false, 0,
-			nil, true, 0, 1, 1, 0,
-			1, 0, 2, 1, 0, nil, "",
+			parallelism: 2,
+			completions: 5,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			activePods: 1,
+			succeededPods: 1,
+			expectedCreations: 1,
+			expectedActive: 2,
+			expectedSucceeded: 1,
 		},
 		"too few active pods with a dynamic job": {
-			2, -1, 6, false, 0,
-			nil, true, 0, 1, 0, 0,
-			1, 0, 2, 0, 0, nil, "",
+			parallelism: 2,
+			completions: -1,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			activePods: 1,
+			expectedCreations: 1,
+			expectedActive: 2,
 		},
 		"too few active pods, with controller error": {
-			2, 5, 6, false, 0,
-			fmt.Errorf("fake error"), true, 0, 1, 1, 0,
-			1, 0, 1, 1, 0, nil, "",
+			parallelism: 2,
+			completions: 5,
+			backoffLimit: 6,
+			podControllerError: fmt.Errorf("fake error"),
+			jobKeyForget: true,
+			activePods: 1,
+			succeededPods: 1,
+			expectedCreations: 1,
+			expectedActive: 1,
+			expectedSucceeded: 1,
 		},
 		"too many active pods": {
-			2, 5, 6, false, 0,
-			nil, true, 0, 3, 0, 0,
-			0, 1, 2, 0, 0, nil, "",
+			parallelism: 2,
+			completions: 5,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			activePods: 3,
+			expectedDeletions: 1,
+			expectedActive: 2,
 		},
 		"too many active pods, with controller error": {
-			2, 5, 6, false, 0,
-			fmt.Errorf("fake error"), true, 0, 3, 0, 0,
-			0, 1, 3, 0, 0, nil, "",
+			parallelism: 2,
+			completions: 5,
+			backoffLimit: 6,
+			podControllerError: fmt.Errorf("fake error"),
+			jobKeyForget: true,
+			activePods: 3,
+			expectedDeletions: 1,
+			expectedActive: 3,
 		},
 		"failed + succeed pods: reset backoff delay": {
-			2, 5, 6, false, 0,
-			fmt.Errorf("fake error"), true, 0, 1, 1, 1,
-			1, 0, 1, 1, 1, nil, "",
+			parallelism: 2,
+			completions: 5,
+			backoffLimit: 6,
+			podControllerError: fmt.Errorf("fake error"),
+			jobKeyForget: true,
+			activePods: 1,
+			succeededPods: 1,
+			failedPods: 1,
+			expectedCreations: 1,
+			expectedActive: 1,
+			expectedSucceeded: 1,
+			expectedFailed: 1,
 		},
 		"only new failed pod": {
-			2, 5, 6, false, 0,
-			fmt.Errorf("fake error"), false, 0, 1, 0, 1,
-			1, 0, 1, 0, 1, nil, "",
+			parallelism: 2,
+			completions: 5,
+			backoffLimit: 6,
+			podControllerError: fmt.Errorf("fake error"),
+			activePods: 1,
+			failedPods: 1,
+			expectedCreations: 1,
+			expectedActive: 1,
+			expectedFailed: 1,
 		},
 		"job finish": {
-			2, 5, 6, false, 0,
-			nil, true, 0, 0, 5, 0,
-			0, 0, 0, 5, 0, nil, "",
+			parallelism: 2,
+			completions: 5,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			succeededPods: 5,
+			expectedSucceeded: 5,
 		},
 		"WQ job finishing": {
-			2, -1, 6, false, 0,
-			nil, true, 0, 1, 1, 0,
-			0, 0, 1, 1, 0, nil, "",
+			parallelism: 2,
+			completions: -1,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			activePods: 1,
+			succeededPods: 1,
+			expectedActive: 1,
+			expectedSucceeded: 1,
 		},
 		"WQ job all finished": {
-			2, -1, 6, false, 0,
-			nil, true, 0, 0, 2, 0,
-			0, 0, 0, 2, 0, &jobConditionComplete, "",
+			parallelism: 2,
+			completions: -1,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			succeededPods: 2,
+			expectedSucceeded: 2,
+			expectedCondition: &jobConditionComplete,
 		},
 		"WQ job all finished despite one failure": {
-			2, -1, 6, false, 0,
-			nil, true, 0, 0, 1, 1,
-			0, 0, 0, 1, 1, &jobConditionComplete, "",
+			parallelism: 2,
+			completions: -1,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			succeededPods: 1,
+			failedPods: 1,
+			expectedSucceeded: 1,
+			expectedFailed: 1,
+			expectedCondition: &jobConditionComplete,
 		},
 		"more active pods than completions": {
-			2, 5, 6, false, 0,
-			nil, true, 0, 10, 0, 0,
-			0, 8, 2, 0, 0, nil, "",
+			parallelism: 2,
+			completions: 5,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			activePods: 10,
+			expectedDeletions: 8,
+			expectedActive: 2,
 		},
 		"status change": {
-			2, 5, 6, false, 0,
-			nil, true, 0, 2, 2, 0,
-			0, 0, 2, 2, 0, nil, "",
+			parallelism: 2,
+			completions: 5,
+			backoffLimit: 6,
+			jobKeyForget: true,
+			activePods: 2,
+			succeededPods: 2,
+			expectedActive: 2,
+			expectedSucceeded: 2,
 		},
 		"deleting job": {
-			2, 5, 6, true, 0,
-			nil, true, 1, 1, 1, 0,
-			0, 0, 2, 1, 0, nil, "",
+			parallelism: 2,
+			completions: 5,
+			backoffLimit: 6,
+			deleting: true,
+			jobKeyForget: true,
+			pendingPods: 1,
+			activePods: 1,
+			succeededPods: 1,
+			expectedActive: 2,
+			expectedSucceeded: 1,
 		},
 		"limited pods": {
-			100, 200, 6, false, 10,
-			nil, true, 0, 0, 0, 0,
-			10, 0, 10, 0, 0, nil, "",
+			parallelism: 100,
+			completions: 200,
+			backoffLimit: 6,
+			podLimit: 10,
+			jobKeyForget: true,
+			expectedCreations: 10,
+			expectedActive: 10,
 		},
 		"too many job failures": {
-			2, 5, 0, true, 0,
-			nil, true, 0, 0, 0, 1,
-			0, 0, 0, 0, 1, &jobConditionFailed, "BackoffLimitExceeded",
+			parallelism: 2,
+			completions: 5,
+			deleting: true,
+			jobKeyForget: true,
+			failedPods: 1,
+			expectedFailed: 1,
+			expectedCondition: &jobConditionFailed,
+			expectedConditionReason: "BackoffLimitExceeded",
 		},
 	}
 
 	for name, tc := range testCases {
-		// job manager setup
-		clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-		manager, sharedInformerFactory := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
-		fakePodControl := controller.FakePodControl{Err: tc.podControllerError, CreateLimit: tc.podLimit}
-		manager.podControl = &fakePodControl
-		manager.podStoreSynced = alwaysReady
-		manager.jobStoreSynced = alwaysReady
-		var actual *batch.Job
-		manager.updateHandler = func(job *batch.Job) error {
-			actual = job
-			return nil
-		}
-
-		// job & pods setup
-		job := newJob(tc.parallelism, tc.completions, tc.backoffLimit)
-		if tc.deleting {
-			now := metav1.Now()
-			job.DeletionTimestamp = &now
-		}
-		sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
-		podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
-		setPodsStatuses(podIndexer, job, tc.pendingPods, tc.activePods, tc.succeededPods, tc.failedPods)
-
-		// run
-		forget, err := manager.syncJob(testutil.GetKey(job, t))
-
-		// We need requeue syncJob task if podController error
-		if tc.podControllerError != nil {
-			if err == nil {
-				t.Errorf("%s: Syncing jobs would return error when podController exception", name)
-			}
-		} else {
-			if err != nil && (tc.podLimit == 0 || fakePodControl.CreateCallCount < tc.podLimit) {
-				t.Errorf("%s: unexpected error when syncing jobs %v", name, err)
-			}
-		}
-		if forget != tc.jobKeyForget {
-			t.Errorf("%s: unexpected forget value. Expected %v, saw %v\n", name, tc.jobKeyForget, forget)
-		}
-		// validate created/deleted pods
-		if int32(len(fakePodControl.Templates)) != tc.expectedCreations {
-			t.Errorf("%s: unexpected number of creates. Expected %d, saw %d\n", name, tc.expectedCreations, len(fakePodControl.Templates))
-		}
-		if int32(len(fakePodControl.DeletePodName)) != tc.expectedDeletions {
-			t.Errorf("%s: unexpected number of deletes. Expected %d, saw %d\n", name, tc.expectedDeletions, len(fakePodControl.DeletePodName))
-		}
-		// Each create should have an accompanying ControllerRef.
-		if len(fakePodControl.ControllerRefs) != int(tc.expectedCreations) {
-			t.Errorf("%s: unexpected number of ControllerRefs. Expected %d, saw %d\n", name, tc.expectedCreations, len(fakePodControl.ControllerRefs))
-		}
-		// Make sure the ControllerRefs are correct.
-		for _, controllerRef := range fakePodControl.ControllerRefs {
-			if got, want := controllerRef.APIVersion, "batch/v1"; got != want {
-				t.Errorf("controllerRef.APIVersion = %q, want %q", got, want)
-			}
-			if got, want := controllerRef.Kind, "Job"; got != want {
-				t.Errorf("controllerRef.Kind = %q, want %q", got, want)
-			}
-			if got, want := controllerRef.Name, job.Name; got != want {
-				t.Errorf("controllerRef.Name = %q, want %q", got, want)
-			}
-			if got, want := controllerRef.UID, job.UID; got != want {
-				t.Errorf("controllerRef.UID = %q, want %q", got, want)
-			}
-			if controllerRef.Controller == nil || *controllerRef.Controller != true {
-				t.Errorf("controllerRef.Controller is not set to true")
-			}
-		}
-		// validate status
-		if actual.Status.Active != tc.expectedActive {
-			t.Errorf("%s: unexpected number of active pods. Expected %d, saw %d\n", name, tc.expectedActive, actual.Status.Active)
-		}
-		if actual.Status.Succeeded != tc.expectedSucceeded {
-			t.Errorf("%s: unexpected number of succeeded pods. Expected %d, saw %d\n", name, tc.expectedSucceeded, actual.Status.Succeeded)
-		}
-		if actual.Status.Failed != tc.expectedFailed {
-			t.Errorf("%s: unexpected number of failed pods. Expected %d, saw %d\n", name, tc.expectedFailed, actual.Status.Failed)
-		}
-		if actual.Status.StartTime == nil {
-			t.Errorf("%s: .status.startTime was not set", name)
-		}
-		// validate conditions
-		if tc.expectedCondition != nil && !getCondition(actual, *tc.expectedCondition, tc.expectedConditionReason) {
-			t.Errorf("%s: expected completion condition. Got %#v", name, actual.Status.Conditions)
-		}
-		// validate slow start
-		expectedLimit := 0
-		for pass := uint8(0); expectedLimit <= tc.podLimit; pass++ {
-			expectedLimit += controller.SlowStartInitialBatchSize << pass
-		}
-		if tc.podLimit > 0 && fakePodControl.CreateCallCount > expectedLimit {
-			t.Errorf("%s: Unexpected number of create calls. Expected <= %d, saw %d\n", name, fakePodControl.CreateLimit*2, fakePodControl.CreateCallCount)
-		}
+		t.Run(name, func(t *testing.T) {
+			// job manager setup
+			clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
+			manager, sharedInformerFactory := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+			fakePodControl := controller.FakePodControl{Err: tc.podControllerError, CreateLimit: tc.podLimit}
+			manager.podControl = &fakePodControl
+			manager.podStoreSynced = alwaysReady
+			manager.jobStoreSynced = alwaysReady
+			var actual *batch.Job
+			manager.updateHandler = func(job *batch.Job) error {
+				actual = job
+				return nil
+			}
+
+			// job & pods setup
+			job := newJob(tc.parallelism, tc.completions, tc.backoffLimit)
+			if tc.deleting {
+				now := metav1.Now()
+				job.DeletionTimestamp = &now
+			}
+			sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
+			podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
+			setPodsStatuses(podIndexer, job, tc.pendingPods, tc.activePods, tc.succeededPods, tc.failedPods)
+
+			// run
+			forget, err := manager.syncJob(testutil.GetKey(job, t))
+
+			// We need requeue syncJob task if podController error
+			if tc.podControllerError != nil {
+				if err == nil {
+					t.Errorf("Syncing jobs would return error when podController exception")
+				}
+			} else {
+				if err != nil && (tc.podLimit == 0 || fakePodControl.CreateCallCount < tc.podLimit) {
+					t.Errorf("Unexpected error when syncing jobs: %v", err)
+				}
+			}
+			if forget != tc.jobKeyForget {
+				t.Errorf("Unexpected forget value. Expected %v, saw %v\n", tc.jobKeyForget, forget)
+			}
+			// validate created/deleted pods
+			if int32(len(fakePodControl.Templates)) != tc.expectedCreations {
+				t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", tc.expectedCreations, len(fakePodControl.Templates))
+			}
+			if int32(len(fakePodControl.DeletePodName)) != tc.expectedDeletions {
+				t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", tc.expectedDeletions, len(fakePodControl.DeletePodName))
+			}
+			// Each create should have an accompanying ControllerRef.
+			if len(fakePodControl.ControllerRefs) != int(tc.expectedCreations) {
+				t.Errorf("Unexpected number of ControllerRefs. Expected %d, saw %d\n", tc.expectedCreations, len(fakePodControl.ControllerRefs))
+			}
+			// Make sure the ControllerRefs are correct.
+			for _, controllerRef := range fakePodControl.ControllerRefs {
+				if got, want := controllerRef.APIVersion, "batch/v1"; got != want {
+					t.Errorf("controllerRef.APIVersion = %q, want %q", got, want)
+				}
+				if got, want := controllerRef.Kind, "Job"; got != want {
+					t.Errorf("controllerRef.Kind = %q, want %q", got, want)
+				}
+				if got, want := controllerRef.Name, job.Name; got != want {
+					t.Errorf("controllerRef.Name = %q, want %q", got, want)
+				}
+				if got, want := controllerRef.UID, job.UID; got != want {
+					t.Errorf("controllerRef.UID = %q, want %q", got, want)
+				}
+				if controllerRef.Controller == nil || *controllerRef.Controller != true {
+					t.Errorf("controllerRef.Controller is not set to true")
+				}
+			}
+			// validate status
+			if actual.Status.Active != tc.expectedActive {
+				t.Errorf("Unexpected number of active pods. Expected %d, saw %d\n", tc.expectedActive, actual.Status.Active)
+			}
+			if actual.Status.Succeeded != tc.expectedSucceeded {
+				t.Errorf("Unexpected number of succeeded pods. Expected %d, saw %d\n", tc.expectedSucceeded, actual.Status.Succeeded)
+			}
+			if actual.Status.Failed != tc.expectedFailed {
+				t.Errorf("Unexpected number of failed pods. Expected %d, saw %d\n", tc.expectedFailed, actual.Status.Failed)
+			}
+			if actual.Status.StartTime == nil {
+				t.Error("Missing .status.startTime")
+			}
+			// validate conditions
+			if tc.expectedCondition != nil && !getCondition(actual, *tc.expectedCondition, tc.expectedConditionReason) {
+				t.Errorf("Expected completion condition. Got %#v", actual.Status.Conditions)
+			}
+			// validate slow start
+			expectedLimit := 0
+			for pass := uint8(0); expectedLimit <= tc.podLimit; pass++ {
+				expectedLimit += controller.SlowStartInitialBatchSize << pass
+			}
+			if tc.podLimit > 0 && fakePodControl.CreateCallCount > expectedLimit {
+				t.Errorf("Unexpected number of create calls. Expected <= %d, saw %d\n", fakePodControl.CreateLimit*2, fakePodControl.CreateCallCount)
+			}
+		})
 	}
 }
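The "validate slow start" block at the end of this hunk accounts for the controller's batched pod creation: create calls are issued in batches starting at controller.SlowStartInitialBatchSize and doubling on each pass, so when a create limit is in play the test accepts any call count up to the sum of those doubling batches. A standalone sketch of that arithmetic (assuming an initial batch size of 1; the authoritative value lives in the pkg/controller constant):

package main

import "fmt"

// maxCreateCalls reproduces the doubling schedule the test's
// "validate slow start" loop assumes: batches of size
// initial, 2*initial, 4*initial, ... until the create limit is covered.
// initialBatchSize of 1 mirrors controller.SlowStartInitialBatchSize
// (assumed value here; check pkg/controller for the real constant).
func maxCreateCalls(initialBatchSize, podLimit int) int {
    expectedLimit := 0
    for pass := uint8(0); expectedLimit <= podLimit; pass++ {
        expectedLimit += initialBatchSize << pass // 1, 2, 4, 8, ...
    }
    return expectedLimit
}

func main() {
    // With podLimit = 10 (the "limited pods" case) the batches are
    // 1+2+4+8 = 15, so up to 15 create calls are tolerated.
    fmt.Println(maxCreateCalls(1, 10)) // prints 15
}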
@@ -389,82 +490,109 @@ func TestSyncJobPastDeadline(t *testing.T) {
 		expectedConditionReason string
 	}{
 		"activeDeadlineSeconds less than single pod execution": {
-			1, 1, 10, 15, 6,
-			1, 0, 0,
-			true, 1, 0, 0, 1, "DeadlineExceeded",
+			parallelism: 1,
+			completions: 1,
+			activeDeadlineSeconds: 10,
+			startTime: 15,
+			backoffLimit: 6,
+			activePods: 1,
+			expectedForGetKey: true,
+			expectedDeletions: 1,
+			expectedFailed: 1,
+			expectedConditionReason: "DeadlineExceeded",
 		},
 		"activeDeadlineSeconds bigger than single pod execution": {
-			1, 2, 10, 15, 6,
-			1, 1, 0,
-			true, 1, 0, 1, 1, "DeadlineExceeded",
+			parallelism: 1,
+			completions: 2,
+			activeDeadlineSeconds: 10,
+			startTime: 15,
+			backoffLimit: 6,
+			activePods: 1,
+			succeededPods: 1,
+			expectedForGetKey: true,
+			expectedDeletions: 1,
+			expectedSucceeded: 1,
+			expectedFailed: 1,
+			expectedConditionReason: "DeadlineExceeded",
 		},
 		"activeDeadlineSeconds times-out before any pod starts": {
-			1, 1, 10, 10, 6,
-			0, 0, 0,
-			true, 0, 0, 0, 0, "DeadlineExceeded",
+			parallelism: 1,
+			completions: 1,
+			activeDeadlineSeconds: 10,
+			startTime: 10,
+			backoffLimit: 6,
+			expectedForGetKey: true,
+			expectedConditionReason: "DeadlineExceeded",
 		},
 		"activeDeadlineSeconds with backofflimit reach": {
-			1, 1, 1, 10, 0,
-			0, 0, 1,
-			true, 0, 0, 0, 1, "BackoffLimitExceeded",
+			parallelism: 1,
+			completions: 1,
+			activeDeadlineSeconds: 1,
+			startTime: 10,
+			failedPods: 1,
+			expectedForGetKey: true,
+			expectedFailed: 1,
+			expectedConditionReason: "BackoffLimitExceeded",
 		},
 	}
 
 	for name, tc := range testCases {
-		// job manager setup
-		clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-		manager, sharedInformerFactory := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
-		fakePodControl := controller.FakePodControl{}
-		manager.podControl = &fakePodControl
-		manager.podStoreSynced = alwaysReady
-		manager.jobStoreSynced = alwaysReady
-		var actual *batch.Job
-		manager.updateHandler = func(job *batch.Job) error {
-			actual = job
-			return nil
-		}
-
-		// job & pods setup
-		job := newJob(tc.parallelism, tc.completions, tc.backoffLimit)
-		job.Spec.ActiveDeadlineSeconds = &tc.activeDeadlineSeconds
-		start := metav1.Unix(metav1.Now().Time.Unix()-tc.startTime, 0)
-		job.Status.StartTime = &start
-		sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
-		podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
-		setPodsStatuses(podIndexer, job, 0, tc.activePods, tc.succeededPods, tc.failedPods)
-
-		// run
-		forget, err := manager.syncJob(testutil.GetKey(job, t))
-		if err != nil {
-			t.Errorf("%s: unexpected error when syncing jobs %v", name, err)
-		}
-		if forget != tc.expectedForGetKey {
-			t.Errorf("%s: unexpected forget value. Expected %v, saw %v\n", name, tc.expectedForGetKey, forget)
-		}
-		// validate created/deleted pods
-		if int32(len(fakePodControl.Templates)) != 0 {
-			t.Errorf("%s: unexpected number of creates. Expected 0, saw %d\n", name, len(fakePodControl.Templates))
-		}
-		if int32(len(fakePodControl.DeletePodName)) != tc.expectedDeletions {
-			t.Errorf("%s: unexpected number of deletes. Expected %d, saw %d\n", name, tc.expectedDeletions, len(fakePodControl.DeletePodName))
-		}
-		// validate status
-		if actual.Status.Active != tc.expectedActive {
-			t.Errorf("%s: unexpected number of active pods. Expected %d, saw %d\n", name, tc.expectedActive, actual.Status.Active)
-		}
-		if actual.Status.Succeeded != tc.expectedSucceeded {
-			t.Errorf("%s: unexpected number of succeeded pods. Expected %d, saw %d\n", name, tc.expectedSucceeded, actual.Status.Succeeded)
-		}
-		if actual.Status.Failed != tc.expectedFailed {
-			t.Errorf("%s: unexpected number of failed pods. Expected %d, saw %d\n", name, tc.expectedFailed, actual.Status.Failed)
-		}
-		if actual.Status.StartTime == nil {
-			t.Errorf("%s: .status.startTime was not set", name)
-		}
-		// validate conditions
-		if !getCondition(actual, batch.JobFailed, tc.expectedConditionReason) {
-			t.Errorf("%s: expected fail condition. Got %#v", name, actual.Status.Conditions)
-		}
+		t.Run(name, func(t *testing.T) {
+			// job manager setup
+			clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
+			manager, sharedInformerFactory := newControllerFromClient(clientSet, controller.NoResyncPeriodFunc)
+			fakePodControl := controller.FakePodControl{}
+			manager.podControl = &fakePodControl
+			manager.podStoreSynced = alwaysReady
+			manager.jobStoreSynced = alwaysReady
+			var actual *batch.Job
+			manager.updateHandler = func(job *batch.Job) error {
+				actual = job
+				return nil
+			}
+
+			// job & pods setup
+			job := newJob(tc.parallelism, tc.completions, tc.backoffLimit)
+			job.Spec.ActiveDeadlineSeconds = &tc.activeDeadlineSeconds
+			start := metav1.Unix(metav1.Now().Time.Unix()-tc.startTime, 0)
+			job.Status.StartTime = &start
+			sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
+			podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
+			setPodsStatuses(podIndexer, job, 0, tc.activePods, tc.succeededPods, tc.failedPods)
+
+			// run
+			forget, err := manager.syncJob(testutil.GetKey(job, t))
+			if err != nil {
+				t.Errorf("Unexpected error when syncing jobs %v", err)
+			}
+			if forget != tc.expectedForGetKey {
+				t.Errorf("Unexpected forget value. Expected %v, saw %v\n", tc.expectedForGetKey, forget)
+			}
+			// validate created/deleted pods
+			if int32(len(fakePodControl.Templates)) != 0 {
+				t.Errorf("Unexpected number of creates. Expected 0, saw %d\n", len(fakePodControl.Templates))
+			}
+			if int32(len(fakePodControl.DeletePodName)) != tc.expectedDeletions {
+				t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", tc.expectedDeletions, len(fakePodControl.DeletePodName))
+			}
+			// validate status
+			if actual.Status.Active != tc.expectedActive {
+				t.Errorf("Unexpected number of active pods. Expected %d, saw %d\n", tc.expectedActive, actual.Status.Active)
+			}
+			if actual.Status.Succeeded != tc.expectedSucceeded {
+				t.Errorf("Unexpected number of succeeded pods. Expected %d, saw %d\n", tc.expectedSucceeded, actual.Status.Succeeded)
+			}
+			if actual.Status.Failed != tc.expectedFailed {
+				t.Errorf("Unexpected number of failed pods. Expected %d, saw %d\n", tc.expectedFailed, actual.Status.Failed)
+			}
+			if actual.Status.StartTime == nil {
+				t.Error("Missing .status.startTime")
+			}
+			// validate conditions
+			if !getCondition(actual, batch.JobFailed, tc.expectedConditionReason) {
+				t.Errorf("Expected fail condition. Got %#v", actual.Status.Conditions)
+			}
+		})
 	}
 }
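These deadline cases work by backdating job.Status.StartTime by tc.startTime seconds, so syncJob observes a job whose activeDeadlineSeconds may already be exhausted when it runs. A simplified sketch of the check being exercised (an illustrative helper, not the controller's actual function):

package main

import (
    "fmt"
    "time"
)

// pastActiveDeadline mirrors the arithmetic the test sets up: a job is
// past its deadline once now - startTime >= activeDeadlineSeconds.
// Simplified illustration only; the real logic lives in the Job controller.
func pastActiveDeadline(startTime time.Time, activeDeadlineSeconds int64, now time.Time) bool {
    return now.Sub(startTime) >= time.Duration(activeDeadlineSeconds)*time.Second
}

func main() {
    now := time.Now()
    // "times-out before any pod starts": started 10s ago, deadline 10s.
    fmt.Println(pastActiveDeadline(now.Add(-10*time.Second), 10, now)) // true: DeadlineExceeded
    // Started 5s ago with a 10s deadline: still within the deadline.
    fmt.Println(pastActiveDeadline(now.Add(-5*time.Second), 10, now)) // false
}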