Mirror of https://github.com/k3s-io/kubernetes.git, synced 2026-01-04 23:17:50 +00:00.
Job: Handle error returned from AddEventHandler function (#119917)
* Job: Handle error returned from AddEventHandler function

Signed-off-by: Yuki Iwai <yuki.iwai.tz@gmail.com>

* Use an error message similar to CronJob's

Signed-off-by: Yuki Iwai <yuki.iwai.tz@gmail.com>

* Clean up error messages

Signed-off-by: Yuki Iwai <yuki.iwai.tz@gmail.com>

* Put the testing.T in the second place in the args for the newControllerFromClient function

Signed-off-by: Yuki Iwai <yuki.iwai.tz@gmail.com>

* Put the testing.T in the second place in the args for the newControllerFromClientWithClock function

Signed-off-by: Yuki Iwai <yuki.iwai.tz@gmail.com>

* Call t.Helper()

Signed-off-by: Yuki Iwai <yuki.iwai.tz@gmail.com>

* Put the testing.TB in the second place in the args for the createJobControllerWithSharedInformers function and call tb.Helper() there

Signed-off-by: Yuki Iwai <yuki.iwai.tz@gmail.com>

* Put the testing.TB in the second place in the args for the startJobControllerAndWaitForCaches function and call tb.Helper() there

Signed-off-by: Yuki Iwai <yuki.iwai.tz@gmail.com>

* Adapt TestFinalizerCleanup to the event handler error

Signed-off-by: Yuki Iwai <yuki.iwai.tz@gmail.com>

---------

Signed-off-by: Yuki Iwai <yuki.iwai.tz@gmail.com>
This commit is contained in:
@@ -152,11 +152,11 @@ type syncJobCtx struct {

 // NewController creates a new Job controller that keeps the relevant pods
 // in sync with their corresponding Job objects.
-func NewController(ctx context.Context, podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) *Controller {
+func NewController(ctx context.Context, podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) (*Controller, error) {
 	return newControllerWithClock(ctx, podInformer, jobInformer, kubeClient, &clock.RealClock{})
 }

-func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface, clock clock.WithTicker) *Controller {
+func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface, clock clock.WithTicker) (*Controller, error) {
 	eventBroadcaster := record.NewBroadcaster()
 	logger := klog.FromContext(ctx)

@@ -176,7 +176,7 @@ func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodIn
 		podBackoffStore: newBackoffStore(),
 	}

-	jobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+	if _, err := jobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
 			jm.enqueueSyncJobImmediately(logger, obj)
 		},
@@ -186,11 +186,13 @@ func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodIn
 		DeleteFunc: func(obj interface{}) {
 			jm.deleteJob(logger, obj)
 		},
-	})
+	}); err != nil {
+		return nil, fmt.Errorf("adding Job event handler: %w", err)
+	}
 	jm.jobLister = jobInformer.Lister()
 	jm.jobStoreSynced = jobInformer.Informer().HasSynced

-	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+	if _, err := podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
 			jm.addPod(logger, obj)
 		},
@@ -200,7 +202,9 @@ func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodIn
 		DeleteFunc: func(obj interface{}) {
 			jm.deletePod(logger, obj, true)
 		},
-	})
+	}); err != nil {
+		return nil, fmt.Errorf("adding Pod event handler: %w", err)
+	}
 	jm.podStore = podInformer.Lister()
 	jm.podStoreSynced = podInformer.Informer().HasSynced

@@ -210,7 +214,7 @@ func newControllerWithClock(ctx context.Context, podInformer coreinformers.PodIn

 	metrics.Register()

-	return jm
+	return jm, nil
 }

 // Run the main goroutine responsible for watching and syncing jobs.

@@ -123,13 +123,18 @@ func newJob(parallelism, completions, backoffLimit int32, completionMode batch.C
 	return newJobWithName("foobar", parallelism, completions, backoffLimit, completionMode)
 }

-func newControllerFromClient(ctx context.Context, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) (*Controller, informers.SharedInformerFactory) {
-	return newControllerFromClientWithClock(ctx, kubeClient, resyncPeriod, realClock)
+func newControllerFromClient(ctx context.Context, t *testing.T, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) (*Controller, informers.SharedInformerFactory) {
+	t.Helper()
+	return newControllerFromClientWithClock(ctx, t, kubeClient, resyncPeriod, realClock)
 }

-func newControllerFromClientWithClock(ctx context.Context, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, clock clock.WithTicker) (*Controller, informers.SharedInformerFactory) {
+func newControllerFromClientWithClock(ctx context.Context, t *testing.T, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, clock clock.WithTicker) (*Controller, informers.SharedInformerFactory) {
+	t.Helper()
 	sharedInformers := informers.NewSharedInformerFactory(kubeClient, resyncPeriod())
-	jm := newControllerWithClock(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), kubeClient, clock)
+	jm, err := newControllerWithClock(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), kubeClient, clock)
+	if err != nil {
+		t.Fatalf("Error creating Job controller: %v", err)
+	}
 	jm.podControl = &controller.FakePodControl{}
 	return jm, sharedInformers
 }

@@ -879,7 +884,7 @@ func TestControllerSyncJob(t *testing.T) {
|
||||
fakeClock = clocktesting.NewFakeClock(time.Now())
|
||||
}
|
||||
|
||||
manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, clientSet, controller.NoResyncPeriodFunc, fakeClock)
|
||||
manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, t, clientSet, controller.NoResyncPeriodFunc, fakeClock)
|
||||
fakePodControl := controller.FakePodControl{Err: tc.podControllerError, CreateLimit: tc.podLimit}
|
||||
manager.podControl = &fakePodControl
|
||||
manager.podStoreSynced = alwaysReady
|
||||
@@ -1813,7 +1818,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobBackoffLimitPerIndex, tc.enableJobBackoffLimitPerIndex)()
|
||||
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
manager, _ := newControllerFromClient(ctx, clientSet, controller.NoResyncPeriodFunc)
|
||||
manager, _ := newControllerFromClient(ctx, t, clientSet, controller.NoResyncPeriodFunc)
|
||||
fakePodControl := controller.FakePodControl{Err: tc.podControlErr}
|
||||
metrics.JobPodsFinished.Reset()
|
||||
manager.podControl = &fakePodControl
|
||||
@@ -1960,7 +1965,7 @@ func TestSyncJobPastDeadline(t *testing.T) {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
// job manager setup
|
||||
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, clientSet, controller.NoResyncPeriodFunc)
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientSet, controller.NoResyncPeriodFunc)
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager.podControl = &fakePodControl
|
||||
manager.podStoreSynced = alwaysReady
|
||||
@@ -2038,7 +2043,7 @@ func TestPastDeadlineJobFinished(t *testing.T) {
|
||||
_, ctx := ktesting.NewTestContext(t)
|
||||
clientset := fake.NewSimpleClientset()
|
||||
fakeClock := clocktesting.NewFakeClock(time.Now().Truncate(time.Second))
|
||||
manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
manager.podStoreSynced = alwaysReady
|
||||
manager.jobStoreSynced = alwaysReady
|
||||
manager.expectations = FakeJobExpectations{
|
||||
@@ -2117,7 +2122,7 @@ func TestPastDeadlineJobFinished(t *testing.T) {
|
||||
func TestSingleJobFailedCondition(t *testing.T) {
|
||||
_, ctx := ktesting.NewTestContext(t)
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc)
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc)
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager.podControl = &fakePodControl
|
||||
manager.podStoreSynced = alwaysReady
|
||||
@@ -2157,7 +2162,7 @@ func TestSingleJobFailedCondition(t *testing.T) {
|
||||
func TestSyncJobComplete(t *testing.T) {
|
||||
_, ctx := ktesting.NewTestContext(t)
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc)
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc)
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager.podControl = &fakePodControl
|
||||
manager.podStoreSynced = alwaysReady
|
||||
@@ -2183,7 +2188,7 @@ func TestSyncJobComplete(t *testing.T) {
|
||||
func TestSyncJobDeleted(t *testing.T) {
|
||||
_, ctx := ktesting.NewTestContext(t)
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
manager, _ := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc)
|
||||
manager, _ := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc)
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager.podControl = &fakePodControl
|
||||
manager.podStoreSynced = alwaysReady
|
||||
@@ -3274,7 +3279,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
|
||||
tc.job.Spec.PodReplacementPolicy = podReplacementPolicy(batch.Failed)
|
||||
}
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc)
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc)
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager.podControl = &fakePodControl
|
||||
manager.podStoreSynced = alwaysReady
|
||||
@@ -3775,7 +3780,7 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobPodFailurePolicy, tc.enableJobPodFailurePolicy)()
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
fakeClock := clocktesting.NewFakeClock(now)
|
||||
manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager.podControl = &fakePodControl
|
||||
manager.podStoreSynced = alwaysReady
|
||||
@@ -3833,7 +3838,7 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
t.Cleanup(setDurationDuringTest(&DefaultJobApiBackOff, fastJobApiBackoff))
|
||||
fakeClient := clocktesting.NewFakeClock(time.Now())
|
||||
manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClient)
|
||||
manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClient)
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager.podControl = &fakePodControl
|
||||
manager.podStoreSynced = alwaysReady
|
||||
@@ -3886,7 +3891,7 @@ func TestUpdateJobRequeue(t *testing.T) {
|
||||
}
|
||||
for name, tc := range cases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc)
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc)
|
||||
manager.podStoreSynced = alwaysReady
|
||||
manager.jobStoreSynced = alwaysReady
|
||||
|
||||
@@ -3955,7 +3960,7 @@ func TestGetPodCreationInfoForIndependentIndexes(t *testing.T) {
|
||||
for name, tc := range cases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
fakeClock := clocktesting.NewFakeClock(now)
|
||||
manager, _ := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
manager, _ := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
gotIndexesToAdd, gotRemainingTime := manager.getPodCreationInfoForIndependentIndexes(logger, tc.indexesToAdd, tc.podsWithDelayedDeletionPerIndex)
|
||||
if diff := cmp.Diff(tc.wantIndexesToAdd, gotIndexesToAdd); diff != "" {
|
||||
t.Fatalf("Unexpected indexes to add: %s", diff)
|
||||
@@ -3970,7 +3975,7 @@ func TestGetPodCreationInfoForIndependentIndexes(t *testing.T) {
|
||||
func TestJobPodLookup(t *testing.T) {
|
||||
_, ctx := ktesting.NewTestContext(t)
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc)
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc)
|
||||
manager.podStoreSynced = alwaysReady
|
||||
manager.jobStoreSynced = alwaysReady
|
||||
testCases := []struct {
|
||||
@@ -4114,7 +4119,7 @@ func TestGetPodsForJob(t *testing.T) {
|
||||
job.DeletionTimestamp = &metav1.Time{}
|
||||
}
|
||||
clientSet := fake.NewSimpleClientset(job, otherJob)
|
||||
jm, informer := newControllerFromClient(ctx, clientSet, controller.NoResyncPeriodFunc)
|
||||
jm, informer := newControllerFromClient(ctx, t, clientSet, controller.NoResyncPeriodFunc)
|
||||
jm.podStoreSynced = alwaysReady
|
||||
jm.jobStoreSynced = alwaysReady
|
||||
cachedJob := job.DeepCopy()
|
||||
@@ -4158,7 +4163,7 @@ func TestAddPod(t *testing.T) {
|
||||
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
fakeClock := clocktesting.NewFakeClock(time.Now())
|
||||
jm, informer := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
jm, informer := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
jm.podStoreSynced = alwaysReady
|
||||
jm.jobStoreSynced = alwaysReady
|
||||
|
||||
@@ -4202,7 +4207,7 @@ func TestAddPodOrphan(t *testing.T) {
|
||||
logger, ctx := ktesting.NewTestContext(t)
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
fakeClock := clocktesting.NewFakeClock(time.Now())
|
||||
jm, informer := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
jm, informer := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
jm.podStoreSynced = alwaysReady
|
||||
jm.jobStoreSynced = alwaysReady
|
||||
|
||||
@@ -4232,7 +4237,7 @@ func TestUpdatePod(t *testing.T) {
|
||||
logger := klog.FromContext(ctx)
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
fakeClock := clocktesting.NewFakeClock(time.Now())
|
||||
jm, informer := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
jm, informer := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
jm.podStoreSynced = alwaysReady
|
||||
jm.jobStoreSynced = alwaysReady
|
||||
|
||||
@@ -4280,7 +4285,7 @@ func TestUpdatePodOrphanWithNewLabels(t *testing.T) {
|
||||
logger, ctx := ktesting.NewTestContext(t)
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
fakeClock := clocktesting.NewFakeClock(time.Now())
|
||||
jm, informer := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
jm, informer := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
jm.podStoreSynced = alwaysReady
|
||||
jm.jobStoreSynced = alwaysReady
|
||||
|
||||
@@ -4309,7 +4314,7 @@ func TestUpdatePodChangeControllerRef(t *testing.T) {
|
||||
logger := klog.FromContext(ctx)
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
fakeClock := clocktesting.NewFakeClock(time.Now())
|
||||
jm, informer := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
jm, informer := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
jm.podStoreSynced = alwaysReady
|
||||
jm.jobStoreSynced = alwaysReady
|
||||
|
||||
@@ -4337,7 +4342,7 @@ func TestUpdatePodRelease(t *testing.T) {
|
||||
logger := klog.FromContext(ctx)
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
fakeClock := clocktesting.NewFakeClock(time.Now())
|
||||
jm, informer := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
jm, informer := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
jm.podStoreSynced = alwaysReady
|
||||
jm.jobStoreSynced = alwaysReady
|
||||
|
||||
@@ -4364,7 +4369,7 @@ func TestDeletePod(t *testing.T) {
|
||||
logger, ctx := ktesting.NewTestContext(t)
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
fakeClock := clocktesting.NewFakeClock(time.Now())
|
||||
jm, informer := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
jm, informer := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
jm.podStoreSynced = alwaysReady
|
||||
jm.jobStoreSynced = alwaysReady
|
||||
|
||||
@@ -4408,7 +4413,7 @@ func TestDeletePodOrphan(t *testing.T) {
|
||||
t.Cleanup(setDurationDuringTest(&syncJobBatchPeriod, 0))
|
||||
logger, ctx := ktesting.NewTestContext(t)
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
jm, informer := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc)
|
||||
jm, informer := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc)
|
||||
jm.podStoreSynced = alwaysReady
|
||||
jm.jobStoreSynced = alwaysReady
|
||||
|
||||
@@ -4449,7 +4454,7 @@ func (fe FakeJobExpectations) SatisfiedExpectations(logger klog.Logger, controll
|
||||
func TestSyncJobExpectations(t *testing.T) {
|
||||
_, ctx := ktesting.NewTestContext(t)
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc)
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc)
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager.podControl = &fakePodControl
|
||||
manager.podStoreSynced = alwaysReady
|
||||
@@ -4486,7 +4491,7 @@ func TestWatchJobs(t *testing.T) {
|
||||
clientset := fake.NewSimpleClientset()
|
||||
fakeWatch := watch.NewFake()
|
||||
clientset.PrependWatchReactor("jobs", core.DefaultWatchReactor(fakeWatch, nil))
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc)
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc)
|
||||
manager.podStoreSynced = alwaysReady
|
||||
manager.jobStoreSynced = alwaysReady
|
||||
|
||||
@@ -4532,7 +4537,7 @@ func TestWatchPods(t *testing.T) {
|
||||
clientset := fake.NewSimpleClientset(testJob)
|
||||
fakeWatch := watch.NewFake()
|
||||
clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil))
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc)
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc)
|
||||
manager.podStoreSynced = alwaysReady
|
||||
manager.jobStoreSynced = alwaysReady
|
||||
|
||||
@@ -4578,7 +4583,10 @@ func TestWatchOrphanPods(t *testing.T) {
|
||||
_, ctx := ktesting.NewTestContext(t)
|
||||
clientset := fake.NewSimpleClientset()
|
||||
sharedInformers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
|
||||
manager := NewController(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), clientset)
|
||||
manager, err := NewController(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), clientset)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating Job controller: %v", err)
|
||||
}
|
||||
manager.podStoreSynced = alwaysReady
|
||||
manager.jobStoreSynced = alwaysReady
|
||||
|
||||
@@ -4655,7 +4663,7 @@ func TestJobApiBackoffReset(t *testing.T) {
|
||||
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
fakeClock := clocktesting.NewFakeClock(time.Now())
|
||||
manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock)
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager.podControl = &fakePodControl
|
||||
manager.podStoreSynced = alwaysReady
|
||||
@@ -4739,7 +4747,7 @@ func TestJobBackoff(t *testing.T) {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobReadyPods, tc.jobReadyPodsEnabled)()
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc)
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc)
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager.podControl = &fakePodControl
|
||||
manager.podStoreSynced = alwaysReady
|
||||
@@ -4847,7 +4855,7 @@ func TestJobBackoffForOnFailure(t *testing.T) {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
// job manager setup
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc)
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc)
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager.podControl = &fakePodControl
|
||||
manager.podStoreSynced = alwaysReady
|
||||
@@ -4946,7 +4954,7 @@ func TestJobBackoffOnRestartPolicyNever(t *testing.T) {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
// job manager setup
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, clientset, controller.NoResyncPeriodFunc)
|
||||
manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc)
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager.podControl = &fakePodControl
|
||||
manager.podStoreSynced = alwaysReady
|
||||
@@ -5080,7 +5088,10 @@ func TestFinalizersRemovedExpectations(t *testing.T) {
|
||||
_, ctx := ktesting.NewTestContext(t)
|
||||
clientset := fake.NewSimpleClientset()
|
||||
sharedInformers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
|
||||
manager := NewController(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), clientset)
|
||||
manager, err := NewController(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), clientset)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating Job controller: %v", err)
|
||||
}
|
||||
manager.podStoreSynced = alwaysReady
|
||||
manager.jobStoreSynced = alwaysReady
|
||||
manager.podControl = &controller.FakePodControl{Err: errors.New("fake pod controller error")}
|
||||
@@ -5133,7 +5144,7 @@ func TestFinalizersRemovedExpectations(t *testing.T) {
|
||||
update := pods[0].DeepCopy()
|
||||
update.Finalizers = nil
|
||||
update.ResourceVersion = "1"
|
||||
err := clientset.Tracker().Update(podsResource, update, update.Namespace)
|
||||
err = clientset.Tracker().Update(podsResource, update, update.Namespace)
|
||||
if err != nil {
|
||||
t.Errorf("Removing finalizer: %v", err)
|
||||
}
|
||||
@@ -5179,7 +5190,10 @@ func TestFinalizerCleanup(t *testing.T) {
|
||||
|
||||
clientset := fake.NewSimpleClientset()
|
||||
sharedInformers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
|
||||
manager := NewController(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), clientset)
|
||||
manager, err := NewController(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), clientset)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating Job controller: %v", err)
|
||||
}
|
||||
manager.podStoreSynced = alwaysReady
|
||||
manager.jobStoreSynced = alwaysReady
|
||||
|
||||
@@ -5193,7 +5207,7 @@ func TestFinalizerCleanup(t *testing.T) {
|
||||
|
||||
// Create a simple Job
|
||||
job := newJob(1, 1, 1, batch.NonIndexedCompletion)
|
||||
job, err := clientset.BatchV1().Jobs(job.GetNamespace()).Create(ctx, job, metav1.CreateOptions{})
|
||||
job, err = clientset.BatchV1().Jobs(job.GetNamespace()).Create(ctx, job, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Creating job: %v", err)
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user