Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-04 09:49:50 +00:00
Merge pull request #126228 from googs1025/fix_informer
chore(Job): make trivial improvements to job controller unit test
This commit is contained in:
commit b3e769b72e
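The diff applies two mechanical cleanups across the job controller unit tests: every fake.NewSimpleClientset(...) becomes fake.NewClientset(...), and the hand-rolled stopCh channels are dropped in favour of the test context's ctx.Done() channel. Below is a minimal, hedged sketch of the clientset side of that change; it is a standalone program, not code from the test file, and it only assumes that fake.NewClientset is a drop-in replacement accepting the same variadic seed objects as fake.NewSimpleClientset:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Seed the fake API server with an object, the same way the tests seed
	// jobs and pods into their fake clientsets.
	seed := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}

	// fake.NewClientset is the newer constructor; for this usage it behaves
	// like the fake.NewSimpleClientset call it replaces.
	clientset := fake.NewClientset(seed)

	pod, err := clientset.CoreV1().Pods("default").Get(context.Background(), "demo", metav1.GetOptions{})
	if err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Println("got pod from fake clientset:", pod.Name)
}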
@@ -2756,7 +2756,7 @@ func getCondition(job *batch.Job, condition batch.JobConditionType, status v1.Co
 // reaching the active deadline, at which point it is marked as Failed.
 func TestPastDeadlineJobFinished(t *testing.T) {
 	_, ctx := ktesting.NewTestContext(t)
-	clientset := fake.NewSimpleClientset()
+	clientset := fake.NewClientset()
 	fakeClock := clocktesting.NewFakeClock(time.Now().Truncate(time.Second))
 	manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, t, clientset, controller.NoResyncPeriodFunc, fakeClock)
 	manager.podStoreSynced = alwaysReady
@@ -6044,7 +6044,7 @@ func TestGetPodsForJob(t *testing.T) {
 	if tc.jobDeleted {
 		job.DeletionTimestamp = &metav1.Time{}
 	}
-	clientSet := fake.NewSimpleClientset(job, otherJob)
+	clientSet := fake.NewClientset(job, otherJob)
 	jm, informer := newControllerFromClient(ctx, t, clientSet, controller.NoResyncPeriodFunc)
 	jm.podStoreSynced = alwaysReady
 	jm.jobStoreSynced = alwaysReady
@@ -6416,7 +6416,7 @@ func TestSyncJobExpectations(t *testing.T) {
 
 func TestWatchJobs(t *testing.T) {
 	_, ctx := ktesting.NewTestContext(t)
-	clientset := fake.NewSimpleClientset()
+	clientset := fake.NewClientset()
 	fakeWatch := watch.NewFake()
 	clientset.PrependWatchReactor("jobs", core.DefaultWatchReactor(fakeWatch, nil))
 	manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc)
@@ -6446,9 +6446,8 @@ func TestWatchJobs(t *testing.T) {
 	}
 	// Start only the job watcher and the workqueue, send a watch event,
 	// and make sure it hits the sync method.
-	stopCh := make(chan struct{})
-	defer close(stopCh)
-	sharedInformerFactory.Start(stopCh)
+	sharedInformerFactory.Start(ctx.Done())
+	sharedInformerFactory.WaitForCacheSync(ctx.Done())
 	go manager.Run(ctx, 1)
 
 	// We're sending new job to see if it reaches syncHandler.
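Besides swapping stopCh for ctx.Done(), the new TestWatchJobs setup also waits for the factory's caches before running the controller. A hedged, self-contained sketch of that factory-level sequence, including the fake watch reactor the test wires up in its earlier context lines (names such as factory are illustrative, not taken from the test):

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Fake clientset whose "jobs" watch is backed by a FakeWatcher, so a
	// test can inject watch events by hand, as TestWatchJobs does.
	clientset := fake.NewClientset()
	fakeWatch := watch.NewFake()
	clientset.PrependWatchReactor("jobs", core.DefaultWatchReactor(fakeWatch, nil))

	factory := informers.NewSharedInformerFactory(clientset, 0)
	// Requesting the informer registers it, so factory.Start will run it.
	factory.Batch().V1().Jobs().Informer()

	// The context's Done() channel now drives both startup and the cache
	// sync wait; no separate stopCh is created or closed.
	factory.Start(ctx.Done())
	synced := factory.WaitForCacheSync(ctx.Done())
	fmt.Println("caches synced:", synced)
}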
@@ -6462,7 +6461,7 @@ func TestWatchJobs(t *testing.T) {
 func TestWatchPods(t *testing.T) {
 	_, ctx := ktesting.NewTestContext(t)
 	testJob := newJob(2, 2, 6, batch.NonIndexedCompletion)
-	clientset := fake.NewSimpleClientset(testJob)
+	clientset := fake.NewClientset(testJob)
 	fakeWatch := watch.NewFake()
 	clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil))
 	manager, sharedInformerFactory := newControllerFromClient(ctx, t, clientset, controller.NoResyncPeriodFunc)
@@ -6493,9 +6492,7 @@ func TestWatchPods(t *testing.T) {
 	}
 	// Start only the pod watcher and the workqueue, send a watch event,
 	// and make sure it hits the sync method for the right job.
-	stopCh := make(chan struct{})
-	defer close(stopCh)
-	go sharedInformerFactory.Core().V1().Pods().Informer().Run(stopCh)
+	go sharedInformerFactory.Core().V1().Pods().Informer().Run(ctx.Done())
 	go manager.Run(ctx, 1)
 
 	pods := newPodList(1, v1.PodRunning, testJob)
@@ -6509,7 +6506,7 @@ func TestWatchPods(t *testing.T) {
 
 func TestWatchOrphanPods(t *testing.T) {
 	_, ctx := ktesting.NewTestContext(t)
-	clientset := fake.NewSimpleClientset()
+	clientset := fake.NewClientset()
 	sharedInformers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
 	manager, err := NewController(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), clientset)
 	if err != nil {
@@ -6518,11 +6515,9 @@ func TestWatchOrphanPods(t *testing.T) {
 	manager.podStoreSynced = alwaysReady
 	manager.jobStoreSynced = alwaysReady
 
-	stopCh := make(chan struct{})
-	defer close(stopCh)
 	podInformer := sharedInformers.Core().V1().Pods().Informer()
-	go podInformer.Run(stopCh)
-	cache.WaitForCacheSync(stopCh, podInformer.HasSynced)
+	go podInformer.Run(ctx.Done())
+	cache.WaitForCacheSync(ctx.Done(), podInformer.HasSynced)
 	go manager.Run(ctx, 1)
 
 	// Create job but don't add it to the store.
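The orphan-pod tests use the informer-level variant of the same pattern: a single pod informer is started and synced against ctx.Done(). The semantic point is that cancelling the context now plays the role that close(stopCh) used to play. A hedged standalone sketch follows; the explicit cancel and sleep are only there to demonstrate shutdown and are not part of the test code:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	clientset := fake.NewClientset()
	sharedInformers := informers.NewSharedInformerFactory(clientset, 0)
	podInformer := sharedInformers.Core().V1().Pods().Informer()

	// Start the informer and block until its cache has synced, the same
	// sequence the tests use, but driven by the context instead of a stopCh.
	go podInformer.Run(ctx.Done())
	if !cache.WaitForCacheSync(ctx.Done(), podInformer.HasSynced) {
		fmt.Println("cache did not sync before the context was cancelled")
		return
	}
	fmt.Println("pod informer cache synced")

	// Equivalent to close(stopCh) in the old code: the Done() channel is
	// closed and the informer goroutine shuts down.
	cancel()
	time.Sleep(100 * time.Millisecond)
	fmt.Println("informer stopped")
}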
@@ -6582,7 +6577,7 @@ func TestWatchOrphanPods(t *testing.T) {
 
 func TestSyncOrphanPod(t *testing.T) {
 	_, ctx := ktesting.NewTestContext(t)
-	clientset := fake.NewSimpleClientset()
+	clientset := fake.NewClientset()
 	sharedInformers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
 	manager, err := NewController(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), clientset)
 	if err != nil {
@@ -6591,11 +6586,9 @@ func TestSyncOrphanPod(t *testing.T) {
 	manager.podStoreSynced = alwaysReady
 	manager.jobStoreSynced = alwaysReady
 
-	stopCh := make(chan struct{})
-	defer close(stopCh)
 	podInformer := sharedInformers.Core().V1().Pods().Informer()
-	go podInformer.Run(stopCh)
-	cache.WaitForCacheSync(stopCh, podInformer.HasSynced)
+	go podInformer.Run(ctx.Done())
+	cache.WaitForCacheSync(ctx.Done(), podInformer.HasSynced)
 	go manager.Run(ctx, 1)
 
 	cases := map[string]struct {
@@ -7462,7 +7455,7 @@ func TestEnsureJobConditions(t *testing.T) {
 
 func TestFinalizersRemovedExpectations(t *testing.T) {
 	_, ctx := ktesting.NewTestContext(t)
-	clientset := fake.NewSimpleClientset()
+	clientset := fake.NewClientset()
 	sharedInformers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
 	manager, err := NewController(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), clientset)
 	if err != nil {
@@ -7506,10 +7499,8 @@ func TestFinalizersRemovedExpectations(t *testing.T) {
 		t.Errorf("Different expectations for removed finalizers after syncJob (-want,+got):\n%s", diff)
 	}
 
-	stopCh := make(chan struct{})
-	defer close(stopCh)
-	go sharedInformers.Core().V1().Pods().Informer().Run(stopCh)
-	cache.WaitForCacheSync(stopCh, podInformer.HasSynced)
+	go sharedInformers.Core().V1().Pods().Informer().Run(ctx.Done())
+	cache.WaitForCacheSync(ctx.Done(), podInformer.HasSynced)
 
 	// Make sure the first syncJob sets the expectations, even after the caches synced.
 	gotExpectedUIDs = manager.finalizerExpectations.getExpectedUIDs(jobKey)
@@ -7568,7 +7559,7 @@ func TestFinalizerCleanup(t *testing.T) {
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
-	clientset := fake.NewSimpleClientset()
+	clientset := fake.NewClientset()
 	sharedInformers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
 	manager, err := NewController(ctx, sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), clientset)
 	if err != nil {