Cleanup Job controller tests

Michal Wozniak
2024-07-05 14:59:03 +02:00
parent 58c44005cd
commit 4250d444f8
2 changed files with 53 additions and 53 deletions
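
Every hunk below makes one of two mechanical changes: call sites that passed context.TODO() now pass the test-scoped ctx, and verifyEmptyQueue loses a ctx parameter it never used. The diff does not show where each test's ctx comes from; the following is a minimal sketch of one plausible wiring (context.WithCancel bound to the test via t.Cleanup), illustrative only and not necessarily how this file actually sets it up:

package example

import (
	"context"
	"testing"
)

// testContext is a hypothetical stand-in for the `ctx` each test below
// already has in scope: a context canceled when the test ends, so
// controller goroutines started with it do not outlive the test.
func testContext(t *testing.T) context.Context {
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	return ctx
}

func TestExample(t *testing.T) {
	ctx := testContext(t)
	_ = ctx // would be passed to processNextWorkItem, Run, getPodsForJob, ...
}

Unlike context.TODO(), a context like this actually propagates cancellation, which is the point of threading ctx through these calls.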


@@ -5287,7 +5287,7 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
job := newJob(2, 2, 6, batch.NonIndexedCompletion)
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
manager.queue.Add(testutil.GetKey(job, t))
-manager.processNextWorkItem(context.TODO())
+manager.processNextWorkItem(ctx)
if tc.wantRequeued {
verifyEmptyQueueAndAwaitForQueueLen(ctx, t, manager, 1)
} else {
@@ -5297,7 +5297,7 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
// into the queue asynchronously.
manager.clock.Sleep(fastJobApiBackoff)
time.Sleep(time.Millisecond)
-verifyEmptyQueue(ctx, t, manager)
+verifyEmptyQueue(t, manager)
}
})
}
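
The hunk above leans on the controller's fake clock: manager.clock.Sleep(fastJobApiBackoff) advances virtual time past the API backoff, and the one-millisecond real sleep yields so the delayed requeue goroutine can run before the queue is inspected. Below is a hedged sketch of that idiom using k8s.io/utils/clock/testing (the durations and names are illustrative, not the controller's actual wiring):

package example

import (
	"testing"
	"time"

	testingclock "k8s.io/utils/clock/testing"
)

func TestFakeClockBackoff(t *testing.T) {
	fakeClock := testingclock.NewFakeClock(time.Now())

	done := make(chan struct{})
	go func() {
		// Stand-in for the delayed requeue: parks until virtual time
		// advances past the (illustrative) backoff.
		<-fakeClock.After(10 * time.Millisecond)
		close(done)
	}()

	// Wait for the goroutine to register its timer, then jump virtual
	// time past the backoff in a single step; no real waiting needed.
	for !fakeClock.HasWaiters() {
		time.Sleep(time.Millisecond)
	}
	fakeClock.Step(10 * time.Millisecond)
	<-done
}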
@@ -5570,7 +5570,7 @@ func TestGetPodsForJob(t *testing.T) {
informer.Core().V1().Pods().Informer().GetIndexer().Add(p)
}
-pods, err := jm.getPodsForJob(context.TODO(), job)
+pods, err := jm.getPodsForJob(ctx, job)
if err != nil {
t.Fatalf("getPodsForJob() error: %v", err)
}
@@ -5961,7 +5961,7 @@ func TestWatchJobs(t *testing.T) {
stopCh := make(chan struct{})
defer close(stopCh)
sharedInformerFactory.Start(stopCh)
-go manager.Run(context.TODO(), 1)
+go manager.Run(ctx, 1)
// We're sending new job to see if it reaches syncHandler.
testJob.Namespace = "bar"
@@ -6008,7 +6008,7 @@ func TestWatchPods(t *testing.T) {
stopCh := make(chan struct{})
defer close(stopCh)
go sharedInformerFactory.Core().V1().Pods().Informer().Run(stopCh)
-go manager.Run(context.TODO(), 1)
+go manager.Run(ctx, 1)
pods := newPodList(1, v1.PodRunning, testJob)
testPod := pods[0]
@@ -6035,7 +6035,7 @@ func TestWatchOrphanPods(t *testing.T) {
podInformer := sharedInformers.Core().V1().Pods().Informer()
go podInformer.Run(stopCh)
cache.WaitForCacheSync(stopCh, podInformer.HasSynced)
-go manager.Run(context.TODO(), 1)
+go manager.Run(ctx, 1)
// Create job but don't add it to the store.
cases := map[string]struct {
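
For context on the watch tests above: the informer must be running and its cache synced before the controller's Run loop can observe events. The following is a self-contained sketch of that startup sequence using client-go's fake clientset; the wiring is illustrative, not this file's actual fixture:

package example

import (
	"testing"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func TestInformerStartup(t *testing.T) {
	client := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(client, 0)
	podInformer := factory.Core().V1().Pods().Informer()

	stopCh := make(chan struct{})
	defer close(stopCh)
	go podInformer.Run(stopCh)

	// Block until the initial list is in the cache; handlers fired
	// before this point could observe a partial world.
	if !cache.WaitForCacheSync(stopCh, podInformer.HasSynced) {
		t.Fatal("timed out waiting for pod informer cache to sync")
	}
}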
@@ -6297,7 +6297,7 @@ func TestJobApiBackoffReset(t *testing.T) {
// error returned make the key requeued
fakePodControl.Err = errors.New("Controller error")
manager.queue.Add(key)
-manager.processNextWorkItem(context.TODO())
+manager.processNextWorkItem(ctx)
retries := manager.queue.NumRequeues(key)
if retries != 1 {
t.Fatalf("%s: expected exactly 1 retry, got %d", job.Name, retries)
@@ -6307,8 +6307,8 @@ func TestJobApiBackoffReset(t *testing.T) {
// the queue is emptied on success
fakePodControl.Err = nil
-manager.processNextWorkItem(context.TODO())
-verifyEmptyQueue(ctx, t, manager)
+manager.processNextWorkItem(ctx)
+verifyEmptyQueue(t, manager)
}
var _ workqueue.TypedRateLimitingInterface[string] = &fakeRateLimitingQueue{}
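
The assertions in TestJobApiBackoffReset rest on the workqueue contract: a rate-limited add increments the item's requeue count, and Forget resets it after a successful sync. A standalone sketch of that contract against the typed workqueue API (independent of the controller; the key is hypothetical):

package example

import (
	"testing"

	"k8s.io/client-go/util/workqueue"
)

func TestRequeueCounter(t *testing.T) {
	q := workqueue.NewTypedRateLimitingQueue(
		workqueue.DefaultTypedControllerRateLimiter[string]())
	defer q.ShutDown()
	key := "default/some-job"

	// A failed sync re-adds the key through the rate limiter,
	// which bumps its requeue count.
	q.AddRateLimited(key)
	if got := q.NumRequeues(key); got != 1 {
		t.Fatalf("expected 1 requeue, got %d", got)
	}

	// A successful sync calls Forget, resetting the backoff counter.
	q.Forget(key)
	if got := q.NumRequeues(key); got != 0 {
		t.Fatalf("expected 0 requeues after Forget, got %d", got)
	}
}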
@@ -7091,13 +7091,13 @@ func podReplacementPolicy(m batch.PodReplacementPolicy) *batch.PodReplacementPol
func verifyEmptyQueueAndAwaitForQueueLen(ctx context.Context, t *testing.T, jm *Controller, wantQueueLen int) {
t.Helper()
-verifyEmptyQueue(ctx, t, jm)
+verifyEmptyQueue(t, jm)
awaitForQueueLen(ctx, t, jm, wantQueueLen)
}
func awaitForQueueLen(ctx context.Context, t *testing.T, jm *Controller, wantQueueLen int) {
t.Helper()
-verifyEmptyQueue(ctx, t, jm)
+verifyEmptyQueue(t, jm)
if err := wait.PollUntilContextTimeout(ctx, fastRequeue, time.Second, true, func(ctx context.Context) (bool, error) {
if requeued := jm.queue.Len() == wantQueueLen; requeued {
return true, nil
@@ -7109,7 +7109,7 @@ func awaitForQueueLen(ctx context.Context, t *testing.T, jm *Controller, wantQue
}
}
-func verifyEmptyQueue(ctx context.Context, t *testing.T, jm *Controller) {
+func verifyEmptyQueue(t *testing.T, jm *Controller) {
t.Helper()
if jm.queue.Len() > 0 {
t.Errorf("Unexpected queue.Len(). Want: %d, got: %d", 0, jm.queue.Len())
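
The final hunk is cut off above; for readability, the resulting helper in full, with the closing braces reconstructed and the body otherwise verbatim from the diff:

func verifyEmptyQueue(t *testing.T, jm *Controller) {
	t.Helper()
	// A purely synchronous length check, which is why the ctx
	// parameter could be dropped.
	if jm.queue.Len() > 0 {
		t.Errorf("Unexpected queue.Len(). Want: %d, got: %d", 0, jm.queue.Len())
	}
}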