Wire contexts to Core controllers
@@ -114,9 +114,9 @@ func TestGarbageCollectorConstruction(t *testing.T) {
 	assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
 
 	// Make sure the syncing mechanism also works after Run() has been called
-	stopCh := make(chan struct{})
-	defer close(stopCh)
-	go gc.Run(1, stopCh)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	go gc.Run(ctx, 1)
 
 	err = gc.resyncMonitors(twoResources)
 	if err != nil {
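The signature change above is the heart of the commit: gc.Run(1, stopCh) becomes gc.Run(ctx, 1), so cancellation flows through a context.Context instead of a bare stop channel. A minimal sketch of that pattern, using a hypothetical Controller type rather than the real garbage collector:

package main

import (
	"context"
	"fmt"
	"time"
)

// Controller is a hypothetical stand-in for a controller such as the
// garbage collector; it is not the actual Kubernetes type.
type Controller struct{}

// Run blocks until the context is cancelled, replacing the older
// Run(workers int, stopCh <-chan struct{}) shape seen in the diff.
func (c *Controller) Run(ctx context.Context, workers int) {
	for i := 0; i < workers; i++ {
		go func(id int) {
			for {
				select {
				case <-ctx.Done():
					return // cancellation replaces a closed stopCh
				case <-time.After(100 * time.Millisecond):
					fmt.Printf("worker %d: processing\n", id)
				}
			}
		}(i)
	}
	<-ctx.Done()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go (&Controller{}).Run(ctx, 1)
	time.Sleep(300 * time.Millisecond)
}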
@@ -287,7 +287,7 @@ func TestAttemptToDeleteItem(t *testing.T) {
 		owners:  nil,
 		virtual: true,
 	}
-	err := gc.attemptToDeleteItem(item)
+	err := gc.attemptToDeleteItem(context.TODO(), item)
 	if err != nil {
 		t.Errorf("Unexpected Error: %v", err)
 	}
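attemptToDeleteItem now threads a context through to the API calls it makes. The tests pass context.TODO(), Go's placeholder for call sites that have not yet been wired to a caller-supplied context. A small sketch of the idea; deleteItem is a hypothetical helper, not the Kubernetes function:

package main

import (
	"context"
	"fmt"
)

// deleteItem is a hypothetical helper showing why the test calls gained a
// context argument: the context can now carry cancellation to API calls.
func deleteItem(ctx context.Context, name string) error {
	select {
	case <-ctx.Done():
		return ctx.Err() // abandon work if the caller gave up
	default:
		fmt.Println("deleting", name)
		return nil
	}
}

func main() {
	// context.TODO() marks a call site that does not yet receive a
	// context from its caller, which is why the updated tests pass it
	// to attemptToDeleteItem.
	if err := deleteItem(context.TODO(), "pod-1"); err != nil {
		fmt.Println("error:", err)
	}
}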
@@ -546,12 +546,12 @@ func TestAbsentOwnerCache(t *testing.T) {
 	gc := setupGC(t, clientConfig)
 	defer close(gc.stop)
 	gc.absentOwnerCache = NewReferenceCache(2)
-	gc.attemptToDeleteItem(podToGCNode(rc1Pod1))
-	gc.attemptToDeleteItem(podToGCNode(rc2Pod1))
+	gc.attemptToDeleteItem(context.TODO(), podToGCNode(rc1Pod1))
+	gc.attemptToDeleteItem(context.TODO(), podToGCNode(rc2Pod1))
 	// rc1 should already be in the cache, no request should be sent. rc1 should be promoted in the UIDCache
-	gc.attemptToDeleteItem(podToGCNode(rc1Pod2))
+	gc.attemptToDeleteItem(context.TODO(), podToGCNode(rc1Pod2))
 	// after this call, rc2 should be evicted from the UIDCache
-	gc.attemptToDeleteItem(podToGCNode(rc3Pod1))
+	gc.attemptToDeleteItem(context.TODO(), podToGCNode(rc3Pod1))
 	// check cache
 	if !gc.absentOwnerCache.Has(objectReference{Namespace: "ns1", OwnerReference: metav1.OwnerReference{Kind: "ReplicationController", Name: "rc1", UID: "1", APIVersion: "v1"}}) {
 		t.Errorf("expected rc1 to be in the cache")
@@ -851,9 +851,9 @@ func TestGarbageCollectorSync(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	stopCh := make(chan struct{})
-	defer close(stopCh)
-	go gc.Run(1, stopCh)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	go gc.Run(ctx, 1)
 	// The pseudo-code of GarbageCollector.Sync():
 	// GarbageCollector.Sync(client, period, stopCh):
 	//    wait.Until() loops with `period` until the `stopCh` is closed :
@@ -868,7 +868,7 @@ func TestGarbageCollectorSync(t *testing.T) {
 	// The 1s sleep in the test allows GetDeletableResources and
 	// gc.resyncMonitors to run ~5 times to ensure the changes to the
 	// fakeDiscoveryClient are picked up.
-	go gc.Sync(fakeDiscoveryClient, 200*time.Millisecond, stopCh)
+	go gc.Sync(fakeDiscoveryClient, 200*time.Millisecond, ctx.Done())
 
 	// Wait until the sync discovers the initial resources
 	time.Sleep(1 * time.Second)
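gc.Sync still takes a stop channel, so the test bridges the two styles with ctx.Done(), which returns a channel that closes when the context is cancelled. A sketch of that bridge; syncLoop is a hypothetical stand-in for a channel-based API like gc.Sync:

package main

import (
	"context"
	"fmt"
	"time"
)

// syncLoop is a hypothetical stand-in for an API that still takes a stop
// channel rather than a context, like gc.Sync in the diff.
func syncLoop(period time.Duration, stopCh <-chan struct{}) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-stopCh:
			return
		case <-ticker.C:
			fmt.Println("resync")
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	// ctx.Done() yields a channel that closes when cancel is called,
	// letting a context-based caller drive a stop-channel API.
	go syncLoop(50*time.Millisecond, ctx.Done())
	time.Sleep(200 * time.Millisecond)
	cancel()
}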
@@ -2434,7 +2434,7 @@ func processAttemptToDelete(count int) step {
 			if count <= 0 {
 				// process all
 				for ctx.gc.dependencyGraphBuilder.attemptToDelete.Len() != 0 {
-					ctx.gc.attemptToDeleteWorker()
+					ctx.gc.attemptToDeleteWorker(context.TODO())
 				}
 			} else {
 				for i := 0; i < count; i++ {
@@ -2442,7 +2442,7 @@ func processAttemptToDelete(count int) step {
 					ctx.t.Errorf("expected at least %d pending changes, got %d", count, i+1)
 					return
 				}
-				ctx.gc.attemptToDeleteWorker()
+				ctx.gc.attemptToDeleteWorker(context.TODO())
 			}
 		}
 	},
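attemptToDeleteWorker also gains a context parameter, and the test harness drives it one step at a time (note that ctx here is the test's own context struct, not a context.Context). A sketch of a step-wise worker in that spirit; the queue type below is hypothetical:

package main

import (
	"context"
	"fmt"
)

// queue is a hypothetical work queue standing in for
// dependencyGraphBuilder.attemptToDelete in the diff.
type queue struct{ items []string }

func (q *queue) Len() int { return len(q.items) }

func (q *queue) pop() string {
	item := q.items[0]
	q.items = q.items[1:]
	return item
}

// attemptToDeleteWorker processes one queue item per call, mirroring how
// the tests invoke the worker directly instead of running it in a loop.
func attemptToDeleteWorker(ctx context.Context, q *queue) {
	if ctx.Err() != nil || q.Len() == 0 {
		return
	}
	fmt.Println("attempting delete of", q.pop())
}

func main() {
	q := &queue{items: []string{"a", "b"}}
	// "process all" drains the queue, as in processAttemptToDelete.
	for q.Len() != 0 {
		attemptToDeleteWorker(context.TODO(), q)
	}
}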