test: ensure WaitForCacheSync is called after starting sharedInformerFactory

Signed-off-by: knight42 <anonymousknight96@gmail.com>
knight42 2020-09-12 14:12:43 +08:00
parent a33f6b44e9
commit e89e72b637
GPG Key ID: 61C5DB9CE28EED62
5 changed files with 12 additions and 14 deletions
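
The change applies one pattern throughout these controller tests: after starting a SharedInformerFactory, block on WaitForCacheSync so the informer caches are populated before the controller under test is exercised. Below is a minimal standalone sketch of that pattern, assuming client-go's informers package and a fake clientset; the Nodes informer is registered only for illustration and is not tied to any particular test in this diff.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Fake clientset so the sketch runs without a real cluster.
	client := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(client, 0*time.Second)

	// Register at least one informer before Start; otherwise there is nothing to sync.
	_ = factory.Core().V1().Nodes().Informer()

	stopCh := make(chan struct{})
	defer close(stopCh)

	// Start the factory, then wait for every registered informer's cache to sync.
	factory.Start(stopCh)
	for ty, ok := range factory.WaitForCacheSync(stopCh) {
		if !ok {
			fmt.Printf("cache failed to sync: %v\n", ty)
		}
	}
	fmt.Println("caches synced")
}

WaitForCacheSync returns a map keyed by informer type, which is why the daemonset test below can report exactly which cache failed to sync.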

@@ -521,15 +521,10 @@ func TestExpectationsOnRecreate(t *testing.T) {
 	}
 	f.Start(stopCh)
-	cacheCtx, cancelCacheCtx := context.WithTimeout(context.Background(), 30*time.Second)
-	defer cancelCacheCtx()
-	ok := cache.WaitForNamedCacheSync(
-		"test dsc",
-		cacheCtx.Done(), f.Core().V1().Nodes().Informer().HasSynced,
-	)
-	if !ok {
-		t.Fatal("caches failed to sync")
+	for ty, ok := range f.WaitForCacheSync(stopCh) {
+		if !ok {
+			t.Fatalf("caches failed to sync: %v", ty)
+		}
 	}
 	expectStableQueueLength(0)
@@ -542,7 +537,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 	// create of DS adds to queue, processes
 	waitForQueueLength(1, "created DS")
-	ok = dsc.processNextWorkItem()
+	ok := dsc.processNextWorkItem()
 	if !ok {
 		t.Fatal("queue is shutting down")
 	}

@@ -428,6 +428,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
 		stopCh := make(chan struct{})
 		defer close(stopCh)
 		informers.Start(stopCh)
+		informers.WaitForCacheSync(stopCh)
 		t.Logf(" &test.revisionHistoryLimit: %d", test.revisionHistoryLimit)
 		d := newDeployment("foo", 1, &test.revisionHistoryLimit, nil, nil, map[string]string{"foo": "bar"})

@@ -31,7 +31,7 @@ import (
 	"time"
 	apps "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -599,6 +599,7 @@ func TestWatchControllers(t *testing.T) {
 		BurstReplicas,
 	)
 	informers.Start(stopCh)
+	informers.WaitForCacheSync(stopCh)
 	var testRSSpec apps.ReplicaSet
 	received := make(chan string)
@@ -1151,6 +1152,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 		100,
 	)
 	f.Start(stopCh)
+	f.WaitForCacheSync(stopCh)
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl

@@ -322,13 +322,12 @@ func TestControllerSync(t *testing.T) {
 		// Start the controller
 		stopCh := make(chan struct{})
 		informers.Start(stopCh)
+		informers.WaitForCacheSync(stopCh)
 		go ctrl.Run(stopCh)
 		// Wait for the controller to pass initial sync and fill its caches.
 		err = wait.Poll(10*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
-			return ctrl.volumeListerSynced() &&
-				ctrl.claimListerSynced() &&
-				len(ctrl.claims.ListKeys()) >= len(test.initialClaims) &&
+			return len(ctrl.claims.ListKeys()) >= len(test.initialClaims) &&
 				len(ctrl.volumes.store.ListKeys()) >= len(test.initialVolumes), nil
 		})
 		if err != nil {

@@ -255,6 +255,7 @@ func TestCSI_VolumeAll(t *testing.T) {
 			csiDriverInformer.Informer().GetStore().Add(driverInfo)
 		}
 		factory.Start(wait.NeverStop)
+		factory.WaitForCacheSync(wait.NeverStop)
 		host := volumetest.NewFakeVolumeHostWithCSINodeName(t,
 			tmpDir,