Fix duplicate GC event handlers getting added if discovery flutters
@@ -28,6 +28,7 @@ import (
 	"time"
 
 	"golang.org/x/time/rate"
 
 	"k8s.io/klog/v2"
+	"k8s.io/klog/v2/ktesting"
 
@@ -805,10 +806,13 @@ func TestGetDeletableResources(t *testing.T) {
 			PreferredResources: test.serverResources,
 			Error:              test.err,
 		}
-		actual := GetDeletableResources(logger, client)
+		actual, actualErr := GetDeletableResources(logger, client)
 		if !reflect.DeepEqual(test.deletableResources, actual) {
 			t.Errorf("expected resources:\n%v\ngot:\n%v", test.deletableResources, actual)
 		}
+		if !reflect.DeepEqual(test.err, actualErr) {
+			t.Errorf("expected error:\n%v\ngot:\n%v", test.err, actualErr)
+		}
 	}
 }
 
@@ -822,7 +826,15 @@ func TestGarbageCollectorSync(t *testing.T) {
 				{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
 			},
 		},
+		{
+			GroupVersion: "apps/v1",
+			APIResources: []metav1.APIResource{
+				{Name: "deployments", Namespaced: true, Kind: "Deployment", Verbs: metav1.Verbs{"delete", "list", "watch"}},
+			},
+		},
 	}
+	appsV1Error := &discovery.ErrGroupDiscoveryFailed{Groups: map[schema.GroupVersion]error{{Group: "apps", Version: "v1"}: fmt.Errorf(":-/")}}
+
 	unsyncableServerResources := []*metav1.APIResourceList{
 		{
 			GroupVersion: "v1",
@@ -845,6 +857,10 @@ func TestGarbageCollectorSync(t *testing.T) {
 			200,
 			[]byte("{}"),
 		},
+		"GET" + "/apis/apps/v1/deployments": {
+			200,
+			[]byte("{}"),
+		},
 		"GET" + "/api/v1/secrets": {
 			404,
 			[]byte("{}"),
@@ -859,7 +875,11 @@ func TestGarbageCollectorSync(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	rm := &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}
+	tweakableRM := meta.NewDefaultRESTMapper(nil)
+	tweakableRM.AddSpecific(schema.GroupVersionKind{Version: "v1", Kind: "Pod"}, schema.GroupVersionResource{Version: "v1", Resource: "pods"}, schema.GroupVersionResource{Version: "v1", Resource: "pod"}, meta.RESTScopeNamespace)
+	tweakableRM.AddSpecific(schema.GroupVersionKind{Version: "v1", Kind: "Secret"}, schema.GroupVersionResource{Version: "v1", Resource: "secrets"}, schema.GroupVersionResource{Version: "v1", Resource: "secret"}, meta.RESTScopeNamespace)
+	tweakableRM.AddSpecific(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployment"}, meta.RESTScopeNamespace)
+	rm := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}}
 	metadataClient, err := metadata.NewForConfig(clientConfig)
 	if err != nil {
 		t.Fatal(err)
@@ -900,38 +920,70 @@ func TestGarbageCollectorSync(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Expected garbagecollector.Sync to be running but it is blocked: %v", err)
 	}
+	assertMonitors(t, gc, "pods", "deployments")
 
 	// Simulate the discovery client returning an error
-	fakeDiscoveryClient.setPreferredResources(nil)
-	fakeDiscoveryClient.setError(fmt.Errorf("error calling discoveryClient.ServerPreferredResources()"))
+	fakeDiscoveryClient.setPreferredResources(nil, fmt.Errorf("error calling discoveryClient.ServerPreferredResources()"))
 
 	// Wait until sync discovers the change
 	time.Sleep(1 * time.Second)
+	// No monitor changes
+	assertMonitors(t, gc, "pods", "deployments")
 
 	// Remove the error from being returned and see if the garbage collector sync is still working
-	fakeDiscoveryClient.setPreferredResources(serverResources)
-	fakeDiscoveryClient.setError(nil)
+	fakeDiscoveryClient.setPreferredResources(serverResources, nil)
 
 	err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
 	if err != nil {
 		t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
 	}
+	assertMonitors(t, gc, "pods", "deployments")
 
 	// Simulate the discovery client returning a resource the restmapper can resolve, but will not sync caches
-	fakeDiscoveryClient.setPreferredResources(unsyncableServerResources)
-	fakeDiscoveryClient.setError(nil)
+	fakeDiscoveryClient.setPreferredResources(unsyncableServerResources, nil)
 
 	// Wait until sync discovers the change
 	time.Sleep(1 * time.Second)
+	assertMonitors(t, gc, "pods", "secrets")
 
 	// Put the resources back to normal and ensure garbage collector sync recovers
-	fakeDiscoveryClient.setPreferredResources(serverResources)
-	fakeDiscoveryClient.setError(nil)
+	fakeDiscoveryClient.setPreferredResources(serverResources, nil)
 
 	err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
 	if err != nil {
 		t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
 	}
+	assertMonitors(t, gc, "pods", "deployments")
+
+	// Partial discovery failure
+	fakeDiscoveryClient.setPreferredResources(unsyncableServerResources, appsV1Error)
+	// Wait until sync discovers the change
+	time.Sleep(1 * time.Second)
+	// Deployments monitor kept
+	assertMonitors(t, gc, "pods", "deployments", "secrets")
+
+	// Put the resources back to normal and ensure garbage collector sync recovers
+	fakeDiscoveryClient.setPreferredResources(serverResources, nil)
+	// Wait until sync discovers the change
+	time.Sleep(1 * time.Second)
+	err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
+	if err != nil {
+		t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
+	}
+	// Unsyncable monitor removed
+	assertMonitors(t, gc, "pods", "deployments")
 }
 
+func assertMonitors(t *testing.T, gc *GarbageCollector, resources ...string) {
+	t.Helper()
+	expected := sets.NewString(resources...)
+	actual := sets.NewString()
+	for m := range gc.dependencyGraphBuilder.monitors {
+		actual.Insert(m.Resource)
+	}
+	if !actual.Equal(expected) {
+		t.Fatalf("expected monitors %v, got %v", expected.List(), actual.List())
+	}
+}
+
 func expectSyncNotBlocked(fakeDiscoveryClient *fakeServerResources, workerLock *sync.RWMutex) error {
@@ -979,15 +1031,10 @@ func (f *fakeServerResources) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
 	return f.PreferredResources, f.Error
 }
 
-func (f *fakeServerResources) setPreferredResources(resources []*metav1.APIResourceList) {
+func (f *fakeServerResources) setPreferredResources(resources []*metav1.APIResourceList, err error) {
 	f.Lock.Lock()
 	defer f.Lock.Unlock()
 	f.PreferredResources = resources
-}
-
-func (f *fakeServerResources) setError(err error) {
-	f.Lock.Lock()
-	defer f.Lock.Unlock()
 	f.Error = err
 }
 
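The test-side core of this change is that the fake discovery client now updates its resource list and its error together, under a single lock, so the GC sync loop can never observe a half-updated pair (new resources with a stale error, or vice versa). Below is a minimal standalone sketch of that pattern; the type and method names (fakeDiscovery, get, set) and the simplified []string payload are illustrative stand-ins, not the commit's code.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// fakeDiscovery stands in for the test's fakeServerResources: a stub whose
// results and error must always be read as a consistent pair.
type fakeDiscovery struct {
	lock      sync.Mutex
	resources []string // simplified stand-in for []*metav1.APIResourceList
	err       error
}

// get returns the current resources and error as one atomic snapshot,
// mirroring ServerPreferredResources in the diff above.
func (f *fakeDiscovery) get() ([]string, error) {
	f.lock.Lock()
	defer f.lock.Unlock()
	return f.resources, f.err
}

// set updates both fields inside one critical section. With the pre-fix
// shape (separate setPreferredResources and setError calls), a concurrent
// reader could land between the two calls and see an inconsistent state.
func (f *fakeDiscovery) set(resources []string, err error) {
	f.lock.Lock()
	defer f.lock.Unlock()
	f.resources = resources
	f.err = err
}

func main() {
	f := &fakeDiscovery{}
	f.set([]string{"pods", "deployments"}, nil)
	f.set(nil, errors.New("discovery flutter"))
	if _, err := f.get(); err != nil {
		fmt.Println("observed consistent failure state:", err)
	}
}

Folding setError into setPreferredResources also makes each step of the test a single atomic state transition, which is what lets the new assertMonitors helper check the monitor set deterministically after every transition.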