Use shared informers in gc controller if possible
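The change threads a shared informer factory through the garbage collector: NewGarbageCollector grows a sharedInformers parameter, GraphBuilder stores the factory and the stop channel, and controllerFor now asks the factory for a shared informer per resource before falling back to the meta-only list/watch informer it used previously. A minimal caller-side sketch of the new wiring, assuming the constructor signature introduced here and mirroring the updated test setup (the helper name, client pools, REST mapper, and deletable-resource set are placeholders, not part of this commit):

package example

import (
    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/dynamic"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
    informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
    "k8s.io/kubernetes/pkg/controller/garbagecollector"
)

// newGC is a hypothetical helper: it builds the shared informer factory, constructs the
// collector with it, and starts the informers. The client pools, mapper, and resource
// set are supplied by the caller.
func newGC(metaOnlyClientPool, clientPool dynamic.ClientPool, mapper meta.RESTMapper,
    deletableResources map[schema.GroupVersionResource]struct{}) (*garbagecollector.GarbageCollector, chan struct{}, error) {
    // A fake clientset and zero resync period are enough for a sketch; a real caller
    // would presumably pass its own clientset (and share the factory with other controllers).
    client := fake.NewSimpleClientset()
    sharedInformers := informers.NewSharedInformerFactory(client, 0)

    gc, err := garbagecollector.NewGarbageCollector(metaOnlyClientPool, clientPool, mapper, deletableResources, sharedInformers)
    if err != nil {
        return nil, nil, err
    }

    // Start whatever informers have been registered so far. Start only launches informers
    // that have not been started yet, which is why the GraphBuilder can safely call it
    // again when discovery later reveals a previously unseen resource.
    stop := make(chan struct{})
    go sharedInformers.Start(stop)
    return gc, stop, nil
}

The diff below adds the same wiring to the tests and teaches GraphBuilder.controllerFor to prefer gb.sharedInformers.ForResource over constructing its own informer.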
@@ -23,6 +23,7 @@ go_library(
     ],
     tags = ["automanaged"],
     deps = [
+        "//pkg/client/informers/informers_generated/externalversions:go_default_library",
         "//pkg/client/retry:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/garbagecollector/metaonly:go_default_library",

@@ -60,6 +61,8 @@ go_test(
         "//pkg/api:go_default_library",
         "//pkg/api/install:go_default_library",
         "//pkg/api/v1:go_default_library",
+        "//pkg/client/clientset_generated/clientset/fake:go_default_library",
+        "//pkg/client/informers/informers_generated/externalversions:go_default_library",
         "//pkg/controller/garbagecollector/metaonly:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",

@@ -33,6 +33,7 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/dynamic"
     "k8s.io/client-go/util/workqueue"
+    informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
     // install the prometheus plugin

@@ -69,9 +70,16 @@ type GarbageCollector struct {
     registeredRateLimiter *RegisteredRateLimiter
     // GC caches the owners that do not exist according to the API server.
     absentOwnerCache *UIDCache
+    sharedInformers  informers.SharedInformerFactory
 }
 
-func NewGarbageCollector(metaOnlyClientPool dynamic.ClientPool, clientPool dynamic.ClientPool, mapper meta.RESTMapper, deletableResources map[schema.GroupVersionResource]struct{}) (*GarbageCollector, error) {
+func NewGarbageCollector(
+    metaOnlyClientPool dynamic.ClientPool,
+    clientPool dynamic.ClientPool,
+    mapper meta.RESTMapper,
+    deletableResources map[schema.GroupVersionResource]struct{},
+    sharedInformers informers.SharedInformerFactory,
+) (*GarbageCollector, error) {
     attemptToDelete := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_delete")
     attemptToOrphan := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_orphan")
     absentOwnerCache := NewUIDCache(500)

@@ -94,6 +102,7 @@ func NewGarbageCollector(metaOnlyClientPool dynamic.ClientPool, clientPool dynam
         attemptToDelete:  attemptToDelete,
         attemptToOrphan:  attemptToOrphan,
         absentOwnerCache: absentOwnerCache,
+        sharedInformers:  sharedInformers,
     }
     if err := gb.monitorsForResources(deletableResources); err != nil {
         return nil, err

@@ -41,6 +41,8 @@ import (
     "k8s.io/client-go/util/workqueue"
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/v1"
+    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
+    informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
     "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
 )
 

@@ -55,7 +57,10 @@ func TestNewGarbageCollector(t *testing.T) {
         // no monitor will be constructed for non-core resource, the GC construction will not fail.
         {Group: "tpr.io", Version: "v1", Resource: "unknown"}: {},
     }
-    gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, api.Registry.RESTMapper(), podResource)
+
+    client := fake.NewSimpleClientset()
+    sharedInformers := informers.NewSharedInformerFactory(client, 0)
+    gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, api.Registry.RESTMapper(), podResource, sharedInformers)
     if err != nil {
         t.Fatal(err)
     }

@@ -113,17 +118,26 @@ func testServerAndClientConfig(handler func(http.ResponseWriter, *http.Request))
     return srv, config
 }
 
-func setupGC(t *testing.T, config *restclient.Config) *GarbageCollector {
+type garbageCollector struct {
+    *GarbageCollector
+    stop chan struct{}
+}
+
+func setupGC(t *testing.T, config *restclient.Config) garbageCollector {
     config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
     metaOnlyClientPool := dynamic.NewClientPool(config, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
     config.ContentConfig.NegotiatedSerializer = nil
     clientPool := dynamic.NewClientPool(config, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
     podResource := map[schema.GroupVersionResource]struct{}{{Version: "v1", Resource: "pods"}: {}}
-    gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, api.Registry.RESTMapper(), podResource)
+    client := fake.NewSimpleClientset()
+    sharedInformers := informers.NewSharedInformerFactory(client, 0)
+    gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, api.Registry.RESTMapper(), podResource, sharedInformers)
     if err != nil {
         t.Fatal(err)
     }
-    return gc
+    stop := make(chan struct{})
+    go sharedInformers.Start(stop)
+    return garbageCollector{gc, stop}
 }
 
 func getPod(podName string, ownerReferences []metav1.OwnerReference) *v1.Pod {

@@ -172,7 +186,10 @@ func TestAttemptToDeleteItem(t *testing.T) {
     }
     srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
     defer srv.Close()
+
     gc := setupGC(t, clientConfig)
+    defer close(gc.stop)
+
     item := &node{
         identity: objectReference{
             OwnerReference: metav1.OwnerReference{

@@ -320,6 +337,7 @@ func TestProcessEvent(t *testing.T) {
 // data race among in the dependents field.
 func TestDependentsRace(t *testing.T) {
     gc := setupGC(t, &restclient.Config{})
+    defer close(gc.stop)
 
     const updates = 100
     owner := &node{dependents: make(map[*node]struct{})}

@@ -453,6 +471,7 @@ func TestAbsentUIDCache(t *testing.T) {
     srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
     defer srv.Close()
     gc := setupGC(t, clientConfig)
+    defer close(gc.stop)
     gc.absentOwnerCache = NewUIDCache(2)
     gc.attemptToDeleteItem(podToGCNode(rc1Pod1))
     gc.attemptToDeleteItem(podToGCNode(rc2Pod1))

@@ -34,6 +34,7 @@ import (
     "k8s.io/client-go/dynamic"
     "k8s.io/client-go/tools/cache"
     "k8s.io/client-go/util/workqueue"
+    informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
 )
 
 type eventType int

@@ -62,6 +63,7 @@ type event struct {
     obj interface{}
     // the update event comes with an old object, but it's not used by the garbage collector.
     oldObj interface{}
+    gvk schema.GroupVersionKind
 }
 
 // GraphBuilder: based on the events supplied by the informers, GraphBuilder updates

@@ -90,6 +92,8 @@ type GraphBuilder struct {
     // GraphBuilder and GC share the absentOwnerCache. Objects that are known to
     // be non-existent are added to the cached.
     absentOwnerCache *UIDCache
+    sharedInformers  informers.SharedInformerFactory
+    stopCh           <-chan struct{}
 }
 
 func listWatcher(client *dynamic.Client, resource schema.GroupVersionResource) *cache.ListWatch {

@@ -118,6 +122,58 @@ func listWatcher(client *dynamic.Client, resource schema.GroupVersionResource) *
 }
 
 func (gb *GraphBuilder) controllerFor(resource schema.GroupVersionResource, kind schema.GroupVersionKind) (cache.Controller, error) {
+    handlers := cache.ResourceEventHandlerFuncs{
+        // add the event to the dependencyGraphBuilder's graphChanges.
+        AddFunc: func(obj interface{}) {
+            event := &event{
+                eventType: addEvent,
+                obj: obj,
+                gvk: kind,
+            }
+            gb.graphChanges.Add(event)
+        },
+        UpdateFunc: func(oldObj, newObj interface{}) {
+            // TODO: check if there are differences in the ownerRefs,
+            // finalizers, and DeletionTimestamp; if not, ignore the update.
+            event := &event{
+                eventType: updateEvent,
+                obj: newObj,
+                oldObj: oldObj,
+                gvk: kind,
+            }
+            gb.graphChanges.Add(event)
+        },
+        DeleteFunc: func(obj interface{}) {
+            // delta fifo may wrap the object in a cache.DeletedFinalStateUnknown, unwrap it
+            if deletedFinalStateUnknown, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+                obj = deletedFinalStateUnknown.Obj
+            }
+            event := &event{
+                eventType: deleteEvent,
+                obj: obj,
+                gvk: kind,
+            }
+            gb.graphChanges.Add(event)
+        },
+    }
+
+    shared, err := gb.sharedInformers.ForResource(resource)
+    if err == nil {
+        glog.V(4).Infof("using a shared informer for resource %q, kind %q", resource.String(), kind.String())
+        // need to clone because it's from a shared cache
+        shared.Informer().AddEventHandlerWithResyncPeriod(handlers, ResourceResyncTime)
+        if gb.stopCh != nil {
+            // if gb.stopCh is set, it means we've already gotten past the initial gb.Run() call, so this
+            // means we've re-loaded and re-read discovery and we are adding a new monitor for a
+            // previously unseen resource, so we need to call Start on the shared informers again (this
+            // will only start those shared informers that have not yet been started).
+            go gb.sharedInformers.Start(gb.stopCh)
+        }
+        return shared.Informer().GetController(), nil
+    } else {
+        glog.V(4).Infof("unable to use a shared informer for resource %q, kind %q: %v", resource.String(), kind.String(), err)
+    }
+
     // TODO: consider store in one storage.
     glog.V(5).Infof("create storage for resource %s", resource)
     client, err := gb.metaOnlyClientPool.ClientForGroupVersionKind(kind)

@@ -125,47 +181,12 @@ func (gb *GraphBuilder) controllerFor(resource schema.GroupVersionResource, kind
         return nil, err
     }
     gb.registeredRateLimiterForControllers.registerIfNotPresent(resource.GroupVersion(), client, "garbage_collector_monitoring")
-    setObjectTypeMeta := func(obj interface{}) {
-        runtimeObject, ok := obj.(runtime.Object)
-        if !ok {
-            utilruntime.HandleError(fmt.Errorf("expected runtime.Object, got %#v", obj))
-        }
-        runtimeObject.GetObjectKind().SetGroupVersionKind(kind)
-    }
     _, monitor := cache.NewInformer(
         listWatcher(client, resource),
         nil,
         ResourceResyncTime,
-        cache.ResourceEventHandlerFuncs{
-            // add the event to the dependencyGraphBuilder's graphChanges.
-            AddFunc: func(obj interface{}) {
-                setObjectTypeMeta(obj)
-                event := &event{
-                    eventType: addEvent,
-                    obj: obj,
-                }
-                gb.graphChanges.Add(event)
-            },
-            UpdateFunc: func(oldObj, newObj interface{}) {
-                setObjectTypeMeta(newObj)
-                // TODO: check if there are differences in the ownerRefs,
-                // finalizers, and DeletionTimestamp; if not, ignore the update.
-                event := &event{updateEvent, newObj, oldObj}
-                gb.graphChanges.Add(event)
-            },
-            DeleteFunc: func(obj interface{}) {
-                // delta fifo may wrap the object in a cache.DeletedFinalStateUnknown, unwrap it
-                if deletedFinalStateUnknown, ok := obj.(cache.DeletedFinalStateUnknown); ok {
-                    obj = deletedFinalStateUnknown.Obj
-                }
-                setObjectTypeMeta(obj)
-                event := &event{
-                    eventType: deleteEvent,
-                    obj: obj,
-                }
-                gb.graphChanges.Add(event)
-            },
-        },
+        // don't need to clone because it's not from shared cache
+        handlers,
     )
     return monitor, nil
 }

@@ -205,6 +226,9 @@ func (gb *GraphBuilder) Run(stopCh <-chan struct{}) {
         go monitor.Run(stopCh)
     }
     go wait.Until(gb.runProcessGraphChanges, 1*time.Second, stopCh)
+
+    // set this so that we can use it if we need to start new shared informers
+    gb.stopCh = stopCh
 }
 
 var ignoredResources = map[schema.GroupVersionResource]struct{}{

@@ -435,12 +459,7 @@ func (gb *GraphBuilder) processGraphChanges() bool {
         utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err))
         return true
     }
-    typeAccessor, err := meta.TypeAccessor(obj)
-    if err != nil {
-        utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err))
-        return true
-    }
-    glog.V(5).Infof("GraphBuilder process object: %s/%s, namespace %s, name %s, event type %v", typeAccessor.GetAPIVersion(), typeAccessor.GetKind(), accessor.GetNamespace(), accessor.GetName(), event.eventType)
+    glog.V(5).Infof("GraphBuilder process object: %s/%s, namespace %s, name %s, uid %s, event type %v", event.gvk.GroupVersion().String(), event.gvk.Kind, accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType)
     // Check if the node already exsits
     existingNode, found := gb.uidToNode.Read(accessor.GetUID())
     switch {

@@ -448,8 +467,8 @@ func (gb *GraphBuilder) processGraphChanges() bool {
         newNode := &node{
             identity: objectReference{
                 OwnerReference: metav1.OwnerReference{
-                    APIVersion: typeAccessor.GetAPIVersion(),
-                    Kind: typeAccessor.GetKind(),
+                    APIVersion: event.gvk.GroupVersion().String(),
+                    Kind: event.gvk.Kind,
                     UID: accessor.GetUID(),
                     Name: accessor.GetName(),
                 },