Mirror of https://github.com/niusmallnan/steve.git (synced 2025-06-23 13:17:04 +00:00)
Don't reuse summary informer after the informer is stopped

commit a48c49f660 (parent 9e4ed62a47)
@@ -13,6 +13,7 @@ import (
 	"github.com/rancher/wrangler/pkg/summary/informer"
 	"github.com/sirupsen/logrus"
 	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	schema2 "k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/dynamic"
@@ -44,17 +45,15 @@ type watcher struct {
 	informer cache.SharedIndexInformer
 	gvk      schema2.GroupVersionKind
 	gvr      schema2.GroupVersionResource
-	start    bool
 }
 
 type clusterCache struct {
 	sync.RWMutex
 
 	ctx             context.Context
-	typed           map[schema2.GroupVersionKind]cache.SharedIndexInformer
-	informerFactory informer.SummarySharedInformerFactory
+	summaryClient   client.Interface
 	watchers        map[schema2.GroupVersionResource]*watcher
 	workqueue       workqueue.DelayingInterface
 
 	addHandlers    cancelCollection
 	removeHandlers cancelCollection
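This struct change carries the fix: the per-GVK informer map, the shared informer factory, and the watcher's start flag are dropped, and the cache keeps only a summary client from which each watcher builds its own informer. Below is a minimal Go sketch of the hazard the old layout allowed, assuming (as the commit title implies) that the shared factory hands back one cached informer per resource; the client import path and the helper are illustrative, while NewSummarySharedInformerFactory and ForResource(gvr).Informer() are taken from the code being removed.

package sketch

import (
	"time"

	"github.com/rancher/wrangler/pkg/summary/client"   // assumed import path for client.Interface
	"github.com/rancher/wrangler/pkg/summary/informer" // as imported by the file in this diff
	schema2 "k8s.io/apimachinery/pkg/runtime/schema"
)

// reuseHazard illustrates why handing out factory-cached informers is risky:
// once the first watcher's stop channel is closed, a later request for the
// same GVR gets back an informer that has already been stopped.
func reuseHazard(summaryClient client.Interface, gvr schema2.GroupVersionResource) {
	factory := informer.NewSummarySharedInformerFactory(summaryClient, 2*time.Hour)

	stop := make(chan struct{})
	first := factory.ForResource(gvr).Informer()
	go first.Run(stop)
	close(stop) // schema removed: this watcher's informer is now stopped

	// Old code path on a later schema refresh: same GVR, same cached informer.
	second := factory.ForResource(gvr).Informer()
	_ = second // assumed to be the same, already-stopped instance
}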
@@ -63,11 +62,10 @@ type clusterCache struct {
 
 func NewClusterCache(ctx context.Context, dynamicClient dynamic.Interface) ClusterCache {
 	c := &clusterCache{
 		ctx:             ctx,
-		typed:           map[schema2.GroupVersionKind]cache.SharedIndexInformer{},
-		informerFactory: informer.NewSummarySharedInformerFactory(client.NewForDynamicClient(dynamicClient), 2*time.Hour),
+		summaryClient:   client.NewForDynamicClient(dynamicClient),
 		watchers:        map[schema2.GroupVersionResource]*watcher{},
 		workqueue:       workqueue.NewNamedDelayingQueue("cluster-cache"),
 	}
 	go c.start()
 	return c
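For context, a small usage sketch of the constructor kept by this hunk. Only NewClusterCache's signature comes from the diff; the kubeconfig wiring and the clustercache import path are assumptions.

package main

import (
	"context"

	"github.com/rancher/steve/pkg/clustercache" // assumed import path
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a dynamic client from the default kubeconfig (illustrative wiring).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	dynamicClient, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The returned ClusterCache starts its workqueue loop in the background.
	cc := clustercache.NewClusterCache(context.Background(), dynamicClient)
	_ = cc
}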
@@ -130,8 +128,8 @@ func (h *clusterCache) OnSchemas(schemas *schema.Collection) error {
 	defer h.Unlock()
 
 	var (
-		toStart = map[schema2.GroupVersionResource]*watcher{}
-		gvrs    = map[schema2.GroupVersionResource]bool{}
+		gvrs   = map[schema2.GroupVersionResource]bool{}
+		toWait []*watcher
 	)
 
 	for _, id := range schemas.IDs() {
@@ -144,26 +142,26 @@ func (h *clusterCache) OnSchemas(schemas *schema.Collection) error {
 		gvk := attributes.GVK(schema)
 		gvrs[gvr] = true
 
-		w := h.watchers[gvr]
-		if w != nil {
+		if h.watchers[gvr] != nil {
 			continue
 		}
 
+		summaryInformer := informer.NewFilteredSummaryInformer(h.summaryClient, gvr, metav1.NamespaceAll, 2*time.Hour,
+			cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, nil)
 		ctx, cancel := context.WithCancel(h.ctx)
-		w = &watcher{
+		w := &watcher{
 			ctx:      ctx,
 			cancel:   cancel,
 			gvk:      gvk,
 			gvr:      gvr,
-			informer: h.typed[gvk],
+			informer: summaryInformer.Informer(),
 		}
-		toStart[gvr] = w
+		h.watchers[gvr] = w
+		toWait = append(toWait, w)
 
-		if w.informer == nil {
-			w.informer = h.informerFactory.ForResource(gvr).Informer()
-			w.start = true
-			h.addResourceEventHandler(gvr, w.informer)
-		}
+		logrus.Infof("Watching metadata for %s", w.gvk)
+		h.addResourceEventHandler(w.gvr, w.informer)
+		go w.informer.Run(w.ctx.Done())
 	}
 
 	for gvr, w := range h.watchers {
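The hunk above is where the reuse stops: every newly seen GVR gets a dedicated NewFilteredSummaryInformer, the watcher is registered in h.watchers right away, and the informer is started immediately under that watcher's own context. A minimal sketch of that lifecycle in isolation follows; the watcher type is trimmed and the summary client import path is assumed, while the informer construction and the Run/Done wiring mirror the added lines.

package sketch

import (
	"context"
	"time"

	"github.com/rancher/wrangler/pkg/summary/client"   // assumed import path for client.Interface
	"github.com/rancher/wrangler/pkg/summary/informer" // as imported by the file in this diff
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schema2 "k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/tools/cache"
)

// watcher is trimmed to the fields this sketch needs.
type watcher struct {
	ctx      context.Context
	cancel   context.CancelFunc
	gvr      schema2.GroupVersionResource
	informer cache.SharedIndexInformer
}

// startWatcher builds a brand-new summary informer for gvr and runs it under
// the watcher's own cancellable context. Calling w.cancel() stops the informer;
// if the schema comes back later, the caller constructs another watcher instead
// of reusing the stopped informer, which is what the hunk above switches to.
func startWatcher(parent context.Context, summaryClient client.Interface, gvr schema2.GroupVersionResource) *watcher {
	summaryInformer := informer.NewFilteredSummaryInformer(summaryClient, gvr, metav1.NamespaceAll, 2*time.Hour,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, nil)

	ctx, cancel := context.WithCancel(parent)
	w := &watcher{
		ctx:      ctx,
		cancel:   cancel,
		gvr:      gvr,
		informer: summaryInformer.Informer(),
	}
	go w.informer.Run(w.ctx.Done())
	return w
}

Cancelling the watcher's context stops its informer for good; a later schema refresh builds a fresh watcher and informer rather than handing the stopped one back out.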
@@ -174,24 +172,15 @@ func (h *clusterCache) OnSchemas(schemas *schema.Collection) error {
 		}
 	}
 
-	var toWait []*watcher
-
-	for _, w := range toStart {
-		if !w.start {
-			continue
-		}
-		w.start = false
-		logrus.Infof("Watching metadata for %s", w.gvk)
-		go w.informer.Run(w.ctx.Done())
-		toWait = append(toWait, w)
-	}
-
 	for _, w := range toWait {
-		cache.WaitForCacheSync(w.ctx.Done(), w.informer.HasSynced)
-	}
-
-	for _, w := range toStart {
-		h.watchers[w.gvr] = w
+		ctx, cancel := context.WithTimeout(w.ctx, 15*time.Minute)
+		if !cache.WaitForCacheSync(ctx.Done(), w.informer.HasSynced) {
+			logrus.Errorf("failed to sync cache for %v", w.gvk)
+			cancel()
+			w.cancel()
+			delete(h.watchers, w.gvr)
+		}
+		cancel()
 	}
 
 	return nil
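The final hunk also bounds the wait for the initial sync: each watcher gets up to 15 minutes, and on failure the watcher is cancelled and removed from h.watchers so the next OnSchemas pass starts a clean informer. A minimal sketch of the timeout pattern on its own; the helper name and signature are not from the diff, only context.WithTimeout and cache.WaitForCacheSync are.

package sketch

import (
	"context"
	"time"

	"k8s.io/client-go/tools/cache"
)

// syncWithTimeout wraps cache.WaitForCacheSync, which blocks until either the
// informer has synced or the channel closes, with a deadline so a watch that
// never syncs cannot stall the caller forever.
func syncWithTimeout(parent context.Context, inf cache.SharedIndexInformer, timeout time.Duration) bool {
	ctx, cancel := context.WithTimeout(parent, timeout)
	defer cancel() // always release the timer, as the trailing cancel() in the hunk does
	return cache.WaitForCacheSync(ctx.Done(), inf.HasSynced)
}

When the sync fails, the diff tears the watcher down completely (w.cancel() plus delete(h.watchers, w.gvr)), so a stopped informer is never left registered or reused.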