Mirror of https://github.com/kubernetes/client-go.git, synced 2025-07-31 14:54:27 +00:00
Shared Informer Run blocks until all goroutines finish

Fixes #45454

Kubernetes-commit: d789615902ead2a112ad24fbfebe95285b87004b

parent b5da66a5eb, commit 5f6ea627a3
tools/cache/controller.go (vendored): 5 changes
@@ -116,7 +116,10 @@ func (c *controller) Run(stopCh <-chan struct{}) {
 	c.reflector = r
 	c.reflectorMutex.Unlock()
 
-	r.RunUntil(stopCh)
+	var wg sync.WaitGroup
+	defer wg.Wait()
+
+	wait.StartUntil(stopCh, &wg, r.Run)
 
 	wait.Until(c.processLoop, time.Second, stopCh)
 }
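The wait.StartUntil helper itself is not part of this diff. The following is a minimal sketch of what its call sites imply, with the signature inferred from wait.StartUntil(stopCh, &wg, r.Run) rather than taken from the wait package:

// Sketch only: StartUntil as implied by its call sites in this commit.
// It runs fn(stopCh) in a new goroutine and tracks it in wg so that callers
// can block on wg.Wait() until fn has returned.
package wait

import "sync"

func StartUntil(stopCh <-chan struct{}, wg *sync.WaitGroup, fn func(<-chan struct{})) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		fn(stopCh)
	}()
}

With this shape, the controller's deferred wg.Wait() guarantees that Run does not return until the reflector goroutine has exited.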
tools/cache/mutation_detector.go (vendored): 16 changes
@@ -79,17 +79,15 @@ type cacheObj struct {
 
 func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) {
 	// we DON'T want protection from panics.  If we're running this code, we want to die
-	go func() {
-		for {
-			d.CompareObjects()
+	for {
+		d.CompareObjects()
 
-			select {
-			case <-stopCh:
-				return
-			case <-time.After(d.period):
-			}
+		select {
+		case <-stopCh:
+			return
+		case <-time.After(d.period):
 		}
-	}()
+	}
 }
 
 // AddObject makes a deep copy of the object for later comparison.  It only works on runtime.Object
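Run is now blocking: the loop executes in the caller's goroutine instead of one it spawns itself. A self-contained toy of the new caller contract (blockingRun and the period are stand-ins, not the real detector):

package main

import (
	"fmt"
	"sync"
	"time"
)

// blockingRun stands in for defaultCacheMutationDetector.Run after this change:
// it loops in the calling goroutine and returns only once stopCh is closed.
func blockingRun(stopCh <-chan struct{}) {
	for {
		fmt.Println("compare objects") // stands in for d.CompareObjects()
		select {
		case <-stopCh:
			return
		case <-time.After(100 * time.Millisecond): // stands in for d.period
		}
	}
}

func main() {
	stopCh := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		blockingRun(stopCh)
	}()
	time.Sleep(250 * time.Millisecond)
	close(stopCh)
	wg.Wait() // shutdown completes only after Run has returned
}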
tools/cache/reflector.go (vendored): 17 changes
@@ -182,21 +182,10 @@ func extractStackCreator() (string, int, bool) {
 }
 
 // Run starts a watch and handles watch events. Will restart the watch if it is closed.
-// Run starts a goroutine and returns immediately.
-func (r *Reflector) Run() {
+// Run will exit when stopCh is closed.
+func (r *Reflector) Run(stopCh <-chan struct{}) {
 	glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
-	go wait.Until(func() {
-		if err := r.ListAndWatch(wait.NeverStop); err != nil {
-			utilruntime.HandleError(err)
-		}
-	}, r.period, wait.NeverStop)
-}
-
-// RunUntil starts a watch and handles watch events. Will restart the watch if it is closed.
-// RunUntil starts a goroutine and returns immediately. It will exit when stopCh is closed.
-func (r *Reflector) RunUntil(stopCh <-chan struct{}) {
-	glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
-	go wait.Until(func() {
+	wait.Until(func() {
 		if err := r.ListAndWatch(stopCh); err != nil {
 			utilruntime.HandleError(err)
 		}
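The surviving body's only behavioral change is the dropped `go` prefix: wait.Until itself blocks, re-invoking its func every period until the stop channel closes. A standalone illustration (assuming today's apimachinery import path):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})
	go func() {
		time.Sleep(350 * time.Millisecond)
		close(stopCh)
	}()
	// wait.Until blocks the calling goroutine, invoking the func roughly every
	// period until stopCh closes; this is what makes the new Reflector.Run blocking.
	wait.Until(func() { fmt.Println("list-and-watch cycle") }, 100*time.Millisecond, stopCh)
	fmt.Println("reflector stopped")
}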
tools/cache/reflector_test.go (vendored): 2 changes
@@ -83,7 +83,7 @@ func TestRunUntil(t *testing.T) {
 			return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "1"}}, nil
 		},
 	}
-	r.RunUntil(stopCh)
+	go r.Run(stopCh)
 	// Synchronously add a dummy pod into the watch channel so we
 	// know the RunUntil go routine is in the watch handler.
 	fw.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar"}})
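Since Run now blocks, the test wraps it in `go`; the following fw.Add then doubles as a synchronization point. A minimal, hypothetical illustration of that property of the fake watcher (import paths are the current apimachinery/api locations, not necessarily those of this vintage):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	fw := watch.NewFake()
	done := make(chan struct{})
	go func() {
		ev := <-fw.ResultChan() // the consumer side, like the reflector's watch handler
		fmt.Println("received:", ev.Type)
		close(done)
	}()
	// FakeWatcher.Add sends on an unbuffered channel, so it returns only after
	// the consumer has received the event: a built-in synchronization point.
	fw.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar"}})
	<-done
}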
tools/cache/shared_informer.go (vendored): 30 changes
@@ -147,6 +147,7 @@ type sharedIndexInformer struct {
 	// stopCh is the channel used to stop the main Run process.  We have to track it so that
 	// late joiners can have a proper stop
 	stopCh <-chan struct{}
+	wg     sync.WaitGroup
 }
 
 // dummyController hides the fact that a SharedInformer is different from a dedicated one
@@ -204,12 +205,14 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
 
 		s.controller = New(cfg)
 		s.controller.(*controller).clock = s.clock
+		s.stopCh = stopCh
 		s.started = true
 	}()
 
-	s.stopCh = stopCh
-	s.cacheMutationDetector.Run(stopCh)
-	s.processor.run(stopCh)
+	defer s.wg.Wait()
+
+	wait.StartUntil(stopCh, &s.wg, s.cacheMutationDetector.Run)
+	wait.StartUntil(stopCh, &s.wg, s.processor.run)
 	s.controller.Run(stopCh)
 }
@@ -324,8 +327,8 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv
 
 	s.processor.addListener(listener)
 
-	go listener.run(s.stopCh)
-	go listener.pop(s.stopCh)
+	wait.StartUntil(s.stopCh, &s.wg, listener.run)
+	wait.StartUntil(s.stopCh, &s.wg, listener.pop)
 
 	items := s.indexer.List()
 	for i := range items {
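With the stop channel and WaitGroup stored on the informer, handlers added after Run has started (the "late joiners" mentioned in the struct comment) are tracked the same way as the original goroutines. A stdlib-only sketch of that pattern, with hypothetical names:

package main

import "sync"

type informer struct {
	stopCh <-chan struct{} // in the real code, set under a lock when Run starts
	wg     sync.WaitGroup
}

// start mirrors the role of wait.StartUntil(s.stopCh, &s.wg, fn).
func (s *informer) start(fn func(<-chan struct{})) {
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		fn(s.stopCh)
	}()
}

func main() {
	stopCh := make(chan struct{})
	s := &informer{stopCh: stopCh}
	s.start(func(stop <-chan struct{}) { <-stop }) // goroutine started with Run
	s.start(func(stop <-chan struct{}) { <-stop }) // "late joiner" handler
	close(stopCh)
	s.wg.Wait() // Run's deferred wait: returns only after both exit
}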
@@ -395,13 +398,16 @@ func (p *sharedProcessor) distribute(obj interface{}, sync bool) {
 }
 
 func (p *sharedProcessor) run(stopCh <-chan struct{}) {
-	p.listenersLock.RLock()
-	defer p.listenersLock.RUnlock()
-
-	for _, listener := range p.listeners {
-		go listener.run(stopCh)
-		go listener.pop(stopCh)
-	}
+	var wg sync.WaitGroup
+	func() {
+		p.listenersLock.RLock()
+		defer p.listenersLock.RUnlock()
+		for _, listener := range p.listeners {
+			wait.StartUntil(stopCh, &wg, listener.run)
+			wait.StartUntil(stopCh, &wg, listener.pop)
+		}
+	}()
+	wg.Wait()
 }
 
 // shouldResync queries every listener to determine if any of them need a resync, based on each
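The inner func() literal is notable: it scopes the read lock so that wg.Wait() runs after the lock is released. The rationale here is an inference, not stated in the commit: waiting while still holding listenersLock would block any writer (such as addListener) for as long as the listeners run. A stdlib-only sketch of the pattern, with hypothetical names:

package main

import "sync"

type processor struct {
	mu        sync.RWMutex
	listeners []func(<-chan struct{})
}

func (p *processor) run(stopCh <-chan struct{}) {
	var wg sync.WaitGroup
	func() {
		p.mu.RLock()
		defer p.mu.RUnlock() // read lock held only while goroutines are launched
		for _, l := range p.listeners {
			l := l
			wg.Add(1)
			go func() {
				defer wg.Done()
				l(stopCh)
			}()
		}
	}()
	wg.Wait() // wait without holding the lock, so writers are never blocked for the listeners' lifetime
}

func main() {
	p := &processor{listeners: []func(<-chan struct{}){
		func(stop <-chan struct{}) { <-stop },
	}}
	stopCh := make(chan struct{})
	close(stopCh)
	p.run(stopCh) // returns once every listener has observed the closed channel
}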