Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #32275 from wojtek-t/split_process_event

Automatic merge from submit-queue

Split dispatching to watchers in Cacher into separate goroutine. Should help with #32257

Commit: 504ccc6f37
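In outline, the change turns the OnEvent handler into a cheap enqueue onto a buffered channel and moves the fan-out to watchers into a dedicated goroutine. Below is a minimal, self-contained sketch of that shape; the event, dispatcher, onEvent, and run names are illustrative stand-ins, not the actual Cacher types or methods.

package main

import (
	"fmt"
	"sync"
)

// event stands in for watchCacheEvent; watchers are reduced to plain channels.
type event struct{ key string }

type dispatcher struct {
	incoming chan event   // buffered hand-off, analogous to Cacher.incoming
	watchers []chan event // stand-in for the indexed watchers
	stopCh   chan struct{}
	wg       sync.WaitGroup
}

// onEvent is the producer side: it only enqueues and returns immediately,
// which is what the new Cacher.processEvent does.
func (d *dispatcher) onEvent(e event) {
	d.incoming <- e
}

// run is the consumer side: a single goroutine drains the channel and fans
// events out to watchers, analogous to Cacher.dispatchEvents/dispatchEvent.
func (d *dispatcher) run() {
	defer d.wg.Done()
	for {
		select {
		case e, ok := <-d.incoming:
			if !ok {
				return
			}
			for _, w := range d.watchers {
				w <- e
			}
		case <-d.stopCh:
			return
		}
	}
}

func main() {
	w := make(chan event, 10)
	d := &dispatcher{
		incoming: make(chan event, 100), // buffered, as in NewCacherFromConfig
		watchers: []chan event{w},
		stopCh:   make(chan struct{}),
	}
	d.wg.Add(1)
	go d.run()

	d.onEvent(event{key: "pods/foo"}) // returns as soon as the event is queued
	fmt.Println((<-w).key)            // prints "pods/foo"

	close(d.stopCh) // graceful termination, mirroring Cacher.stopCh
	d.wg.Wait()
}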
@@ -161,6 +161,9 @@ type Cacher struct {
 	watcherIdx int
 	watchers   indexedWatchers
 
+	// Incoming events that should be dispatched to watchers.
+	incoming chan watchCacheEvent
+
 	// Handling graceful termination.
 	stopLock sync.RWMutex
 	stopped  bool
@@ -197,6 +200,8 @@ func NewCacherFromConfig(config CacherConfig) *Cacher {
 			allWatchers:   make(map[int]*cacheWatcher),
 			valueWatchers: make(map[string]watchersMap),
 		},
+		// TODO: Figure out the correct value for the buffer size.
+		incoming: make(chan watchCacheEvent, 100),
 		// We need to (potentially) stop both:
 		// - wait.Until go-routine
 		// - reflector.ListAndWatch
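The buffer size chosen here (100, with a TODO to tune it) bounds how far the event producer can run ahead of the dispatching goroutine: sends are non-blocking until the buffer fills, after which processEvent blocks again. A tiny illustration of that channel behavior, independent of the Cacher code:

package main

import "fmt"

func main() {
	incoming := make(chan int, 2) // buffered like Cacher.incoming, but tiny

	incoming <- 1 // does not block: buffer has room
	incoming <- 2 // does not block: buffer is now full

	select {
	case incoming <- 3:
		fmt.Println("queued without blocking")
	default:
		// A plain `incoming <- 3` would block here until a receiver drains
		// the channel; this is what the buffer size controls.
		fmt.Println("buffer full, send would block")
	}
}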
@@ -205,6 +210,7 @@ func NewCacherFromConfig(config CacherConfig) *Cacher {
 		stopCh: make(chan struct{}),
 	}
 	watchCache.SetOnEvent(cacher.processEvent)
+	go cacher.dispatchEvents()
 
 	stopCh := cacher.stopCh
 	cacher.stopWg.Add(1)
@@ -403,14 +409,26 @@ func (c *Cacher) triggerValues(event *watchCacheEvent) ([]string, bool) {
 	return result, len(result) > 0
 }
 
-// TODO: Most probably splitting this method to a separate thread will visibily
-// improve throughput of our watch machinery. So what we should do is to:
-// - OnEvent handler simply put an element to channel
-// - processEvent be another goroutine processing events from that channel
-// Additionally, if we make this channel buffered, cacher will be more resistant
-// to single watchers being slow - see cacheWatcher::add method.
 func (c *Cacher) processEvent(event watchCacheEvent) {
-	triggerValues, supported := c.triggerValues(&event)
+	c.incoming <- event
+}
+
+func (c *Cacher) dispatchEvents() {
+	for {
+		select {
+		case event, ok := <-c.incoming:
+			if !ok {
+				return
+			}
+			c.dispatchEvent(&event)
+		case <-c.stopCh:
+			return
+		}
+	}
+}
+
+func (c *Cacher) dispatchEvent(event *watchCacheEvent) {
+	triggerValues, supported := c.triggerValues(event)
 
 	c.Lock()
 	defer c.Unlock()
@@ -614,10 +632,10 @@ func (c *cacheWatcher) stop() {
 
 var timerPool sync.Pool
 
-func (c *cacheWatcher) add(event watchCacheEvent) {
+func (c *cacheWatcher) add(event *watchCacheEvent) {
 	// Try to send the event immediately, without blocking.
 	select {
-	case c.input <- event:
+	case c.input <- *event:
 		return
 	default:
 	}
@@ -636,7 +654,7 @@ func (c *cacheWatcher) add(event watchCacheEvent) {
 	defer timerPool.Put(t)
 
 	select {
-	case c.input <- event:
+	case c.input <- *event:
 		stopped := t.Stop()
 		if !stopped {
 			// Consume triggered (but not yet received) timer event
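The cacheWatcher.add changes above keep the existing send strategy: try a non-blocking send first, then fall back to a send bounded by a timer drawn from a sync.Pool, so one slow watcher cannot stall dispatch indefinitely. A rough sketch of that pattern follows; the trySend helper and the timeout value are hypothetical, not the actual cacheWatcher code.

package main

import (
	"fmt"
	"sync"
	"time"
)

// timerPool reuses timers between calls, mirroring the sync.Pool in the diff.
var timerPool sync.Pool

// trySend first attempts a non-blocking send, then falls back to a send
// bounded by a pooled timer. It reports whether the value was delivered.
func trySend(ch chan<- string, v string, timeout time.Duration) bool {
	// Fast path: succeeds immediately if the receiver is keeping up.
	select {
	case ch <- v:
		return true
	default:
	}

	// Slow path: bounded wait using a reused timer.
	t, ok := timerPool.Get().(*time.Timer)
	if ok {
		t.Reset(timeout)
	} else {
		t = time.NewTimer(timeout)
	}
	defer timerPool.Put(t)

	select {
	case ch <- v:
		if !t.Stop() {
			// Consume a timer event that already fired so the pooled
			// timer can be safely Reset by the next caller.
			<-t.C
		}
		return true
	case <-t.C:
		return false // receiver too slow; the real code terminates the watcher
	}
}

func main() {
	slow := make(chan string)                                  // unbuffered and never read from
	fmt.Println(trySend(slow, "event", 100*time.Millisecond)) // prints "false"
}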