Merge pull request #108080 from astoycos/issue-132

Fix Panic Condition

Kubernetes-commit: efb62b3538f34f304ea9c68966a053822bc23d1a
Committed by Kubernetes Publisher on 2022-05-11 05:01:02 -07:00
5 changed files with 25 additions and 9 deletions

go.mod

@@ -27,7 +27,7 @@ require (
 	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8
 	google.golang.org/protobuf v1.27.1
 	k8s.io/api v0.0.0-20220510154143-ae35a85329f1
-	k8s.io/apimachinery v0.0.0-20220509181918-47789511e916
+	k8s.io/apimachinery v0.0.0-20220511125320-f3b1305f4010
 	k8s.io/klog/v2 v2.60.1
 	k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42
 	k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
@@ -73,5 +73,5 @@ require (
 replace (
 	k8s.io/api => k8s.io/api v0.0.0-20220510154143-ae35a85329f1
-	k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20220509181918-47789511e916
+	k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20220511125320-f3b1305f4010
 )
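
For context, the apimachinery bump above is what drives the Go changes below: the pinned revision appears to change watch.Broadcaster so that Watch, WatchWithPrefix, and ActionOrDrop return an error (for example, when the broadcaster has already been shut down) instead of panicking. A minimal sketch of the method shapes the updated call sites rely on, expressed as a hypothetical interface derived from those call sites rather than copied from the apimachinery source:

package sketch

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
)

// broadcasterAPI is a hypothetical interface capturing the signatures the
// call sites in this commit now expect from watch.Broadcaster.
type broadcasterAPI interface {
	// Watch returns an error rather than panicking when the broadcaster
	// has already been shut down.
	Watch() (watch.Interface, error)
	// WatchWithPrefix is like Watch, but replays the queued events first.
	WatchWithPrefix(queuedEvents []watch.Event) (watch.Interface, error)
	// ActionOrDrop reports whether the event was queued, and returns an
	// error if the broadcaster is no longer accepting events.
	ActionOrDrop(action watch.EventType, obj runtime.Object) (bool, error)
}

// Compile-time check (assumption): the pinned Broadcaster satisfies these shapes.
var _ broadcasterAPI = &watch.Broadcaster{}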

go.sum

@@ -610,8 +610,8 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 k8s.io/api v0.0.0-20220510154143-ae35a85329f1 h1:yZN3QEsAnbRVMdS2+kSdD4NY7M23TUKydf8rAkD+FLA=
 k8s.io/api v0.0.0-20220510154143-ae35a85329f1/go.mod h1:sZiCpVOT/SE/nrwAAwOLnekwXoB+cNDNfNMmYNckG3k=
-k8s.io/apimachinery v0.0.0-20220509181918-47789511e916 h1:EK7ESaLKImLW0V+N2iBxxPqBHXANWPbjBnUs880QPGE=
-k8s.io/apimachinery v0.0.0-20220509181918-47789511e916/go.mod h1:1oBVxgNUfLl978lJAlywA+H45m2ctSuqJU2stpbcjT4=
+k8s.io/apimachinery v0.0.0-20220511125320-f3b1305f4010 h1:UDqeN/6SuN6isZIYNS8S4p1Dhd4uxH0eR18ntskjI4s=
+k8s.io/apimachinery v0.0.0-20220511125320-f3b1305f4010/go.mod h1:1oBVxgNUfLl978lJAlywA+H45m2ctSuqJU2stpbcjT4=
 k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
 k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=


@@ -275,11 +275,11 @@ func (f *FakeControllerSource) Watch(options metav1.ListOptions) (watch.Interfac
 			// clients).
 			changes = append(changes, watch.Event{Type: c.Type, Object: c.Object.DeepCopyObject()})
 		}
-		return f.Broadcaster.WatchWithPrefix(changes), nil
+		return f.Broadcaster.WatchWithPrefix(changes)
 	} else if rc > f.lastRV {
 		return nil, errors.New("resource version in the future not supported by this fake")
 	}
-	return f.Broadcaster.Watch(), nil
+	return f.Broadcaster.Watch()
 }
 
 // Shutdown closes the underlying broadcaster, waiting for events to be
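
For context, FakeControllerSource.Watch already returned (watch.Interface, error); the hunk above simply forwards the broadcaster's error through that existing return value instead of hard-coding nil. A minimal usage sketch (not part of this commit), assuming the k8s.io/client-go/tools/cache/testing package at this revision:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	fcache "k8s.io/client-go/tools/cache/testing"
)

func main() {
	source := fcache.NewFakeControllerSource()

	// Watch now propagates the broadcaster's error (for example, if the
	// source was already shut down) instead of panicking.
	w, err := source.Watch(metav1.ListOptions{ResourceVersion: "0"})
	if err != nil {
		fmt.Println("watch failed:", err)
		return
	}
	defer w.Stop()

	// Add an object after the watch is established; the fake source
	// broadcasts an Added event to the watcher.
	source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-1"}})

	ev := <-w.ResultChan()
	fmt.Printf("got %s event for %T\n", ev.Type, ev.Object)
}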


@@ -307,7 +307,15 @@ func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) func
 // StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function.
 // The return value is used to stop recording
 func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime.Object)) func() {
-	watcher := e.Watch()
+	watcher, err := e.Watch()
+	if err != nil {
+		klog.Errorf("Unable start event watcher: '%v' (will not retry!)", err)
+		// TODO: Rewrite the function signature to return an error, for
+		// now just return a no-op function
+		return func() {
+			klog.Error("The event watcher failed to start")
+		}
+	}
 	go func() {
 		defer utilruntime.HandleCrash()
 		for {
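
The TODO above exists because StartEventWatcher must keep returning a plain func(), so a failed Watch can only be surfaced by logging and handing back a no-op stop function. A small self-contained sketch of that compromise, using hypothetical names rather than client-go APIs:

package main

import (
	"errors"
	"log"
)

// startWatcher mirrors the pattern above: the signature cannot grow an
// error result, so a failed start is logged and a harmless stop function
// is returned. startWatcher and its parameter are hypothetical.
func startWatcher(watch func() (stop func(), err error)) func() {
	stop, err := watch()
	if err != nil {
		log.Printf("unable to start watcher: %v (will not retry!)", err)
		// No-op stop function; calling it is safe but does nothing useful.
		return func() { log.Print("the watcher never started") }
	}
	return stop
}

func main() {
	stop := startWatcher(func() (func(), error) {
		return nil, errors.New("broadcaster already shut down")
	})
	stop() // safe to call even though the start failed
}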


@@ -298,7 +298,10 @@ func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watc
 // StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function.
 // The return value can be ignored or used to stop recording, if desired.
 func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface {
-	watcher := e.Watch()
+	watcher, err := e.Watch()
+	if err != nil {
+		klog.Errorf("Unable start event watcher: '%v' (will not retry!)", err)
+	}
 	go func() {
 		defer utilruntime.HandleCrash()
 		for watchEvent := range watcher.ResultChan() {
@@ -346,7 +349,12 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations m
 	// when we go to shut down this broadcaster. Just drop events if we get overloaded,
 	// and log an error if that happens (we've configured the broadcaster to drop
 	// outgoing events anyway).
-	if sent := recorder.ActionOrDrop(watch.Added, event); !sent {
+	sent, err := recorder.ActionOrDrop(watch.Added, event)
+	if err != nil {
+		klog.Errorf("unable to record event: %v (will not retry!)", err)
+		return
+	}
+	if !sent {
 		klog.Errorf("unable to record event: too many queued events, dropped event %#v", event)
 	}
 }
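
Taken together, the error-aware ActionOrDrop handling above is what removes the panic named in the commit title: recording an event after the broadcaster has been shut down is now logged and dropped rather than crashing. A minimal sketch of that scenario against the public record API (the exact pre-fix failure mode is not reproduced here):

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
)

func main() {
	broadcaster := record.NewBroadcaster()
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"})

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-1", Namespace: "default"}}

	// Shut the broadcaster down first. Before this fix the recorder could
	// panic when handed a stopped broadcaster; with the change above, the
	// event is dropped and an error is logged instead.
	broadcaster.Shutdown()
	recorder.Event(pod, v1.EventTypeNormal, "Example", "recorded after shutdown")
}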