Mirror of https://github.com/kubernetes/client-go.git, synced 2025-09-06 01:20:40 +00:00
client-go/tools/cache: add APIs with context parameter
The context is used for cancellation and to support contextual logging. In most cases, alternative *WithContext APIs get added, except for NewIntegerResourceVersionMutationCache, where code searches indicate that the API is not used downstream.

An API break around SharedInformer couldn't be avoided because the alternative (keeping the interface unchanged and adding a second one with the new method) would have been worse. controller-runtime needs to be updated because it implements that interface in a test package. Downstream consumers of controller-runtime will work unless they use that test package.

Converting Kubernetes to use the new alternatives will follow. In the meantime, usage of the new alternatives cannot be enforced via logcheck yet (see https://github.com/kubernetes/kubernetes/issues/126379 for the process).

Passing a context through and checking it for cancellation is tricky for event handlers. A better approach is to map context cancellation to the normal removal of an event handler via a helper goroutine. Thanks to the new HandleErrorWithLogr and HandleCrashWithLogr, remembering the logger is sufficient for handling problems at runtime.

Kubernetes-commit: 4638ba971661497b147906b8977ae206c9dd6e44
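The event handler approach described above can be illustrated with a short sketch. This is not code from this commit: the helper name addHandlerUntilDone is made up for illustration, while AddEventHandler, RemoveEventHandler, and HandleErrorWithContext are existing client-go and apimachinery APIs.

package handlerutil

import (
	"context"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/tools/cache"
)

// addHandlerUntilDone registers handler on informer and removes it again
// once ctx is cancelled, so the handler itself never needs to check ctx.
// Hypothetical helper, shown only to illustrate the pattern.
func addHandlerUntilDone(ctx context.Context, informer cache.SharedInformer, handler cache.ResourceEventHandler) error {
	reg, err := informer.AddEventHandler(handler)
	if err != nil {
		return err
	}
	go func() {
		// Map context cancellation onto the normal removal path.
		<-ctx.Done()
		if err := informer.RemoveEventHandler(reg); err != nil {
			utilruntime.HandleErrorWithContext(ctx, err, "failed to remove event handler")
		}
	}()
	return nil
}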
committed by Kubernetes Publisher
parent b836a27b07
commit 5d289bc44c
@@ -17,6 +17,8 @@ limitations under the License.
 package main
 
 import (
+	"context"
+	"errors"
 	"flag"
 	"fmt"
 	"time"
@@ -116,30 +118,30 @@ func (c *Controller) handleErr(err error, key string) {
 }
 
 // Run begins watching and syncing.
-func (c *Controller) Run(workers int, stopCh chan struct{}) {
-	defer runtime.HandleCrash()
+func (c *Controller) Run(ctx context.Context, workers int) {
+	defer runtime.HandleCrashWithContext(ctx)
 
 	// Let the workers stop when we are done
 	defer c.queue.ShutDown()
 	klog.Info("Starting Pod controller")
 
-	go c.informer.Run(stopCh)
+	go c.informer.RunWithContext(ctx)
 
 	// Wait for all involved caches to be synced, before processing items from the queue is started
-	if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {
+	if !cache.WaitForNamedCacheSyncWithContext(ctx, c.informer.HasSynced) {
 		runtime.HandleError(fmt.Errorf("Timed out waiting for caches to sync"))
 		return
 	}
 
 	for i := 0; i < workers; i++ {
-		go wait.Until(c.runWorker, time.Second, stopCh)
+		go wait.UntilWithContext(ctx, c.runWorker, time.Second)
 	}
 
-	<-stopCh
+	<-ctx.Done()
 	klog.Info("Stopping Pod controller")
 }
 
-func (c *Controller) runWorker() {
+func (c *Controller) runWorker(ctx context.Context) {
 	for c.processNextItem() {
 	}
 }
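For callers migrating to the new Run signature shown above, an existing stop channel can be bridged to a context. A minimal sketch, assuming the example's *Controller type and the "context" import added in this commit; the helper itself is hypothetical and not part of the change:

// runWithStopChannel drives the context-based Run from a legacy stop channel.
func runWithStopChannel(c *Controller, workers int, stopCh <-chan struct{}) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		// Cancel the context as soon as the old-style stop channel closes.
		<-stopCh
		cancel()
	}()
	c.Run(ctx, workers)
}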
@@ -164,6 +166,8 @@ func main() {
 		klog.Fatal(err)
 	}
 
+	ctx := context.Background()
+
 	// create the pod watcher
 	podListWatcher := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", v1.NamespaceDefault, fields.Everything())
 
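The example uses context.Background() as its root context. A common alternative in real programs (not part of this commit) is to derive the root context from OS signals via the standard library's signal.NotifyContext. A minimal sketch:

import (
	"context"
	"os"
	"os/signal"
	"syscall"
)

// rootContext returns a context that is cancelled on SIGINT or SIGTERM.
func rootContext() (context.Context, context.CancelFunc) {
	return signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
}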
@@ -211,9 +215,9 @@ func main() {
 	})
 
 	// Now let's start the controller
-	stop := make(chan struct{})
-	defer close(stop)
-	go controller.Run(1, stop)
+	cancelCtx, cancel := context.WithCancelCause(ctx)
+	defer cancel(errors.New("time to stop because main has completed"))
+	go controller.Run(cancelCtx, 1)
 
 	// Wait forever
 	select {}
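A note on the context.WithCancelCause call above: the cause passed to cancel can later be read back with the standard library's context.Cause, which can make shutdown logging more informative. A hedged sketch building on cancelCtx from this hunk (not part of the commit):

// After the context is done, context.Cause reports why it was cancelled
// (the error passed to cancel, or ctx.Err() if cancelled without a cause).
<-cancelCtx.Done()
klog.InfoS("controller context cancelled", "cause", context.Cause(cancelCtx))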