Mirror of https://github.com/kubernetes/client-go.git, synced 2026-05-15 11:43:33 +00:00
3 Commits
Commit e70bc766e0
client-go cache: wait for cache sync via channels, better logging
The main advantage is that waiting on channels creates a causal relationship
between goroutines that is visible to synctest. When a controller running in a
synctest bubble calls WaitFor in one of the test's background goroutines, the
test can use synctest.Wait to wait for cache sync to complete, without needing
any test-specific "has controller synced" API. Without this, the test had to
poll or otherwise wait for the controller.
The polling in WaitForCacheSync moved the virtual clock forward by a random
amount, depending on how often it had to check in wait.Poll. Now tests can be
written such that all events during a test happen at a predictable time. This
will be demonstrated in a separate commit for the
pkg/controller/devicetainteviction unit test.
The benefit for normal production is immediate continuation as soon as the last
informer is synced (not really a problem, but still...) and, more importantly,
nicer logging thanks to the names associated with the things being waited
for. The caller decides whether logging is enabled or disabled and describes
what is being waited for (typically informer caches, but possibly also event
handlers or something else entirely, as long as it implements the DoneChecker
interface).
Before:
Waiting for caches to sync
Caches are synced
After:
Waiting for="cache and event handler sync"
Done waiting for="cache and event handler sync" instance="SharedIndexInformer *v1.Pod"
Done waiting for="cache and event handler sync" instance="SharedIndexInformer *v1.ResourceClaim"
Done waiting for="cache and event handler sync" instance="SharedIndexInformer *v1.ResourceSlice"
Done waiting for="cache and event handler sync" instance="SharedIndexInformer *v1.DeviceClass"
Done waiting for="cache and event handler sync" instance="SharedIndexInformer *v1alpha3.DeviceTaintRule"
Done waiting for="cache and event handler sync" instance="SharedIndexInformer *v1.ResourceClaim + event handler k8s.io/kubernetes/pkg/controller/devicetainteviction.(*Controller).Run"
Done waiting for="cache and event handler sync" instance="SharedIndexInformer *v1.Pod + event handler k8s.io/kubernetes/pkg/controller/devicetainteviction.(*Controller).Run"
Done waiting for="cache and event handler sync" instance="SharedIndexInformer *v1alpha3.DeviceTaintRule + event handler k8s.io/kubernetes/pkg/controller/devicetainteviction.(*Controller).Run"
Done waiting for="cache and event handler sync" instance="SharedIndexInformer *v1.ResourceSlice + event handler k8s.io/kubernetes/pkg/controller/devicetainteviction.(*Controller).Run"
The "SharedIndexInformer *v1.Pod" is also how this appears in metrics.
Kubernetes-commit: fdcbb6cba9a04c028b158bf66d505df7431f63fe
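For illustration, a minimal, self-contained sketch of the channel-based waiting pattern described above. The doneChecker interface and waitFor helper are hypothetical stand-ins for the DoneChecker-style API named in the message, not the actual client-go code:

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    // doneChecker is a hypothetical stand-in: anything whose completion can be
    // observed via a channel (an informer cache, an event handler, ...).
    type doneChecker interface {
    	Done() <-chan struct{} // closed once the instance has synced
    	Name() string
    }

    // fakeInformer simulates an informer cache that syncs after a short delay.
    type fakeInformer struct {
    	name string
    	done chan struct{}
    }

    func (f *fakeInformer) Done() <-chan struct{} { return f.done }
    func (f *fakeInformer) Name() string          { return f.name }

    // waitFor blocks until every checker is done or the context is cancelled.
    // Blocking on channels (instead of polling) gives synctest a visible causal
    // relationship and allows per-instance log lines like the "After:" output above.
    func waitFor(ctx context.Context, what string, checkers ...doneChecker) error {
    	fmt.Printf("Waiting for=%q\n", what)
    	for _, c := range checkers {
    		select {
    		case <-c.Done():
    			fmt.Printf("Done waiting for=%q instance=%q\n", what, c.Name())
    		case <-ctx.Done():
    			return ctx.Err()
    		}
    	}
    	return nil
    }

    func main() {
    	pods := &fakeInformer{name: "SharedIndexInformer *v1.Pod", done: make(chan struct{})}
    	claims := &fakeInformer{name: "SharedIndexInformer *v1.ResourceClaim", done: make(chan struct{})}
    	go func() { time.Sleep(10 * time.Millisecond); close(pods.done) }()
    	go func() { time.Sleep(20 * time.Millisecond); close(claims.done) }()
    	_ = waitFor(context.Background(), "cache and event handler sync", pods, claims)
    }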
Commit 9e8c663097
Properly align synctrack.SingleFileTracker struct
count is used with atomic operations, so it must be 64-bit aligned; otherwise
atomic operations will panic. Having it at the top of the struct guarantees
that, even on 32-bit architectures.
This fixes panics like the one observed in kube-apiserver:
E0310 13:48:47.476124 676 runtime.go:77] Observed a panic: unaligned 64-bit atomic operation
goroutine 141 [running]:
k8s.io/apimachinery/pkg/util/runtime.logPanic({0x2482378, 0x2db2ff8})
vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go:75 +0x94
k8s.io/apimachinery/pkg/util/runtime.HandleCrash({0x0, 0x0, 0x0})
vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go:49 +0x78
panic({0x2482378, 0x2db2ff8})
/usr/local/go/src/runtime/panic.go:884 +0x218
runtime/internal/atomic.panicUnaligned()
/usr/local/go/src/runtime/internal/atomic/unaligned.go:8 +0x24
runtime/internal/atomic.Load64(0x685f794)
/usr/local/go/src/runtime/internal/atomic/atomic_arm.s:280 +0x14
k8s.io/client-go/tools/cache/synctrack.(*SingleFileTracker).HasSynced(0x685f790)
vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go:115 +0x3c
k8s.io/client-go/tools/cache.(*processorListener).HasSynced(0x6013e60)
vendor/k8s.io/client-go/tools/cache/shared_informer.go:907 +0x20
k8s.io/client-go/tools/cache.WaitForCacheSync.func1()
vendor/k8s.io/client-go/tools/cache/shared_informer.go:332 +0x50
k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2dcf274, 0x607c600})
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 +0x1c
k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x2dcf274, 0x607c600}, 0x6382050)
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:262 +0x64
k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x2dcf274, 0x607c600}, 0x64a6060, 0x6382050)
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:649 +0x11c
k8s.io/apimachinery/pkg/util/wait.poll({0x2dcf274, 0x607c600}, 0x1, 0x64a6060, 0x6382050)
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 +0xc4
k8s.io/apimachinery/pkg/util/wait.PollImmediateUntilWithContext({0x2dcf274, 0x607c600}, 0x5f5e100, 0x6382050)
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:551 +0x60
k8s.io/apimachinery/pkg/util/wait.PollImmediateUntil(0x5f5e100, 0x6298020, 0x607c600)
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:542 +0x48
k8s.io/client-go/tools/cache.WaitForCacheSync(0x607c600, {0x6298000, 0x3, 0x3})
vendor/k8s.io/client-go/tools/cache/shared_informer.go:329 +0x80
k8s.io/client-go/tools/cache.WaitForNamedCacheSync({0x283c5e1, 0xf}, 0x607c600, {0x6298000, 0x3, 0x3})
vendor/k8s.io/client-go/tools/cache/shared_informer.go:316 +0xe8
created by k8s.io/kubernetes/plugin/pkg/auth/authorizer/node.AddGraphEventHandlers
plugin/pkg/auth/authorizer/node/graph_populator.go:65 +0x5b0
panic: unaligned 64-bit atomic operation [recovered]
panic: unaligned 64-bit atomic operation
goroutine 141 [running]:
k8s.io/apimachinery/pkg/util/runtime.HandleCrash({0x0, 0x0, 0x0})
vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go:56 +0xf4
panic({0x2482378, 0x2db2ff8})
/usr/local/go/src/runtime/panic.go:884 +0x218
runtime/internal/atomic.panicUnaligned()
/usr/local/go/src/runtime/internal/atomic/unaligned.go:8 +0x24
runtime/internal/atomic.Load64(0x685f794)
/usr/local/go/src/runtime/internal/atomic/atomic_arm.s:280 +0x14
k8s.io/client-go/tools/cache/synctrack.(*SingleFileTracker).HasSynced(0x685f790)
vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go:115 +0x3c
k8s.io/client-go/tools/cache.(*processorListener).HasSynced(0x6013e60)
vendor/k8s.io/client-go/tools/cache/shared_informer.go:907 +0x20
k8s.io/client-go/tools/cache.WaitForCacheSync.func1()
vendor/k8s.io/client-go/tools/cache/shared_informer.go:332 +0x50
k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2dcf274, 0x607c600})
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 +0x1c
k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x2dcf274, 0x607c600}, 0x6382050)
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:262 +0x64
k8s.io/apimachinery/pkg/util/wait.waitForWithContext({0x2dcf274, 0x607c600}, 0x64a6060, 0x6382050)
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:649 +0x11c
k8s.io/apimachinery/pkg/util/wait.poll({0x2dcf274, 0x607c600}, 0x1, 0x64a6060, 0x6382050)
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:600 +0xc4
k8s.io/apimachinery/pkg/util/wait.PollImmediateUntilWithContext({0x2dcf274, 0x607c600}, 0x5f5e100, 0x6382050)
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:551 +0x60
k8s.io/apimachinery/pkg/util/wait.PollImmediateUntil(0x5f5e100, 0x6298020, 0x607c600)
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:542 +0x48
k8s.io/client-go/tools/cache.WaitForCacheSync(0x607c600, {0x6298000, 0x3, 0x3})
vendor/k8s.io/client-go/tools/cache/shared_informer.go:329 +0x80
k8s.io/client-go/tools/cache.WaitForNamedCacheSync({0x283c5e1, 0xf}, 0x607c600, {0x6298000, 0x3, 0x3})
vendor/k8s.io/client-go/tools/cache/shared_informer.go:316 +0xe8
created by k8s.io/kubernetes/plugin/pkg/auth/authorizer/node.AddGraphEventHandlers
plugin/pkg/auth/authorizer/node/graph_populator.go:65 +0x5b0
Kubernetes-commit: ffcf653e0666366e6241c99d9418e830840afa0f
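For context, a minimal sketch of the alignment rule behind this fix, with illustrative struct names rather than the actual SingleFileTracker: sync/atomic documents that 64-bit atomic operations on 32-bit architectures require 64-bit alignment, and only the first word in an allocated struct can be relied upon to be aligned.

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // badTracker: on 32-bit platforms the int64 may end up only 4-byte aligned
    // because the preceding bool pushes it to offset 4, so
    // atomic.LoadInt64(&t.count) can panic with "unaligned 64-bit atomic operation".
    type badTracker struct {
    	started bool
    	count   int64
    }

    // goodTracker keeps the atomically accessed 64-bit field first; sync/atomic
    // guarantees that the first word of an allocated struct is 64-bit aligned,
    // so this is safe even on 32-bit architectures.
    type goodTracker struct {
    	count   int64 // must stay first: accessed with sync/atomic
    	started bool
    }

    func main() {
    	t := &goodTracker{}
    	atomic.AddInt64(&t.count, 1)
    	fmt.Println(atomic.LoadInt64(&t.count)) // prints 1 without panicking
    }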
Commit 5d70a118df
Enable propagation of HasSynced
* Add tracker types and tests
* Modify ResourceEventHandler interface's OnAdd member
* Add additional ResourceEventHandlerDetailedFuncs struct
* Fix SharedInformer to let users track HasSynced for their handlers
* Fix in-tree controllers which weren't computing HasSynced correctly
* Deprecate the cache.Pop function
Kubernetes-commit: 8100efc7b3122ad119ee8fa4bbbedef3b90f2e0d
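A minimal usage sketch of the per-handler HasSynced tracking this commit enables, assuming client-go v0.27 or newer (where AddEventHandler returns a registration handle); the in-cluster config setup is illustrative only:

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"k8s.io/client-go/informers"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/rest"
    	"k8s.io/client-go/tools/cache"
    )

    func main() {
    	// Assumes in-cluster config; swap for clientcmd when running outside a cluster.
    	cfg, err := rest.InClusterConfig()
    	if err != nil {
    		panic(err)
    	}
    	client := kubernetes.NewForConfigOrDie(cfg)

    	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
    	podInformer := factory.Core().V1().Pods().Informer()

    	// ResourceEventHandlerDetailedFuncs surfaces isInInitialList, the detail
    	// added by the changed OnAdd signature mentioned in this commit.
    	reg, err := podInformer.AddEventHandler(cache.ResourceEventHandlerDetailedFuncs{
    		AddFunc: func(obj interface{}, isInInitialList bool) {
    			fmt.Println("add event, part of initial list:", isInInitialList)
    		},
    	})
    	if err != nil {
    		panic(err)
    	}

    	ctx := context.Background()
    	factory.Start(ctx.Done())

    	// reg.HasSynced reports true only after this handler has seen the full
    	// initial list, i.e. HasSynced is propagated to per-handler registrations.
    	cache.WaitForCacheSync(ctx.Done(), reg.HasSynced)
    	fmt.Println("handler has processed the initial list")
    }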