/*
Copyright 2023 SUSE LLC
Adapted from client-go, Copyright 2014 The Kubernetes Authors.
*/
package informer

import (
	"fmt"
	"strings"
	"sync"
	"testing"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/tools/cache"
	fcache "k8s.io/client-go/tools/cache/testing"
	testingclock "k8s.io/utils/clock/testing"
)
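
// testListener is a cache.ResourceEventHandler that records the names of the
// objects delivered to it, so tests can assert exactly which events each
// handler received.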
type testListener struct {
	lock              sync.RWMutex
	resyncPeriod      time.Duration
	expectedItemNames sets.Set[string]
	receivedItemNames []string
	name              string
}

func newTestListener(name string, resyncPeriod time.Duration, expected ...string) *testListener {
	l := &testListener{
		resyncPeriod:      resyncPeriod,
		expectedItemNames: sets.New[string](expected...),
		name:              name,
	}
	return l
}

func (l *testListener) OnAdd(obj interface{}, isInInitialList bool) {
	l.handle(obj)
}

func (l *testListener) OnUpdate(old, new interface{}) {
	l.handle(new)
}

func (l *testListener) OnDelete(obj interface{}) {
}

func (l *testListener) handle(obj interface{}) {
	key, _ := cache.MetaNamespaceKeyFunc(obj)
	fmt.Printf("%s: handle: %v\n", l.name, key)

	l.lock.Lock()
	defer l.lock.Unlock()

	objectMeta, _ := meta.Accessor(obj)
	l.receivedItemNames = append(l.receivedItemNames, objectMeta.GetName())
}
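
// ok polls until the listener has received exactly the expected set of item
// names, then sleeps briefly so any unexpected stragglers can still arrive
// before the final check.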
func (l *testListener) ok() bool {
	fmt.Println("polling")
	err := wait.PollImmediate(100*time.Millisecond, 2*time.Second, func() (bool, error) {
		if l.satisfiedExpectations() {
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		return false
	}

	// wait just a bit to allow any unexpected stragglers to come in
	fmt.Println("sleeping")
	time.Sleep(1 * time.Second)
	fmt.Println("final check")
	return l.satisfiedExpectations()
}

func (l *testListener) satisfiedExpectations() bool {
	l.lock.RLock()
	defer l.lock.RUnlock()

	return sets.New[string](l.receivedItemNames...).Equal(l.expectedItemNames)
}
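
// TestListenerResyncPeriods verifies that handlers registered with different
// resync periods on the same shared informer are resynced independently.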
func TestListenerResyncPeriods(t *testing.T) {
	// source simulates an apiserver object endpoint.
	source := fcache.NewFakeControllerSource()
	source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1"}})
	source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod2"}})

	// create the shared informer and resync every 1s
	informer := cache.NewSharedInformer(source, &v1.Pod{}, 1*time.Second)
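
	// Swap the informer's (and its processor's) clock for a fake one, so the
	// test can step time deterministically instead of sleeping.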
	clock := testingclock.NewFakeClock(time.Now())
	UnsafeSet(informer, "clock", clock)
	UnsafeSet(UnsafeGet(informer, "processor"), "clock", clock)

	// listener 1, never resync
	listener1 := newTestListener("listener1", 0, "pod1", "pod2")
	informer.AddEventHandlerWithResyncPeriod(listener1, listener1.resyncPeriod)

	// listener 2, resync every 2s
	listener2 := newTestListener("listener2", 2*time.Second, "pod1", "pod2")
	informer.AddEventHandlerWithResyncPeriod(listener2, listener2.resyncPeriod)

	// listener 3, resync every 3s
	listener3 := newTestListener("listener3", 3*time.Second, "pod1", "pod2")
	informer.AddEventHandlerWithResyncPeriod(listener3, listener3.resyncPeriod)
	listeners := []*testListener{listener1, listener2, listener3}

	stop := make(chan struct{})
	defer close(stop)
	go informer.Run(stop)

	// ensure all listeners got the initial List
	for _, listener := range listeners {
		if !listener.ok() {
			t.Errorf("%s: expected %v, got %v", listener.name, listener.expectedItemNames, listener.receivedItemNames)
		}
	}

	// reset
	for _, listener := range listeners {
		listener.receivedItemNames = []string{}
	}

	// advance so listener2 gets a resync
	clock.Step(2 * time.Second)

	// make sure listener2 got the resync
	if !listener2.ok() {
		t.Errorf("%s: expected %v, got %v", listener2.name, listener2.expectedItemNames, listener2.receivedItemNames)
	}

	// wait a bit to give errant items a chance to go to 1 and 3
	time.Sleep(1 * time.Second)

	// make sure listeners 1 and 3 got nothing
	if len(listener1.receivedItemNames) != 0 {
		t.Errorf("listener1: should not have resynced (got %d)", len(listener1.receivedItemNames))
	}
	if len(listener3.receivedItemNames) != 0 {
		t.Errorf("listener3: should not have resynced (got %d)", len(listener3.receivedItemNames))
	}

	// reset
	for _, listener := range listeners {
		listener.receivedItemNames = []string{}
	}

	// advance so listener3 gets a resync
	clock.Step(1 * time.Second)

	// make sure listener3 got the resync
	if !listener3.ok() {
		t.Errorf("%s: expected %v, got %v", listener3.name, listener3.expectedItemNames, listener3.receivedItemNames)
	}

	// wait a bit to give errant items a chance to go to 1 and 2
	time.Sleep(1 * time.Second)

	// make sure listeners 1 and 2 got nothing
	if len(listener1.receivedItemNames) != 0 {
		t.Errorf("listener1: should not have resynced (got %d)", len(listener1.receivedItemNames))
	}
	if len(listener2.receivedItemNames) != 0 {
		t.Errorf("listener2: should not have resynced (got %d)", len(listener2.receivedItemNames))
	}
}

// verify that https://github.com/kubernetes/kubernetes/issues/59822 is fixed
// (adding an event handler concurrently with starting the informer used to race)
func TestSharedInformerInitializationRace(t *testing.T) {
	source := fcache.NewFakeControllerSource()
	informer := cache.NewSharedInformer(source, &v1.Pod{}, 1*time.Second)
	listener := newTestListener("raceListener", 0)

	stop := make(chan struct{})
	go informer.AddEventHandlerWithResyncPeriod(listener, listener.resyncPeriod)
	go informer.Run(stop)
	close(stop)
}

// TestSharedInformerWatchDisruption simulates a watch that was closed
// with updates to the store during that time. We ensure that handlers with
// resync and no resync see the expected state.
func TestSharedInformerWatchDisruption(t *testing.T) {
	// source simulates an apiserver object endpoint.
	source := fcache.NewFakeControllerSource()
	source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: "pod1", ResourceVersion: "1"}})
	source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: "pod2", ResourceVersion: "2"}})

	// create the shared informer and resync every 1s
	informer := cache.NewSharedInformer(source, &v1.Pod{}, 1*time.Second)

	clock := testingclock.NewFakeClock(time.Now())
	UnsafeSet(informer, "clock", clock)
	UnsafeSet(UnsafeGet(informer, "processor"), "clock", clock)

	// listener, never resync
	listenerNoResync := newTestListener("listenerNoResync", 0, "pod1", "pod2")
	informer.AddEventHandlerWithResyncPeriod(listenerNoResync, listenerNoResync.resyncPeriod)

	// listener, resync every 1s
	listenerResync := newTestListener("listenerResync", 1*time.Second, "pod1", "pod2")
	informer.AddEventHandlerWithResyncPeriod(listenerResync, listenerResync.resyncPeriod)
	listeners := []*testListener{listenerNoResync, listenerResync}

	stop := make(chan struct{})
	defer close(stop)
	go informer.Run(stop)

	for _, listener := range listeners {
		if !listener.ok() {
			t.Errorf("%s: expected %v, got %v", listener.name, listener.expectedItemNames, listener.receivedItemNames)
		}
	}

	// Add pod3, bump pod2, but don't broadcast it, so that the change will be seen only on relist
	source.AddDropWatch(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod3", UID: "pod3", ResourceVersion: "3"}})
	source.ModifyDropWatch(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: "pod2", ResourceVersion: "4"}})

	// Ensure that nobody saw any changes
	for _, listener := range listeners {
		if !listener.ok() {
			t.Errorf("%s: expected %v, got %v", listener.name, listener.expectedItemNames, listener.receivedItemNames)
		}
	}

	// reset
	for _, listener := range listeners {
		listener.receivedItemNames = []string{}
	}
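
	// The relist delivers unchanged items only to listeners that are due for
	// a resync: the no-resync listener should therefore see just the items
	// that changed while the watch was down, while the resyncing listener
	// expects pod1 as well.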
	listenerNoResync.expectedItemNames = sets.New[string]("pod2", "pod3")
	listenerResync.expectedItemNames = sets.New[string]("pod1", "pod2", "pod3")

	// This calls shouldSync, which deletes noResync from the list of syncingListeners
	clock.Step(1 * time.Second)

	// Simulate a connection loss (or even just a too-old-watch)
	source.ResetWatch()

	// Wait long enough for the reflector to exit and the backoff function to start waiting
	// on the fake clock, otherwise advancing the fake clock will have no effect.
	// TODO: Make this deterministic by counting the number of waiters on FakeClock
	time.Sleep(10 * time.Millisecond)

	// Advance the clock to cause the backoff wait to expire.
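	// (1601ms is presumably chosen to land just past the reflector's retry
	// backoff, so the relist fires exactly once.)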
	clock.Step(1601 * time.Millisecond)

	// Wait long enough for backoff to invoke ListWatch a second time and distribute events
	// to listeners.
	time.Sleep(10 * time.Millisecond)

	for _, listener := range listeners {
		if !listener.ok() {
			t.Errorf("%s: expected %v, got %v", listener.name, listener.expectedItemNames, listener.receivedItemNames)
		}
	}
}
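
// TestSharedInformerErrorHandling verifies that a list error from the source
// is surfaced through the informer's watch error handler.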
func TestSharedInformerErrorHandling(t *testing.T) {
	source := fcache.NewFakeControllerSource()
	source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1"}})
	source.ListError = fmt.Errorf("Access Denied")

	informer := cache.NewSharedInformer(source, &v1.Pod{}, 1*time.Second)

	errCh := make(chan error)
	_ = informer.SetWatchErrorHandler(func(_ *cache.Reflector, err error) {
		errCh <- err
	})

	stop := make(chan struct{})
	go informer.Run(stop)

	select {
	case err := <-errCh:
		if !strings.Contains(err.Error(), "Access Denied") {
			t.Errorf("Expected 'Access Denied' error. Actual: %v", err)
		}
	case <-time.After(time.Second):
		t.Errorf("Timeout waiting for error handler call")
	}
	close(stop)
}
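
// TestSharedInformerTransformer verifies that a transform function installed
// with SetTransform is applied to each object before it reaches the event
// handlers.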
func TestSharedInformerTransformer(t *testing.T) {
	// source simulates an apiserver object endpoint.
	source := fcache.NewFakeControllerSource()
	source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: "pod1", ResourceVersion: "1"}})
	source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: "pod2", ResourceVersion: "2"}})

	informer := cache.NewSharedInformer(source, &v1.Pod{}, 1*time.Second)
	informer.SetTransform(func(obj interface{}) (interface{}, error) {
		if pod, ok := obj.(*v1.Pod); ok {
			name := pod.GetName()
			if upper := strings.ToUpper(name); upper != name {
				// deep-copy before mutating: the transform must not modify
				// the object still owned by the source
				copied := pod.DeepCopyObject().(*v1.Pod)
				copied.SetName(upper)
				return copied, nil
			}
		}
		return obj, nil
	})

	listenerTransformer := newTestListener("listenerTransformer", 0, "POD1", "POD2")
	informer.AddEventHandler(listenerTransformer)

	stop := make(chan struct{})
	go informer.Run(stop)
	defer close(stop)

	if !listenerTransformer.ok() {
		t.Errorf("%s: expected %v, got %v", listenerTransformer.name, listenerTransformer.expectedItemNames, listenerTransformer.receivedItemNames)
	}
}