move informer and controller to pkg/client/cache
Signed-off-by: Mike Danese <mikedanese@google.com>
325
pkg/client/cache/controller.go
vendored
Normal file
@@ -0,0 +1,325 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"sync"
	"time"

	"k8s.io/kubernetes/pkg/runtime"
	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
	"k8s.io/kubernetes/pkg/util/wait"
)

// Config contains all the settings for a Controller.
type Config struct {
	// The queue for your objects; either a FIFO or
	// a DeltaFIFO. Your Process() function should accept
	// the output of this Queue's Pop() method.
	Queue

	// Something that can list and watch your objects.
	ListerWatcher

	// Something that can process your objects.
	Process ProcessFunc

	// The type of your objects.
	ObjectType runtime.Object

	// Reprocess everything at least this often.
	// Note that if it takes longer for you to clear the queue than this
	// period, you will end up processing items in the order determined
	// by FIFO.Replace(). Currently, this is random. If this is a
	// problem, we can change that replacement policy to append new
	// things to the end of the queue instead of replacing the entire
	// queue.
	FullResyncPeriod time.Duration

	// If true, when Process() returns an error, re-enqueue the object.
	// TODO: add interface to let you inject a delay/backoff or drop
	//       the object completely if desired. Pass the object in
	//       question to this interface as a parameter.
	RetryOnError bool
}

// ProcessFunc processes a single object.
type ProcessFunc func(obj interface{}) error

// Controller is a generic controller framework.
type Controller struct {
	config         Config
	reflector      *Reflector
	reflectorMutex sync.RWMutex
}

// TODO make the "Controller" private, and convert all references to use ControllerInterface instead
type ControllerInterface interface {
	Run(stopCh <-chan struct{})
	HasSynced() bool
}

// New makes a new Controller from the given Config.
func New(c *Config) *Controller {
	ctlr := &Controller{
		config: *c,
	}
	return ctlr
}

// Run begins processing items, and will continue until a value is sent down stopCh.
// It's an error to call Run more than once.
// Run blocks; call via go.
func (c *Controller) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	r := NewReflector(
		c.config.ListerWatcher,
		c.config.ObjectType,
		c.config.Queue,
		c.config.FullResyncPeriod,
	)

	c.reflectorMutex.Lock()
	c.reflector = r
	c.reflectorMutex.Unlock()

	r.RunUntil(stopCh)

	wait.Until(c.processLoop, time.Second, stopCh)
}

// HasSynced returns true once this controller has completed an initial resource listing.
func (c *Controller) HasSynced() bool {
	return c.config.Queue.HasSynced()
}

// Requeue adds the provided object back into the queue if it does not already exist.
func (c *Controller) Requeue(obj interface{}) error {
	return c.config.Queue.AddIfNotPresent(Deltas{
		Delta{
			Type:   Sync,
			Object: obj,
		},
	})
}

// processLoop drains the work queue.
// TODO: Consider doing the processing in parallel. This will require a little thought
// to make sure that we don't end up processing the same object multiple times
// concurrently.
func (c *Controller) processLoop() {
	for {
		obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process))
		if err != nil {
			if c.config.RetryOnError {
				// This is the safe way to re-enqueue.
				c.config.Queue.AddIfNotPresent(obj)
			}
		}
	}
}

// ResourceEventHandler can handle notifications for events that happen to a
// resource. The events are informational only, so you can't return an
// error.
//  * OnAdd is called when an object is added.
//  * OnUpdate is called when an object is modified. Note that oldObj is the
//      last known state of the object-- it is possible that several changes
//      were combined together, so you can't use this to see every single
//      change. OnUpdate is also called when a re-list happens, and it will
//      get called even if nothing changed. This is useful for periodically
//      evaluating or syncing something.
//  * OnDelete will get the final state of the item if it is known, otherwise
//      it will get an object of type DeletedFinalStateUnknown. This can
//      happen if the watch is closed and misses the delete event and we don't
//      notice the deletion until the subsequent re-list.
type ResourceEventHandler interface {
	OnAdd(obj interface{})
	OnUpdate(oldObj, newObj interface{})
	OnDelete(obj interface{})
}

// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or
// as few of the notification functions as you want while still implementing
// ResourceEventHandler.
type ResourceEventHandlerFuncs struct {
	AddFunc    func(obj interface{})
	UpdateFunc func(oldObj, newObj interface{})
	DeleteFunc func(obj interface{})
}

// OnAdd calls AddFunc if it's not nil.
func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) {
	if r.AddFunc != nil {
		r.AddFunc(obj)
	}
}

// OnUpdate calls UpdateFunc if it's not nil.
func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) {
	if r.UpdateFunc != nil {
		r.UpdateFunc(oldObj, newObj)
	}
}

// OnDelete calls DeleteFunc if it's not nil.
func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
	if r.DeleteFunc != nil {
		r.DeleteFunc(obj)
	}
}

// DeletionHandlingMetaNamespaceKeyFunc checks for
// DeletedFinalStateUnknown objects before calling
// MetaNamespaceKeyFunc.
func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
	if d, ok := obj.(DeletedFinalStateUnknown); ok {
		return d.Key, nil
	}
	return MetaNamespaceKeyFunc(obj)
}

// NewInformer returns a Store and a Controller for populating the store
// while also providing event notifications. You should only use the returned
// Store for Get/List operations; Add/Modify/Deletes will cause the event
// notifications to be faulty.
//
// Parameters:
//  * lw is list and watch functions for the source of the resource you want to
//    be informed of.
//  * objType is an object of the type that you expect to receive.
//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
//    calls, even if nothing changed). Otherwise, re-list will be delayed as
//    long as possible (until the upstream source closes the watch or times out,
//    or you stop the controller).
//  * h is the object you want notifications sent to.
//
func NewInformer(
	lw ListerWatcher,
	objType runtime.Object,
	resyncPeriod time.Duration,
	h ResourceEventHandler,
) (Store, *Controller) {
	// This will hold the client state, as we know it.
	clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)

	// This will hold incoming changes. Note how we pass clientState in as a
	// KeyLister, that way resync operations will result in the correct set
	// of update/delete deltas.
	fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState)

	cfg := &Config{
		Queue:            fifo,
		ListerWatcher:    lw,
		ObjectType:       objType,
		FullResyncPeriod: resyncPeriod,
		RetryOnError:     false,

		Process: func(obj interface{}) error {
			// from oldest to newest
			for _, d := range obj.(Deltas) {
				switch d.Type {
				case Sync, Added, Updated:
					if old, exists, err := clientState.Get(d.Object); err == nil && exists {
						if err := clientState.Update(d.Object); err != nil {
							return err
						}
						h.OnUpdate(old, d.Object)
					} else {
						if err := clientState.Add(d.Object); err != nil {
							return err
						}
						h.OnAdd(d.Object)
					}
				case Deleted:
					if err := clientState.Delete(d.Object); err != nil {
						return err
					}
					h.OnDelete(d.Object)
				}
			}
			return nil
		},
	}
	return clientState, New(cfg)
}

// NewIndexerInformer returns an Indexer and a Controller for populating the index
// while also providing event notifications. You should only use the returned
// Index for Get/List operations; Add/Modify/Deletes will cause the event
// notifications to be faulty.
//
// Parameters:
//  * lw is list and watch functions for the source of the resource you want to
//    be informed of.
//  * objType is an object of the type that you expect to receive.
//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
//    calls, even if nothing changed). Otherwise, re-list will be delayed as
//    long as possible (until the upstream source closes the watch or times out,
//    or you stop the controller).
//  * h is the object you want notifications sent to.
//
func NewIndexerInformer(
	lw ListerWatcher,
	objType runtime.Object,
	resyncPeriod time.Duration,
	h ResourceEventHandler,
	indexers Indexers,
) (Indexer, *Controller) {
	// This will hold the client state, as we know it.
	clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)

	// This will hold incoming changes. Note how we pass clientState in as a
	// KeyLister, that way resync operations will result in the correct set
	// of update/delete deltas.
	fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState)

	cfg := &Config{
		Queue:            fifo,
		ListerWatcher:    lw,
		ObjectType:       objType,
		FullResyncPeriod: resyncPeriod,
		RetryOnError:     false,

		Process: func(obj interface{}) error {
			// from oldest to newest
			for _, d := range obj.(Deltas) {
				switch d.Type {
				case Sync, Added, Updated:
					if old, exists, err := clientState.Get(d.Object); err == nil && exists {
						if err := clientState.Update(d.Object); err != nil {
							return err
						}
						h.OnUpdate(old, d.Object)
					} else {
						if err := clientState.Add(d.Object); err != nil {
							return err
						}
						h.OnAdd(d.Object)
					}
				case Deleted:
					if err := clientState.Delete(d.Object); err != nil {
						return err
					}
					h.OnDelete(d.Object)
				}
			}
			return nil
		},
	}
	return clientState, New(cfg)
}
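As a usage note (not part of the commit): a minimal sketch wiring NewIndexerInformer above to the fake source added later in this commit. MetaNamespaceIndexFunc and Indexer.ByIndex are assumed to exist elsewhere in pkg/client/cache; this is illustrative only.

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	fcache "k8s.io/kubernetes/pkg/client/testing/cache"
)

func main() {
	// The fake source stands in for an apiserver endpoint.
	source := fcache.NewFakeControllerSource()

	// Index pods by namespace; MetaNamespaceIndexFunc is assumed from
	// elsewhere in pkg/client/cache.
	indexer, controller := cache.NewIndexerInformer(
		source,
		&api.Pod{},
		time.Second,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) { fmt.Println("added:", obj.(*api.Pod).Name) },
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)

	source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "demo", Namespace: "default"}})

	// Give the informer a moment to sync, then query the index.
	time.Sleep(100 * time.Millisecond)
	pods, _ := indexer.ByIndex("namespace", "default") // ByIndex assumed on Indexer
	fmt.Println("pods in default:", len(pods))
}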
401
pkg/client/cache/controller_test.go
vendored
Normal file
@@ -0,0 +1,401 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"fmt"
	"math/rand"
	"sync"
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/api"
	fcache "k8s.io/kubernetes/pkg/client/testing/cache"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/util/sets"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/pkg/watch"

	"github.com/google/gofuzz"
)

func Example() {
	// source simulates an apiserver object endpoint.
	source := fcache.NewFakeControllerSource()

	// This will hold the downstream state, as we know it.
	downstream := NewStore(DeletionHandlingMetaNamespaceKeyFunc)

	// This will hold incoming changes. Note how we pass downstream in as a
	// KeyLister, that way resync operations will result in the correct set
	// of update/delete deltas.
	fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, downstream)

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	cfg := &Config{
		Queue:            fifo,
		ListerWatcher:    source,
		ObjectType:       &api.Pod{},
		FullResyncPeriod: time.Millisecond * 100,
		RetryOnError:     false,

		// Let's implement a simple controller that just deletes
		// everything that comes in.
		Process: func(obj interface{}) error {
			// Obj is from the Pop method of the Queue we make above.
			newest := obj.(Deltas).Newest()

			if newest.Type != Deleted {
				// Update our downstream store.
				err := downstream.Add(newest.Object)
				if err != nil {
					return err
				}

				// Delete this object.
				source.Delete(newest.Object.(runtime.Object))
			} else {
				// Update our downstream store.
				err := downstream.Delete(newest.Object)
				if err != nil {
					return err
				}

				// fifo's KeyOf is easiest, because it handles
				// DeletedFinalStateUnknown markers.
				key, err := fifo.KeyOf(newest.Object)
				if err != nil {
					return err
				}

				// Report this deletion.
				deletionCounter <- key
			}
			return nil
		},
	}

	// Create the controller and run it until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go New(cfg).Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or anything.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := sets.String{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}

func ExampleNewInformer() {
	// source simulates an apiserver object endpoint.
	source := fcache.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	// Make a controller that immediately deletes anything added to it, and
	// logs anything deleted.
	_, controller := NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				source.Delete(obj.(runtime.Object))
			},
			DeleteFunc: func(obj interface{}) {
				key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
				if err != nil {
					key = "oops something went wrong with the key"
				}

				// Report this deletion.
				deletionCounter <- key
			},
		},
	)

	// Run the controller until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or anything.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := sets.String{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}

func TestHammerController(t *testing.T) {
	// This test executes a bunch of requests through the fake source and
	// controller framework to make sure there's no locking/threading
	// errors. If an error happens, it should hang forever or trigger the
	// race detector.

	// source simulates an apiserver object endpoint.
	source := fcache.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	outputSetLock := sync.Mutex{}
	// map of key to operations done on the key
	outputSet := map[string][]string{}

	recordFunc := func(eventType string, obj interface{}) {
		key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
		if err != nil {
			t.Errorf("something wrong with key: %v", err)
			key = "oops something went wrong with the key"
		}

		// Record some output when items are deleted.
		outputSetLock.Lock()
		defer outputSetLock.Unlock()
		outputSet[key] = append(outputSet[key], eventType)
	}

	// Make a controller which just logs all the changes it gets.
	_, controller := NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { recordFunc("add", obj) },
			UpdateFunc: func(oldObj, newObj interface{}) { recordFunc("update", newObj) },
			DeleteFunc: func(obj interface{}) { recordFunc("delete", obj) },
		},
	)

	if controller.HasSynced() {
		t.Errorf("Expected HasSynced() to return false before we started the controller")
	}

	// Run the controller until we close stop.
	stop := make(chan struct{})
	go controller.Run(stop)

	// Let's wait for the controller to do its initial sync
	wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
		return controller.HasSynced(), nil
	})
	if !controller.HasSynced() {
		t.Errorf("Expected HasSynced() to return true after the initial sync")
	}

	wg := sync.WaitGroup{}
	const threads = 3
	wg.Add(threads)
	for i := 0; i < threads; i++ {
		go func() {
			defer wg.Done()
			// Let's add a few objects to the source.
			currentNames := sets.String{}
			rs := rand.NewSource(rand.Int63())
			f := fuzz.New().NilChance(.5).NumElements(0, 2).RandSource(rs)
			r := rand.New(rs) // Mustn't use r and f concurrently!
			for i := 0; i < 100; i++ {
				var name string
				var isNew bool
				if currentNames.Len() == 0 || r.Intn(3) == 1 {
					f.Fuzz(&name)
					isNew = true
				} else {
					l := currentNames.List()
					name = l[r.Intn(len(l))]
				}

				pod := &api.Pod{}
				f.Fuzz(pod)
				pod.ObjectMeta.Name = name
				pod.ObjectMeta.Namespace = "default"
				// Add, update, or delete randomly.
				// Note that these pods are not valid-- the fake source doesn't
				// call validation or perform any other checking.
				if isNew {
					currentNames.Insert(name)
					source.Add(pod)
					continue
				}
				switch r.Intn(2) {
				case 0:
					currentNames.Insert(name)
					source.Modify(pod)
				case 1:
					currentNames.Delete(name)
					source.Delete(pod)
				}
			}
		}()
	}
	wg.Wait()

	// Let's wait for the controller to finish processing the things we just added.
	// TODO: look in the queue to see how many items need to be processed.
	time.Sleep(100 * time.Millisecond)
	close(stop)

	outputSetLock.Lock()
	t.Logf("got: %#v", outputSet)
}

func TestUpdate(t *testing.T) {
	// This test is going to exercise the various paths that result in a
	// call to update.

	// source simulates an apiserver object endpoint.
	source := fcache.NewFakeControllerSource()

	const (
		FROM = "from"
		TO   = "to"
	)

	// These are the transitions we expect to see; because this is
	// asynchronous, there are a lot of valid possibilities.
	type pair struct{ from, to string }
	allowedTransitions := map[pair]bool{
		pair{FROM, TO}: true,

		// Because a resync can happen when we've already observed one
		// of the above but before the item is deleted.
		pair{TO, TO}: true,
		// Because a resync could happen before we observe an update.
		pair{FROM, FROM}: true,
	}

	pod := func(name, check string, final bool) *api.Pod {
		p := &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   name,
				Labels: map[string]string{"check": check},
			},
		}
		if final {
			p.Labels["final"] = "true"
		}
		return p
	}
	deletePod := func(p *api.Pod) bool {
		return p.Labels["final"] == "true"
	}

	tests := []func(string){
		func(name string) {
			name = "a-" + name
			source.Add(pod(name, FROM, false))
			source.Modify(pod(name, TO, true))
		},
	}

	const threads = 3

	var testDoneWG sync.WaitGroup
	testDoneWG.Add(threads * len(tests))

	// Make a controller that deletes things once it observes an update.
	// It calls Done() on the wait group on deletions so we can tell when
	// everything we've added has been deleted.
	watchCh := make(chan struct{})
	_, controller := NewInformer(
		&testLW{
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				watch, err := source.Watch(options)
				close(watchCh)
				return watch, err
			},
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return source.List(options)
			},
		},
		&api.Pod{},
		0,
		ResourceEventHandlerFuncs{
			UpdateFunc: func(oldObj, newObj interface{}) {
				o, n := oldObj.(*api.Pod), newObj.(*api.Pod)
				from, to := o.Labels["check"], n.Labels["check"]
				if !allowedTransitions[pair{from, to}] {
					t.Errorf("observed transition %q -> %q for %v", from, to, n.Name)
				}
				if deletePod(n) {
					source.Delete(n)
				}
			},
			DeleteFunc: func(obj interface{}) {
				testDoneWG.Done()
			},
		},
	)

	// Run the controller until we close stop.
	// Once Run() is called, calls to testDoneWG.Done() might start, so
	// all testDoneWG.Add() calls must happen before this point
	stop := make(chan struct{})
	go controller.Run(stop)
	<-watchCh

	// run every test a few times, in parallel
	var wg sync.WaitGroup
	wg.Add(threads * len(tests))
	for i := 0; i < threads; i++ {
		for j, f := range tests {
			go func(name string, f func(string)) {
				defer wg.Done()
				f(name)
			}(fmt.Sprintf("%v-%v", i, j), f)
		}
	}
	wg.Wait()

	// Let's wait for the controller to process the things we just added.
	testDoneWG.Wait()
	close(stop)
}
48
pkg/client/cache/processor_listener_test.go
vendored
Normal file
@@ -0,0 +1,48 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/util/wait"
)

// TestPopReleaseLock tests that when the processor listener blocks on its
// channel, it releases the lock guarding pendingNotifications.
func TestPopReleaseLock(t *testing.T) {
	pl := newProcessListener(nil)
	stopCh := make(chan struct{})
	defer close(stopCh)
	// make pop() block on nextCh: waiting for receiver to get notification.
	pl.add(1)
	go pl.pop(stopCh)

	resultCh := make(chan struct{})
	go func() {
		pl.lock.Lock()
		close(resultCh)
	}()

	select {
	case <-resultCh:
	case <-time.After(wait.ForeverTestTimeout):
		t.Errorf("Timeout after %v", wait.ForeverTestTimeout)
	}
	pl.lock.Unlock()
}
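The property this test pins down is that a stalled consumer cannot wedge producers: pop() releases the listener lock before blocking on nextCh. A rough, hypothetical driver showing the interaction (it only compiles inside package cache, since processorListener is unexported):

pl := newProcessListener(nil)
stopCh := make(chan struct{})
go pl.pop(stopCh) // parks sending to pl.nextCh until someone receives

// Even while pop() is blocked on nextCh, add() can still take the lock
// and append, because pop() does not hold the lock across the send.
for i := 0; i < 10; i++ {
	pl.add(i)
}
<-pl.nextCh // receive one notification; pop() loops and offers the next
close(stopCh)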
14
pkg/client/cache/reflector_test.go
vendored
@@ -34,12 +34,12 @@ import (
var nevererrc chan error

type testLW struct {
-	ListFunc  func() (runtime.Object, error)
+	ListFunc  func(options api.ListOptions) (runtime.Object, error)
	WatchFunc func(options api.ListOptions) (watch.Interface, error)
}

func (t *testLW) List(options api.ListOptions) (runtime.Object, error) {
-	return t.ListFunc()
+	return t.ListFunc(options)
}
func (t *testLW) Watch(options api.ListOptions) (watch.Interface, error) {
	return t.WatchFunc(options)
@@ -53,7 +53,7 @@ func TestCloseWatchChannelOnError(t *testing.T) {
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			return fw, nil
		},
-		ListFunc: func() (runtime.Object, error) {
+		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}, nil
		},
	}
@@ -79,7 +79,7 @@ func TestRunUntil(t *testing.T) {
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			return fw, nil
		},
-		ListFunc: func() (runtime.Object, error) {
+		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}, nil
		},
	}
@@ -227,7 +227,7 @@ func TestReflectorListAndWatch(t *testing.T) {
			go func() { createdFakes <- fw }()
			return fw, nil
		},
-		ListFunc: func() (runtime.Object, error) {
+		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}, nil
		},
	}
@@ -345,7 +345,7 @@ func TestReflectorListAndWatchWithErrors(t *testing.T) {
			}()
			return fw, nil
		},
-		ListFunc: func() (runtime.Object, error) {
+		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return item.list, item.listErr
		},
	}
@@ -373,7 +373,7 @@ func TestReflectorResync(t *testing.T) {
			fw := watch.NewFake()
			return fw, nil
		},
-		ListFunc: func() (runtime.Object, error) {
+		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "0"}}, nil
		},
	}
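With the new signature, a test stub can inspect the ListOptions it is handed; an illustrative (hypothetical) stub:

lw := &testLW{
	ListFunc: func(options api.ListOptions) (runtime.Object, error) {
		// The stub now sees options, e.g. the resource version the
		// reflector lists from.
		return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}, nil
	},
	WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
		return watch.NewFake(), nil
	},
}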
413
pkg/client/cache/shared_informer.go
vendored
Normal file
@@ -0,0 +1,413 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"fmt"
	"sync"
	"time"

	"k8s.io/kubernetes/pkg/runtime"
	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
	"k8s.io/kubernetes/pkg/util/wait"

	"github.com/golang/glog"
)

// SharedInformer has one behavior change compared to a standard Informer.
// When you receive a notification, the cache will be AT LEAST as fresh as the
// notification, but it MAY be more fresh. You should NOT depend on the contents
// of the cache exactly matching the notification you've received in handler
// functions. If there was a create, followed by a delete, the cache may NOT
// have your item. This has advantages over the broadcaster since it allows us
// to share a common cache across many controllers. Extending the broadcaster
// would have required us to keep duplicate caches for each watch.
type SharedInformer interface {
	// AddEventHandler adds an event handler. Events to a single handler are
	// delivered sequentially, but there is no coordination between different handlers.
	// You may NOT add a handler *after* the SharedInformer is running. That will
	// result in an error being returned.
	// TODO we should try to remove this restriction eventually.
	AddEventHandler(handler ResourceEventHandler) error
	GetStore() Store
	// GetController gives back a synthetic interface that "votes" to start the informer
	GetController() ControllerInterface
	Run(stopCh <-chan struct{})
	HasSynced() bool
	LastSyncResourceVersion() string
}

type SharedIndexInformer interface {
	SharedInformer
	// AddIndexers adds indexers to the informer before it starts.
	AddIndexers(indexers Indexers) error
	GetIndexer() Indexer
}

// NewSharedInformer creates a new instance for the listwatcher.
// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
// be shared amongst all consumers.
func NewSharedInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
	return NewSharedIndexInformer(lw, objType, resyncPeriod, Indexers{})
}

// NewSharedIndexInformer creates a new instance for the listwatcher.
// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
// be shared amongst all consumers.
func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
	sharedIndexInformer := &sharedIndexInformer{
		processor:        &sharedProcessor{},
		indexer:          NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
		listerWatcher:    lw,
		objectType:       objType,
		fullResyncPeriod: resyncPeriod,
	}
	return sharedIndexInformer
}

// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced.
type InformerSynced func() bool

// syncedPollPeriod controls how often you look at the status of your sync funcs
const syncedPollPeriod = 100 * time.Millisecond

// WaitForCacheSync waits for caches to populate. It returns true if it was successful, false
// if the controller should shut down.
func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
	err := wait.PollUntil(syncedPollPeriod,
		func() (bool, error) {
			for _, syncFunc := range cacheSyncs {
				if !syncFunc() {
					return false, nil
				}
			}
			return true, nil
		},
		stopCh)
	if err != nil {
		glog.V(2).Infof("stop requested")
		return false
	}

	glog.V(4).Infof("caches populated")
	return true
}

type sharedIndexInformer struct {
	indexer    Indexer
	controller *Controller

	processor *sharedProcessor

	// This block is tracked to handle late initialization of the controller
	listerWatcher    ListerWatcher
	objectType       runtime.Object
	fullResyncPeriod time.Duration

	started     bool
	startedLock sync.Mutex

	// blockDeltas gives a way to stop all event distribution so that a late event handler
	// can safely join the shared informer.
	blockDeltas sync.Mutex
	// stopCh is the channel used to stop the main Run process. We have to track it so that
	// late joiners can have a proper stop
	stopCh <-chan struct{}
}

// dummyController hides the fact that a SharedInformer is different from a dedicated one
// where a caller can `Run`. The run method is disconnected in this case, because higher
// level logic will decide when to start the SharedInformer and related controller.
// Because returning information back is always asynchronous, the legacy callers shouldn't
// notice any change in behavior.
type dummyController struct {
	informer *sharedIndexInformer
}

func (v *dummyController) Run(stopCh <-chan struct{}) {
}

func (v *dummyController) HasSynced() bool {
	return v.informer.HasSynced()
}

type updateNotification struct {
	oldObj interface{}
	newObj interface{}
}

type addNotification struct {
	newObj interface{}
}

type deleteNotification struct {
	oldObj interface{}
}

func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, s.indexer)

	cfg := &Config{
		Queue:            fifo,
		ListerWatcher:    s.listerWatcher,
		ObjectType:       s.objectType,
		FullResyncPeriod: s.fullResyncPeriod,
		RetryOnError:     false,

		Process: s.HandleDeltas,
	}

	func() {
		s.startedLock.Lock()
		defer s.startedLock.Unlock()

		s.controller = New(cfg)
		s.started = true
	}()

	s.stopCh = stopCh
	s.processor.run(stopCh)
	s.controller.Run(stopCh)
}

func (s *sharedIndexInformer) isStarted() bool {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()
	return s.started
}

func (s *sharedIndexInformer) HasSynced() bool {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if s.controller == nil {
		return false
	}
	return s.controller.HasSynced()
}

func (s *sharedIndexInformer) LastSyncResourceVersion() string {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if s.controller == nil {
		return ""
	}
	return s.controller.reflector.LastSyncResourceVersion()
}

func (s *sharedIndexInformer) GetStore() Store {
	return s.indexer
}

func (s *sharedIndexInformer) GetIndexer() Indexer {
	return s.indexer
}

func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if s.started {
		return fmt.Errorf("informer has already started")
	}

	return s.indexer.AddIndexers(indexers)
}

func (s *sharedIndexInformer) GetController() ControllerInterface {
	return &dummyController{informer: s}
}

func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) error {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if !s.started {
		listener := newProcessListener(handler)
		s.processor.listeners = append(s.processor.listeners, listener)
		return nil
	}

	// in order to safely join, we have to
	// 1. stop sending add/update/delete notifications
	// 2. do a list against the store
	// 3. send synthetic "Add" events to the new handler
	// 4. unblock
	s.blockDeltas.Lock()
	defer s.blockDeltas.Unlock()

	listener := newProcessListener(handler)
	s.processor.listeners = append(s.processor.listeners, listener)

	go listener.run(s.stopCh)
	go listener.pop(s.stopCh)

	items := s.indexer.List()
	for i := range items {
		listener.add(addNotification{newObj: items[i]})
	}

	return nil
}

func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
	s.blockDeltas.Lock()
	defer s.blockDeltas.Unlock()

	// from oldest to newest
	for _, d := range obj.(Deltas) {
		switch d.Type {
		case Sync, Added, Updated:
			if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
				if err := s.indexer.Update(d.Object); err != nil {
					return err
				}
				s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object})
			} else {
				if err := s.indexer.Add(d.Object); err != nil {
					return err
				}
				s.processor.distribute(addNotification{newObj: d.Object})
			}
		case Deleted:
			if err := s.indexer.Delete(d.Object); err != nil {
				return err
			}
			s.processor.distribute(deleteNotification{oldObj: d.Object})
		}
	}
	return nil
}

type sharedProcessor struct {
	listeners []*processorListener
}

func (p *sharedProcessor) distribute(obj interface{}) {
	for _, listener := range p.listeners {
		listener.add(obj)
	}
}

func (p *sharedProcessor) run(stopCh <-chan struct{}) {
	for _, listener := range p.listeners {
		go listener.run(stopCh)
		go listener.pop(stopCh)
	}
}

type processorListener struct {
	// lock/cond protects access to 'pendingNotifications'.
	lock sync.RWMutex
	cond sync.Cond

	// pendingNotifications is an unbounded slice that holds all notifications not yet distributed
	// there is one per listener, but a failing/stalled listener will have infinite pendingNotifications
	// added until we OOM.
	// TODO This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but
	// we should try to do something better
	pendingNotifications []interface{}

	nextCh chan interface{}

	handler ResourceEventHandler
}

func newProcessListener(handler ResourceEventHandler) *processorListener {
	ret := &processorListener{
		pendingNotifications: []interface{}{},
		nextCh:               make(chan interface{}),
		handler:              handler,
	}

	ret.cond.L = &ret.lock
	return ret
}

func (p *processorListener) add(notification interface{}) {
	p.lock.Lock()
	defer p.lock.Unlock()

	p.pendingNotifications = append(p.pendingNotifications, notification)
	p.cond.Broadcast()
}

func (p *processorListener) pop(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	for {
		blockingGet := func() (interface{}, bool) {
			p.lock.Lock()
			defer p.lock.Unlock()

			for len(p.pendingNotifications) == 0 {
				// check if we're shutdown
				select {
				case <-stopCh:
					return nil, true
				default:
				}
				p.cond.Wait()
			}

			nt := p.pendingNotifications[0]
			p.pendingNotifications = p.pendingNotifications[1:]
			return nt, false
		}

		notification, stopped := blockingGet()
		if stopped {
			return
		}

		select {
		case <-stopCh:
			return
		case p.nextCh <- notification:
		}
	}
}

func (p *processorListener) run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	for {
		var next interface{}
		select {
		case <-stopCh:
			func() {
				p.lock.Lock()
				defer p.lock.Unlock()
				p.cond.Broadcast()
			}()
			return

		case next = <-p.nextCh:
		}

		switch notification := next.(type) {
		case updateNotification:
			p.handler.OnUpdate(notification.oldObj, notification.newObj)
		case addNotification:
			p.handler.OnAdd(notification.newObj)
		case deleteNotification:
			p.handler.OnDelete(notification.oldObj)
		default:
			utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next))
		}
	}
}
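A minimal usage sketch for the shared informer and WaitForCacheSync above (lw is an assumed ListerWatcher; handlers are registered before Run, per the AddEventHandler restriction). This is illustrative only:

informer := NewSharedIndexInformer(lw, &api.Pod{}, 30*time.Second, Indexers{})

// Register handlers before Run; late joiners take the blockDeltas path instead.
informer.AddEventHandler(ResourceEventHandlerFuncs{
	AddFunc:    func(obj interface{}) { /* react to adds */ },
	DeleteFunc: func(obj interface{}) { /* react to deletes */ },
})

stopCh := make(chan struct{})
defer close(stopCh)
go informer.Run(stopCh)

// Block until the initial list has been delivered, or until stopCh closes.
if !WaitForCacheSync(stopCh, informer.HasSynced) {
	return
}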
262
pkg/client/testing/cache/fake_controller_source.go
vendored
Normal file
@@ -0,0 +1,262 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"errors"
	"math/rand"
	"strconv"
	"sync"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/meta"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/watch"
)

func NewFakeControllerSource() *FakeControllerSource {
	return &FakeControllerSource{
		Items:       map[nnu]runtime.Object{},
		Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
	}
}

func NewFakePVControllerSource() *FakePVControllerSource {
	return &FakePVControllerSource{
		FakeControllerSource{
			Items:       map[nnu]runtime.Object{},
			Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
		}}
}

func NewFakePVCControllerSource() *FakePVCControllerSource {
	return &FakePVCControllerSource{
		FakeControllerSource{
			Items:       map[nnu]runtime.Object{},
			Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
		}}
}

// FakeControllerSource implements listing/watching for testing.
type FakeControllerSource struct {
	lock        sync.RWMutex
	Items       map[nnu]runtime.Object
	changes     []watch.Event // one change per resourceVersion
	Broadcaster *watch.Broadcaster
}

type FakePVControllerSource struct {
	FakeControllerSource
}

type FakePVCControllerSource struct {
	FakeControllerSource
}

// namespace, name, uid to be used as a key.
type nnu struct {
	namespace, name string
	uid             types.UID
}

// Add adds an object to the set and sends an add event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) Add(obj runtime.Object) {
	f.Change(watch.Event{Type: watch.Added, Object: obj}, 1)
}

// Modify updates an object in the set and sends a modified event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) Modify(obj runtime.Object) {
	f.Change(watch.Event{Type: watch.Modified, Object: obj}, 1)
}

// Delete deletes an object from the set and sends a delete event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) Delete(lastValue runtime.Object) {
	f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 1)
}

// AddDropWatch adds an object to the set but forgets to send an add event to
// watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) AddDropWatch(obj runtime.Object) {
	f.Change(watch.Event{Type: watch.Added, Object: obj}, 0)
}

// ModifyDropWatch updates an object in the set but forgets to send a modify
// event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) ModifyDropWatch(obj runtime.Object) {
	f.Change(watch.Event{Type: watch.Modified, Object: obj}, 0)
}

// DeleteDropWatch deletes an object from the set but forgets to send a delete
// event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) DeleteDropWatch(lastValue runtime.Object) {
	f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 0)
}

func (f *FakeControllerSource) key(accessor meta.Object) nnu {
	return nnu{accessor.GetNamespace(), accessor.GetName(), accessor.GetUID()}
}

// Change records the given event (setting the object's resource version) and
// sends a watch event with the specified probability.
func (f *FakeControllerSource) Change(e watch.Event, watchProbability float64) {
	f.lock.Lock()
	defer f.lock.Unlock()

	accessor, err := meta.Accessor(e.Object)
	if err != nil {
		panic(err) // this is test code only
	}

	resourceVersion := len(f.changes) + 1
	accessor.SetResourceVersion(strconv.Itoa(resourceVersion))
	f.changes = append(f.changes, e)
	key := f.key(accessor)
	switch e.Type {
	case watch.Added, watch.Modified:
		f.Items[key] = e.Object
	case watch.Deleted:
		delete(f.Items, key)
	}

	if rand.Float64() < watchProbability {
		f.Broadcaster.Action(e.Type, e.Object)
	}
}

func (f *FakeControllerSource) getListItemsLocked() ([]runtime.Object, error) {
	list := make([]runtime.Object, 0, len(f.Items))
	for _, obj := range f.Items {
		// Must make a copy to allow clients to modify the object.
		// Otherwise, if they make a change and write it back, they
		// will inadvertently change our canonical copy (in
		// addition to racing with other clients).
		objCopy, err := api.Scheme.DeepCopy(obj)
		if err != nil {
			return nil, err
		}
		list = append(list, objCopy.(runtime.Object))
	}
	return list, nil
}

// List returns a list object, with its resource version set.
func (f *FakeControllerSource) List(options api.ListOptions) (runtime.Object, error) {
	f.lock.RLock()
	defer f.lock.RUnlock()
	list, err := f.getListItemsLocked()
	if err != nil {
		return nil, err
	}
	listObj := &api.List{}
	if err := meta.SetList(listObj, list); err != nil {
		return nil, err
	}
	objMeta, err := api.ListMetaFor(listObj)
	if err != nil {
		return nil, err
	}
	resourceVersion := len(f.changes)
	objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
	return listObj, nil
}

// List returns a list object, with its resource version set.
func (f *FakePVControllerSource) List(options api.ListOptions) (runtime.Object, error) {
	f.lock.RLock()
	defer f.lock.RUnlock()
	list, err := f.FakeControllerSource.getListItemsLocked()
	if err != nil {
		return nil, err
	}
	listObj := &api.PersistentVolumeList{}
	if err := meta.SetList(listObj, list); err != nil {
		return nil, err
	}
	objMeta, err := api.ListMetaFor(listObj)
	if err != nil {
		return nil, err
	}
	resourceVersion := len(f.changes)
	objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
	return listObj, nil
}

// List returns a list object, with its resource version set.
func (f *FakePVCControllerSource) List(options api.ListOptions) (runtime.Object, error) {
	f.lock.RLock()
	defer f.lock.RUnlock()
	list, err := f.FakeControllerSource.getListItemsLocked()
	if err != nil {
		return nil, err
	}
	listObj := &api.PersistentVolumeClaimList{}
	if err := meta.SetList(listObj, list); err != nil {
		return nil, err
	}
	objMeta, err := api.ListMetaFor(listObj)
	if err != nil {
		return nil, err
	}
	resourceVersion := len(f.changes)
	objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
	return listObj, nil
}

// Watch returns a watch, which will be pre-populated with all changes
// after resourceVersion.
func (f *FakeControllerSource) Watch(options api.ListOptions) (watch.Interface, error) {
	f.lock.RLock()
	defer f.lock.RUnlock()
	rc, err := strconv.Atoi(options.ResourceVersion)
	if err != nil {
		return nil, err
	}
	if rc < len(f.changes) {
		changes := []watch.Event{}
		for _, c := range f.changes[rc:] {
			// Must make a copy to allow clients to modify the
			// object. Otherwise, if they make a change and write
			// it back, they will inadvertently change our
			// canonical copy (in addition to racing with other
			// clients).
			objCopy, err := api.Scheme.DeepCopy(c.Object)
			if err != nil {
				return nil, err
			}
			changes = append(changes, watch.Event{Type: c.Type, Object: objCopy.(runtime.Object)})
		}
		return f.Broadcaster.WatchWithPrefix(changes), nil
	} else if rc > len(f.changes) {
		return nil, errors.New("resource version in the future not supported by this fake")
	}
	return f.Broadcaster.Watch(), nil
}

// Shutdown closes the underlying broadcaster, waiting for events to be
// delivered. It's an error to call any method after calling shutdown. This is
// enforced by Shutdown() leaving f locked.
func (f *FakeControllerSource) Shutdown() {
	f.lock.Lock() // Purposely no unlock.
	f.Broadcaster.Shutdown()
}
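Resource versions in this fake are simply 1, 2, 3, ... assigned by Change(), and a watch opened at version N is pre-populated with every change after N. A small illustrative snippet (not part of the commit):

source := NewFakeControllerSource()
source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "a"}})    // assigned resourceVersion 1
source.Modify(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "a"}}) // assigned resourceVersion 2

// Watching from "1" replays the change at version 2 before any live events.
w, err := source.Watch(api.ListOptions{ResourceVersion: "1"})
if err != nil {
	panic(err)
}
evt := <-w.ResultChan() // the Modify event, Object at resourceVersion 2
_ = evt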
94
pkg/client/testing/cache/fake_controller_source_test.go
vendored
Normal file
@@ -0,0 +1,94 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"sync"
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/watch"
)

// consume ensures the watch delivers the requested and only the requested items.
func consume(t *testing.T, w watch.Interface, rvs []string, done *sync.WaitGroup) {
	defer done.Done()
	for _, rv := range rvs {
		got, ok := <-w.ResultChan()
		if !ok {
			t.Errorf("%#v: unexpected channel close, wanted %v", rvs, rv)
			return
		}
		gotRV := got.Object.(*api.Pod).ObjectMeta.ResourceVersion
		if e, a := rv, gotRV; e != a {
			t.Errorf("wanted %v, got %v", e, a)
		} else {
			t.Logf("Got %v as expected", gotRV)
		}
	}
	// We should not get anything else.
	got, open := <-w.ResultChan()
	if open {
		t.Errorf("%#v: unwanted object %#v", rvs, got)
	}
}

func TestRCNumber(t *testing.T) {
	pod := func(name string) *api.Pod {
		return &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name: name,
			},
		}
	}

	wg := &sync.WaitGroup{}
	wg.Add(3)

	source := NewFakeControllerSource()
	source.Add(pod("foo"))
	source.Modify(pod("foo"))
	source.Modify(pod("foo"))

	w, err := source.Watch(api.ListOptions{ResourceVersion: "1"})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	go consume(t, w, []string{"2", "3"}, wg)

	list, err := source.List(api.ListOptions{})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if e, a := "3", list.(*api.List).ResourceVersion; e != a {
		t.Errorf("wanted %v, got %v", e, a)
	}

	w2, err := source.Watch(api.ListOptions{ResourceVersion: "2"})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	go consume(t, w2, []string{"3"}, wg)

	w3, err := source.Watch(api.ListOptions{ResourceVersion: "3"})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	go consume(t, w3, []string{}, wg)
	source.Shutdown()
	wg.Wait()
}