Mirror of https://github.com/rancher/rke.git (synced 2025-09-15 22:49:13 +00:00)
Update to k8s 1.17
vendor/k8s.io/client-go/tools/cache/OWNERS (generated, vendored): 6 changed lines
@@ -33,8 +33,6 @@ reviewers:
 - madhusudancs
 - hongchaodeng
 - krousey
 - markturansky
 - fgrzadkowski
 - xiang90
 - mml
 - ingvagabund
@@ -42,10 +40,6 @@ reviewers:
 - jessfraz
 - david-mcmahon
 - mfojtik
 - '249043822'
 - lixiaobing10051267
 - ddysher
 - mqliang
 - feihujiang
 - sdminonne
 - ncdc
vendor/k8s.io/client-go/tools/cache/delta_fifo.go (generated, vendored): 21 changed lines
@@ -295,13 +295,6 @@ func isDeletionDup(a, b *Delta) *Delta {
 	return b
 }
 
-// willObjectBeDeletedLocked returns true only if the last delta for the
-// given object is Delete. Caller must lock first.
-func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool {
-	deltas := f.items[id]
-	return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted
-}
-
 // queueActionLocked appends to the delta list for the object.
 // Caller must lock first.
 func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error {
@@ -310,13 +303,6 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
 		return KeyError{obj, err}
 	}
 
-	// If object is supposed to be deleted (last event is Deleted),
-	// then we should ignore Sync events, because it would result in
-	// recreation of this object.
-	if actionType == Sync && f.willObjectBeDeletedLocked(id) {
-		return nil
-	}
-
 	newDeltas := append(f.items[id], Delta{actionType, obj})
 	newDeltas = dedupDeltas(newDeltas)
 
@@ -539,13 +525,6 @@ func (f *DeltaFIFO) Resync() error {
 	return nil
 }
 
-func (f *DeltaFIFO) syncKey(key string) error {
-	f.lock.Lock()
-	defer f.lock.Unlock()
-
-	return f.syncKeyLocked(key)
-}
-
 func (f *DeltaFIFO) syncKeyLocked(key string) error {
 	obj, exists, err := f.knownObjects.GetByKey(key)
 	if err != nil {
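The removed helper and the Sync-skip in queueActionLocked guarded against a resync re-queuing an object whose newest queued delta is a deletion. A minimal, self-contained sketch of that pre-1.17 behaviour (not the vendored code; the fifo type and queueAction helper below are illustrative):

// Sketch of the removed check: a Sync action is dropped when the newest
// queued delta for the same key is a deletion, so a resync cannot
// resurrect an object that is on its way out.
package main

import "fmt"

type DeltaType string

const (
	Sync    DeltaType = "Sync"
	Deleted DeltaType = "Deleted"
)

type Delta struct {
	Type   DeltaType
	Object interface{}
}

type fifo struct {
	items map[string][]Delta
}

// queueAction mirrors the pre-1.17 logic: skip a Sync if the last delta is Deleted.
func (f *fifo) queueAction(key string, t DeltaType, obj interface{}) {
	deltas := f.items[key]
	if t == Sync && len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted {
		return // object is going away; do not recreate it via a resync
	}
	f.items[key] = append(deltas, Delta{t, obj})
}

func main() {
	f := &fifo{items: map[string][]Delta{}}
	f.queueAction("default/pod-a", Deleted, "pod-a")
	f.queueAction("default/pod-a", Sync, "pod-a") // dropped
	fmt.Println(len(f.items["default/pod-a"]))    // 1
}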
vendor/k8s.io/client-go/tools/cache/index.go (generated, vendored): 2 changed lines
@@ -56,7 +56,7 @@ type Indexer interface {
 type IndexFunc func(obj interface{}) ([]string, error)
 
 // IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns
-// unique values for every object. This is conversion can create errors when more than one key is found. You
+// unique values for every object. This conversion can create errors when more than one key is found. You
 // should prefer to make proper key and index functions.
 func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc {
 	return func(obj interface{}) (string, error) {
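The corrected comment is the whole change here. For context, a small usage sketch of IndexFuncToKeyFuncAdapter, assuming an index function that returns exactly one value per object (nameIndex below is a made-up example, not part of client-go):

// The adapter turns a single-valued IndexFunc into a KeyFunc; it errors
// if the index function yields zero or more than one value.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	nameIndex := func(obj interface{}) ([]string, error) {
		pod := obj.(*v1.Pod)
		return []string{pod.Name}, nil // exactly one value, so the adapter is safe
	}
	keyFunc := cache.IndexFuncToKeyFuncAdapter(nameIndex)

	key, err := keyFunc(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "web-0"}})
	fmt.Println(key, err) // "web-0" <nil>
}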
vendor/k8s.io/client-go/tools/cache/mutation_detector.go (generated, vendored): 20 changed lines
@@ -48,7 +48,7 @@ func NewCacheMutationDetector(name string) MutationDetector {
 		return dummyMutationDetector{}
 	}
 	klog.Warningln("Mutation detector is enabled, this will result in memory leakage.")
-	return &defaultCacheMutationDetector{name: name, period: 1 * time.Second}
+	return &defaultCacheMutationDetector{name: name, period: 1 * time.Second, retainDuration: 2 * time.Minute}
 }
 
 type dummyMutationDetector struct{}
@@ -68,6 +68,10 @@ type defaultCacheMutationDetector struct {
 	lock       sync.Mutex
 	cachedObjs []cacheObj
 
+	retainDuration     time.Duration
+	lastRotated        time.Time
+	retainedCachedObjs []cacheObj
+
 	// failureFunc is injectable for unit testing. If you don't have it, the process will panic.
 	// This panic is intentional, since turning on this detection indicates you want a strong
 	// failure signal. This failure is effectively a p0 bug and you can't trust process results
@@ -84,6 +88,14 @@ type cacheObj struct {
 func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) {
 	// we DON'T want protection from panics. If we're running this code, we want to die
 	for {
+		if d.lastRotated.IsZero() {
+			d.lastRotated = time.Now()
+		} else if time.Now().Sub(d.lastRotated) > d.retainDuration {
+			d.retainedCachedObjs = d.cachedObjs
+			d.cachedObjs = nil
+			d.lastRotated = time.Now()
+		}
+
 		d.CompareObjects()
 
 		select {
@@ -120,6 +132,12 @@ func (d *defaultCacheMutationDetector) CompareObjects() {
 			altered = true
 		}
 	}
+	for i, obj := range d.retainedCachedObjs {
+		if !reflect.DeepEqual(obj.cached, obj.copied) {
+			fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied))
+			altered = true
+		}
+	}
 
 	if altered {
 		msg := fmt.Sprintf("cache %s modified", d.name)
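The new retainDuration and lastRotated fields bound the detector's memory use: instead of comparing an ever-growing slice, entries older than retainDuration (two minutes here) are rotated into retainedCachedObjs and replaced on the next rotation. A hedged usage sketch; the detector is a no-op unless the process was started with KUBE_CACHE_MUTATION_DETECTOR=true (the flag is read at package init), and the object names below are made up:

// Demonstrates how the detector patched above is typically driven:
// objects handed to the informer cache are registered with AddObject,
// and Run compares stored deep copies against the originals every period.
package main

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Returns a no-op detector unless KUBE_CACHE_MUTATION_DETECTOR=true was
	// set in the environment before the program started.
	detector := cache.NewCacheMutationDetector("example-informer")

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "web-0"}}
	detector.AddObject(pod) // a deep copy is stored for later comparison

	stopCh := make(chan struct{})
	go detector.Run(stopCh) // CompareObjects runs once per period (1s)

	// Mutating a cached object is exactly the bug this detector exists to
	// catch; with detection enabled the process panics with "cache ... modified".
	pod.Labels = map[string]string{"mutated": "true"}

	time.Sleep(2 * time.Second)
	close(stopCh)
}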
vendor/k8s.io/client-go/tools/cache/reflector.go (generated, vendored): 69 changed lines
@@ -29,7 +29,9 @@ import (
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/naming"
 	utilnet "k8s.io/apimachinery/pkg/util/net"
@@ -41,15 +43,22 @@ import (
 	"k8s.io/utils/trace"
 )
 
+const defaultExpectedTypeName = "<unspecified>"
+
 // Reflector watches a specified resource and causes all changes to be reflected in the given store.
 type Reflector struct {
 	// name identifies this reflector. By default it will be a file:line if possible.
 	name string
-	// metrics tracks basic metric information about the reflector
-	metrics *reflectorMetrics
 
+	// The name of the type we expect to place in the store. The name
+	// will be the stringification of expectedGVK if provided, and the
+	// stringification of expectedType otherwise. It is for display
+	// only, and should not be used for parsing or comparison.
+	expectedTypeName string
 	// The type of object we expect to place in the store.
 	expectedType reflect.Type
+	// The GVK of the object we expect to place in the store if unstructured.
+	expectedGVK *schema.GroupVersionKind
 	// The destination to sync up with the watch source
 	store Store
 	// listerWatcher is used to perform lists and watches.
@@ -102,14 +111,35 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{},
 		name:          name,
 		listerWatcher: lw,
 		store:         store,
-		expectedType:  reflect.TypeOf(expectedType),
 		period:        time.Second,
 		resyncPeriod:  resyncPeriod,
 		clock:         &clock.RealClock{},
 	}
+	r.setExpectedType(expectedType)
 	return r
 }
 
+func (r *Reflector) setExpectedType(expectedType interface{}) {
+	r.expectedType = reflect.TypeOf(expectedType)
+	if r.expectedType == nil {
+		r.expectedTypeName = defaultExpectedTypeName
+		return
+	}
+
+	r.expectedTypeName = r.expectedType.String()
+
+	if obj, ok := expectedType.(*unstructured.Unstructured); ok {
+		// Use gvk to check that watch event objects are of the desired type.
+		gvk := obj.GroupVersionKind()
+		if gvk.Empty() {
+			klog.V(4).Infof("Reflector from %s configured with expectedType of *unstructured.Unstructured with empty GroupVersionKind.", r.name)
+			return
+		}
+		r.expectedGVK = &gvk
+		r.expectedTypeName = gvk.String()
+	}
+}
+
 // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common
 // call chains to NewReflector, so they'd be low entropy names for reflectors
 var internalPackages = []string{"client-go/tools/cache/"}
@@ -117,7 +147,7 @@ var internalPackages = []string{"client-go/tools/cache/"}
 // Run starts a watch and handles watch events. Will restart the watch if it is closed.
 // Run will exit when stopCh is closed.
 func (r *Reflector) Run(stopCh <-chan struct{}) {
-	klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
+	klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
 	wait.Until(func() {
 		if err := r.ListAndWatch(stopCh); err != nil {
 			utilruntime.HandleError(err)
@@ -129,9 +159,6 @@ var (
 	// nothing will ever be sent down this channel
 	neverExitWatch <-chan time.Time = make(chan time.Time)
 
-	// Used to indicate that watching stopped so that a resync could happen.
-	errorResyncRequested = errors.New("resync channel fired")
-
 	// Used to indicate that watching stopped because of a signal from the stop
 	// channel passed in from a client of the reflector.
 	errorStopRequested = errors.New("Stop requested")
@@ -155,7 +182,7 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
 // and then use the resource version to watch.
 // It returns error if ListAndWatch didn't even try to initialize watch.
 func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
-	klog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name)
+	klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name)
 	var resourceVersion string
 
 	// Explicitly set "0" as resource version - it's fine for the List()
@@ -196,7 +223,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 	case <-listCh:
 	}
 	if err != nil {
-		return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err)
+		return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedTypeName, err)
 	}
 	initTrace.Step("Objects listed")
 	listMetaInterface, err := meta.ListAccessor(list)
@@ -275,9 +302,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 			case io.EOF:
 				// watch closed normally
 			case io.ErrUnexpectedEOF:
-				klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
+				klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err)
 			default:
-				utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err))
+				utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err))
 			}
 			// If this is "connection refused" error, it means that most likely apiserver is not responsive.
 			// It doesn't make sense to re-list all objects because most likely we will be able to restart
@@ -294,9 +321,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 		if err != errorStopRequested {
 			switch {
 			case apierrs.IsResourceExpired(err):
-				klog.V(4).Infof("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
+				klog.V(4).Infof("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)
 			default:
-				klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
+				klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)
 			}
 		}
 		return nil
@@ -336,9 +363,17 @@ loop:
 			if event.Type == watch.Error {
 				return apierrs.FromObject(event.Object)
 			}
-			if e, a := r.expectedType, reflect.TypeOf(event.Object); e != nil && e != a {
-				utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a))
-				continue
+			if r.expectedType != nil {
+				if e, a := r.expectedType, reflect.TypeOf(event.Object); e != a {
+					utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a))
+					continue
+				}
+			}
+			if r.expectedGVK != nil {
+				if e, a := *r.expectedGVK, event.Object.GetObjectKind().GroupVersionKind(); e != a {
+					utilruntime.HandleError(fmt.Errorf("%s: expected gvk %v, but watch event object had gvk %v", r.name, e, a))
+					continue
+				}
 			}
 			meta, err := meta.Accessor(event.Object)
 			if err != nil {
@@ -380,7 +415,7 @@ loop:
 		if watchDuration < 1*time.Second && eventCount == 0 {
 			return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
 		}
-		klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
+		klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedTypeName, eventCount)
 		return nil
 	}
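The reflector now derives a display name (expectedTypeName) and, when the expected type is an *unstructured.Unstructured carrying a GroupVersionKind, records expectedGVK and validates watch events against the GVK rather than the Go type (which would always be *unstructured.Unstructured). A self-contained sketch under that assumption; the stub ListWatch below stands in for a real client-backed ListerWatcher such as one from NewListWatchFromClient:

// Builds a reflector whose expected type is an unstructured Deployment;
// with the change above, mismatched GVKs in watch events are rejected.
package main

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Expected type: an unstructured object with its GVK populated.
	expected := &unstructured.Unstructured{}
	expected.SetGroupVersionKind(schema.GroupVersionKind{
		Group: "apps", Version: "v1", Kind: "Deployment",
	})

	// Stub ListerWatcher so the example is self-contained.
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return &unstructured.UnstructuredList{}, nil
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return watch.NewFake(), nil
		},
	}

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	r := cache.NewNamedReflector("example-reflector", lw, expected, store, time.Minute)

	stopCh := make(chan struct{})
	// At -v=3 this would log something like
	// "Listing and watching apps/v1, Kind=Deployment from example-reflector".
	go r.Run(stopCh)
	time.Sleep(time.Second)
	close(stopCh)
}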
vendor/k8s.io/client-go/tools/cache/reflector_metrics.go (generated, vendored): 13 changed lines
@@ -47,19 +47,6 @@ func (noopMetric) Dec() {}
 func (noopMetric) Observe(float64) {}
 func (noopMetric) Set(float64)     {}
 
-type reflectorMetrics struct {
-	numberOfLists       CounterMetric
-	listDuration        SummaryMetric
-	numberOfItemsInList SummaryMetric
-
-	numberOfWatches      CounterMetric
-	numberOfShortWatches CounterMetric
-	watchDuration        SummaryMetric
-	numberOfItemsInWatch SummaryMetric
-
-	lastResourceVersion GaugeMetric
-}
-
 // MetricsProvider generates various metrics used by the reflector.
 type MetricsProvider interface {
 	NewListsMetric(name string) CounterMetric
vendor/k8s.io/client-go/tools/cache/shared_informer.go (generated, vendored): 5 changed lines
@@ -209,7 +209,7 @@ func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheS
 // if the controller should shutdown
 // callers should prefer WaitForNamedCacheSync()
 func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
-	err := wait.PollUntil(syncedPollPeriod,
+	err := wait.PollImmediateUntil(syncedPollPeriod,
 		func() (bool, error) {
 			for _, syncFunc := range cacheSyncs {
 				if !syncFunc() {
@@ -364,8 +364,7 @@ func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error {
 	defer s.startedLock.Unlock()
 
 	if s.started {
-		s.blockDeltas.Lock()
-		defer s.blockDeltas.Unlock()
+		return fmt.Errorf("informer has already started")
 	}
 
 	return s.indexer.AddIndexers(indexers)
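Switching WaitForCacheSync from wait.PollUntil to wait.PollImmediateUntil means the sync functions are checked right away instead of after the first poll interval, so callers whose caches are already synced return immediately. A minimal sketch of the call site; hasSynced below stands in for an informer's HasSynced method:

// WaitForCacheSync polls the supplied InformerSynced functions until they
// all return true or stopCh is closed, returning whether the caches synced.
package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	synced := false
	hasSynced := func() bool { return synced } // stand-in for informer.HasSynced

	synced = true // pretend the informer finished its initial list

	stopCh := make(chan struct{})
	if ok := cache.WaitForCacheSync(stopCh, hasSynced); !ok {
		fmt.Println("caches did not sync")
		return
	}
	fmt.Println("caches synced") // returns on the immediate first check
}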
vendor/k8s.io/client-go/tools/cache/thread_safe_store.go (generated, vendored): 21 changed lines
@@ -125,11 +125,6 @@ func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion st
 	c.items = items
 
 	// rebuild any index
-	c.rebuildIndices()
-}
-
-// rebuildIndices rebuilds all indices for the current set c.items. Assumes that c.lock is held by caller
-func (c *threadSafeMap) rebuildIndices() {
 	c.indices = Indices{}
 	for key, item := range c.items {
 		c.updateIndices(nil, item, key)
@@ -234,6 +229,10 @@ func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error {
 	c.lock.Lock()
 	defer c.lock.Unlock()
 
+	if len(c.items) > 0 {
+		return fmt.Errorf("cannot add indexers to running index")
+	}
+
 	oldKeys := sets.StringKeySet(c.indexers)
 	newKeys := sets.StringKeySet(newIndexers)
 
@@ -244,11 +243,6 @@ func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error {
 	for k, v := range newIndexers {
 		c.indexers[k] = v
 	}
 
-	if len(c.items) > 0 {
-		c.rebuildIndices()
-	}
-
 	return nil
 }
 
@@ -298,6 +292,13 @@ func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) {
 			set := index[indexValue]
 			if set != nil {
 				set.Delete(key)
+
+				// If we don't delete the set when zero, indices with high cardinality
+				// short lived resources can cause memory to increase over time from
+				// unused empty sets. See `kubernetes/kubernetes/issues/84959`.
+				if len(set) == 0 {
+					delete(index, indexValue)
+				}
 			}
 		}
 	}
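With this update the vendored store again rejects AddIndexers once items are present ("cannot add indexers to running index"), and deleteFromIndices now drops empty index sets so that high-cardinality, short-lived resources no longer leak memory (kubernetes/kubernetes issue 84959). A small sketch of the AddIndexers constraint; byNodeName and the pod objects are illustrative:

// Indexers must be registered while the store is still empty; adding one
// after items exist returns an error with the vanilla 1.17 store.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func byNodeName(obj interface{}) ([]string, error) {
	pod := obj.(*v1.Pod)
	return []string{pod.Spec.NodeName}, nil
}

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})

	// Adding an indexer while the store is empty succeeds.
	if err := indexer.AddIndexers(cache.Indexers{"byNode": byNodeName}); err != nil {
		panic(err)
	}

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "web-0", Namespace: "default"},
		Spec:       v1.PodSpec{NodeName: "node-1"},
	}
	_ = indexer.Add(pod)

	// Adding another indexer now fails, because items are already present.
	err := indexer.AddIndexers(cache.Indexers{"byName": func(obj interface{}) ([]string, error) {
		return []string{obj.(*v1.Pod).Name}, nil
	}})
	fmt.Println(err) // cannot add indexers to running index

	pods, _ := indexer.ByIndex("byNode", "node-1")
	fmt.Println(len(pods)) // 1
}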