Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-26 21:17:23 +00:00)
Merge pull request #108252 from wojtek-t/avoid_object_deep_copies

Avoid deep-copying object when possible on kube-apiserver watch path

This commit is contained in: commit 9946b5364e
@@ -27,11 +27,11 @@ import (
 // Interface can be implemented by anything that knows how to watch and report changes.
 type Interface interface {
-	// Stops watching. Will close the channel returned by ResultChan(). Releases
+	// Stop stops watching. Will close the channel returned by ResultChan(). Releases
 	// any resources used by the watch.
 	Stop()
 
-	// Returns a chan which will receive all the events. If an error occurs
+	// ResultChan returns a chan which will receive all the events. If an error occurs
 	// or Stop() is called, the implementation will close this channel and
 	// release any resources used by the watch.
 	ResultChan() <-chan Event
 }
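The renamed comments above spell out the Interface contract: Stop() releases resources and closes the channel that ResultChan() returns. Below is a minimal, illustrative sketch of a consumer relying on that contract; it is not part of the patch. It uses the package's FakeWatcher test helper as a stand-in event source, and the consume function is an assumed name for illustration only.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
)

// consume drains a watch.Interface until the result channel is closed,
// which per the contract above happens once Stop() is called or an
// error occurs on the producer side.
func consume(w watch.Interface) {
	defer w.Stop() // FakeWatcher.Stop is idempotent, so a second call is harmless
	for event := range w.ResultChan() {
		fmt.Printf("received %s event for %T\n", event.Type, event.Object)
	}
}

func main() {
	// A buffered fake watcher lets us enqueue one event without a consumer running.
	fw := watch.NewFakeWithChanSize(1, false)
	fw.Add(&metav1.Status{Status: metav1.StatusSuccess})
	fw.Stop() // closes ResultChan(); the buffered event is still delivered
	consume(fw)
}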
@@ -59,9 +59,15 @@ func doTransformObject(ctx context.Context, obj runtime.Object, opts interface{}
 	if _, ok := obj.(*metav1.Status); ok {
 		return obj, nil
 	}
-	if err := ensureNonNilItems(obj); err != nil {
-		return nil, err
-	}
+
+	// ensure that for empty lists we don't return <nil> items.
+	// This is safe to modify without deep-copying the object, as
+	// List objects themselves are never cached.
+	if meta.IsListType(obj) && meta.LenList(obj) == 0 {
+		if err := meta.SetList(obj, []runtime.Object{}); err != nil {
+			return nil, err
+		}
+	}
 
 	switch target := mediaType.Convert; {
 	case target == nil:
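The inlined check above ensures that an empty list object carries a non-nil Items slice, so it serializes as "items": [] rather than "items": null. Below is a self-contained sketch of the same meta helpers; normalizeEmptyList is an illustrative name, not a function from the patch.

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
)

// normalizeEmptyList mirrors the inlined logic: for list objects with zero
// items it forces a non-nil (but empty) Items slice.
func normalizeEmptyList(obj runtime.Object) error {
	if meta.IsListType(obj) && meta.LenList(obj) == 0 {
		return meta.SetList(obj, []runtime.Object{})
	}
	return nil
}

func main() {
	list := &v1.PodList{} // Items is nil here
	if err := normalizeEmptyList(list); err != nil {
		panic(err)
	}
	out, _ := json.Marshal(list)
	fmt.Println(string(out)) // "items" now encodes as [], not null
}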
@@ -356,16 +356,6 @@ func dedupOwnerReferencesAndAddWarning(obj runtime.Object, requestContext contex
 	}
 }
 
-// ensureNonNilItems ensures that for empty lists we don't return <nil> items.
-func ensureNonNilItems(obj runtime.Object) error {
-	if meta.IsListType(obj) && meta.LenList(obj) == 0 {
-		if err := meta.SetList(obj, []runtime.Object{}); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 func summarizeData(data []byte, maxLength int) string {
 	switch {
 	case len(data) == 0:
@@ -818,11 +818,11 @@ func setCachingObjects(event *watchCacheEvent, versioner storage.Versioner) {
 		// Don't wrap Object for delete events - these are not to deliver any
 		// events. Only wrap PrevObject.
 		if object, err := newCachingObject(event.PrevObject); err == nil {
-			// Update resource version of the underlying object.
+			// Update resource version of the object.
 			// event.PrevObject is used to deliver DELETE watch events and
 			// for them, we set resourceVersion to <current> instead of
 			// the resourceVersion of the last modification of the object.
-			updateResourceVersionIfNeeded(object.object, versioner, event.ResourceVersion)
+			updateResourceVersion(object, versioner, event.ResourceVersion)
 			event.PrevObject = object
 		} else {
 			klog.Errorf("couldn't create cachingObject from: %#v", event.Object)
@@ -851,14 +851,14 @@ func (c *Cacher) dispatchEvent(event *watchCacheEvent) {
 		// from it justifies increased memory usage, so for now we drop the cached
 		// serializations after dispatching this event.
 		//
-		// Given the deep-copies that are done to create cachingObjects,
-		// we try to cache serializations only if there are at least 3 watchers.
-		if len(c.watchersBuffer) >= 3 {
-			// Make a shallow copy to allow overwriting Object and PrevObject.
-			wcEvent := *event
-			setCachingObjects(&wcEvent, c.versioner)
-			event = &wcEvent
-		}
+		// Given that CachingObject is just wrapping the object and not perfoming
+		// deep-copying (until some field is explicitly being modified), we create
+		// it unconditionally to ensure safety and reduce deep-copying.
+		//
+		// Make a shallow copy to allow overwriting Object and PrevObject.
+		wcEvent := *event
+		setCachingObjects(&wcEvent, c.versioner)
+		event = &wcEvent
 
 		c.blockedWatchers = c.blockedWatchers[:0]
 		for _, watcher := range c.watchersBuffer {
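The wcEvent := *event line above is a plain Go value copy: the dispatched event becomes an independent struct whose Object and PrevObject fields can be overwritten with caching wrappers without touching the event held in the watch cache. A tiny illustration of that semantics, using a stand-in event type rather than the real watchCacheEvent:

package main

import "fmt"

// event stands in for watchCacheEvent: a struct whose fields we want to
// overwrite for dispatching without mutating the cached original.
type event struct {
	object     string
	prevObject string
}

func main() {
	cached := event{object: "pod/v5", prevObject: "pod/v4"}

	// Shallow copy: wcEvent is an independent struct value, so overwriting
	// its fields leaves the cached event untouched.
	wcEvent := cached
	wcEvent.object = "cachingObject(pod/v5)"

	fmt.Println(cached.object)  // pod/v5
	fmt.Println(wcEvent.object) // cachingObject(pod/v5)
}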
@@ -1255,20 +1255,17 @@ func (c *cacheWatcher) nextBookmarkTime(now time.Time, bookmarkFrequency time.Du
 	return heartbeatTime, true
 }
 
-func getEventObject(object runtime.Object) runtime.Object {
-	if _, ok := object.(runtime.CacheableObject); ok {
+func getMutableObject(object runtime.Object) runtime.Object {
+	if _, ok := object.(*cachingObject); ok {
 		// It is safe to return without deep-copy, because the underlying
-		// object was already deep-copied during construction.
+		// object will lazily perform deep-copy on the first try to change
+		// any of its fields.
 		return object
 	}
 	return object.DeepCopyObject()
 }
 
-func updateResourceVersionIfNeeded(object runtime.Object, versioner storage.Versioner, resourceVersion uint64) {
-	if _, ok := object.(*cachingObject); ok {
-		// We assume that for cachingObject resourceVersion was already propagated before.
-		return
-	}
+func updateResourceVersion(object runtime.Object, versioner storage.Versioner, resourceVersion uint64) {
 	if err := versioner.UpdateObject(object, resourceVersion); err != nil {
 		utilruntime.HandleError(fmt.Errorf("failure to version api object (%d) %#v: %v", resourceVersion, object, err))
 	}
@@ -1291,13 +1288,17 @@ func (c *cacheWatcher) convertToWatchEvent(event *watchCacheEvent) *watch.Event
 
 	switch {
 	case curObjPasses && !oldObjPasses:
-		return &watch.Event{Type: watch.Added, Object: getEventObject(event.Object)}
+		return &watch.Event{Type: watch.Added, Object: getMutableObject(event.Object)}
 	case curObjPasses && oldObjPasses:
-		return &watch.Event{Type: watch.Modified, Object: getEventObject(event.Object)}
+		return &watch.Event{Type: watch.Modified, Object: getMutableObject(event.Object)}
 	case !curObjPasses && oldObjPasses:
 		// return a delete event with the previous object content, but with the event's resource version
-		oldObj := getEventObject(event.PrevObject)
-		updateResourceVersionIfNeeded(oldObj, c.versioner, event.ResourceVersion)
+		oldObj := getMutableObject(event.PrevObject)
+		// We know that if oldObj is cachingObject (which can only be set via
+		// setCachingObjects), its resourceVersion is already set correctly and
+		// we don't need to update it. However, since cachingObject efficiently
+		// handles noop updates, we avoid this microoptimization here.
+		updateResourceVersion(oldObj, c.versioner, event.ResourceVersion)
 		return &watch.Event{Type: watch.Deleted, Object: oldObj}
 	}
 
@@ -1370,24 +1370,17 @@ func testCachingObjects(t *testing.T, watchersCount int) {
 			}
 
 			var object runtime.Object
-			if watchersCount >= 3 {
-				if _, ok := event.Object.(runtime.CacheableObject); !ok {
-					t.Fatalf("Object in %s event should support caching: %#v", event.Type, event.Object)
-				}
-				object = event.Object.(runtime.CacheableObject).GetObject()
-			} else {
-				if _, ok := event.Object.(runtime.CacheableObject); ok {
-					t.Fatalf("Object in %s event should not support caching: %#v", event.Type, event.Object)
-				}
-				object = event.Object.DeepCopyObject()
-			}
+			if _, ok := event.Object.(runtime.CacheableObject); !ok {
+				t.Fatalf("Object in %s event should support caching: %#v", event.Type, event.Object)
+			}
+			object = event.Object.(runtime.CacheableObject).GetObject()
 
 			if event.Type == watch.Deleted {
 				resourceVersion, err := cacher.versioner.ObjectResourceVersion(cacher.watchCache.cache[index].PrevObject)
 				if err != nil {
 					t.Fatalf("Failed to parse resource version: %v", err)
 				}
-				updateResourceVersionIfNeeded(object, cacher.versioner, resourceVersion)
+				updateResourceVersion(object, cacher.versioner, resourceVersion)
 			}
 
 			var e runtime.Object
@@ -64,6 +64,16 @@ type serializationsCache map[runtime.Identifier]*serializationResult
 type cachingObject struct {
 	lock sync.RWMutex
 
+	// deepCopied defines whether the object below has already been
+	// deep copied. The operation is performed lazily on the first
+	// setXxx operation.
+	//
+	// The lazy deep-copy make is useful, as effectively the only
+	// case when we are setting some fields are ResourceVersion for
+	// DELETE events, so in all other cases we can effectively avoid
+	// performing any deep copies.
+	deepCopied bool
+
 	// Object for which serializations are cached.
 	object metaRuntimeInterface
 
@@ -79,7 +89,10 @@ type cachingObject struct {
 // metav1.Object type.
 func newCachingObject(object runtime.Object) (*cachingObject, error) {
 	if obj, ok := object.(metaRuntimeInterface); ok {
-		result := &cachingObject{object: obj.DeepCopyObject().(metaRuntimeInterface)}
+		result := &cachingObject{
+			object:     obj,
+			deepCopied: false,
+		}
 		result.serializations.Store(make(serializationsCache))
 		return result, nil
 	}
@@ -124,6 +137,10 @@ func (o *cachingObject) CacheEncode(id runtime.Identifier, encode func(runtime.O
 	result := o.getSerializationResult(id)
 	result.once.Do(func() {
 		buffer := bytes.NewBuffer(nil)
+		// TODO(wojtek-t): This is currently making a copy to avoid races
+		// in cases where encoding is making subtle object modifications,
+		// e.g. #82497
+		// Figure out if we can somehow avoid this under some conditions.
 		result.err = encode(o.GetObject(), buffer)
 		result.raw = buffer.Bytes()
 	})
@@ -156,7 +173,9 @@ func (o *cachingObject) DeepCopyObject() runtime.Object {
 	// DeepCopyObject on cachingObject is not expected to be called anywhere.
 	// However, to be on the safe-side, we implement it, though given the
 	// cache is only an optimization we ignore copying it.
-	result := &cachingObject{}
+	result := &cachingObject{
+		deepCopied: true,
+	}
 	result.serializations.Store(make(serializationsCache))
 
 	o.lock.RLock()
@@ -214,6 +233,10 @@ func (o *cachingObject) conditionalSet(isNoop func() bool, set func()) {
 	if isNoop() {
 		return
 	}
+	if !o.deepCopied {
+		o.object = o.object.DeepCopyObject().(metaRuntimeInterface)
+		o.deepCopied = true
+	}
 	o.invalidateCacheLocked()
 	set()
 }
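The hunk above is the heart of the laziness: the first mutating setter deep-copies the wrapped object exactly once, and every later setter mutates that private copy. Below is a self-contained sketch of the same copy-on-write pattern with simplified types; lazyObject and pod are illustrative names, not the actual cachingObject, and the locking the real type needs is omitted for brevity.

package main

import "fmt"

// pod is a stand-in for the wrapped API object.
type pod struct {
	Name            string
	ResourceVersion string
}

// lazyObject wraps a shared *pod and deep-copies it only on the first
// mutation, mirroring cachingObject's deepCopied flag and conditionalSet.
type lazyObject struct {
	obj        *pod
	deepCopied bool
}

// conditionalSet skips no-op updates entirely and copies the wrapped
// object once before the first real mutation.
func (l *lazyObject) conditionalSet(isNoop func() bool, set func()) {
	if isNoop() {
		return
	}
	if !l.deepCopied {
		copied := *l.obj // the "deep copy" for this flat struct
		l.obj = &copied
		l.deepCopied = true
	}
	set()
}

func (l *lazyObject) SetResourceVersion(rv string) {
	l.conditionalSet(
		func() bool { return l.obj.ResourceVersion == rv },
		func() { l.obj.ResourceVersion = rv },
	)
}

func main() {
	shared := &pod{Name: "name", ResourceVersion: "123"}
	wrapped := &lazyObject{obj: shared}

	wrapped.SetResourceVersion("123") // no-op: no copy is made
	fmt.Println(wrapped.deepCopied)   // false

	wrapped.SetResourceVersion("234") // first real change: copy once, then mutate
	fmt.Println(wrapped.deepCopied)   // true
	fmt.Println(shared.ResourceVersion, wrapped.obj.ResourceVersion) // 123 234
}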
@@ -26,6 +26,7 @@ import (
 
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 )
 
@@ -162,3 +163,29 @@ func TestCachingObjectRaces(t *testing.T) {
 	}
 	wg.Wait()
 }
+
+func TestCachingObjectLazyDeepCopy(t *testing.T) {
+	pod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            "name",
+			ResourceVersion: "123",
+		},
+	}
+	object, err := newCachingObject(pod)
+	if err != nil {
+		t.Fatalf("couldn't create cachingObject: %v", err)
+	}
+
+	if object.deepCopied != false {
+		t.Errorf("object deep-copied without the need")
+	}
+
+	object.SetResourceVersion("123")
+	if object.deepCopied != false {
+		t.Errorf("object deep-copied on no-op change")
+	}
+	object.SetResourceVersion("234")
+	if object.deepCopied != true {
+		t.Errorf("object not deep-copied on change")
+	}
+}