Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #36889 from wojtek-t/reuse_fields_and_labels
Automatic merge from submit-queue

Reuse fields and labels

This should significantly reduce memory allocations in the apiserver in large clusters.

Explanation:
- every kubelet refreshes its watch every 5-10 minutes (this generally does not cause a relist; it just renews the watch)
- in a 5000-node cluster, that means we are issuing ~10 watches per second
- since we don't have "watch heartbeats", each watch is issued from the previously received resourceVersion
- for a rough estimate, assume pods are spread evenly across nodes and writes to them are spread evenly too
- under that assumption, a given kubelet is interested in only 1 out of every 5000 pod changes
- as a result, each renewed watch has to filter through ~2500 previous watch events on average
- for each such event, we currently compute the fields and labels from scratch

This PR fixes that by reusing the computed fields and labels across watchers (see the back-of-the-envelope sketch below).
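The bullet points above compress a couple of steps. Here is a back-of-the-envelope sketch in Go that makes the two headline numbers explicit; every constant is an assumption taken from the commit message, not a measured value:

```go
package main

import "fmt"

func main() {
	const (
		nodes          = 5000  // cluster size assumed in the commit message
		refreshSeconds = 450.0 // watches renewed every 5-10 min; ~7.5 min midpoint
	)

	// Each node's kubelet renews its watch once per refresh interval.
	watchesPerSecond := nodes / refreshSeconds // ~11, i.e. "~10 watches per second"

	// With pods and writes spread evenly, a kubelet's watch only receives
	// (and so only advances its resourceVersion on) 1 out of every 5000
	// events. On average its stored resourceVersion therefore lags ~2500
	// events behind, and a renewed watch must filter through that backlog,
	// computing fields and labels for every event along the way.
	avgBacklog := nodes / 2.0 // ~2500 events replayed per renewed watch

	fmt.Printf("~%.0f watches/sec, each replaying ~%.0f events\n",
		watchesPerSecond, avgBacklog)
}
```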
```diff
@@ -40,8 +40,15 @@ func NewREST(config *storagebackend.Config, storageDecorator generic.StorageDeco
 	newListFunc := func() runtime.Object { return &testgroup.TestTypeList{} }
 	// Usually you should reuse your RESTCreateStrategy.
 	strategy := &NotNamespaceScoped{}
+	getAttrs := func(obj runtime.Object) (labels.Set, fields.Set, error) {
+		testObj, ok := obj.(*testgroup.TestType)
+		if !ok {
+			return nil, nil, fmt.Errorf("not a TestType")
+		}
+		return labels.Set(testObj.Labels), nil, nil
+	}
 	storageInterface, _ := storageDecorator(
-		config, 100, &testgroup.TestType{}, prefix, strategy, newListFunc, storage.NoTriggerPublisher)
+		config, 100, &testgroup.TestType{}, prefix, strategy, newListFunc, getAttrs, storage.NoTriggerPublisher)
 	store := &registry.Store{
 		NewFunc: func() runtime.Object { return &testgroup.TestType{} },
 		// NewListFunc returns an object capable of storing results of an etcd list.
```
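For readers outside the tree, here is a minimal, self-contained sketch of the pattern the diff introduces. The types `labelSet`, `fieldSet`, `TestType`, and `matches` are hypothetical stand-ins for the real `labels.Set`, `fields.Set`, and selection-predicate machinery; the point is only that `getAttrs` runs once per object and its result can then be reused for every watcher's selector:

```go
package main

import (
	"errors"
	"fmt"
)

// Hand-rolled stand-ins for labels.Set / fields.Set, just for this sketch.
type labelSet map[string]string
type fieldSet map[string]string

// TestType mimics the object from the diff: only its labels matter here.
type TestType struct {
	Name   string
	Labels map[string]string
}

// getAttrs extracts the attributes used for selector matching exactly once.
// In the real change this function is threaded into the storage decorator,
// so the watch cache can compute attributes per event instead of per watcher.
func getAttrs(obj interface{}) (labelSet, fieldSet, error) {
	t, ok := obj.(*TestType)
	if !ok {
		return nil, nil, errors.New("not a TestType")
	}
	return labelSet(t.Labels), nil, nil
}

// matches applies a label selector (here: exact key/value pairs) to the
// precomputed attributes, standing in for a real selection predicate.
func matches(selector, attrs labelSet) bool {
	for k, v := range selector {
		if attrs[k] != v {
			return false
		}
	}
	return true
}

func main() {
	obj := &TestType{Name: "t1", Labels: map[string]string{"app": "demo"}}

	// Compute the attributes once per event...
	attrs, _, err := getAttrs(obj)
	if err != nil {
		panic(err)
	}
	// ...then evaluate every watcher's selector against the cached result.
	for _, sel := range []labelSet{{"app": "demo"}, {"app": "other"}} {
		fmt.Printf("selector %v matches: %v\n", sel, matches(sel, attrs))
	}
}
```

The design point mirrors the PR description: once attribute extraction is owned by the storage layer, thousands of watchers can match against a single cached `(labels, fields)` pair per event rather than each re-deriving it.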