diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 8c0cce89b3e..8e4390e2f25 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -145,6 +145,28 @@ func Run(s *options.APIServer) error { glog.Fatalf("Failed to start kubelet client: %v", err) } + if s.StorageConfig.DeserializationCacheSize == 0 { + // When size of cache is not explicitly set, estimate its size based on + // target memory usage. + glog.V(2).Infof("Initializing deserialization cache size based on %dMB limit", s.TargetRAMMB) + + // This heuristic tries to infer the maximum number of nodes in the + // cluster from memory capacity and sets cache sizes based on that + // value. + // From our documentation, we officially recommend 120GB machines for + // 2000 nodes, and we scale from that point. Thus we assume ~60MB of + // capacity per node. + // TODO: We may consider deciding that some percentage of memory will + // be used for the deserialization cache and divide it by the max object + // size to compute its size. We may even go further and measure + // collective sizes of the objects in the cache. + clusterSize := s.TargetRAMMB / 60 + s.StorageConfig.DeserializationCacheSize = 25 * clusterSize + if s.StorageConfig.DeserializationCacheSize < 1000 { + s.StorageConfig.DeserializationCacheSize = 1000 + } + } + storageGroupsToEncodingVersion, err := s.StorageGroupsToEncodingVersion() if err != nil { glog.Fatalf("error generating storage version map: %s", err) diff --git a/federation/cmd/federation-apiserver/app/server.go b/federation/cmd/federation-apiserver/app/server.go index 01001285787..034555b95d7 100644 --- a/federation/cmd/federation-apiserver/app/server.go +++ b/federation/cmd/federation-apiserver/app/server.go @@ -82,6 +82,10 @@ func Run(s *options.ServerRunOptions) error { // TODO: register cluster federation resources here. 
resourceConfig := genericapiserver.NewResourceConfig() + if s.StorageConfig.DeserializationCacheSize == 0 { + // When size of cache is not explicitly set, set it to 50000 + s.StorageConfig.DeserializationCacheSize = 50000 + } storageGroupsToEncodingVersion, err := s.StorageGroupsToEncodingVersion() if err != nil { glog.Fatalf("error generating storage version map: %s", err) diff --git a/pkg/genericapiserver/options/server_run_options.go b/pkg/genericapiserver/options/server_run_options.go index 40289b11fd6..ec1c66f0a2b 100644 --- a/pkg/genericapiserver/options/server_run_options.go +++ b/pkg/genericapiserver/options/server_run_options.go @@ -37,8 +37,6 @@ import ( ) const ( - DefaultDeserializationCacheSize = 50000 - // TODO: This can be tightened up. It still matches objects named watch or proxy. defaultLongRunningRequestRE = "(/|^)((watch|proxy)(/|$)|(logs?|portforward|exec|attach)/?$)" ) @@ -158,7 +156,9 @@ func NewServerRunOptions() *ServerRunOptions { func (o *ServerRunOptions) WithEtcdOptions() *ServerRunOptions { o.StorageConfig = storagebackend.Config{ Prefix: DefaultEtcdPathPrefix, - DeserializationCacheSize: DefaultDeserializationCacheSize, + // Default cache size to 0 - if unset, its size will be set based on target + // memory usage. + DeserializationCacheSize: 0, } return o }