Set deserialization cache size based on target memory usage

Wojciech Tyczynski 2016-10-04 09:17:44 +02:00
parent 158dc1a863
commit 2bfcb1a850
3 changed files with 29 additions and 3 deletions


@@ -145,6 +145,28 @@ func Run(s *options.APIServer) error {
glog.Fatalf("Failed to start kubelet client: %v", err)
}
if s.StorageConfig.DeserializationCacheSize == 0 {
// When the cache size is not explicitly set, estimate it based on the
// target memory usage.
glog.V(2).Infof("Initializing deserialization cache size based on %dMB limit", s.TargetRAMMB)
// This heuristic tries to infer the maximum number of nodes in the
// cluster from the memory capacity and sets the cache size based on
// that value.
// Our documentation officially recommends 120GB machines for 2000
// nodes, and we scale from that point; thus we assume ~60MB of
// capacity per node.
// TODO: We may consider deciding that some percentage of memory will
// be used for the deserialization cache and divide it by the max object
// size to compute its size. We may even go further and measure
// collective sizes of the objects in the cache.
clusterSize := s.TargetRAMMB / 60
s.StorageConfig.DeserializationCacheSize = 25 * clusterSize
if s.StorageConfig.DeserializationCacheSize < 1000 {
s.StorageConfig.DeserializationCacheSize = 1000
}
}
storageGroupsToEncodingVersion, err := s.StorageGroupsToEncodingVersion()
if err != nil {
glog.Fatalf("error generating storage version map: %s", err)


@@ -82,6 +82,10 @@ func Run(s *options.ServerRunOptions) error {
// TODO: register cluster federation resources here.
resourceConfig := genericapiserver.NewResourceConfig()
if s.StorageConfig.DeserializationCacheSize == 0 {
// When the cache size is not explicitly set, default it to 50000.
s.StorageConfig.DeserializationCacheSize = 50000
}
storageGroupsToEncodingVersion, err := s.StorageGroupsToEncodingVersion()
if err != nil {
glog.Fatalf("error generating storage version map: %s", err)


@@ -37,8 +37,6 @@ import (
)
const (
DefaultDeserializationCacheSize = 50000
// TODO: This can be tightened up. It still matches objects named watch or proxy.
defaultLongRunningRequestRE = "(/|^)((watch|proxy)(/|$)|(logs?|portforward|exec|attach)/?$)"
)
@@ -158,7 +156,9 @@ func NewServerRunOptions() *ServerRunOptions {
func (o *ServerRunOptions) WithEtcdOptions() *ServerRunOptions {
o.StorageConfig = storagebackend.Config{
Prefix: DefaultEtcdPathPrefix,
DeserializationCacheSize: DefaultDeserializationCacheSize,
// Default the cache size to 0. If left unset, the size will be derived
// from the target memory usage.
DeserializationCacheSize: 0,
}
return o
}
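
As a rough sketch of what the new zero default means for callers (the struct below is a simplified stand-in for storagebackend.Config, for illustration only): a zero DeserializationCacheSize now acts as "not explicitly set" and is filled in later, by the TargetRAMMB heuristic in kube-apiserver and by the flat 50000 fallback in the federation apiserver, while any explicit non-zero value is left untouched.

package main

import "fmt"

// storageConfig is a simplified stand-in for storagebackend.Config;
// it is not the real struct.
type storageConfig struct {
	Prefix                   string
	DeserializationCacheSize int
}

func main() {
	// Left at zero: the owning server derives the size later.
	derived := storageConfig{Prefix: "/registry"}
	// Explicitly set: both defaulting paths leave it alone.
	explicit := storageConfig{Prefix: "/registry", DeserializationCacheSize: 25000}

	fmt.Println(derived.DeserializationCacheSize == 0)  // true, will be derived
	fmt.Println(explicit.DeserializationCacheSize == 0) // false, kept as configured
}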