Configurable cache sizes of cachers

Wojciech Tyczynski 2016-08-03 11:44:10 +02:00
parent 70d6fe6d1c
commit 8a8cd06ea4
9 changed files with 58 additions and 33 deletions

View File

@@ -282,6 +282,8 @@ func Run(s *options.APIServer) error {
 	}
 	if s.EnableWatchCache {
+		glog.V(2).Infof("Initializing cache sizes based on %dMB limit", s.TargetRAMMB)
+		cachesize.InitializeWatchCacheSizes(s.TargetRAMMB)
 		cachesize.SetWatchCacheSizes(s.WatchCacheSizes)
 	}

View File

@@ -142,6 +142,7 @@ func Run(s *genericoptions.ServerRunOptions) error {
 	// TODO: Move this to generic api server (Need to move the command line flag).
 	if s.EnableWatchCache {
+		cachesize.InitializeWatchCacheSizes(s.TargetRAMMB)
 		cachesize.SetWatchCacheSizes(s.WatchCacheSizes)
 	}
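
In both servers above, the RAM-derived defaults are installed first and the explicit per-resource overrides are applied afterwards, so user-specified sizes win. The override entries are passed to SetWatchCacheSizes as strings in a resource#size form; the parsing itself is not shown in this diff, so the helper below is only an illustrative sketch (parseCacheSizes is hypothetical):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCacheSizes is a hypothetical stand-in for the parsing done by
// SetWatchCacheSizes; the "resource#size" entry format is assumed here.
func parseCacheSizes(entries []string) (map[string]int, error) {
	sizes := make(map[string]int)
	for _, entry := range entries {
		parts := strings.SplitN(entry, "#", 2)
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid cache size entry %q", entry)
		}
		size, err := strconv.Atoi(parts[1])
		if err != nil {
			return nil, fmt.Errorf("invalid cache size in %q: %v", entry, err)
		}
		sizes[parts[0]] = size
	}
	return sizes, nil
}

func main() {
	sizes, err := parseCacheSizes([]string{"pods#5000", "nodes#1000"})
	if err != nil {
		panic(err)
	}
	fmt.Println(sizes) // map[nodes:1000 pods:5000]
}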

View File

@@ -39,7 +39,7 @@ cluster/rackspace/util.sh: local node_ip=$(nova show --minimal ${NODE_NAMES[$
 cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest:{% set params = pillar['autoscaler_mig_config'] + " " + cloud_config -%}
 cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
 cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set enable_garbage_collector = pillar['enable_garbage_collector'] -%}
-cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
+cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
 cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set enable_garbage_collector = pillar['enable_garbage_collector'] -%}
 cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + enable_garbage_collector + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
 cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers + ":6443" -%}

View File

@@ -471,6 +471,7 @@ system-container
 system-pods-startup-timeout
 system-reserved
 target-port
+target-ram-mb
 tcp-services
 terminated-pod-gc-threshold
 test-timeout

View File

@@ -98,6 +98,7 @@ type ServerRunOptions struct {
 	// these; you can change this if you want to change the defaults (e.g.,
 	// for testing). This is not actually exposed as a flag.
 	DefaultStorageVersions string
+	TargetRAMMB            int
 	TLSCertFile            string
 	TLSPrivateKeyFile      string
 	TokenAuthFile          string
@@ -304,6 +305,9 @@ func (s *ServerRunOptions) AddFlags(fs *pflag.FlagSet) {
 		"Per-resource etcd servers overrides, comma separated. The individual override "+
 		"format: group/resource#servers, where servers are http://ip:port, semicolon separated.")
+	fs.IntVar(&s.TargetRAMMB, "target-ram-mb", s.TargetRAMMB,
+		"Memory limit for apiserver in MB (used to configure sizes of caches, etc.)")
 	fs.StringVar(&s.ExternalHost, "external-hostname", s.ExternalHost,
 		"The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs).")

View File

@@ -31,6 +31,7 @@ const (
 	CertificateSigningRequests Resource = "certificatesigningrequests"
 	ClusterRoles               Resource = "clusterroles"
 	ClusterRoleBindings        Resource = "clusterrolebindings"
+	ConfigMaps                 Resource = "configmaps"
 	Controllers                Resource = "controllers"
 	Daemonsets                 Resource = "daemonsets"
 	Deployments                Resource = "deployments"
@@ -47,6 +48,7 @@ const (
 	PersistentVolumes          Resource = "persistentvolumes"
 	PersistentVolumeClaims     Resource = "persistentvolumeclaims"
 	Pods                       Resource = "pods"
+	PodSecurityPolicies        Resource = "podsecuritypolicies"
 	PodTemplates               Resource = "podtemplates"
 	Replicasets                Resource = "replicasets"
 	ResourceQuotas             Resource = "resourcequotas"
@@ -56,40 +58,36 @@ const (
 	Secrets                    Resource = "secrets"
 	ServiceAccounts            Resource = "serviceaccounts"
 	Services                   Resource = "services"
+	StorageClasses             Resource = "storageclasses"
+
+	// Default value of watch cache size for a resource if not specified.
+	defaultWatchCacheSize = 100
 )

+// TODO: This shouldn't be a global variable.
 var watchCacheSizes map[Resource]int

 func init() {
 	watchCacheSizes = make(map[Resource]int)
-	watchCacheSizes[CertificateSigningRequests] = 1000
-	watchCacheSizes[ClusterRoles] = 100
-	watchCacheSizes[ClusterRoleBindings] = 100
-	watchCacheSizes[Controllers] = 100
-	watchCacheSizes[Daemonsets] = 100
-	watchCacheSizes[Deployments] = 100
-	watchCacheSizes[Endpoints] = 1000
-	watchCacheSizes[HorizontalPodAutoscalers] = 100
-	watchCacheSizes[Ingress] = 100
-	watchCacheSizes[PetSet] = 100
-	watchCacheSizes[PodDisruptionBudget] = 100
-	watchCacheSizes[Jobs] = 100
-	watchCacheSizes[LimitRanges] = 100
-	watchCacheSizes[Namespaces] = 100
-	watchCacheSizes[NetworkPolicys] = 100
-	watchCacheSizes[Nodes] = 1000
-	watchCacheSizes[PersistentVolumes] = 100
-	watchCacheSizes[PersistentVolumeClaims] = 100
-	watchCacheSizes[Pods] = 1000
-	watchCacheSizes[PodTemplates] = 100
-	watchCacheSizes[Replicasets] = 100
-	watchCacheSizes[ResourceQuotas] = 100
-	watchCacheSizes[ScheduledJobs] = 100
-	watchCacheSizes[Roles] = 100
-	watchCacheSizes[RoleBindings] = 100
-	watchCacheSizes[Secrets] = 100
-	watchCacheSizes[ServiceAccounts] = 100
-	watchCacheSizes[Services] = 100
+}
+
+func InitializeWatchCacheSizes(expectedRAMCapacityMB int) {
+	// This heuristic tries to infer the maximum number of nodes in the
+	// cluster from the memory capacity, and sets cache sizes based on
+	// that value.
+	// Per our documentation, we officially recommend 120GB machines for
+	// 2000 nodes, and we scale from that point. Thus we assume ~60MB of
+	// capacity per node.
+	// TODO: Revisit this heuristic.
+	clusterSize := expectedRAMCapacityMB / 60
+
+	// We should specify the cache size for a given resource only if it
+	// is supposed to have a non-default value.
+	//
+	// TODO: Figure out which resources should have non-default values.
+	watchCacheSizes[Endpoints] = maxInt(10*clusterSize, 1000)
+	watchCacheSizes[Nodes] = maxInt(3*clusterSize, 1000)
+	watchCacheSizes[Pods] = maxInt(10*clusterSize, 1000)
 }

 func SetWatchCacheSizes(cacheSizes []string) {
@@ -111,5 +109,15 @@ func SetWatchCacheSizes(cacheSizes []string) {
 }

 func GetWatchCacheSizeByResource(resource Resource) int {
-	return watchCacheSizes[resource]
+	if value, found := watchCacheSizes[resource]; found {
+		return value
+	}
+	return defaultWatchCacheSize
+}
+
+func maxInt(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
 }
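
To make the heuristic concrete: at the documented ratio of 120GB for 2000 nodes, a target of 120000MB implies clusterSize = 120000 / 60 = 2000, so Endpoints and Pods get caches of 20000 entries and Nodes gets 6000, while small targets are clamped to the floor of 1000. A runnable worked example (RAM values chosen for illustration):

package main

import "fmt"

// maxInt mirrors the helper added in this commit.
func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}

func main() {
	for _, targetRAMMB := range []int{1000, 60000, 120000} {
		clusterSize := targetRAMMB / 60 // ~60MB of capacity assumed per node
		fmt.Printf("target=%dMB nodes~%d endpoints=%d nodesCache=%d pods=%d\n",
			targetRAMMB, clusterSize,
			maxInt(10*clusterSize, 1000), // endpoints
			maxInt(3*clusterSize, 1000),  // nodes
			maxInt(10*clusterSize, 1000)) // pods
	}
}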

View File

@@ -18,6 +18,7 @@ package etcd

 import (
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/registry/cachesize"
 	"k8s.io/kubernetes/pkg/registry/configmap"
 	"k8s.io/kubernetes/pkg/registry/generic"
 	"k8s.io/kubernetes/pkg/registry/generic/registry"
@@ -36,7 +37,13 @@ func NewREST(opts generic.RESTOptions) *REST {
 	newListFunc := func() runtime.Object { return &api.ConfigMapList{} }
 	storageInterface := opts.Decorator(
-		opts.Storage, 100, &api.ConfigMap{}, prefix, configmap.Strategy, newListFunc, storage.NoTriggerPublisher)
+		opts.Storage,
+		cachesize.GetWatchCacheSizeByResource(cachesize.ConfigMaps),
+		&api.ConfigMap{},
+		prefix,
+		configmap.Strategy,
+		newListFunc,
+		storage.NoTriggerPublisher)

 	store := &registry.Store{
 		NewFunc: func() runtime.Object {
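
Because InitializeWatchCacheSizes only sets Endpoints, Nodes, and Pods, the ConfigMaps lookup above falls back to defaultWatchCacheSize (100), preserving the previously hard-coded value unless an explicit override is given. A small sketch of the observable behavior (runnable only against the kubernetes tree of this commit):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/registry/cachesize"
)

func main() {
	// No explicit entry for ConfigMaps: falls back to defaultWatchCacheSize.
	fmt.Println(cachesize.GetWatchCacheSizeByResource(cachesize.ConfigMaps)) // 100
	// After RAM-based initialization, heavily-watched resources scale up.
	cachesize.InitializeWatchCacheSizes(120000)
	fmt.Println(cachesize.GetWatchCacheSizeByResource(cachesize.Pods)) // 20000
}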

View File

@@ -19,6 +19,7 @@ package etcd

 import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/apis/extensions"
+	"k8s.io/kubernetes/pkg/registry/cachesize"
 	"k8s.io/kubernetes/pkg/registry/generic"
 	"k8s.io/kubernetes/pkg/registry/generic/registry"
 	"k8s.io/kubernetes/pkg/registry/podsecuritypolicy"
@@ -38,7 +39,7 @@ func NewREST(opts generic.RESTOptions) *REST {
 	newListFunc := func() runtime.Object { return &extensions.PodSecurityPolicyList{} }
 	storageInterface := opts.Decorator(
 		opts.Storage,
-		100,
+		cachesize.GetWatchCacheSizeByResource(cachesize.PodSecurityPolicies),
 		&extensions.PodSecurityPolicy{},
 		prefix,
 		podsecuritypolicy.Strategy,

View File

@@ -19,6 +19,7 @@ package etcd

 import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/apis/extensions"
+	"k8s.io/kubernetes/pkg/registry/cachesize"
 	"k8s.io/kubernetes/pkg/registry/generic"
 	"k8s.io/kubernetes/pkg/registry/generic/registry"
 	"k8s.io/kubernetes/pkg/registry/storageclass"
@@ -37,7 +38,7 @@ func NewREST(opts generic.RESTOptions) *REST {
 	newListFunc := func() runtime.Object { return &extensions.StorageClassList{} }
 	storageInterface := opts.Decorator(
 		opts.Storage,
-		100,
+		cachesize.GetWatchCacheSizeByResource(cachesize.StorageClasses),
 		&extensions.StorageClass{},
 		prefix,
 		storageclass.Strategy,