From 68a66f454ddac31c99a72d5672f80edba0e182eb Mon Sep 17 00:00:00 2001
From: Matt Matejczyk
Date: Wed, 13 Nov 2019 16:19:32 +0100
Subject: [PATCH] Increase cache size for leases

The default size (100) is not enough for large clusters and results in
unnecessary restarts of the kube-controller-manager watcher for leases, see
http://perf-dash.k8s.io/#/?jobname=gce-5000Nodes&metriccategoryname=APIServer&metricname=LoadRequestCount&Resource=leases&Scope=cluster&Subresource=&Verb=LIST

This PR makes the cache size for leases match what we already have for nodes.
---
 pkg/registry/cachesize/cachesize.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pkg/registry/cachesize/cachesize.go b/pkg/registry/cachesize/cachesize.go
index 1b366c7416c..92bcc6a6306 100644
--- a/pkg/registry/cachesize/cachesize.go
+++ b/pkg/registry/cachesize/cachesize.go
@@ -41,6 +41,7 @@ func NewHeuristicWatchCacheSizes(expectedRAMCapacityMB int) map[schema.GroupReso
 	watchCacheSizes[schema.GroupResource{Resource: "services"}] = maxInt(5*clusterSize, 1000)
 	watchCacheSizes[schema.GroupResource{Resource: "events"}] = 0
 	watchCacheSizes[schema.GroupResource{Resource: "apiservices", Group: "apiregistration.k8s.io"}] = maxInt(5*clusterSize, 1000)
+	watchCacheSizes[schema.GroupResource{Resource: "leases", Group: "coordination.k8s.io"}] = maxInt(5*clusterSize, 1000)
 	return watchCacheSizes
 }
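
For reviewers unfamiliar with the heuristic, below is a minimal, self-contained sketch (not the upstream implementation) of how per-resource watch cache sizing of this kind works and what this patch changes. Only the map entries visible in the hunk above come from the patch itself; the local GroupResource and maxInt definitions, the clusterSize derivation (expectedRAMCapacityMB / 60), and the sample capacity value are illustrative assumptions.

package main

import "fmt"

// GroupResource stands in for schema.GroupResource so the sketch compiles
// on its own; the real type also has Group and Resource string fields.
type GroupResource struct {
	Group    string
	Resource string
}

func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}

// newHeuristicWatchCacheSizes scales per-resource cache sizes with an
// estimated cluster size. The divisor below (roughly 60MB of apiserver RAM
// per node) is an assumption for illustration, not taken from the patch.
func newHeuristicWatchCacheSizes(expectedRAMCapacityMB int) map[GroupResource]int {
	clusterSize := expectedRAMCapacityMB / 60

	sizes := make(map[GroupResource]int)
	sizes[GroupResource{Resource: "services"}] = maxInt(5*clusterSize, 1000)
	sizes[GroupResource{Resource: "events"}] = 0
	sizes[GroupResource{Resource: "apiservices", Group: "apiregistration.k8s.io"}] = maxInt(5*clusterSize, 1000)
	// The change in this patch: leases now scale with cluster size like
	// nodes, instead of falling back to the default cache size of 100.
	sizes[GroupResource{Resource: "leases", Group: "coordination.k8s.io"}] = maxInt(5*clusterSize, 1000)
	return sizes
}

func main() {
	// 300000MB is a made-up capacity corresponding to ~5000 nodes under the
	// assumed 60MB/node heuristic; 5*5000 > 1000, so leases get 25000 slots.
	sizes := newHeuristicWatchCacheSizes(300000)
	fmt.Println(sizes[GroupResource{Resource: "leases", Group: "coordination.k8s.io"}])
}

The practical effect is that a LIST+WATCH client such as the lease watcher in kube-controller-manager is far less likely to fall off the end of the apiserver's watch cache on large clusters, which is what caused the repeated relists visible in the perf-dash link above.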