Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-27 13:37:30 +00:00
Increase cache size for leases

The default watch cache size (100) is not enough for large clusters and results in unnecessary restarts of the kube-controller-manager watcher for leases; see http://perf-dash.k8s.io/#/?jobname=gce-5000Nodes&metriccategoryname=APIServer&metricname=LoadRequestCount&Resource=leases&Scope=cluster&Subresource=&Verb=LIST

This PR makes the leases cache size match what we already have for nodes.
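As a usage note beyond this commit: operators can also override the heuristic per resource with the kube-apiserver --watch-cache-sizes flag, which takes resource[.group]#size entries. The value below is only an illustration, not part of this PR:

kube-apiserver --watch-cache-sizes=leases.coordination.k8s.io#10000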
This commit is contained in:
parent be1658e08c
commit 68a66f454d
@@ -41,6 +41,7 @@ func NewHeuristicWatchCacheSizes(expectedRAMCapacityMB int) map[schema.GroupResource]int {
 	watchCacheSizes[schema.GroupResource{Resource: "services"}] = maxInt(5*clusterSize, 1000)
 	watchCacheSizes[schema.GroupResource{Resource: "events"}] = 0
 	watchCacheSizes[schema.GroupResource{Resource: "apiservices", Group: "apiregistration.k8s.io"}] = maxInt(5*clusterSize, 1000)
+	watchCacheSizes[schema.GroupResource{Resource: "leases", Group: "coordination.k8s.io"}] = maxInt(5*clusterSize, 1000)
 	return watchCacheSizes
 }
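For context, here is a minimal Go sketch of the heuristic this hunk extends, assuming (as in upstream pkg/registry/cachesize/cachesize.go) that clusterSize is derived from the API server's expected RAM capacity at roughly 60 MB per node; the constants and the maxInt helper are re-stated for illustration, not copied verbatim from this commit:

package cachesize

import "k8s.io/apimachinery/pkg/runtime/schema"

// NewHeuristicWatchCacheSizes derives per-resource watch cache sizes from
// the expected RAM capacity of the API server. clusterSize approximates the
// node count under the assumed ~60 MB-per-node capacity.
func NewHeuristicWatchCacheSizes(expectedRAMCapacityMB int) map[schema.GroupResource]int {
	clusterSize := expectedRAMCapacityMB / 60

	watchCacheSizes := make(map[schema.GroupResource]int)
	// High-churn resources scale with cluster size; after this commit,
	// leases get the same sizing rule as nodes.
	watchCacheSizes[schema.GroupResource{Resource: "nodes"}] = maxInt(5*clusterSize, 1000)
	watchCacheSizes[schema.GroupResource{Resource: "leases", Group: "coordination.k8s.io"}] = maxInt(5*clusterSize, 1000)
	return watchCacheSizes
}

// maxInt returns the larger of two ints.
func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}

With 5000 nodes, clusterSize-based sizing yields maxInt(5*5000, 1000) = 25000 cache entries for leases, versus the default of 100 that caused the watcher restarts described above.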