diff --git a/cluster/saltbase/salt/etcd/init.sls b/cluster/saltbase/salt/etcd/init.sls
index 3aa10e57ac4..edd9f652f4b 100644
--- a/cluster/saltbase/salt/etcd/init.sls
+++ b/cluster/saltbase/salt/etcd/init.sls
@@ -67,8 +67,6 @@ touch /var/log/etcd-events.log:
         server_port: 2380
         cpulimit: '"200m"'
 
-# Switch on second etcd instance if there are more than 50 nodes.
-{% if pillar['num_nodes'] is defined and pillar['num_nodes'] > 50 -%}
 /etc/kubernetes/manifests/etcd-events.manifest:
   file.managed:
     - source: salt://etcd/etcd.manifest
@@ -83,4 +81,3 @@ touch /var/log/etcd-events.log:
         port: 4002
         server_port: 2381
         cpulimit: '"100m"'
-{% endif -%}
diff --git a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest
index 0b385e29b3e..2ffd6b271a0 100644
--- a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest
+++ b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest
@@ -41,11 +41,7 @@
 {% endif -%}
 
 {% set etcd_servers = "--etcd-servers=http://127.0.0.1:4001" -%}
-{% set etcd_servers_overrides = "" -%}
-# If there are more than 50 nodes, there is a dedicated etcd instance for events.
-{% if pillar['num_nodes'] is defined and pillar['num_nodes'] > 50 -%}
-  {% set etcd_servers_overrides = "--etcd-servers-overrides=/events#http://127.0.0.1:4002" -%}
-{% endif -%}
+{% set etcd_servers_overrides = "--etcd-servers-overrides=/events#http://127.0.0.1:4002" -%}
 
 {% set service_cluster_ip_range = "" -%}
 {% if pillar['service_cluster_ip_range'] is defined -%}
diff --git a/docs/admin/cluster-large.md b/docs/admin/cluster-large.md
index af0b5e97b85..589fabca037 100644
--- a/docs/admin/cluster-large.md
+++ b/docs/admin/cluster-large.md
@@ -76,8 +76,6 @@ When creating a cluster, existing salt scripts:
 
 * start and configure additional etcd instance
 * configure api-server to use it for storing events
-However, this is done automatically only for clusters having more than 50 nodes.
-
 ### Addon Resources
 
 To prevent memory leaks or other resource issues in [cluster addons](../../cluster/addons/) from consuming all the resources available on a node, Kubernetes sets resource limits on addon containers to limit the CPU and Memory resources they can consume (See PR [#10653](http://pr.k8s.io/10653/files) and [#10778](http://pr.k8s.io/10778/files)).