Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-24 12:15:52 +00:00
Increase max-requests-inflight in large clusters
This commit is contained in:
parent d8fa6a99a2
commit 75d7d1ad37
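This commit raises the apiserver's --max-requests-inflight limit to 1500 on clusters of 1000 or more nodes (the flag's upstream default at the time was 400), gating the bump on NUM_NODES in the GCE startup scripts and on pillar['num_nodes'] in the Salt template, and it updates the generated flags list to match.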
@@ -737,6 +737,10 @@ function start-kube-apiserver {
     params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}"
   fi
   if [[ -n "${NUM_NODES:-}" ]]; then
+    # If the cluster is large, increase max-requests-inflight limit in apiserver.
+    if [[ "${NUM_NODES}" -ge 1000 ]]; then
+      params+=" --max-requests-inflight=1500"
+    fi
     # Set amount of memory available for apiserver based on number of nodes.
     # TODO: Once we start setting proper requests and limits for apiserver
     # we should reuse the same logic here instead of current heuristic.
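The gate above is plain bash string accumulation. A minimal standalone sketch of the behavior (the NUM_NODES value and the echo are illustrative, not part of the commit):

#!/usr/bin/env bash
# Hypothetical standalone rendering of the added gate.
NUM_NODES=1200   # assumed example; the startup scripts provide the real value
params=""
if [[ -n "${NUM_NODES:-}" ]]; then
  # If the cluster is large, increase max-requests-inflight limit in apiserver.
  if [[ "${NUM_NODES}" -ge 1000 ]]; then
    params+=" --max-requests-inflight=1500"
  fi
fi
echo "kube-apiserver${params}"   # -> kube-apiserver --max-requests-inflight=1500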
@@ -566,6 +566,10 @@ start_kube_apiserver() {
     params="${params} --storage-backend=${STORAGE_BACKEND}"
   fi
   if [ -n "${NUM_NODES:-}" ]; then
+    # If the cluster is large, increase max-requests-inflight limit in apiserver.
+    if [[ "${NUM_NODES}" -ge 1000 ]]; then
+      params+=" --max-requests-inflight=1500"
+    fi
     # Set amount of memory available for apiserver based on number of nodes.
     # TODO: Once we start setting proper requests and limits for apiserver
     # we should reuse the same logic here instead of current heuristic.
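Note that the added lines use the bashisms [[ ]] and +=, while this function otherwise builds params with the POSIX-style params="${params} ...". That is harmless under bash; a strictly POSIX-sh sketch of the same block, in case the helper ever runs under plain /bin/sh (an assumption, not something the commit requires), would be:

#!/bin/sh
# Hypothetical POSIX rendering of the added block, using the script's own
# params="${params} ..." idiom instead of bash's +=.
NUM_NODES=1500   # assumed example value
params=""
if [ -n "${NUM_NODES:-}" ]; then
  if [ "${NUM_NODES}" -ge 1000 ]; then
    params="${params} --max-requests-inflight=1500"
  fi
fi
echo "params:${params}"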
@@ -61,8 +61,13 @@
 {% set etcd_servers = "--etcd-servers=http://127.0.0.1:2379" -%}
 {% set etcd_servers_overrides = "--etcd-servers-overrides=/events#http://127.0.0.1:4002" -%}
 
+{% set max_requests_inflight = "" -%}
 {% set target_ram_mb = "" -%}
 {% if pillar['num_nodes'] is defined -%}
+  # If the cluster is large, increase max-requests-inflight limit in apiserver.
+  {% if pillar['num_nodes']|int >= 1000 -%}
+    {% set max_requests_inflight = "--max-requests-inflight=1500" -%}
+  {% endif -%}
   # Set amount of memory available for apiserver based on number of nodes.
   # TODO: Once we start setting proper requests and limits for apiserver
   # we should reuse the same logic here instead of current heuristic.
@@ -158,7 +163,7 @@
 {% set enable_garbage_collector = "--enable-garbage-collector=" + pillar['enable_garbage_collector'] -%}
 {% endif -%}
 
-{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + feature_gates + " " + admission_control + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
+{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + feature_gates + " " + admission_control + " " + max_requests_inflight + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
 {% set params = params + " " + cert_file + " " + key_file + " --secure-port=" + secure_port + token_auth_file + " " + bind_address + " " + log_level + " " + advertise_address + " " + proxy_ssh_options + authz_mode + abac_policy_file + webhook_authentication_config + webhook_authorization_config + image_review_config -%}
 
 # test_args has to be kept at the end, so they'll overwrite any prior configuration
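Since max_requests_inflight defaults to an empty string, the rendered manifest only gains the flag when pillar['num_nodes'] is at least 1000. A quick spot-check on a rendered master (the manifest path is an assumption; static-pod locations vary by distro):

# Assumed path for the rendered static-pod manifest.
grep -- '--max-requests-inflight' /etc/kubernetes/manifests/kube-apiserver.manifest \
  && echo 'flag present (large cluster)' \
  || echo 'flag absent (num_nodes < 1000, or template not re-rendered)'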
@@ -39,10 +39,10 @@ cluster/photon-controller/util.sh: node_name=${1}
 cluster/rackspace/util.sh: local node_ip=$(nova show --minimal ${NODE_NAMES[$i]} \
 cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest:{% set params = pillar['autoscaler_mig_config'] + " " + cloud_config -%}
 cluster/saltbase/salt/etcd/etcd.manifest: "value": "{{ storage_backend }}"
 cluster/saltbase/salt/etcd/etcd.manifest:{% set storage_backend = pillar.get('storage_backend', 'etcd2') -%}
 cluster/saltbase/salt/etcd/etcd.manifest:{% if pillar.get('storage_backend', 'etcd2') == 'etcd3' -%}
 cluster/saltbase/salt/etcd/etcd.manifest:{% set storage_backend = pillar.get('storage_backend', 'etcd2') -%}
 cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
-cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + feature_gates + " " + admission_control + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
+cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + feature_gates + " " + admission_control + " " + max_requests_inflight + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
 cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% if pillar.get('enable_hostpath_provisioner', '').lower() == 'true' -%}
 cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + enable_garbage_collector + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
 cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = params + " " + feature_gates -%}
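The hunk above edits a generated list of "path: line" entries; the filename is not shown in this view, but the format matches the verify-flags exceptions list that Kubernetes trees of this era kept under hack/ (an assumption). Either way, the updated entry must mirror the manifest line edited earlier, which can be spot-checked from the repo root:

# Confirms the manifest line the updated entry points at actually exists.
grep -F 'max_requests_inflight' cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest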