From 679afea360c56b279ad0349f3c7628db58fe791d Mon Sep 17 00:00:00 2001
From: Wojciech Tyczynski
Date: Wed, 10 Aug 2016 09:59:55 +0200
Subject: [PATCH] etcd3 support

---
 cluster/common.sh | 1 +
 cluster/gce/config-default.sh | 3 +++
 cluster/gce/config-test.sh | 3 +++
 cluster/gce/configure-vm.sh | 1 +
 cluster/gce/gci/configure-helper.sh | 3 +++
 cluster/gce/trusty/configure-helper.sh | 3 +++
 .../saltbase/salt/kube-apiserver/kube-apiserver.manifest | 6 +++++-
 hack/verify-flags/exceptions.txt | 2 +-
 8 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/cluster/common.sh b/cluster/common.sh
index b74141c2410..561ef82da06 100755
--- a/cluster/common.sh
+++ b/cluster/common.sh
@@ -644,6 +644,7 @@ ENABLE_MANIFEST_URL: $(yaml-quote ${ENABLE_MANIFEST_URL:-false})
 MANIFEST_URL: $(yaml-quote ${MANIFEST_URL:-})
 MANIFEST_URL_HEADER: $(yaml-quote ${MANIFEST_URL_HEADER:-})
 NUM_NODES: $(yaml-quote ${NUM_NODES})
+STORAGE_BACKEND: $(yaml-quote ${STORAGE_BACKEND:-})
 ENABLE_GARBAGE_COLLECTOR: $(yaml-quote ${ENABLE_GARBAGE_COLLECTOR:-false})
 EOF
 if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh
index f47375b536c..093515cc167 100755
--- a/cluster/gce/config-default.sh
+++ b/cluster/gce/config-default.sh
@@ -135,6 +135,9 @@ ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolume
 # Optional: if set to true kube-up will automatically check for existing resources and clean them up.
 KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
 
+# Storage backend. 'etcd2' supported, 'etcd3' experimental.
+STORAGE_BACKEND=${STORAGE_BACKEND:-etcd2}
+
 # Networking plugin specific settings.
 NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, flannel, kubenet
 OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh
index f2f8c327fdc..6ef55fc987d 100755
--- a/cluster/gce/config-test.sh
+++ b/cluster/gce/config-test.sh
@@ -160,6 +160,9 @@ KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
 # is only supported in trusty or GCI.
 TEST_CLUSTER="${TEST_CLUSTER:-true}"
 
+# Storage backend. 'etcd2' supported, 'etcd3' experimental.
+STORAGE_BACKEND=${STORAGE_BACKEND:-etcd2}
+
 # OpenContrail networking plugin specific settings
 NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, flannel, kubenet
 OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh
index ba4ff195000..d7192649e6f 100755
--- a/cluster/gce/configure-vm.sh
+++ b/cluster/gce/configure-vm.sh
@@ -442,6 +442,7 @@ dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
 dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
 dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
 admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
+storage_backend: '$(echo "$STORAGE_BACKEND" | sed -e "s/'/''/g")'
 network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")'
 prepull_e2e_images: '$(echo "$PREPULL_E2E_IMAGES" | sed -e "s/'/''/g")'
 hairpin_mode: '$(echo "$HAIRPIN_MODE" | sed -e "s/'/''/g")'
diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh
index 2652eaf132a..891a34e2e1b 100644
--- a/cluster/gce/gci/configure-helper.sh
+++ b/cluster/gce/gci/configure-helper.sh
@@ -632,6 +632,9 @@ function start-kube-apiserver {
   params+=" --tls-cert-file=/etc/srv/kubernetes/server.cert"
   params+=" --tls-private-key-file=/etc/srv/kubernetes/server.key"
   params+=" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv"
+  if [[ -n "${STORAGE_BACKEND:-}" ]]; then
+    params+=" --storage-backend=${STORAGE_BACKEND}"
+  fi
   if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then
     params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}"
   fi
diff --git a/cluster/gce/trusty/configure-helper.sh b/cluster/gce/trusty/configure-helper.sh
index 67cf49a8b6a..62a59018097 100644
--- a/cluster/gce/trusty/configure-helper.sh
+++ b/cluster/gce/trusty/configure-helper.sh
@@ -519,6 +519,9 @@ start_kube_apiserver() {
   params="${params} --authorization-policy-file=/etc/srv/kubernetes/abac-authz-policy.jsonl"
   params="${params} --etcd-servers-overrides=/events#http://127.0.0.1:4002"
 
+  if [[ -n "${STORAGE_BACKEND:-}" ]]; then
+    params="${params} --storage-backend=${STORAGE_BACKEND}"
+  fi
   if [ -n "${NUM_NODES:-}" ]; then
     # Set amount of memory available for apiserver based on number of nodes.
     # TODO: Once we start setting proper requests and limits for apiserver
diff --git a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest
index a9ded4a88a7..6ff98fce778 100644
--- a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest
+++ b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest
@@ -54,6 +54,10 @@
   {% set bind_address = "--bind-address=" + grains.publicAddressOverride -%}
 {% endif -%}
 
+{% set storage_backend = "" -%}
+{% if pillar['storage_backend'] is defined -%}
+  {% set storage_backend = "--storage-backend=" + pillar['storage_backend'] -%}
+{% endif -%}
 {% set etcd_servers = "--etcd-servers=http://127.0.0.1:4001" -%}
 {% set etcd_servers_overrides = "--etcd-servers-overrides=/events#http://127.0.0.1:4002" -%}
 
@@ -136,7 +140,7 @@
 {% set enable_garbage_collector = "--enable-garbage-collector=" + pillar['enable_garbage_collector'] -%}
 {% endif -%}
 
-{% set params = address + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
+{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
 {% set params = params + " " + cert_file + " " + key_file + " --secure-port=" + secure_port + token_auth_file + " " + bind_address + " " + log_level + " " + advertise_address + " " + proxy_ssh_options + authz_mode + abac_policy_file + webhook_authentication_config + webhook_authorization_config -%}
 
 # test_args has to be kept at the end, so they'll overwrite any prior configuration
diff --git a/hack/verify-flags/exceptions.txt b/hack/verify-flags/exceptions.txt
index 5c3bf6cb782..ab1a6e1de0c 100644
--- a/hack/verify-flags/exceptions.txt
+++ b/hack/verify-flags/exceptions.txt
@@ -39,7 +39,7 @@ cluster/rackspace/util.sh: local node_ip=$(nova show --minimal ${NODE_NAMES[$
 cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest:{% set params = pillar['autoscaler_mig_config'] + " " + cloud_config -%}
 cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
 cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set enable_garbage_collector = pillar['enable_garbage_collector'] -%}
-cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
+cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
 cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set enable_garbage_collector = pillar['enable_garbage_collector'] -%}
 cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + enable_garbage_collector + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
 cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers + ":6443" -%}
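
Usage note (a sketch of the surrounding GCE bring-up flow, not part of the patch
itself): kube-up.sh sources cluster/gce/config-default.sh (or config-test.sh for
test clusters), so the backend can be selected per cluster from the environment.
The configure helpers above only append --storage-backend when STORAGE_BACKEND
is non-empty, so paths that never set the variable keep the apiserver's built-in
default; 'etcd3' remains experimental here.

  # Default GCE bring-up: config-default.sh leaves STORAGE_BACKEND at 'etcd2'.
  ./cluster/kube-up.sh

  # Opt in to the experimental etcd3 backend; the master helpers forward this
  # as "kube-apiserver ... --storage-backend=etcd3".
  STORAGE_BACKEND=etcd3 ./cluster/kube-up.sh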