diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh
index 6905b62fc4b..e9db921c90b 100755
--- a/cluster/gce/config-default.sh
+++ b/cluster/gce/config-default.sh
@@ -21,10 +21,10 @@ source "${KUBE_ROOT}/cluster/gce/config-common.sh"
 
 # Specifying KUBE_GCE_API_ENDPOINT will override the default GCE Compute API endpoint (https://www.googleapis.com/compute/v1/).
 # This endpoint has to be pointing to v1 api. For example, https://www.googleapis.com/compute/staging_v1/
-GCE_API_ENDPOINT=${KUBE_GCE_API_ENDPOINT:-}
-GCLOUD=gcloud
+export GCE_API_ENDPOINT=${KUBE_GCE_API_ENDPOINT:-}
+export GCLOUD=gcloud
 ZONE=${KUBE_GCE_ZONE:-us-central1-b}
-REGION=${ZONE%-*}
+export REGION=${ZONE%-*}
 RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false}
 REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true}
 NODE_SIZE=${NODE_SIZE:-n1-standard-2}
@@ -32,7 +32,7 @@ NUM_NODES=${NUM_NODES:-3}
 NUM_WINDOWS_NODES=${NUM_WINDOWS_NODES:-0}
 MASTER_SIZE=${MASTER_SIZE:-n1-standard-$(get-master-size)}
 MASTER_MIN_CPU_ARCHITECTURE=${MASTER_MIN_CPU_ARCHITECTURE:-} # To allow choosing better architectures.
-MASTER_DISK_TYPE=pd-ssd
+export MASTER_DISK_TYPE=pd-ssd
 MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)}
 MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
 NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
@@ -53,7 +53,7 @@ NODE_LOCAL_SSDS_EXT=${NODE_LOCAL_SSDS_EXT:-}
 # Accelerators to be attached to each node. Format "type=,count="
 # More information on available GPUs here - https://cloud.google.com/compute/docs/gpus/
 NODE_ACCELERATORS=${NODE_ACCELERATORS:-""}
-REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
+export REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
 PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
 PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
 KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
@@ -84,34 +84,34 @@ fi
 # Also please update corresponding image for node e2e at:
 # https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
 GCI_VERSION=${KUBE_GCI_VERSION:-cos-81-12871-59-0}
-MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
-MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
-NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
-NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud}
-NODE_SERVICE_ACCOUNT=${KUBE_GCE_NODE_SERVICE_ACCOUNT:-default}
+export MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
+export MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
+export NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
+export NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud}
+export NODE_SERVICE_ACCOUNT=${KUBE_GCE_NODE_SERVICE_ACCOUNT:-default}
 # KUBELET_TEST_ARGS are extra arguments passed to kubelet.
-KUBELET_TEST_ARGS=${KUBE_KUBELET_EXTRA_ARGS:-}
+export KUBELET_TEST_ARGS=${KUBE_KUBELET_EXTRA_ARGS:-}
 CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
-CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-}
+export CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-}
 CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-}
 LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-}
 if [[ "${CONTAINER_RUNTIME}" == "containerd" ]]; then
-  CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-containerd}
-  LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-ctr -n=k8s.io images import}
+  export CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-containerd}
+  export LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-ctr -n=k8s.io images import}
 fi
 
 # Ability to inject custom versions (Ubuntu OS images ONLY)
 # if KUBE_UBUNTU_INSTALL_CONTAINERD_VERSION or KUBE_UBUNTU_INSTALL_RUNC_VERSION
 # is set to empty then we do not override the version(s) and just
 # use whatever is in the default installation of containerd package
-UBUNTU_INSTALL_CONTAINERD_VERSION=${KUBE_UBUNTU_INSTALL_CONTAINERD_VERSION:-}
-UBUNTU_INSTALL_RUNC_VERSION=${KUBE_UBUNTU_INSTALL_RUNC_VERSION:-}
+export UBUNTU_INSTALL_CONTAINERD_VERSION=${KUBE_UBUNTU_INSTALL_CONTAINERD_VERSION:-}
+export UBUNTU_INSTALL_RUNC_VERSION=${KUBE_UBUNTU_INSTALL_RUNC_VERSION:-}
 
 # MASTER_EXTRA_METADATA is the extra instance metadata on master instance separated by commas.
-MASTER_EXTRA_METADATA=${KUBE_MASTER_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
+export MASTER_EXTRA_METADATA=${KUBE_MASTER_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
 # MASTER_EXTRA_METADATA is the extra instance metadata on node instance separated by commas.
-NODE_EXTRA_METADATA=${KUBE_NODE_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
+export NODE_EXTRA_METADATA=${KUBE_NODE_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
 
 NETWORK=${KUBE_GCE_NETWORK:-default}
 
 # Enable network deletion by default (for kube-down), unless we're using 'default' network.
@@ -126,16 +126,17 @@ fi
 INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-kubernetes}"
 CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
 MASTER_NAME="${INSTANCE_PREFIX}-master"
-AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
-INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
-MASTER_TAG="${INSTANCE_PREFIX}-master"
-NODE_TAG="${INSTANCE_PREFIX}-minion"
+export AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
+export INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
+export MASTER_TAG="${INSTANCE_PREFIX}-master"
+export NODE_TAG="${INSTANCE_PREFIX}-minion"
 CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-$(get-cluster-ip-range)}"
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
 
 # NODE_IP_RANGE is used when ENABLE_IP_ALIASES=true or CREATE_CUSTOM_NETWORK=true.
 # It is the primary range in the subnet and is the range used for node instance IPs.
 NODE_IP_RANGE="$(get-node-ip-range)"
+export NODE_IP_RANGE
 
 # NOTE: Avoid giving nodes empty scopes, because kubelet needs a service account
 # in order to initialize properly.
@@ -147,27 +148,27 @@ EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}"
 
 VOLUME_PLUGIN_DIR="${VOLUME_PLUGIN_DIR:-/home/kubernetes/flexvolume}"
 SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}" # formerly PORTAL_NET
-ALLOCATE_NODE_CIDRS=true
+export ALLOCATE_NODE_CIDRS=true
 
 # When set to true, Docker Cache is enabled by default as part of the cluster bring up.
-ENABLE_DOCKER_REGISTRY_CACHE=true
+export ENABLE_DOCKER_REGISTRY_CACHE=true
 
 # Optional: Deploy a L7 loadbalancer controller to fulfill Ingress requests:
 # glbc - CE L7 Load Balancer Controller
-ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
+export ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
 
 # Optional: Enable Metrics Server. Metrics Server should be enable everywhere,
 # since it's a critical component, but in the first release we need a way to disable
 # this in case of stability issues.
 # TODO(piosz) remove this option once Metrics Server became a stable thing.
-ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"
+export ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"
 
 # Optional: Metadata agent to setup as part of the cluster bring up:
 # none - No metadata agent
 # stackdriver - Stackdriver metadata agent
 # Metadata agent is a daemon set that provides metadata of kubernetes objects
 # running on the same node for exporting metrics and logs.
-ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
+export ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
 
 # One special node out of NUM_NODES would be created of this type if specified.
 # Useful for scheduling heapster in large clusters with nodes of small size.
@@ -180,7 +181,7 @@ HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
 NUM_ADDITIONAL_NODES="${NUM_ADDITIONAL_NODES:-}"
 ADDITIONAL_MACHINE_TYPE="${ADDITIONAL_MACHINE_TYPE:-}"
 
-MASTER_NODE_LABELS="${KUBE_MASTER_NODE_LABELS:-}"
+export MASTER_NODE_LABELS="${KUBE_MASTER_NODE_LABELS:-}"
 # NON_MASTER_NODE_LABELS are labels will only be applied on non-master nodes.
 NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}"
 WINDOWS_NON_MASTER_NODE_LABELS="${WINDOWS_NON_MASTER_NODE_LABELS:-}"
@@ -202,9 +203,9 @@ fi
 
 # Optional: Enable netd.
 ENABLE_NETD="${KUBE_ENABLE_NETD:-false}"
-CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"
-CUSTOM_CALICO_NODE_DAEMONSET_YAML="${KUBE_CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
-CUSTOM_TYPHA_DEPLOYMENT_YAML="${KUBE_CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"
+export CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"
+export CUSTOM_CALICO_NODE_DAEMONSET_YAML="${KUBE_CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
+export CUSTOM_TYPHA_DEPLOYMENT_YAML="${KUBE_CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"
 
 # To avoid running netd on a node that is not configured appropriately,
 # label each Node so that the DaemonSet can run the Pods only on ready Nodes.
@@ -213,8 +214,8 @@ if [[ ${ENABLE_NETD:-} == "true" ]]; then
   NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}cloud.google.com/gke-netd-ready=true"
 fi
 
-ENABLE_NODELOCAL_DNS="${KUBE_ENABLE_NODELOCAL_DNS:-false}"
-LOCAL_DNS_IP="${KUBE_LOCAL_DNS_IP:-169.254.20.10}"
+export ENABLE_NODELOCAL_DNS="${KUBE_ENABLE_NODELOCAL_DNS:-false}"
+export LOCAL_DNS_IP="${KUBE_LOCAL_DNS_IP:-169.254.20.10}"
 
 # Enable metadata concealment by firewalling pod traffic to the metadata server
 # and run a proxy daemonset on nodes.
@@ -230,12 +231,12 @@ if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
 fi
 
 # Optional: Enable node logging.
-ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
-LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: elasticsearch, gcp
+export ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
+export LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: elasticsearch, gcp
 
 # Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
-ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-true}"
-ELASTICSEARCH_LOGGING_REPLICAS=1
+export ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-true}"
+export ELASTICSEARCH_LOGGING_REPLICAS=1
 
 # Optional: Don't require https for registries in our local RFC1918 network
 if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then
@@ -246,7 +247,7 @@ fi
 RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
 
 if [[ "${KUBE_FEATURE_GATES:-}" == "AllAlpha=true" ]]; then
-  RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-api/all=true}"
+  export RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-api/all=true}"
 fi
 
 # If feature gates includes AllAlpha or EndpointSlice, and EndpointSlice has not been disabled, add EndpointSlice controller to list of controllers to run.
@@ -257,7 +258,7 @@ fi
 
 # Optional: set feature gates
 FEATURE_GATES="${KUBE_FEATURE_GATES:-}"
-if [[ ! -z "${NODE_ACCELERATORS}" ]]; then
+if [[ -n "${NODE_ACCELERATORS}" ]]; then
   if [[ -z "${FEATURE_GATES:-}" ]]; then
     FEATURE_GATES="DevicePlugins=true"
   else
@@ -271,16 +272,16 @@ fi
 # Optional: Install cluster DNS.
 # Set CLUSTER_DNS_CORE_DNS to 'false' to install kube-dns instead of CoreDNS.
 CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-true}"
-ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
-DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}"
-DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
-DNS_MEMORY_LIMIT="${KUBE_DNS_MEMORY_LIMIT:-170Mi}"
+export ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
+export DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}"
+export DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
+export DNS_MEMORY_LIMIT="${KUBE_DNS_MEMORY_LIMIT:-170Mi}"
 
 # Optional: Enable DNS horizontal autoscaler
-ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
+export ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
 
 # Optional: Install Kubernetes UI
-ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
+export ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
 
 # Optional: Install node problem detector.
 # none - Not run node problem detector.
@@ -290,7 +291,7 @@ if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then
   # Enable standalone mode by default for gci.
   ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-standalone}"
 else
-  ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset}"
+  export ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset}"
 fi
 NODE_PROBLEM_DETECTOR_VERSION="${NODE_PROBLEM_DETECTOR_VERSION:-}"
 NODE_PROBLEM_DETECTOR_TAR_HASH="${NODE_PROBLEM_DETECTOR_TAR_HASH:-}"
@@ -304,10 +305,10 @@ CNI_STORAGE_URL_BASE="${CNI_STORAGE_URL_BASE:-https://storage.googleapis.com/k8s
 # Optional: Create autoscaler for cluster's nodes.
 ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}"
 if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
-  AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-}"
-  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-}"
-  AUTOSCALER_ENABLE_SCALE_DOWN="${KUBE_AUTOSCALER_ENABLE_SCALE_DOWN:-true}"
-  AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
+  export AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-}"
+  export AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-}"
+  export AUTOSCALER_ENABLE_SCALE_DOWN="${KUBE_AUTOSCALER_ENABLE_SCALE_DOWN:-true}"
+  export AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
 fi
 
 # Optional: Enable allocation of pod IPs using IP aliases.
@@ -319,20 +320,19 @@ fi
 # new subnetwork will be created for the cluster.
 ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false}
 NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-RangeAllocator}
-if [ ${ENABLE_IP_ALIASES} = true ]; then
+if [ "${ENABLE_IP_ALIASES}" = true ]; then
   # Number of Pods that can run on this node.
   MAX_PODS_PER_NODE=${MAX_PODS_PER_NODE:-110}
   # Size of ranges allocated to each node.
-  IP_ALIAS_SIZE="/$(get-alias-range-size ${MAX_PODS_PER_NODE})"
+  IP_ALIAS_SIZE="/$(get-alias-range-size "${MAX_PODS_PER_NODE}")"
+  export IP_ALIAS_SIZE
   IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
   # If we're using custom network, use the subnet we already create for it as the one for ip-alias.
   # Note that this means SUBNETWORK would override KUBE_GCE_IP_ALIAS_SUBNETWORK in case of custom network.
   if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then
-    IP_ALIAS_SUBNETWORK="${SUBNETWORK}"
+    export IP_ALIAS_SUBNETWORK="${SUBNETWORK}"
   fi
-  # Reserve the services IP space to avoid being allocated for other GCP resources.
-  SERVICE_CLUSTER_IP_SUBNETWORK=${KUBE_GCE_SERVICE_CLUSTER_IP_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-services}
-  NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-CloudAllocator}
+  export NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-CloudAllocator}
   SECONDARY_RANGE_NAME=${SECONDARY_RANGE_NAME:-}
   # Add to the provider custom variables.
   PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_IP_ALIASES"
@@ -340,7 +340,7 @@ if [ ${ENABLE_IP_ALIASES} = true ]; then
   PROVIDER_VARS="${PROVIDER_VARS:-} SECONDARY_RANGE_NAME"
 elif [[ -n "${MAX_PODS_PER_NODE:-}" ]]; then
   # Should not have MAX_PODS_PER_NODE set for route-based clusters.
-  echo -e "${color_red}Cannot set MAX_PODS_PER_NODE for route-based projects for ${PROJECT}." >&2
+  echo -e "${color_red:-}Cannot set MAX_PODS_PER_NODE for route-based projects for ${PROJECT}." >&2
   exit 1
 fi
 
@@ -387,12 +387,12 @@ NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, kubenet
 # Network Policy plugin specific settings.
 NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
-NON_MASQUERADE_CIDR="0.0.0.0/0"
+export NON_MASQUERADE_CIDR="0.0.0.0/0"
 
 # How should the kubelet configure hairpin mode?
 HAIRPIN_MODE="${HAIRPIN_MODE:-hairpin-veth}" # promiscuous-bridge, hairpin-veth, none
 
 # Optional: if set to true, kube-up will configure the cluster to run e2e tests.
-E2E_STORAGE_TEST_ENVIRONMENT="${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}"
+export E2E_STORAGE_TEST_ENVIRONMENT="${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}"
 
 # Evict pods whenever compute resource availability on the nodes gets below a threshold.
 EVICTION_HARD="${EVICTION_HARD:-memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%}"
@@ -433,7 +433,7 @@ fi
 
 # Fluentd requirements
 # YAML exists to trigger a configuration refresh when changes are made.
-FLUENTD_GCP_YAML_VERSION="v3.2.0"
+export FLUENTD_GCP_YAML_VERSION="v3.2.0"
 FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-1.6.17}"
 FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
 FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
@@ -476,7 +476,7 @@ ROTATE_CERTIFICATES="${ROTATE_CERTIFICATES:-}"
 # into kube-controller-manager via `--concurrent-service-syncs`
 CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}"
 
-SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
+export SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
 
 # Optional: Enable Node termination Handler for Preemptible and GPU VMs.
 # https://github.com/GoogleCloudPlatform/k8s-node-termination-handler
@@ -491,9 +491,9 @@ fi
 WINDOWS_NODE_TAINTS="${WINDOWS_NODE_TAINTS:-node.kubernetes.io/os=win1809:NoSchedule}"
 
 # Whether to set up a private GCE cluster, i.e. a cluster where nodes have only private IPs.
-GCE_PRIVATE_CLUSTER="${KUBE_GCE_PRIVATE_CLUSTER:-false}"
-GCE_PRIVATE_CLUSTER_PORTS_PER_VM="${KUBE_GCE_PRIVATE_CLUSTER_PORTS_PER_VM:-}"
+export GCE_PRIVATE_CLUSTER="${KUBE_GCE_PRIVATE_CLUSTER:-false}"
+export GCE_PRIVATE_CLUSTER_PORTS_PER_VM="${KUBE_GCE_PRIVATE_CLUSTER_PORTS_PER_VM:-}"
 
 # Optional: Create apiserver konnectivity server and agent.
-ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE="${KUBE_ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE:-false}"
-KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE="${KUBE_KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}"
+export ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE="${KUBE_ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE:-false}"
+export KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE="${KUBE_KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}"
diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures
index 4607116036b..bb56a7a4bc7 100644
--- a/hack/.shellcheck_failures
+++ b/hack/.shellcheck_failures
@@ -1,4 +1,3 @@
-./cluster/gce/config-default.sh
 ./cluster/gce/gci/configure-helper.sh
 ./cluster/gce/gci/configure.sh
 ./cluster/gce/gci/master-helper.sh
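
Reviewer note (not part of the patch): the bulk of this change adds `export` to variables that config-default.sh assigns but never reads itself. Shellcheck reports such write-only assignments as SC2034 ("variable appears unused"), and exporting them both silences the warning and documents that other scripts consume the values. A minimal, hypothetical illustration; FOO and BAR are made-up names, not variables from this file:

    FOO=bar            # shell-local; shellcheck flags SC2034 if nothing here reads it
    export BAR=baz     # placed in the environment; inherited by child processes
    bash -c 'echo "FOO=${FOO:-unset} BAR=${BAR:-unset}"'
    # prints: FOO=unset BAR=baz

The remaining hunks are routine shellcheck cleanups: `[[ ! -z ... ]]` becomes `[[ -n ... ]]`, unquoted expansions gain quotes, and `${color_red}` gains a `:-` default so it cannot trip `set -u`. With ./cluster/gce/config-default.sh dropped from hack/.shellcheck_failures, the repo's shellcheck gate (hack/verify-shellcheck.sh) now lints the file; running that script, or `shellcheck cluster/gce/config-default.sh` directly, should come back clean if the patch is complete.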