diff --git a/cluster/aws/config-default.sh b/cluster/aws/config-default.sh
index 016f6dcf0fa..4e0b08a65fd 100644
--- a/cluster/aws/config-default.sh
+++ b/cluster/aws/config-default.sh
@@ -74,7 +74,8 @@ MASTER_TAG="${INSTANCE_PREFIX}-master"
 NODE_TAG="${INSTANCE_PREFIX}-minion"
 NODE_SCOPES=""
 POLL_SLEEP_INTERVAL=3
-SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
+NON_MASQUERADE_CIDR="${NON_MASQUERADE_CIDR:-10.0.0.0/8}" # Traffic to IPs outside this range will use IP masquerade
+SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}" # formerly PORTAL_NET
 CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}"
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
 # If set to Elastic IP, master instance will be associated with this IP.
@@ -104,12 +105,12 @@ ELASTICSEARCH_LOGGING_REPLICAS=1
 
 # Optional: Don't require https for registries in our local RFC1918 network
 if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then
-  EXTRA_DOCKER_OPTS="--insecure-registry 10.0.0.0/8"
+  EXTRA_DOCKER_OPTS="--insecure-registry ${NON_MASQUERADE_CIDR}"
 fi
 
 # Optional: Install cluster DNS.
 ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
-DNS_SERVER_IP="10.0.0.10"
+DNS_SERVER_IP="${DNS_SERVER_IP:-10.0.0.10}"
 DNS_DOMAIN="cluster.local"
 DNS_REPLICAS=1
 
diff --git a/cluster/aws/config-test.sh b/cluster/aws/config-test.sh
index 2162d58f4f7..ec67197b21b 100755
--- a/cluster/aws/config-test.sh
+++ b/cluster/aws/config-test.sh
@@ -72,7 +72,8 @@ MASTER_TAG="${INSTANCE_PREFIX}-master"
 NODE_TAG="${INSTANCE_PREFIX}-minion"
 NODE_SCOPES=""
 POLL_SLEEP_INTERVAL=3
-SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
+NON_MASQUERADE_CIDR="${NON_MASQUERADE_CIDR:-10.0.0.0/8}" # Traffic to IPs outside this range will use IP masquerade
+SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}" # formerly PORTAL_NET
 CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}"
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
 # If set to Elastic IP, master instance will be associated with this IP.
@@ -100,12 +101,12 @@ ELASTICSEARCH_LOGGING_REPLICAS=1
 
 # Optional: Don't require https for registries in our local RFC1918 network
 if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then
-  EXTRA_DOCKER_OPTS="--insecure-registry 10.0.0.0/8"
+  EXTRA_DOCKER_OPTS="--insecure-registry ${NON_MASQUERADE_CIDR}"
 fi
 
 # Optional: Install cluster DNS.
 ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
-DNS_SERVER_IP="10.0.0.10"
+DNS_SERVER_IP="${DNS_SERVER_IP:-10.0.0.10}"
 DNS_DOMAIN="cluster.local"
 DNS_REPLICAS=1
 
diff --git a/cluster/aws/options.md b/cluster/aws/options.md
index c719c4b415d..358c9e30caf 100644
--- a/cluster/aws/options.md
+++ b/cluster/aws/options.md
@@ -88,4 +88,47 @@ Defaults to vivid (Ubuntu Vivid Vervet), which has a modern kernel and does not
 Other options may require reboots, updates or configuration, and should be used
 only if you have a compelling requirement to do so.
 
+**NON_MASQUERADE_CIDR**
+
+The 'internal' IP range which Kubernetes will use, and which will therefore
+not use IP masquerade. Kubernetes runs an internal network for traffic
+between pods (and between pods and services), which by default uses the
+`10.0.0.0/8` range. However, this sometimes overlaps with a range that you
+may want to use; in particular, the range cannot be used with EC2
+ClassicLink. You may also want to run Kubernetes in an existing VPC where
+you have chosen a CIDR in the `10.0.0.0/8` range.
+
+Setting this flag allows you to change this internal network CIDR. Note that
+you must set the other IP ranges consistently within the CIDR that you choose.
+
+For example, you might choose `172.16.0.0/14`, and you could then configure
+the cluster like this:
+
+```
+export NON_MASQUERADE_CIDR="172.16.0.0/14"
+export SERVICE_CLUSTER_IP_RANGE="172.16.0.0/16"
+export DNS_SERVER_IP="172.16.0.10"
+export MASTER_IP_RANGE="172.17.0.0/24"
+export CLUSTER_IP_RANGE="172.18.0.0/16"
+```
+
+When choosing a CIDR in the `172.16.0.0/12` reserved range, you should be
+careful not to choose a CIDR that overlaps your VPC CIDR (the kube-up script
+sets the VPC CIDR to `172.20.0.0/16` by default, so you should not overlap
+that). If you want to allow inter-VPC traffic, you should be careful to avoid
+the CIDRs of your other VPCs as well.
+
+There is also the `100.64.0.0/10` address block, which is reserved for
+"Carrier-Grade NAT" and which some users have reported success using. While
+we haven't seen any problems or conflicts with any AWS networks, we can't
+guarantee it. If you are comfortable using `100.64.0.0/10`, you might use:
+
+```
+export NON_MASQUERADE_CIDR="100.64.0.0/10"
+export SERVICE_CLUSTER_IP_RANGE="100.64.0.0/16"
+export DNS_SERVER_IP="100.64.0.10"
+export MASTER_IP_RANGE="100.65.0.0/24"
+export CLUSTER_IP_RANGE="100.66.0.0/16"
+```
+
 [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/aws/options.md?pixel)]()
diff --git a/cluster/aws/templates/salt-master.sh b/cluster/aws/templates/salt-master.sh
index 60eb94cdf3e..8d975e52c30 100755
--- a/cluster/aws/templates/salt-master.sh
+++ b/cluster/aws/templates/salt-master.sh
@@ -44,6 +44,7 @@ env_to_salt docker_root
 env_to_salt kubelet_root
 env_to_salt master_extra_sans
 env_to_salt runtime_config
+env_to_salt non_masquerade_cidr
 
 # Auto accept all keys from minions that try to join
 mkdir -p /etc/salt/master.d
diff --git a/cluster/aws/templates/salt-minion.sh b/cluster/aws/templates/salt-minion.sh
index e5e25722c1b..6b4f2b39535 100755
--- a/cluster/aws/templates/salt-minion.sh
+++ b/cluster/aws/templates/salt-minion.sh
@@ -37,29 +37,24 @@ if [[ -z "${HOSTNAME_OVERRIDE}" ]]; then
   HOSTNAME_OVERRIDE=`curl --silent http://169.254.169.254/2007-01-19/meta-data/local-hostname`
 fi
 
-if [[ -n "${HOSTNAME_OVERRIDE}" ]]; then
-  cat <<EOF >>/etc/salt/minion.d/grains.conf
-  hostname_override: "${HOSTNAME_OVERRIDE}"
+# Helper that sets a salt grain in grains.conf, if the upper-cased key is a non-empty env var
+function env_to_salt {
+  local key=$1
+  local env_key=`echo "${key}" | tr '[:lower:]' '[:upper:]'`
+  local value=${!env_key}
+  if [[ -n "${value}" ]]; then
+    # Note this is yaml, so indentation matters
+    cat <<EOF >>/etc/salt/minion.d/grains.conf
+  ${key}: '$(echo "${value}" | sed -e "s/'/''/g")'
 EOF
-fi
+  fi
+}
 
-if [[ -n "${DOCKER_OPTS}" ]]; then
-  cat <<EOF >>/etc/salt/minion.d/grains.conf
-  docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
-EOF
-fi
-
-if [[ -n "${DOCKER_ROOT}" ]]; then
-  cat <<EOF >>/etc/salt/minion.d/grains.conf
-  docker_root: '$(echo "$DOCKER_ROOT" | sed -e "s/'/''/g")'
-EOF
-fi
-
-if [[ -n "${KUBELET_ROOT}" ]]; then
-  cat <<EOF >>/etc/salt/minion.d/grains.conf
-  kubelet_root: '$(echo "$KUBELET_ROOT" | sed -e "s/'/''/g")'
-EOF
-fi
+env_to_salt hostname_override
+env_to_salt docker_opts
+env_to_salt docker_root
+env_to_salt kubelet_root
+env_to_salt non_masquerade_cidr
 
 install-salt
 
diff --git a/cluster/aws/trusty/common.sh b/cluster/aws/trusty/common.sh
index bfc98b1882b..4d138751e6b 100644
--- a/cluster/aws/trusty/common.sh
+++ b/cluster/aws/trusty/common.sh
@@ -29,6 +29,7 @@ function generate-minion-user-data {
   echo "#! /bin/bash"
   echo "SALT_MASTER='${MASTER_INTERNAL_IP}'"
   echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'"
+  echo "readonly NON_MASQUERADE_CIDR='${NON_MASQUERADE_CIDR:-}'"
   echo "readonly DOCKER_STORAGE='${DOCKER_STORAGE:-}'"
   grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/common.sh"
   grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/format-disks.sh"
diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh
index 5fb27ab59e6..a75f875be11 100755
--- a/cluster/aws/util.sh
+++ b/cluster/aws/util.sh
@@ -855,6 +855,7 @@ function start-master() {
     echo "readonly SALT_MASTER='${MASTER_INTERNAL_IP}'"
     echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
     echo "readonly NODE_INSTANCE_PREFIX='${NODE_INSTANCE_PREFIX}'"
+    echo "readonly NON_MASQUERADE_CIDR='${NON_MASQUERADE_CIDR:-}'"
     echo "readonly CLUSTER_IP_RANGE='${CLUSTER_IP_RANGE}'"
     echo "readonly ALLOCATE_NODE_CIDRS='${ALLOCATE_NODE_CIDRS}'"
     echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
diff --git a/cluster/saltbase/salt/kubelet/default b/cluster/saltbase/salt/kubelet/default
index 4f8ab75e2fc..d04c4832717 100644
--- a/cluster/saltbase/salt/kubelet/default
+++ b/cluster/saltbase/salt/kubelet/default
@@ -85,6 +85,11 @@
   {% set configure_cbr0 = "--configure-cbr0=" + pillar['allocate_node_cidrs'] -%}
 {% endif -%}
 
+{% set non_masquerade_cidr = "" -%}
+{% if grains.non_masquerade_cidr is defined -%}
+  {% set non_masquerade_cidr = "--non-masquerade-cidr=" + grains.non_masquerade_cidr -%}
+{% endif -%}
+
 # The master kubelet cannot wait for the flannel daemon because it is responsible
 # for starting up the flannel server in a static pod. So even though the flannel
 # daemon runs on the master, it doesn't hold up cluster bootstrap. All the pods
@@ -137,4 +142,4 @@
 {% endif -%}
 
 # test_args has to be kept at the end, so they'll overwrite any prior configuration
-DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{experimental_flannel_overlay}} {{test_args}}"
+DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{experimental_flannel_overlay}} {{test_args}}"
diff --git a/hack/verify-flags/known-flags.txt b/hack/verify-flags/known-flags.txt
index 9c307172e38..988b01869b5 100644
--- a/hack/verify-flags/known-flags.txt
+++ b/hack/verify-flags/known-flags.txt
@@ -142,7 +142,6 @@ input-dirs
 insecure-bind-address
 insecure-port
 insecure-skip-tls-verify
-internal-cidr
 iptables-sync-period
 ir-data-source
 ir-dbname
@@ -234,6 +233,7 @@ node-name
 node-startup-grace-period
 node-status-update-frequency
 node-sync-period
+non-masquerade-cidr
 num-nodes
 oidc-ca-file
 oidc-client-id
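
For context on the kubelet flag wired through above: `--non-masquerade-cidr` tells the kubelet which destinations count as cluster-internal, so that only traffic leaving that range is SNATed. The following is an illustrative sketch of that behaviour, not the kubelet's exact rule (which may include additional matches):

```
# Illustrative sketch only -- not part of this diff. With the flag set, the
# node's masquerade rule behaves roughly like this: destinations inside
# NON_MASQUERADE_CIDR are left untouched, and everything else leaving the
# node is masqueraded.
NON_MASQUERADE_CIDR="100.64.0.0/10"
iptables -t nat -A POSTROUTING ! -d "${NON_MASQUERADE_CIDR}" -j MASQUERADE
```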
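
Similarly, a hypothetical run of the refactored `env_to_salt` helper from salt-minion.sh, showing the grain it appends (the value is chosen purely for illustration):

```
# Hypothetical example (not part of this diff): with the variable exported,
export NON_MASQUERADE_CIDR="100.64.0.0/10"
# this call:
env_to_salt non_masquerade_cidr
# appends the following two-space-indented YAML grain to
# /etc/salt/minion.d/grains.conf (single quotes in values are doubled):
#   non_masquerade_cidr: '100.64.0.0/10'
```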