diff --git a/cluster/aws/config-default.sh b/cluster/aws/config-default.sh index 4910ce92599..cdec7ea8aaf 100644 --- a/cluster/aws/config-default.sh +++ b/cluster/aws/config-default.sh @@ -41,7 +41,7 @@ MINION_TAG="${INSTANCE_PREFIX}-minion" MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) MINION_SCOPES="" POLL_SLEEP_INTERVAL=3 -PORTAL_NET="10.0.0.0/16" +SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" # If set to Elastic IP, master instance will be associated with this IP. # If set to auto, a new Elastic IP will be aquired diff --git a/cluster/aws/config-test.sh b/cluster/aws/config-test.sh index 584e0b29236..5b5506aed05 100755 --- a/cluster/aws/config-test.sh +++ b/cluster/aws/config-test.sh @@ -37,7 +37,7 @@ MINION_TAG="${INSTANCE_PREFIX}-minion" MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) MINION_SCOPES="" POLL_SLEEP_INTERVAL=3 -PORTAL_NET="10.0.0.0/16" +SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" # If set to Elastic IP, master instance will be associated with this IP. 
# If set to auto, a new Elastic IP will be aquired diff --git a/cluster/aws/templates/create-dynamic-salt-files.sh b/cluster/aws/templates/create-dynamic-salt-files.sh index 2e767e329ac..e03d23665c2 100644 --- a/cluster/aws/templates/create-dynamic-salt-files.sh +++ b/cluster/aws/templates/create-dynamic-salt-files.sh @@ -22,7 +22,7 @@ mkdir -p /srv/salt-overlay/pillar cat </srv/salt-overlay/pillar/cluster-params.sls instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")' node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")' -portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")' +service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")' enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")' enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")' diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh index d0e60d2e4c0..96a00c24f14 100644 --- a/cluster/aws/util.sh +++ b/cluster/aws/util.sh @@ -507,7 +507,7 @@ function kube-up { echo "readonly ZONE='${ZONE}'" echo "readonly KUBE_USER='${KUBE_USER}'" echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD}'" - echo "readonly PORTAL_NET='${PORTAL_NET}'" + echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'" echo "readonly ENABLE_CLUSTER_MONITORING='${ENABLE_CLUSTER_MONITORING:-false}'" echo "readonly ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'" echo "readonly ENABLE_CLUSTER_LOGGING='${ENABLE_CLUSTER_LOGGING:-false}'" diff --git a/cluster/azure/config-default.sh b/cluster/azure/config-default.sh index 8b8bbb6dfc9..725f79705e4 100644 --- a/cluster/azure/config-default.sh +++ b/cluster/azure/config-default.sh @@ -35,7 +35,7 @@ MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) MINION_SCOPES="" -PORTAL_NET="10.250.0.0/16" 
+SERVICE_CLUSTER_IP_RANGE="10.250.0.0/16" # formerly PORTAL_NET # Optional: Install node logging ENABLE_NODE_LOGGING=false diff --git a/cluster/azure/templates/create-dynamic-salt-files.sh b/cluster/azure/templates/create-dynamic-salt-files.sh index f8e5a42726e..d946fa1957b 100644 --- a/cluster/azure/templates/create-dynamic-salt-files.sh +++ b/cluster/azure/templates/create-dynamic-salt-files.sh @@ -22,7 +22,7 @@ mkdir -p /srv/salt-overlay/pillar cat </srv/salt-overlay/pillar/cluster-params.sls instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")' node_instance_prefix: $NODE_INSTANCE_PREFIX -portal_net: $PORTAL_NET +service_cluster_ip_range: $SERVICE_CLUSTER_IP_RANGE admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")' EOF diff --git a/cluster/azure/util.sh b/cluster/azure/util.sh index 399c13ca59c..df1a1083fa2 100644 --- a/cluster/azure/util.sh +++ b/cluster/azure/util.sh @@ -322,7 +322,7 @@ function kube-up { echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'" echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'" echo "readonly MASTER_HTPASSWD='${htpasswd}'" - echo "readonly PORTAL_NET='${PORTAL_NET}'" + echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'" echo "readonly ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'" grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh" grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/create-dynamic-salt-files.sh" diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index 0b2d68e1a40..5be9e9c8a9a 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -44,7 +44,7 @@ CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}" MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/monitoring" "https://www.googleapis.com/auth/logging.write") # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default. 
POLL_SLEEP_INTERVAL=3 -PORTAL_NET="10.0.0.0/16" +SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET ALLOCATE_NODE_CIDRS=true # When set to true, Docker Cache is enabled by default as part of the cluster bring up. diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index 405f05eee26..270a5bc131f 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -44,7 +44,7 @@ MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/logging.write" "https://www.googleapis.com/auth/monitoring") # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default. POLL_SLEEP_INTERVAL=3 -PORTAL_NET="10.0.0.0/16" +SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET # When set to true, Docker Cache is enabled by default as part of the cluster bring up. ENABLE_DOCKER_REGISTRY_CACHE=true diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh index 800d8e92bfa..09209a38976 100644 --- a/cluster/gce/configure-vm.sh +++ b/cluster/gce/configure-vm.sh @@ -250,7 +250,7 @@ instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")' node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")' cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' allocate_node_cidrs: '$(echo "$ALLOCATE_NODE_CIDRS" | sed -e "s/'/''/g")' -portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")' +service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")' enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")' enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")' diff --git a/cluster/gce/coreos/helper.sh b/cluster/gce/coreos/helper.sh index 914fcdb95d1..e8f5c6b24df 100644 --- a/cluster/gce/coreos/helper.sh +++ b/cluster/gce/coreos/helper.sh @@ -31,7 +31,7 @@ 
NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX}) CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16}) SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL}) SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL}) -PORTAL_NET: $(yaml-quote ${PORTAL_NET}) +SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE}) ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false}) ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none}) ENABLE_NODE_MONITORING: $(yaml-quote ${ENABLE_NODE_MONITORING:-false}) @@ -64,7 +64,7 @@ ENV_TIMESTAMP=$(yaml-quote $(date -u +%Y-%m-%dT%T%z)) INSTANCE_PREFIX=$(yaml-quote ${INSTANCE_PREFIX}) NODE_INSTANCE_PREFIX=$(yaml-quote ${NODE_INSTANCE_PREFIX}) SERVER_BINARY_TAR_URL=$(yaml-quote ${SERVER_BINARY_TAR_URL}) -PORTAL_NET=$(yaml-quote ${PORTAL_NET}) +SERVICE_CLUSTER_IP_RANGE=$(yaml-quote ${SERVICE_CLUSTER_IP_RANGE}) ENABLE_CLUSTER_MONITORING=$(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none}) ENABLE_NODE_MONITORING=$(yaml-quote ${ENABLE_NODE_MONITORING:-false}) ENABLE_CLUSTER_LOGGING=$(yaml-quote ${ENABLE_CLUSTER_LOGGING:-false}) diff --git a/cluster/gce/debian/helper.sh b/cluster/gce/debian/helper.sh index c338221cc33..204318dfd72 100644 --- a/cluster/gce/debian/helper.sh +++ b/cluster/gce/debian/helper.sh @@ -29,7 +29,7 @@ NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX}) CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16}) SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL}) SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL}) -PORTAL_NET: $(yaml-quote ${PORTAL_NET}) +SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE}) ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false}) ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none}) ENABLE_NODE_MONITORING: $(yaml-quote ${ENABLE_NODE_MONITORING:-false}) diff --git a/cluster/libvirt-coreos/config-default.sh b/cluster/libvirt-coreos/config-default.sh index 
7a87873729a..7cdcca8769a 100644 --- a/cluster/libvirt-coreos/config-default.sh +++ b/cluster/libvirt-coreos/config-default.sh @@ -46,7 +46,7 @@ for ((i=0; i < NUM_MINIONS; i++)) do done MINION_CONTAINER_SUBNETS[$NUM_MINIONS]=$MASTER_CONTAINER_SUBNET -PORTAL_NET=10.11.0.0/16 +SERVICE_CLUSTER_IP_RANGE=10.11.0.0/16 # formerly PORTAL_NET # Optional: Install node monitoring. ENABLE_NODE_MONITORING=true diff --git a/cluster/libvirt-coreos/user_data_master.yml b/cluster/libvirt-coreos/user_data_master.yml index 5bd61bbca85..376c3db8fd9 100644 --- a/cluster/libvirt-coreos/user_data_master.yml +++ b/cluster/libvirt-coreos/user_data_master.yml @@ -18,7 +18,7 @@ coreos: --port=8080 \ --etcd_servers=http://127.0.0.1:4001 \ --kubelet_port=10250 \ - --service-cluster-ip-range=${PORTAL_NET} + --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE} Restart=always RestartSec=2 diff --git a/cluster/rackspace/cloud-config/master-cloud-config.yaml b/cluster/rackspace/cloud-config/master-cloud-config.yaml index 9238d4c9b07..1102d3a023a 100644 --- a/cluster/rackspace/cloud-config/master-cloud-config.yaml +++ b/cluster/rackspace/cloud-config/master-cloud-config.yaml @@ -93,7 +93,7 @@ coreos: --etcd_servers=http://127.0.0.1:4001 \ --logtostderr=true \ --port=8080 \ - --service-cluster-ip-range=PORTAL_NET \ + --service-cluster-ip-range=SERVICE_CLUSTER_IP_RANGE \ --token-auth-file=/var/lib/kube-apiserver/known_tokens.csv \ --v=2 Restart=always diff --git a/cluster/rackspace/config-default.sh b/cluster/rackspace/config-default.sh index 6cc4fc918aa..ce1e07fac60 100644 --- a/cluster/rackspace/config-default.sh +++ b/cluster/rackspace/config-default.sh @@ -36,7 +36,7 @@ RAX_NUM_MINIONS="${RAX_NUM_MINIONS-4}" MINION_TAG="tags=${INSTANCE_PREFIX}-minion" MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${RAX_NUM_MINIONS}})) KUBE_NETWORK="10.240.0.0/16" -PORTAL_NET="10.0.0.0/16" +SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET # Optional: Install node monitoring. 
ENABLE_NODE_MONITORING=true diff --git a/cluster/rackspace/util.sh b/cluster/rackspace/util.sh index c72a9952ac6..faa6705f651 100644 --- a/cluster/rackspace/util.sh +++ b/cluster/rackspace/util.sh @@ -164,7 +164,7 @@ rax-boot-master() { -e "s|CLOUD_FILES_URL|${RELEASE_TMP_URL//&/\\&}|" \ -e "s|KUBE_USER|${KUBE_USER}|" \ -e "s|KUBE_PASSWORD|${KUBE_PASSWORD}|" \ - -e "s|PORTAL_NET|${PORTAL_NET}|" \ + -e "s|SERVICE_CLUSTER_IP_RANGE|${SERVICE_CLUSTER_IP_RANGE}|" \ -e "s|OS_AUTH_URL|${OS_AUTH_URL}|" \ -e "s|OS_USERNAME|${OS_USERNAME}|" \ -e "s|OS_PASSWORD|${OS_PASSWORD}|" \ diff --git a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest index 23cc31de4d8..07e45c639e4 100644 --- a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest +++ b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest @@ -36,9 +36,9 @@ {% set etcd_servers = "--etcd_servers=http://127.0.0.1:4001" -%} -{% set portal_net = "" -%} -{% if pillar['portal_net'] is defined -%} - {% set portal_net = "--service-cluster-ip-range=" + pillar['portal_net'] -%} +{% set service_cluster_ip_range = "" -%} +{% if pillar['service_cluster_ip_range'] is defined -%} + {% set service_cluster_ip_range = "--service-cluster-ip-range=" + pillar['service_cluster_ip_range'] -%} {% endif -%} {% set cert_file = "--tls_cert_file=/srv/kubernetes/server.cert" -%} @@ -74,7 +74,7 @@ {% set runtime_config = "--runtime_config=" + grains.runtime_config -%} {% endif -%} -{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + portal_net + " " + client_ca_file + " " + basic_auth_file -%} +{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + " " + basic_auth_file -%} {% set params = params + " " + cluster_name + " " + 
cert_file + " " + key_file + " --secure_port=" + secure_port + " " + token_auth_file + " " + publicAddressOverride + " " + pillar['log_level'] -%} diff --git a/cluster/ubuntu/config-default.sh b/cluster/ubuntu/config-default.sh index 57485f830e4..0f603d3b390 100755 --- a/cluster/ubuntu/config-default.sh +++ b/cluster/ubuntu/config-default.sh @@ -25,8 +25,8 @@ export roles=("ai" "i" "i") export NUM_MINIONS=${NUM_MINIONS:-3} # define the IP range used for service cluster IPs. # according to rfc 1918 ref: https://tools.ietf.org/html/rfc1918 choose a private ip range here. -export PORTAL_NET=192.168.3.0/24 -# define the IP range used for flannel overlay network, should not conflict with above PORTAL_NET range +export SERVICE_CLUSTER_IP_RANGE=192.168.3.0/24 # formerly PORTAL_NET +# define the IP range used for flannel overlay network, should not conflict with above SERVICE_CLUSTER_IP_RANGE export FLANNEL_NET=172.16.0.0/16 # Admission Controllers to invoke prior to persisting objects in cluster @@ -52,7 +52,7 @@ DOCKER_OPTS="" # Optional: Install cluster DNS. 
ENABLE_CLUSTER_DNS=true -# DNS_SERVER_IP must be a IP in PORTAL_NET range +# DNS_SERVER_IP must be a IP in SERVICE_CLUSTER_IP_RANGE DNS_SERVER_IP="192.168.3.10" DNS_DOMAIN="cluster.local" DNS_REPLICAS=1 diff --git a/cluster/ubuntu/util.sh b/cluster/ubuntu/util.sh index 2e7e556d204..ab74faa4449 100755 --- a/cluster/ubuntu/util.sh +++ b/cluster/ubuntu/util.sh @@ -377,7 +377,7 @@ function provision-master() { ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \ setClusterInfo; \ create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \ - create-kube-apiserver-opts "${PORTAL_NET}"; \ + create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}"; \ create-kube-controller-manager-opts "${MINION_IPS}"; \ create-kube-scheduler-opts; \ sudo -p '[sudo] password to copy files and start master: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ \ @@ -416,7 +416,7 @@ function provision-masterandminion() { ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \ setClusterInfo; \ create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \ - create-kube-apiserver-opts "${PORTAL_NET}"; \ + create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}"; \ create-kube-controller-manager-opts "${MINION_IPS}"; \ create-kube-scheduler-opts; \ create-kubelet-opts "${MASTER_IP}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}"; diff --git a/cluster/vagrant/config-default.sh b/cluster/vagrant/config-default.sh index a59f1b95e0d..57f40b86b72 100755 --- a/cluster/vagrant/config-default.sh +++ b/cluster/vagrant/config-default.sh @@ -43,7 +43,7 @@ for ((i=0; i < NUM_MINIONS; i++)) do VAGRANT_MINION_NAMES[$i]="minion-$((i+1))" done -PORTAL_NET=10.247.0.0/16 +SERVICE_CLUSTER_IP_RANGE=10.247.0.0/16 # formerly PORTAL_NET # Since this isn't exposed on the network, default to a simple user/passwd MASTER_USER=vagrant diff --git a/cluster/vagrant/provision-master.sh b/cluster/vagrant/provision-master.sh index 
bdf9ba514d4..33203fd0529 100755 --- a/cluster/vagrant/provision-master.sh +++ b/cluster/vagrant/provision-master.sh @@ -85,7 +85,7 @@ EOF mkdir -p /srv/salt-overlay/pillar cat </srv/salt-overlay/pillar/cluster-params.sls - portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")' + service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")' enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")' enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")' diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh index b558e0d3d6f..35ef1d31b0e 100644 --- a/cluster/vagrant/util.sh +++ b/cluster/vagrant/util.sh @@ -127,7 +127,7 @@ function create-provision-scripts { echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'" echo "MINION_CONTAINER_NETMASKS='${MINION_CONTAINER_NETMASKS[@]}'" echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})" - echo "PORTAL_NET='${PORTAL_NET}'" + echo "SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'" echo "MASTER_USER='${MASTER_USER}'" echo "MASTER_PASSWD='${MASTER_PASSWD}'" echo "ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'" diff --git a/cluster/vsphere/config-default.sh b/cluster/vsphere/config-default.sh index 8d629f63eb4..1ece58e31f9 100755 --- a/cluster/vsphere/config-default.sh +++ b/cluster/vsphere/config-default.sh @@ -31,7 +31,7 @@ MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) MINION_MEMORY_MB=2048 MINION_CPU=1 -PORTAL_NET="10.244.240.0/20" +SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET # Optional: Install node monitoring. 
ENABLE_NODE_MONITORING=true diff --git a/cluster/vsphere/config-test.sh b/cluster/vsphere/config-test.sh index 09d5c438667..57cbe8e935e 100755 --- a/cluster/vsphere/config-test.sh +++ b/cluster/vsphere/config-test.sh @@ -31,4 +31,4 @@ MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) MINION_MEMORY_MB=1024 MINION_CPU=1 -PORTAL_NET="10.244.240.0/20" +SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET diff --git a/cluster/vsphere/templates/create-dynamic-salt-files.sh b/cluster/vsphere/templates/create-dynamic-salt-files.sh index 41fd1ae15a3..880b24757a9 100755 --- a/cluster/vsphere/templates/create-dynamic-salt-files.sh +++ b/cluster/vsphere/templates/create-dynamic-salt-files.sh @@ -22,7 +22,7 @@ mkdir -p /srv/salt-overlay/pillar cat </srv/salt-overlay/pillar/cluster-params.sls instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")' node_instance_prefix: $NODE_INSTANCE_PREFIX -portal_net: $PORTAL_NET +service_cluster_ip_range: $SERVICE_CLUSTER_IP_RANGE enable_cluster_monitoring: $ENABLE_CLUSTER_MONITORING enable_node_monitoring: $ENABLE_NODE_MONITORING enable_cluster_logging: $ENABLE_CLUSTER_LOGGING diff --git a/cluster/vsphere/util.sh b/cluster/vsphere/util.sh index c517b7d0c87..19361515d4d 100755 --- a/cluster/vsphere/util.sh +++ b/cluster/vsphere/util.sh @@ -280,7 +280,7 @@ function kube-up { echo "readonly MASTER_NAME='${MASTER_NAME}'" echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'" echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'" - echo "readonly PORTAL_NET='${PORTAL_NET}'" + echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'" echo "readonly ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'" echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'" echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'" diff --git a/docs/getting-started-guides/fedora/fedora_manual_config.md b/docs/getting-started-guides/fedora/fedora_manual_config.md index 
5ff3faacc91..d72fafeb5bc 100644 --- a/docs/getting-started-guides/fedora/fedora_manual_config.md +++ b/docs/getting-started-guides/fedora/fedora_manual_config.md @@ -61,7 +61,7 @@ systemctl stop iptables-services firewalld **Configure the kubernetes services on the master.** -* Edit /etc/kubernetes/apiserver to appear as such. The portal_net IP addresses must be an unused block of addresses, not used anywhere else. They do not need to be routed or assigned to anything. +* Edit /etc/kubernetes/apiserver to appear as such. The service_cluster_ip_range IP addresses must be an unused block of addresses, not used anywhere else. They do not need to be routed or assigned to anything. ``` # The address on the local server to listen to. diff --git a/docs/getting-started-guides/locally.md b/docs/getting-started-guides/locally.md index d1c5f63716c..e486f03fc05 100644 --- a/docs/getting-started-guides/locally.md +++ b/docs/getting-started-guides/locally.md @@ -90,7 +90,7 @@ firewall or other iptables-using systems, first. By default the IP range for service cluster IPs is 10.0.*.* - depending on your docker installation, this may conflict with IPs for containers. If you find containers running with IPs in this range, edit hack/local-cluster-up.sh and -change the portal_net flag to something else. +change the service-cluster-ip-range flag to something else. #### I cannot create a replication controller with replica size greater than 1! What gives? 
diff --git a/docs/getting-started-guides/ubuntu.md b/docs/getting-started-guides/ubuntu.md index d8b76a197b0..3867af4932b 100644 --- a/docs/getting-started-guides/ubuntu.md +++ b/docs/getting-started-guides/ubuntu.md @@ -48,7 +48,7 @@ export roles=("ai" "i" "i") export NUM_MINIONS=${NUM_MINIONS:-3} -export PORTAL_NET=11.1.1.0/24 +export SERVICE_CLUSTER_IP_RANGE=11.1.1.0/24 export FLANNEL_NET=172.16.0.0/16 @@ -61,7 +61,7 @@ Then the `roles ` variable defines the role of above machine in the same order, The `NUM_MINIONS` variable defines the total number of minions. -The `PORTAL_NET` variable defines the kubernetes service portal ip range. Please make sure that you do have a valid private ip range defined here, because some IaaS provider may reserve private ips. You can use below three private network range accordin to rfc1918. Besides you'd better not choose the one that conflicts with your own private network range. +The `SERVICE_CLUSTER_IP_RANGE` variable defines the kubernetes service cluster IP range. Please make sure that you have a valid private IP range defined here, because some IaaS providers may reserve private IPs. You can use one of the three private network ranges below, according to RFC 1918. It is also best not to choose a range that conflicts with your own private network range. 10.0.0.0 - 10.255.255.255 (10/8 prefix) @@ -69,7 +69,7 @@ The `PORTAL_NET` variable defines the kubernetes service portal ip range. Please 192.168.0.0 - 192.168.255.255 (192.168/16 prefix) -The `FLANNEL_NET` variable defines the IP range used for flannel overlay network, should not conflict with above PORTAL_NET range +The `FLANNEL_NET` variable defines the IP range used for flannel overlay network, should not conflict with above `SERVICE_CLUSTER_IP_RANGE`. After all the above variable being set correctly. We can use below command in cluster/ directory to bring up the whole cluster.
@@ -127,7 +127,7 @@ DNS_DOMAIN="kubernetes.local" DNS_REPLICAS=1 ``` -The `DNS_SERVER_IP` is defining the ip of dns server which must be in the portal_net range. +The `DNS_SERVER_IP` defines the IP of the DNS server, which must be within the `SERVICE_CLUSTER_IP_RANGE`. The `DNS_REPLICAS` describes how many dns pod running in the cluster. diff --git a/docs/man/man1/kube-apiserver.1 b/docs/man/man1/kube-apiserver.1 index db62a6bdd4f..2fa1600b7ea 100644 --- a/docs/man/man1/kube-apiserver.1 +++ b/docs/man/man1/kube-apiserver.1 @@ -178,7 +178,7 @@ The the kube\-apiserver several options. DEPRECATED: see \-\-insecure\-port instead .PP -\fB\-\-portal\-net\fP= +\fB\-\-service\-cluster\-ip\-range\fP= A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes for pods. .PP @@ -246,7 +246,7 @@ The the kube\-apiserver several options. .RS .nf -/usr/bin/kube\-apiserver \-\-logtostderr=true \-\-v=0 \-\-etcd\_servers=http://127.0.0.1:4001 \-\-insecure\_bind\_address=127.0.0.1 \-\-insecure\_port=8080 \-\-kubelet\_port=10250 \-\-portal\_net=11.1.1.0/24 \-\-allow\_privileged=false +/usr/bin/kube\-apiserver \-\-logtostderr=true \-\-v=0 \-\-etcd\_servers=http://127.0.0.1:4001 \-\-insecure\_bind\_address=127.0.0.1 \-\-insecure\_port=8080 \-\-kubelet\_port=10250 \-\-service\-cluster\-ip\-range=11.1.1.0/24 \-\-allow\_privileged=false .fi diff --git a/docs/services.md b/docs/services.md index 8a482ad4830..86cdcf8349d 100644 --- a/docs/services.md +++ b/docs/services.md @@ -206,9 +206,10 @@ request. To do this, set the `spec.clusterIP` field (called `portalIP` in v1beta3 and earlier APIs). For example, if they already have an existing DNS entry that they wish to replace, or legacy systems that are configured for a specific IP address and difficult to re-configure. The IP address that a user -chooses must be a valid IP address and within the portal_net CIDR range that is -specified by flag to the API server. 
If the IP address value is invalid, the -apiserver returns a 422 HTTP status code to indicate that the value is invalid. +chooses must be a valid IP address and within the service_cluster_ip_range CIDR +range that is specified by flag to the API server. If the IP address value is +invalid, the apiserver returns a 422 HTTP status code to indicate that the +value is invalid. ### Why not use round-robin DNS? diff --git a/pkg/proxy/proxier.go b/pkg/proxy/proxier.go index c388623f3bd..c2e78747197 100644 --- a/pkg/proxy/proxier.go +++ b/pkg/proxy/proxier.go @@ -560,7 +560,7 @@ var iptablesHostNodePortChain iptables.Chain = "KUBE-NODEPORT-HOST" // Ensure that the iptables infrastructure we use is set up. This can safely be called periodically. func iptablesInit(ipt iptables.Interface) error { // TODO: There is almost certainly room for optimization here. E.g. If - // we knew the portal_net CIDR we could fast-track outbound packets not + // we knew the service_cluster_ip_range CIDR we could fast-track outbound packets not // destined for a service. There's probably more, help wanted. // Danger - order of these rules matters here: @@ -580,7 +580,7 @@ func iptablesInit(ipt iptables.Interface) error { // the NodePort would take priority (incorrectly). // This is unlikely (and would only affect outgoing traffic from the cluster to the load balancer, which seems // doubly-unlikely), but we need to be careful to keep the rules in the right order. - args := []string{ /* portal_net matching could go here */ } + args := []string{ /* service_cluster_ip_range matching could go here */ } args = append(args, "-m", "comment", "--comment", "handle ClusterIPs; NOTE: this must be before the NodePort rules") if _, err := ipt.EnsureChain(iptables.TableNAT, iptablesContainerPortalChain); err != nil { return err