Rename PORTAL_NET all over

Tim Hockin 2015-05-23 22:17:55 -07:00
parent 3005471100
commit ac3cc3c518
33 changed files with 48 additions and 47 deletions

View File

@@ -41,7 +41,7 @@ MINION_TAG="${INSTANCE_PREFIX}-minion"
 MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
 MINION_SCOPES=""
 POLL_SLEEP_INTERVAL=3
-PORTAL_NET="10.0.0.0/16"
+SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
 # If set to Elastic IP, master instance will be associated with this IP.
 # If set to auto, a new Elastic IP will be aquired

View File

@@ -37,7 +37,7 @@ MINION_TAG="${INSTANCE_PREFIX}-minion"
 MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
 MINION_SCOPES=""
 POLL_SLEEP_INTERVAL=3
-PORTAL_NET="10.0.0.0/16"
+SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
 # If set to Elastic IP, master instance will be associated with this IP.
 # If set to auto, a new Elastic IP will be aquired

View File

@@ -22,7 +22,7 @@ mkdir -p /srv/salt-overlay/pillar
 cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
 instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
 node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
-portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")'
+service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
 enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
 enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")'
 enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
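
A note on the quoting idiom repeated throughout these pillar files: the sed expression doubles any single quote so the shell value can sit safely inside a single-quoted SLS scalar. A minimal sketch (the sample value is illustrative):

```sh
# Doubling single quotes makes a value safe inside a single-quoted
# Salt/YAML scalar; values without quotes pass through unchanged.
SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16"
echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g"   # prints: 10.0.0.0/16
```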

View File

@@ -507,7 +507,7 @@ function kube-up {
 echo "readonly ZONE='${ZONE}'"
 echo "readonly KUBE_USER='${KUBE_USER}'"
 echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD}'"
-echo "readonly PORTAL_NET='${PORTAL_NET}'"
+echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
 echo "readonly ENABLE_CLUSTER_MONITORING='${ENABLE_CLUSTER_MONITORING:-false}'"
 echo "readonly ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'"
 echo "readonly ENABLE_CLUSTER_LOGGING='${ENABLE_CLUSTER_LOGGING:-false}'"

View File

@@ -35,7 +35,7 @@ MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
 MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
 MINION_SCOPES=""
-PORTAL_NET="10.250.0.0/16"
+SERVICE_CLUSTER_IP_RANGE="10.250.0.0/16" # formerly PORTAL_NET
 # Optional: Install node logging
 ENABLE_NODE_LOGGING=false

View File

@@ -22,7 +22,7 @@ mkdir -p /srv/salt-overlay/pillar
 cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
 instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
 node_instance_prefix: $NODE_INSTANCE_PREFIX
-portal_net: $PORTAL_NET
+service_cluster_ip_range: $SERVICE_CLUSTER_IP_RANGE
 admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
 EOF

View File

@@ -322,7 +322,7 @@ function kube-up {
 echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
 echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
 echo "readonly MASTER_HTPASSWD='${htpasswd}'"
-echo "readonly PORTAL_NET='${PORTAL_NET}'"
+echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
 echo "readonly ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
 grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh"
 grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/create-dynamic-salt-files.sh"

View File

@@ -44,7 +44,7 @@ CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}"
 MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/monitoring" "https://www.googleapis.com/auth/logging.write")
 # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
 POLL_SLEEP_INTERVAL=3
-PORTAL_NET="10.0.0.0/16"
+SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
 ALLOCATE_NODE_CIDRS=true
 # When set to true, Docker Cache is enabled by default as part of the cluster bring up.

View File

@@ -44,7 +44,7 @@ MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
 MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/logging.write" "https://www.googleapis.com/auth/monitoring")
 # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
 POLL_SLEEP_INTERVAL=3
-PORTAL_NET="10.0.0.0/16"
+SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
 # When set to true, Docker Cache is enabled by default as part of the cluster bring up.
 ENABLE_DOCKER_REGISTRY_CACHE=true

View File

@@ -250,7 +250,7 @@ instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
 node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
 cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
 allocate_node_cidrs: '$(echo "$ALLOCATE_NODE_CIDRS" | sed -e "s/'/''/g")'
-portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")'
+service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
 enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
 enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")'
 enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'

View File

@@ -31,7 +31,7 @@ NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
 CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
 SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL})
 SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL})
-PORTAL_NET: $(yaml-quote ${PORTAL_NET})
+SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE})
 ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false})
 ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none})
 ENABLE_NODE_MONITORING: $(yaml-quote ${ENABLE_NODE_MONITORING:-false})
@@ -64,7 +64,7 @@ ENV_TIMESTAMP=$(yaml-quote $(date -u +%Y-%m-%dT%T%z))
 INSTANCE_PREFIX=$(yaml-quote ${INSTANCE_PREFIX})
 NODE_INSTANCE_PREFIX=$(yaml-quote ${NODE_INSTANCE_PREFIX})
 SERVER_BINARY_TAR_URL=$(yaml-quote ${SERVER_BINARY_TAR_URL})
-PORTAL_NET=$(yaml-quote ${PORTAL_NET})
+SERVICE_CLUSTER_IP_RANGE=$(yaml-quote ${SERVICE_CLUSTER_IP_RANGE})
 ENABLE_CLUSTER_MONITORING=$(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none})
 ENABLE_NODE_MONITORING=$(yaml-quote ${ENABLE_NODE_MONITORING:-false})
 ENABLE_CLUSTER_LOGGING=$(yaml-quote ${ENABLE_CLUSTER_LOGGING:-false})

View File

@@ -29,7 +29,7 @@ NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
 CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
 SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL})
 SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL})
-PORTAL_NET: $(yaml-quote ${PORTAL_NET})
+SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE})
 ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false})
 ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none})
 ENABLE_NODE_MONITORING: $(yaml-quote ${ENABLE_NODE_MONITORING:-false})

View File

@@ -46,7 +46,7 @@ for ((i=0; i < NUM_MINIONS; i++)) do
 done
 MINION_CONTAINER_SUBNETS[$NUM_MINIONS]=$MASTER_CONTAINER_SUBNET
-PORTAL_NET=10.11.0.0/16
+SERVICE_CLUSTER_IP_RANGE=10.11.0.0/16 # formerly PORTAL_NET
 # Optional: Install node monitoring.
 ENABLE_NODE_MONITORING=true

View File

@@ -18,7 +18,7 @@ coreos:
 --port=8080 \
 --etcd_servers=http://127.0.0.1:4001 \
 --kubelet_port=10250 \
---service-cluster-ip-range=${PORTAL_NET}
+--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}
 Restart=always
 RestartSec=2

View File

@@ -93,7 +93,7 @@ coreos:
 --etcd_servers=http://127.0.0.1:4001 \
 --logtostderr=true \
 --port=8080 \
---service-cluster-ip-range=PORTAL_NET \
+--service-cluster-ip-range=SERVICE_CLUSTER_IP_RANGE \
 --token-auth-file=/var/lib/kube-apiserver/known_tokens.csv \
 --v=2
 Restart=always

View File

@@ -36,7 +36,7 @@ RAX_NUM_MINIONS="${RAX_NUM_MINIONS-4}"
 MINION_TAG="tags=${INSTANCE_PREFIX}-minion"
 MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${RAX_NUM_MINIONS}}))
 KUBE_NETWORK="10.240.0.0/16"
-PORTAL_NET="10.0.0.0/16"
+SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
 # Optional: Install node monitoring.
 ENABLE_NODE_MONITORING=true

View File

@@ -164,7 +164,7 @@ rax-boot-master() {
 -e "s|CLOUD_FILES_URL|${RELEASE_TMP_URL//&/\\&}|" \
 -e "s|KUBE_USER|${KUBE_USER}|" \
 -e "s|KUBE_PASSWORD|${KUBE_PASSWORD}|" \
--e "s|PORTAL_NET|${PORTAL_NET}|" \
+-e "s|SERVICE_CLUSTER_IP_RANGE|${SERVICE_CLUSTER_IP_RANGE}|" \
 -e "s|OS_AUTH_URL|${OS_AUTH_URL}|" \
 -e "s|OS_USERNAME|${OS_USERNAME}|" \
 -e "s|OS_PASSWORD|${OS_PASSWORD}|" \

View File

@@ -36,9 +36,9 @@
 {% set etcd_servers = "--etcd_servers=http://127.0.0.1:4001" -%}
-{% set portal_net = "" -%}
-{% if pillar['portal_net'] is defined -%}
-{% set portal_net = "--service-cluster-ip-range=" + pillar['portal_net'] -%}
+{% set service_cluster_ip_range = "" -%}
+{% if pillar['service_cluster_ip_range'] is defined -%}
+{% set service_cluster_ip_range = "--service-cluster-ip-range=" + pillar['service_cluster_ip_range'] -%}
 {% endif -%}
 {% set cert_file = "--tls_cert_file=/srv/kubernetes/server.cert" -%}
@@ -74,7 +74,7 @@
 {% set runtime_config = "--runtime_config=" + grains.runtime_config -%}
 {% endif -%}
-{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + portal_net + " " + client_ca_file + " " + basic_auth_file -%}
+{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + " " + basic_auth_file -%}
 {% set params = params + " " + cluster_name + " " + cert_file + " " + key_file + " --secure_port=" + secure_port + " " + token_auth_file + " " + publicAddressOverride + " " + pillar['log_level'] -%}

View File

@@ -25,8 +25,8 @@ export roles=("ai" "i" "i")
 export NUM_MINIONS=${NUM_MINIONS:-3}
 # define the IP range used for service cluster IPs.
 # according to rfc 1918 ref: https://tools.ietf.org/html/rfc1918 choose a private ip range here.
-export PORTAL_NET=192.168.3.0/24
-# define the IP range used for flannel overlay network, should not conflict with above PORTAL_NET range
+export SERVICE_CLUSTER_IP_RANGE=192.168.3.0/24 # formerly PORTAL_NET
+# define the IP range used for flannel overlay network, should not conflict with above SERVICE_CLUSTER_IP_RANGE
 export FLANNEL_NET=172.16.0.0/16
 # Admission Controllers to invoke prior to persisting objects in cluster
@@ -52,7 +52,7 @@ DOCKER_OPTS=""
 # Optional: Install cluster DNS.
 ENABLE_CLUSTER_DNS=true
-# DNS_SERVER_IP must be a IP in PORTAL_NET range
+# DNS_SERVER_IP must be a IP in SERVICE_CLUSTER_IP_RANGE
 DNS_SERVER_IP="192.168.3.10"
 DNS_DOMAIN="cluster.local"
 DNS_REPLICAS=1

View File

@@ -377,7 +377,7 @@ function provision-master() {
 ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \
 setClusterInfo; \
 create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \
-create-kube-apiserver-opts "${PORTAL_NET}"; \
+create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}"; \
 create-kube-controller-manager-opts "${MINION_IPS}"; \
 create-kube-scheduler-opts; \
 sudo -p '[sudo] password to copy files and start master: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ \
@@ -416,7 +416,7 @@ function provision-masterandminion() {
 ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \
 setClusterInfo; \
 create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \
-create-kube-apiserver-opts "${PORTAL_NET}"; \
+create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}"; \
 create-kube-controller-manager-opts "${MINION_IPS}"; \
 create-kube-scheduler-opts; \
 create-kubelet-opts "${MASTER_IP}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}";

View File

@@ -43,7 +43,7 @@ for ((i=0; i < NUM_MINIONS; i++)) do
 VAGRANT_MINION_NAMES[$i]="minion-$((i+1))"
 done
-PORTAL_NET=10.247.0.0/16
+SERVICE_CLUSTER_IP_RANGE=10.247.0.0/16 # formerly PORTAL_NET
 # Since this isn't exposed on the network, default to a simple user/passwd
 MASTER_USER=vagrant

View File

@@ -85,7 +85,7 @@ EOF
 mkdir -p /srv/salt-overlay/pillar
 cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
-portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")'
+service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
 cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
 enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
 enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")'

View File

@@ -127,7 +127,7 @@ function create-provision-scripts {
 echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'"
 echo "MINION_CONTAINER_NETMASKS='${MINION_CONTAINER_NETMASKS[@]}'"
 echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})"
-echo "PORTAL_NET='${PORTAL_NET}'"
+echo "SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
 echo "MASTER_USER='${MASTER_USER}'"
 echo "MASTER_PASSWD='${MASTER_PASSWD}'"
 echo "ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'"

View File

@@ -31,7 +31,7 @@ MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
 MINION_MEMORY_MB=2048
 MINION_CPU=1
-PORTAL_NET="10.244.240.0/20"
+SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET
 # Optional: Install node monitoring.
 ENABLE_NODE_MONITORING=true

View File

@@ -31,4 +31,4 @@ MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
 MINION_MEMORY_MB=1024
 MINION_CPU=1
-PORTAL_NET="10.244.240.0/20"
+SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET

View File

@@ -22,7 +22,7 @@ mkdir -p /srv/salt-overlay/pillar
 cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
 instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
 node_instance_prefix: $NODE_INSTANCE_PREFIX
-portal_net: $PORTAL_NET
+service_cluster_ip_range: $SERVICE_CLUSTER_IP_RANGE
 enable_cluster_monitoring: $ENABLE_CLUSTER_MONITORING
 enable_node_monitoring: $ENABLE_NODE_MONITORING
 enable_cluster_logging: $ENABLE_CLUSTER_LOGGING

View File

@@ -280,7 +280,7 @@ function kube-up {
 echo "readonly MASTER_NAME='${MASTER_NAME}'"
 echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
 echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'"
-echo "readonly PORTAL_NET='${PORTAL_NET}'"
+echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
 echo "readonly ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'"
 echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
 echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"

View File

@@ -61,7 +61,7 @@ systemctl stop iptables-services firewalld
 **Configure the kubernetes services on the master.**
-* Edit /etc/kubernetes/apiserver to appear as such. The portal_net IP addresses must be an unused block of addresses, not used anywhere else. They do not need to be routed or assigned to anything.
+* Edit /etc/kubernetes/apiserver to appear as such. The service_cluster_ip_range IP addresses must be an unused block of addresses, not used anywhere else. They do not need to be routed or assigned to anything.
 ```
 # The address on the local server to listen to.

View File

@@ -90,7 +90,7 @@ firewall or other iptables-using systems, first.
 By default the IP range for service cluster IPs is 10.0.*.* - depending on your
 docker installation, this may conflict with IPs for containers. If you find
 containers running with IPs in this range, edit hack/local-cluster-up.sh and
-change the portal_net flag to something else.
+change the service-cluster-ip-range flag to something else.
 #### I cannot create a replication controller with replica size greater than 1! What gives?
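
For reference, a hedged sketch of the renamed flag as it would appear on an apiserver command line. The CIDR here is an invented example chosen to dodge the default Docker range; the companion flags are copied from the man page updated later in this commit:

```sh
# Hypothetical local invocation; 192.168.200.0/24 is an illustrative CIDR,
# not a value from this commit.
kube-apiserver \
  --etcd_servers=http://127.0.0.1:4001 \
  --insecure_bind_address=127.0.0.1 \
  --insecure_port=8080 \
  --service-cluster-ip-range=192.168.200.0/24
```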

View File

@@ -48,7 +48,7 @@ export roles=("ai" "i" "i")
 export NUM_MINIONS=${NUM_MINIONS:-3}
-export PORTAL_NET=11.1.1.0/24
+export SERVICE_CLUSTER_IP_RANGE=11.1.1.0/24
 export FLANNEL_NET=172.16.0.0/16
@@ -61,7 +61,7 @@ Then the `roles ` variable defines the role of above machine in the same order,
 The `NUM_MINIONS` variable defines the total number of minions.
-The `PORTAL_NET` variable defines the kubernetes service portal ip range. Please make sure that you do have a valid private ip range defined here, because some IaaS provider may reserve private ips. You can use below three private network range accordin to rfc1918. Besides you'd better not choose the one that conflicts with your own private network range.
+The `SERVICE_CLUSTER_IP_RANGE` variable defines the kubernetes service portal ip range. Please make sure that you do have a valid private ip range defined here, because some IaaS provider may reserve private ips. You can use below three private network range accordin to rfc1918. Besides you'd better not choose the one that conflicts with your own private network range.
 10.0.0.0 - 10.255.255.255 (10/8 prefix)
@@ -69,7 +69,7 @@ The `PORTAL_NET` variable defines the kubernetes service portal ip range. Please
 192.168.0.0 - 192.168.255.255 (192.168/16 prefix)
-The `FLANNEL_NET` variable defines the IP range used for flannel overlay network, should not conflict with above PORTAL_NET range
+The `FLANNEL_NET` variable defines the IP range used for flannel overlay network, should not conflict with above `SERVICE_CLUSTER_IP_RANGE`.
 After all the above variable being set correctly. We can use below command in cluster/ directory to bring up the whole cluster.
@@ -127,7 +127,7 @@ DNS_DOMAIN="kubernetes.local"
 DNS_REPLICAS=1
 ```
-The `DNS_SERVER_IP` is defining the ip of dns server which must be in the portal_net range.
+The `DNS_SERVER_IP` is defining the ip of dns server which must be in the service_cluster_ip_range.
 The `DNS_REPLICAS` describes how many dns pod running in the cluster.
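
Pulling the guide's constraints together, a sketch of a self-consistent configuration. The values are the ones this commit uses in the ubuntu config-default.sh; the consistency comments are editorial:

```sh
# The service range, the flannel overlay range, and your own LAN must not
# overlap, and the DNS server IP must fall inside the service range.
export SERVICE_CLUSTER_IP_RANGE=192.168.3.0/24  # formerly PORTAL_NET
export FLANNEL_NET=172.16.0.0/16                # pod overlay, disjoint from above
DNS_SERVER_IP="192.168.3.10"                    # inside SERVICE_CLUSTER_IP_RANGE
```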

View File

@@ -178,7 +178,7 @@ The the kube\-apiserver several options.
 DEPRECATED: see \-\-insecure\-port instead
 .PP
-\fB\-\-portal\-net\fP=
+\fB\-\-service\-cluster\-ip\-range\fP=
 A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes for pods.
 .PP
@@ -246,7 +246,7 @@ The the kube\-apiserver several options.
 .RS
 .nf
-/usr/bin/kube\-apiserver \-\-logtostderr=true \-\-v=0 \-\-etcd\_servers=http://127.0.0.1:4001 \-\-insecure\_bind\_address=127.0.0.1 \-\-insecure\_port=8080 \-\-kubelet\_port=10250 \-\-portal\_net=11.1.1.0/24 \-\-allow\_privileged=false
+/usr/bin/kube\-apiserver \-\-logtostderr=true \-\-v=0 \-\-etcd\_servers=http://127.0.0.1:4001 \-\-insecure\_bind\_address=127.0.0.1 \-\-insecure\_port=8080 \-\-kubelet\_port=10250 \-\-service\-cluster\-ip\-range=10.1.1.0/24 \-\-allow\_privileged=false
 .fi

View File

@@ -206,9 +206,10 @@ request. To do this, set the `spec.clusterIP` field (called `portalIP` in
 v1beta3 and earlier APIs). For example, if they already have an existing DNS
 entry that they wish to replace, or legacy systems that are configured for a
 specific IP address and difficult to re-configure. The IP address that a user
-chooses must be a valid IP address and within the portal_net CIDR range that is
-specified by flag to the API server. If the IP address value is invalid, the
-apiserver returns a 422 HTTP status code to indicate that the value is invalid.
+chooses must be a valid IP address and within the service_cluster_ip_range CIDR
+range that is specified by flag to the API server. If the IP address value is
+invalid, the apiserver returns a 422 HTTP status code to indicate that the
+value is invalid.
 ### Why not use round-robin DNS?
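
To make the paragraph above concrete, a hedged sketch of requesting a specific cluster IP. The service name, port, and selector are invented, and 10.0.0.10 assumes the 10.0.0.0/16 default range used elsewhere in this commit:

```sh
# Hypothetical example only: the chosen clusterIP must be a free address
# inside the API server's --service-cluster-ip-range, or the apiserver
# rejects the object with a 422.
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Service
metadata:
  name: my-service
spec:
  clusterIP: 10.0.0.10
  selector:
    app: my-app
  ports:
  - port: 80
EOF
```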

View File

@@ -560,7 +560,7 @@ var iptablesHostNodePortChain iptables.Chain = "KUBE-NODEPORT-HOST"
 // Ensure that the iptables infrastructure we use is set up. This can safely be called periodically.
 func iptablesInit(ipt iptables.Interface) error {
 // TODO: There is almost certainly room for optimization here. E.g. If
-// we knew the portal_net CIDR we could fast-track outbound packets not
+// we knew the service_cluster_ip_range CIDR we could fast-track outbound packets not
 // destined for a service. There's probably more, help wanted.
 // Danger - order of these rules matters here:
@@ -580,7 +580,7 @@ func iptablesInit(ipt iptables.Interface) error {
 // the NodePort would take priority (incorrectly).
 // This is unlikely (and would only affect outgoing traffic from the cluster to the load balancer, which seems
 // doubly-unlikely), but we need to be careful to keep the rules in the right order.
-args := []string{ /* portal_net matching could go here */ }
+args := []string{ /* service_cluster_ip_range matching could go here */ }
 args = append(args, "-m", "comment", "--comment", "handle ClusterIPs; NOTE: this must be before the NodePort rules")
 if _, err := ipt.EnsureChain(iptables.TableNAT, iptablesContainerPortalChain); err != nil {
 return err
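
The TODO in the first hunk gestures at a possible optimization; as a rough illustration only, here is the idea in iptables terms. The chain name and CIDR are assumptions, not code from this commit:

```sh
# Hypothetical fast-track rule: leave the portal chain immediately for
# traffic not addressed to the service CIDR, skipping per-service rules.
# KUBE-PORTALS-HOST and 10.0.0.0/16 are assumed names/values.
iptables -t nat -I KUBE-PORTALS-HOST 1 ! -d 10.0.0.0/16 \
  -m comment --comment "fast-track non-service traffic" -j RETURN
```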