AWS: Experimental support for multiple subnets/AZs in kube-up
By setting KUBE_SHARE_MASTER=true we reuse an existing master rather than creating a new one. By setting KUBE_SUBNET_CIDR=172.20.1.0/24 you can specify the CIDR for a new subnet, avoiding conflicts. Both options are documented only in kube-up and are clearly marked 'experimental', i.e. likely to change. By combining them, you can kube-up a cluster normally, then kube-up a cluster in a different AZ, and the new nodes will attach to the same master. KUBE_SHARE_MASTER is also useful for adding a second node auto-scaling group, for example if you want to mix spot and on-demand instances.
This commit is contained in:
parent df3897c4ab
commit d64643fe26
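A usage sketch of the workflow the message describes (zone values are illustrative, and KUBE_AWS_ZONE is assumed to be the usual kube-up zone variable; both flags are experimental and may change):

# Bring up a cluster normally in the first AZ.
KUBE_AWS_ZONE=us-west-2a cluster/kube-up.sh

# Add nodes in a second AZ, reusing the existing master and picking a
# subnet CIDR that does not conflict with the default 172.20.0.0/24.
KUBE_SHARE_MASTER=true KUBE_SUBNET_CIDR=172.20.1.0/24 \
  KUBE_AWS_ZONE=us-west-2b cluster/kube-up.sh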
@@ -16,6 +16,19 @@
 # A library of helper functions and constant for the local config.
 
+# Experimental flags can be removed/renamed at any time.
+# The intent is to allow experimentation/advanced functionality before we
+# are ready to commit to supporting it.
+# Experimental functionality:
+#   KUBE_SHARE_MASTER=true
+#     Detects an existing master and reuses it; useful if you want to
+#     create more nodes, perhaps with a different instance type or in
+#     a different subnet/AZ
+#   KUBE_SUBNET_CIDR=172.20.1.0/24
+#     Override the default subnet CIDR; useful if you want to create
+#     a second subnet. The default subnet is 172.20.0.0/24. The VPC
+#     is created with 172.20.0.0/16; you must pick a sub-CIDR of that.
+
 # Use the config file specified in $KUBE_CONFIG_FILE, or default to
 # config-default.sh.
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 
@@ -25,7 +38,9 @@ source "${KUBE_ROOT}/cluster/common.sh"
 ALLOCATE_NODE_CIDRS=true
 
 NODE_INSTANCE_PREFIX="${INSTANCE_PREFIX}-minion"
-ASG_NAME="${NODE_INSTANCE_PREFIX}-group"
+# The ASG name must be unique, so we include the zone
+ASG_NAME="${NODE_INSTANCE_PREFIX}-group-${ZONE}"
+
 # We could allow the master disk volume id to be specified in future
 MASTER_DISK_ID=
 
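With the zone appended, each AZ (or each extra node group) gets a distinct auto-scaling group name. For example, assuming the default instance prefix "kubernetes":

NODE_INSTANCE_PREFIX="kubernetes-minion"
ZONE="us-west-2a"
ASG_NAME="${NODE_INSTANCE_PREFIX}-group-${ZONE}"   # kubernetes-minion-group-us-west-2a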
@@ -53,9 +68,15 @@ AWS_CMD="aws --output json ec2"
 AWS_ELB_CMD="aws --output json elb"
 AWS_ASG_CMD="aws --output json autoscaling"
 
-INTERNAL_IP_BASE=172.20.0
+VPC_CIDR_BASE=172.20
 MASTER_IP_SUFFIX=.9
-MASTER_INTERNAL_IP=${INTERNAL_IP_BASE}${MASTER_IP_SUFFIX}
+MASTER_INTERNAL_IP=${VPC_CIDR_BASE}.0${MASTER_IP_SUFFIX}
+VPC_CIDR=${VPC_CIDR_BASE}.0.0/16
+SUBNET_CIDR=${VPC_CIDR_BASE}.0.0/24
+if [[ -n "${KUBE_SUBNET_CIDR:-}" ]]; then
+  echo "Using subnet CIDR override: ${KUBE_SUBNET_CIDR}"
+  SUBNET_CIDR=${KUBE_SUBNET_CIDR}
+fi
 
 MASTER_SG_NAME="kubernetes-master-${CLUSTER_ID}"
 MINION_SG_NAME="kubernetes-minion-${CLUSTER_ID}"
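The override is applied as-is. A hypothetical pre-flight check (not part of this commit) could reject CIDRs outside the VPC range before any AWS calls are made, using a simple textual prefix test rather than full CIDR containment math:

# Hypothetical sanity check: the override must fall inside VPC_CIDR.
if [[ -n "${KUBE_SUBNET_CIDR:-}" && "${KUBE_SUBNET_CIDR}" != "${VPC_CIDR_BASE}".* ]]; then
  echo "KUBE_SUBNET_CIDR ${KUBE_SUBNET_CIDR} is not within ${VPC_CIDR}" >&2
  exit 1
fi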
@@ -706,7 +727,7 @@ function kube-up {
   fi
   if [[ -z "$VPC_ID" ]]; then
     echo "Creating vpc."
-    VPC_ID=$($AWS_CMD create-vpc --cidr-block $INTERNAL_IP_BASE.0/16 | json_val '["Vpc"]["VpcId"]')
+    VPC_ID=$($AWS_CMD create-vpc --cidr-block ${VPC_CIDR} | json_val '["Vpc"]["VpcId"]')
     $AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-support '{"Value": true}' > $LOG
     $AWS_CMD modify-vpc-attribute --vpc-id $VPC_ID --enable-dns-hostnames '{"Value": true}' > $LOG
     add-tag $VPC_ID Name kubernetes-vpc
@@ -723,13 +744,16 @@ function kube-up {
 
   if [[ -z "$SUBNET_ID" ]]; then
     echo "Creating subnet."
-    SUBNET_ID=$($AWS_CMD create-subnet --cidr-block $INTERNAL_IP_BASE.0/24 --vpc-id $VPC_ID --availability-zone ${ZONE} | json_val '["Subnet"]["SubnetId"]')
+    SUBNET_ID=$($AWS_CMD create-subnet --cidr-block ${SUBNET_CIDR} --vpc-id $VPC_ID --availability-zone ${ZONE} | json_val '["Subnet"]["SubnetId"]')
     add-tag $SUBNET_ID KubernetesCluster ${CLUSTER_ID}
   else
     EXISTING_CIDR=$($AWS_CMD describe-subnets --subnet-ids ${SUBNET_ID} --query Subnets[].CidrBlock --output text)
-    echo "Using existing CIDR $EXISTING_CIDR"
-    INTERNAL_IP_BASE=${EXISTING_CIDR%.*}
-    MASTER_INTERNAL_IP=${INTERNAL_IP_BASE}${MASTER_IP_SUFFIX}
+    echo "Using existing subnet with CIDR $EXISTING_CIDR"
+    VPC_CIDR=$($AWS_CMD describe-vpcs --vpc-ids ${VPC_ID} --query Vpcs[].CidrBlock --output text)
+    echo "VPC CIDR is $VPC_CIDR"
+    VPC_CIDR_BASE=${VPC_CIDR%.*.*}
+    MASTER_INTERNAL_IP=${VPC_CIDR_BASE}.0${MASTER_IP_SUFFIX}
+    echo "Assuming MASTER_INTERNAL_IP=${MASTER_INTERNAL_IP}"
   fi
 
   echo "Using subnet $SUBNET_ID"
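The ${VPC_CIDR%.*.*} expansion strips the shortest suffix matching ".*.*", i.e. the last two dot-delimited components (here ".0.0/16"), so the master IP is rebuilt from the first /24 of the VPC. A standalone illustration, runnable in any bash shell:

VPC_CIDR="172.20.0.0/16"
VPC_CIDR_BASE=${VPC_CIDR%.*.*}                 # -> "172.20"
MASTER_IP_SUFFIX=.9
echo "${VPC_CIDR_BASE}.0${MASTER_IP_SUFFIX}"   # -> 172.20.0.9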
@@ -796,17 +820,26 @@ function kube-up {
   # HTTPS to the master is allowed (for API access)
   authorize-security-group-ingress "${MASTER_SG_ID}" "--protocol tcp --port 443 --cidr 0.0.0.0/0"
 
-  # Create the master
-  start-master
+  # KUBE_SHARE_MASTER is used to add minions to an existing master
+  if [[ "${KUBE_SHARE_MASTER:-}" == "true" ]]; then
+    # Detect existing master
+    detect-master
 
-  # Start minions
-  start-minions
+    # Start minions
+    start-minions
+  else
+    # Create the master
+    start-master
 
-  # Wait for the master to be ready
-  wait-master
+    # Start minions
+    start-minions
 
-  # Build ~/.kube/config
-  build-config
+    # Wait for the master to be ready
+    wait-master
+
+    # Build ~/.kube/config
+    build-config
+  fi
 
   # Check the cluster is OK
   check-cluster
@@ -1038,7 +1071,6 @@ function start-minions() {
 # Wait for the master to be started
 function wait-master() {
   detect-master > $LOG
-  detect-minions > $LOG
 
   # TODO(justinsb): This is really not necessary any more
   # Wait 3 minutes for cluster to come up. We hit it with a "highstate" after that to
@@ -1098,6 +1130,8 @@ function check-cluster() {
 
   sleep 5
 
+  detect-minions > $LOG
+
   # Don't bail on errors, we want to be able to print some info.
   set +e
 
@@ -1167,6 +1201,7 @@ function kube-down {
     echo "Deleting auto-scaling group: ${ASG_NAME}"
     ${AWS_ASG_CMD} delete-auto-scaling-group --force-delete --auto-scaling-group-name ${ASG_NAME}
   fi
+
   if [[ -n $(${AWS_ASG_CMD} --output text describe-launch-configurations --launch-configuration-names ${ASG_NAME} --query LaunchConfigurations[].LaunchConfigurationName) ]]; then
     echo "Deleting auto-scaling launch configuration: ${ASG_NAME}"
     ${AWS_ASG_CMD} delete-launch-configuration --launch-configuration-name ${ASG_NAME}