GCE: Allow for reuse of master
This is for internal use at the moment, for testing Ubernetes Lite, but arguably makes the code a little cleaner. Also rename KUBE_SHARE_MASTER -> KUBE_USE_EXISTING_MASTER
Commit 2958ea253a (parent ad827c6b62)
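As a quick illustration of the renamed flag (not part of the diff below), a hedged sketch assuming the standard cluster/kube-up.sh entry point; the first invocation is the normal bring-up, the second only adds nodes:

    # Normal bring-up: creates the network, master, firewall rules and nodes.
    ./cluster/kube-up.sh

    # Add more nodes to an already-running cluster; the existing master is
    # detected (via detect-master) instead of being created again.
    KUBE_USE_EXISTING_MASTER=true ./cluster/kube-up.sh
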
@@ -20,7 +20,7 @@
 # The intent is to allow experimentation/advanced functionality before we
 # are ready to commit to supporting it.
 # Experimental functionality:
-#   KUBE_SHARE_MASTER=true
+#   KUBE_USE_EXISTING_MASTER=true
 #     Detect and reuse an existing master; useful if you want to
 #     create more nodes, perhaps with a different instance type or in
 #     a different subnet/AZ
@@ -808,8 +808,8 @@ function kube-up {
   # HTTPS to the master is allowed (for API access)
   authorize-security-group-ingress "${MASTER_SG_ID}" "--protocol tcp --port 443 --cidr 0.0.0.0/0"
 
-  # KUBE_SHARE_MASTER is used to add minions to an existing master
-  if [[ "${KUBE_SHARE_MASTER:-}" == "true" ]]; then
+  # KUBE_USE_EXISTING_MASTER is used to add minions to an existing master
+  if [[ "${KUBE_USE_EXISTING_MASTER:-}" == "true" ]]; then
     # Detect existing master
     detect-master
@@ -575,6 +575,22 @@ function kube-up {
   find-release-tars
   upload-server-tars
 
+  if [[ ${KUBE_USE_EXISTING_MASTER:-} == "true" ]]; then
+    create-nodes
+    create-autoscaler
+  else
+    check-existing
+    create-network
+    create-master
+    create-nodes-firewall
+    create-nodes-template
+    create-nodes
+    create-autoscaler
+    check-cluster
+  fi
+}
+
+function check-existing() {
   local running_in_terminal=false
   # May be false if tty is not allocated (for example with ssh -T).
   if [ -t 1 ]; then
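A side note on the gate above: the test relies on bash default expansion, so it stays safe even if KUBE_USE_EXISTING_MASTER is never exported (the cluster scripts generally run with nounset enabled). A standalone sketch of the same pattern:

    set -o nounset
    # "${KUBE_USE_EXISTING_MASTER:-}" expands to an empty string when the
    # variable is unset, so the comparison simply fails instead of the
    # script aborting with an "unbound variable" error.
    if [[ "${KUBE_USE_EXISTING_MASTER:-}" == "true" ]]; then
      echo "Reusing existing master"
    else
      echo "Creating a new master"
    fi
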
@@ -595,7 +611,9 @@ function kube-up {
       fi
     fi
   fi
+}
 
+function create-network() {
   if ! gcloud compute networks --project "${PROJECT}" describe "${NETWORK}" &>/dev/null; then
     echo "Creating new network: ${NETWORK}"
     # The network needs to be created synchronously or we have a race. The
@@ -618,7 +636,9 @@ function kube-up {
     --source-ranges "0.0.0.0/0" \
     --allow "tcp:22" &
   fi
+}
 
+function create-master() {
   echo "Starting master and configuring firewalls"
   gcloud compute firewall-rules create "${MASTER_NAME}-https" \
     --project "${PROJECT}" \
@@ -663,7 +683,9 @@ function kube-up {
   create-certs "${MASTER_RESERVED_IP}"
 
   create-master-instance "${MASTER_RESERVED_IP}" &
+}
 
+function create-nodes-firewall() {
   # Create a single firewall rule for all minions.
   create-firewall-rule "${NODE_TAG}-all" "${CLUSTER_IP_RANGE}" "${NODE_TAG}" &
 
@@ -676,7 +698,9 @@ function kube-up {
   kube::util::wait-for-jobs || {
     echo -e "${color_red}${fail} commands failed.${color_norm}" >&2
   }
+}
 
+function create-nodes-template() {
   echo "Creating minions."
 
   # TODO(zmerlynn): Refactor setting scope flags.
@@ -690,8 +714,12 @@ function kube-up {
   write-node-env
 
   local template_name="${NODE_INSTANCE_PREFIX}-template"
 
   create-node-instance-template $template_name
+}
+
+function create-nodes() {
+  local template_name="${NODE_INSTANCE_PREFIX}-template"
 
   local defaulted_max_instances_per_mig=${MAX_INSTANCES_PER_MIG:-500}
 
@@ -731,10 +759,9 @@ function kube-up {
     "${NODE_INSTANCE_PREFIX}-group" \
     --zone "${ZONE}" \
     --project "${PROJECT}" || true;
+}
 
-  detect-node-names
-  detect-master
-
+function create-autoscaler() {
   # Create autoscaler for nodes if requested
   if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
     METRICS=""
@@ -764,6 +791,11 @@ function kube-up {
   gcloud compute instance-groups managed set-autoscaling "${NODE_INSTANCE_PREFIX}-group" --zone "${ZONE}" --project "${PROJECT}" \
     --min-num-replicas "${last_min_instances}" --max-num-replicas "${last_max_instances}" ${METRICS} || true
   fi
+}
 
+function check-cluster() {
+  detect-node-names
+  detect-master
+
   echo "Waiting up to ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} seconds for cluster initialization."
   echo
@@ -845,7 +877,7 @@ function kube-down {
   fi
 
   # Get the name of the managed instance group template before we delete the
-  # managed instange group. (The name of the managed instnace group template may
+  # managed instance group. (The name of the managed instance group template may
   # change during a cluster upgrade.)
   local template=$(get-template "${PROJECT}" "${ZONE}" "${NODE_INSTANCE_PREFIX}-group")
 
@@ -71,7 +71,7 @@ make multiple zones better supported.
 For the initial implemenation, kube-up must be run multiple times, once for
 each zone. The first kube-up will take place as normal, but then for each
 additional zone the user must run kube-up again, specifying
-`KUBE_SHARE_MASTER=true` and `KUBE_SUBNET_CIDR=172.20.x.0/24`. This will then
+`KUBE_USE_EXISTING_MASTER=true` and `KUBE_SUBNET_CIDR=172.20.x.0/24`. This will then
 create additional nodes in a different zone, but will register them with the
 existing master.
 
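To make the multi-zone flow described in that doc hunk concrete, a hedged example of the two invocations; only KUBE_USE_EXISTING_MASTER and KUBE_SUBNET_CIDR come from the text above, and the concrete CIDR value is illustrative:

    # First zone: normal kube-up, creates the master plus the first node group.
    ./cluster/kube-up.sh

    # Each additional zone: reuse the master and give the new nodes their own
    # subnet. Zone selection is done via the provider's usual zone variable;
    # the CIDR below just fills in the "x" from the proposal text.
    KUBE_USE_EXISTING_MASTER=true KUBE_SUBNET_CIDR=172.20.1.0/24 ./cluster/kube-up.sh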