Merge pull request #32873 from jszczepkowski/ha-delete-nodes2

Automatic merge from submit-queue

Implemented KUBE_DELETE_NODES flag in kube-down.

Implemented the KUBE_DELETE_NODES flag in the kube-down script.
It prevents removal of nodes when shutting down an HA master replica.
Kubernetes Submit Queue 2016-09-19 01:08:18 -07:00 committed by GitHub
commit 87c2650038
3 changed files with 48 additions and 42 deletions
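
With this flag a single HA master replica can be brought down without deleting the cluster's nodes. A minimal usage sketch, assuming the standard GCE cluster scripts; the KUBE_GCE_ZONE variable and the example zone are assumptions used to pick the replica's zone, not part of this change:

  # Remove only this master replica; keep nodes, instance groups, and templates.
  # (KUBE_GCE_ZONE and the zone value are illustrative assumptions.)
  KUBE_DELETE_NODES=false KUBE_GCE_ZONE=europe-west1-c ./cluster/kube-down.sh

  # Default behavior (flag unset, i.e. true): full teardown, nodes included.
  ./cluster/kube-down.sh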

View File

@@ -34,6 +34,7 @@ NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
 REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
 PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
 PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
+KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
 MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
 NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-debian}}

View File

@@ -35,6 +35,7 @@ REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
 KUBE_APISERVER_REQUEST_TIMEOUT=300
 PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
 PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
+KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
 MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
 NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-debian}}
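
In both config files the flag defaults to true through bash parameter expansion, so existing kube-down invocations keep deleting nodes unless the caller opts out. A minimal sketch of that expansion outside the real scripts (the echo lines are only illustration):

  unset KUBE_DELETE_NODES
  echo "${KUBE_DELETE_NODES:-true}"   # prints: true  (unset falls back to the default)

  KUBE_DELETE_NODES=false
  echo "${KUBE_DELETE_NODES:-true}"   # prints: false (an explicit value wins)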

View File

@@ -1132,34 +1132,36 @@ function kube-down() {
   echo "Bringing down cluster"
   set +e  # Do not stop on error
 
-  # Get the name of the managed instance group template before we delete the
-  # managed instance group. (The name of the managed instance group template may
-  # change during a cluster upgrade.)
-  local templates=$(get-template "${PROJECT}")
+  if [[ "${KUBE_DELETE_NODES:-}" != "false" ]]; then
+    # Get the name of the managed instance group template before we delete the
+    # managed instance group. (The name of the managed instance group template may
+    # change during a cluster upgrade.)
+    local templates=$(get-template "${PROJECT}")
 
-  for group in ${INSTANCE_GROUPS[@]:-}; do
-    if gcloud compute instance-groups managed describe "${group}" --project "${PROJECT}" --zone "${ZONE}" &>/dev/null; then
-      gcloud compute instance-groups managed delete \
-        --project "${PROJECT}" \
-        --quiet \
-        --zone "${ZONE}" \
-        "${group}" &
-    fi
-  done
+    for group in ${INSTANCE_GROUPS[@]:-}; do
+      if gcloud compute instance-groups managed describe "${group}" --project "${PROJECT}" --zone "${ZONE}" &>/dev/null; then
+        gcloud compute instance-groups managed delete \
+          --project "${PROJECT}" \
+          --quiet \
+          --zone "${ZONE}" \
+          "${group}" &
+      fi
+    done
 
-  # Wait for last batch of jobs
-  kube::util::wait-for-jobs || {
-    echo -e "Failed to delete instance group(s)." >&2
-  }
+    # Wait for last batch of jobs
+    kube::util::wait-for-jobs || {
+      echo -e "Failed to delete instance group(s)." >&2
+    }
 
-  for template in ${templates[@]:-}; do
-    if gcloud compute instance-templates describe --project "${PROJECT}" "${template}" &>/dev/null; then
-      gcloud compute instance-templates delete \
-        --project "${PROJECT}" \
-        --quiet \
-        "${template}"
-    fi
-  done
+    for template in ${templates[@]:-}; do
+      if gcloud compute instance-templates describe --project "${PROJECT}" "${template}" &>/dev/null; then
+        gcloud compute instance-templates delete \
+          --project "${PROJECT}" \
+          --quiet \
+          "${template}"
+      fi
+    done
+  fi
 
   local -r REPLICA_NAME="$(get-replica-name)"
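
The new guard only skips node cleanup when the flag is the literal string "false"; unset, empty, or any other value follows the old code path. A standalone sketch of that test (the echo messages are placeholders, not script output):

  for v in false true ""; do
    KUBE_DELETE_NODES="$v"
    if [[ "${KUBE_DELETE_NODES:-}" != "false" ]]; then
      echo "'$v' -> delete instance groups and templates"
    else
      echo "'$v' -> keep node resources (HA master replica shutdown)"
    fi
  done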
@@ -1261,23 +1263,25 @@ function kube-down() {
     fi
   fi
 
-  # Find out what minions are running.
-  local -a minions
-  minions=( $(gcloud compute instances list \
-                --project "${PROJECT}" --zones "${ZONE}" \
-                --regexp "${NODE_INSTANCE_PREFIX}-.+" \
-                --format='value(name)') )
-  # If any minions are running, delete them in batches.
-  while (( "${#minions[@]}" > 0 )); do
-    echo Deleting nodes "${minions[*]::${batch}}"
-    gcloud compute instances delete \
-      --project "${PROJECT}" \
-      --quiet \
-      --delete-disks boot \
-      --zone "${ZONE}" \
-      "${minions[@]::${batch}}"
-    minions=( "${minions[@]:${batch}}" )
-  done
+  if [[ "${KUBE_DELETE_NODES:-}" != "false" ]]; then
+    # Find out what minions are running.
+    local -a minions
+    minions=( $(gcloud compute instances list \
+                  --project "${PROJECT}" --zones "${ZONE}" \
+                  --regexp "${NODE_INSTANCE_PREFIX}-.+" \
+                  --format='value(name)') )
+    # If any minions are running, delete them in batches.
+    while (( "${#minions[@]}" > 0 )); do
+      echo Deleting nodes "${minions[*]::${batch}}"
+      gcloud compute instances delete \
+        --project "${PROJECT}" \
+        --quiet \
+        --delete-disks boot \
+        --zone "${ZONE}" \
+        "${minions[@]::${batch}}"
+      minions=( "${minions[@]:${batch}}" )
+    done
+  fi
 
   # Delete routes.
   local -a routes
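
Node VMs are removed in slices of ${batch} (a local variable set near the top of kube-down): each iteration hands the first ${batch} names to gcloud and then drops them from the array. A self-contained sketch of that slicing pattern, with made-up node names, a tiny batch size, and no gcloud call:

  batch=3
  minions=( node-1 node-2 node-3 node-4 node-5 node-6 node-7 )
  while (( "${#minions[@]}" > 0 )); do
    echo "Deleting nodes: ${minions[*]::${batch}}"   # first ${batch} elements
    minions=( "${minions[@]:${batch}}" )             # drop them and loop again
  done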