From 4f6dc99075aa207e0a51974b6e807381d393ea68 Mon Sep 17 00:00:00 2001
From: Jeff Lowdermilk
Date: Thu, 9 Apr 2015 17:07:24 -0700
Subject: [PATCH] Generate kubeconfig for all providers in cluster/ that use
 auth

---
 cluster/aws/util.sh            | 56 ++++++++-------------------
 cluster/azure/util.sh          | 51 ++++++++-----------------
 cluster/common.sh              | 36 +++++++++++++-----
 cluster/gce/util.sh            |  6 +--
 cluster/libvirt-coreos/util.sh |  2 +-
 cluster/rackspace/util.sh      | 31 +++++++--------
 cluster/vagrant/util.sh        | 53 ++++++--------------------
 cluster/vsphere/util.sh        | 69 +++++++++++++---------------
 8 files changed, 114 insertions(+), 190 deletions(-)

diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh
index 579be1474e4..30c6860811d 100644
--- a/cluster/aws/util.sh
+++ b/cluster/aws/util.sh
@@ -20,6 +20,7 @@
 # config-default.sh.
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 source "${KUBE_ROOT}/cluster/aws/${KUBE_CONFIG_FILE-"config-default.sh"}"
+source "${KUBE_ROOT}/cluster/common.sh"
 
 # This removes the final character in bash (somehow)
 AWS_REGION=${ZONE%?}
@@ -265,7 +266,7 @@ function upload-server-tars() {
 
 
 # Ensure that we have a password created for validating to the master. Will
-# read from the kubernetes auth-file for the current context if available.
+# read from kubeconfig for the current context if available.
 #
 # Assumed vars
 #   KUBE_ROOT
@@ -274,17 +275,11 @@ function upload-server-tars() {
 #   KUBE_USER
 #   KUBE_PASSWORD
 function get-password {
-  # go template to extract the auth-path of the current-context user
-  # Note: we save dot ('.') to $dot because the 'with' action overrides dot
-  local template='{{$dot := .}}{{with $ctx := index $dot "current-context"}}{{range $element := (index $dot "contexts")}}{{ if eq .name $ctx }}{{ with $user := .context.user }}{{range $element := (index $dot "users")}}{{ if eq .name $user }}{{ index . "user" "auth-path" }}{{end}}{{end}}{{end}}{{end}}{{end}}{{end}}'
-  local file=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o template --template="${template}")
-  if [[ ! -z "$file" && -r "$file" ]]; then
-    KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
-    KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
-    return
+  get-kubeconfig-basicauth
+  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
+    KUBE_USER=admin
+    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
   fi
-  KUBE_USER=admin
-  KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
 }
 
 # Adds a tag to an AWS resource
@@ -609,44 +604,25 @@ function kube-up {
 
   echo "Kubernetes cluster created."
 
-  local kube_cert="kubecfg.crt"
-  local kube_key="kubecfg.key"
-  local ca_cert="kubernetes.ca.crt"
   # TODO use token instead of kube_auth
-  local kube_auth="kubernetes_auth"
+  export KUBECONFIG="${KUBECONFIG:-${HOME}/.kube/.kubeconfig}"
+  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
+  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
+  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
+  export CONTEXT="aws_${INSTANCE_PREFIX}"
 
   local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
-  local context="${INSTANCE_PREFIX}"
-  local user="${INSTANCE_PREFIX}-admin"
-  local config_dir="${HOME}/.kube/${context}"
 
   # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
   # config file.  Distribute the same way the htpasswd is done.
   (
    mkdir -p "${config_dir}"
    umask 077
-    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@${KUBE_MASTER_IP} sudo cat /srv/kubernetes/kubecfg.crt >"${config_dir}/${kube_cert}" 2>$LOG
-    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@${KUBE_MASTER_IP} sudo cat /srv/kubernetes/kubecfg.key >"${config_dir}/${kube_key}" 2>$LOG
-    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@${KUBE_MASTER_IP} sudo cat /srv/kubernetes/ca.crt >"${config_dir}/${ca_cert}" 2>$LOG
+    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "ubuntu@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>"$LOG"
+    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "ubuntu@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>"$LOG"
+    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "ubuntu@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>"$LOG"
 
-    "${kubectl}" config set-cluster "${context}" --server="https://${KUBE_MASTER_IP}" --certificate-authority="${config_dir}/${ca_cert}" --global
-    "${kubectl}" config set-credentials "${user}" --auth-path="${config_dir}/${kube_auth}" --global
-    "${kubectl}" config set-context "${context}" --cluster="${context}" --user="${user}" --global
-    "${kubectl}" config use-context "${context}" --global
-
-    cat << EOF > "${config_dir}/${kube_auth}"
-{
-  "User": "$KUBE_USER",
-  "Password": "$KUBE_PASSWORD",
-  "CAFile": "${config_dir}/${ca_cert}",
-  "CertFile": "${config_dir}/${kube_cert}",
-  "KeyFile": "${config_dir}/${kube_key}"
-}
-EOF
-
-    chmod 0600 "${config_dir}/${kube_auth}" "${config_dir}/$kube_cert" \
-      "${config_dir}/${kube_key}" "${config_dir}/${ca_cert}"
-    echo "Wrote ${config_dir}/${kube_auth}"
+    create-kubeconfig
   )
 
   echo "Sanity checking cluster..."
@@ -700,7 +676,7 @@ EOF
   echo
   echo -e "${color_yellow}  https://${KUBE_MASTER_IP}"
   echo
-  echo -e "${color_green}The user name and password to use is located in ${config_dir}/${kube_auth}${color_norm}"
+  echo -e "${color_green}The user name and password to use is located in ${KUBECONFIG}.${color_norm}"
   echo
 }
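Two things are worth noting in the AWS hunks above. First, as the hunk stands,
the context line mkdir -p "${config_dir}" survives even though the config_dir
definition is deleted; directory handling for the generated file now lives in
create-kubeconfig, which runs mkdir -p $(dirname "${KUBECONFIG}") itself.
Second, the /tmp/$RANDOM-* staging files only need to exist long enough for
create-kubeconfig to embed their contents (--embed-certs=true). A sketch of
the same fetch-then-embed staging built on mktemp instead of $RANDOM, shown
as an alternative for illustration, not what the patch does:

  # Stage the master's credentials in unique temp files; create-kubeconfig
  # embeds their contents into the kubeconfig, so the files are disposable.
  KUBE_CERT=$(mktemp /tmp/kubecfg-crt.XXXXXX)
  KUBE_KEY=$(mktemp /tmp/kubecfg-key.XXXXXX)
  CA_CERT=$(mktemp /tmp/kubernetes-ca.XXXXXX)
  export KUBE_CERT KUBE_KEY CA_CERT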
diff --git a/cluster/azure/util.sh b/cluster/azure/util.sh
index b9e95f5b576..b57928c5ca2 100644
--- a/cluster/azure/util.sh
+++ b/cluster/azure/util.sh
@@ -21,6 +21,7 @@
 
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 source "${KUBE_ROOT}/cluster/azure/${KUBE_CONFIG_FILE-"config-default.sh"}"
+source "${KUBE_ROOT}/cluster/common.sh"
 
 function azure_call {
   local -a params=()
@@ -242,30 +243,17 @@ function detect-master () {
 }
 
 # Ensure that we have a password created for validating to the master. Will
-# read from $HOME/.kubernetres_auth if available.
+# read from kubeconfig current-context if available.
 #
 # Vars set:
 #   KUBE_USER
 #   KUBE_PASSWORD
 function get-password {
-  local file="$HOME/.kubernetes_auth"
-  if [[ -r "$file" ]]; then
-    KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
-    KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
-    return
-  fi
+  get-kubeconfig-basicauth
+  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
   KUBE_USER=admin
   KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
-
-  # Remove this code, since in all use cases I can see, we are overwriting this
-  # at cluster creation time.
-  cat << EOF > "$file"
-{
-  "User": "$KUBE_USER",
-  "Password": "$KUBE_PASSWORD"
-}
-EOF
-  chmod 0600 "$file"
+  fi
 }
 
 # Generate authentication token for admin user. Will
@@ -432,32 +420,23 @@ function kube-up {
   printf "\n"
   echo "Kubernetes cluster created."
 
-  local kube_cert=".kubecfg.crt"
-  local kube_key=".kubecfg.key"
-  local ca_cert=".kubernetes.ca.crt"
+  export KUBECONFIG="${KUBECONFIG:-${HOME}/.kube/.kubeconfig}"
+  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
+  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
+  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
+  export CONTEXT="azure_${INSTANCE_PREFIX}"
 
   # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
   # config file.  Distribute the same way the htpasswd is done.
   (umask 077
     ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
-      sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
+      sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
     ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
-      sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
+      sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
     ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
-      sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
+      sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null
 
-    cat << EOF > ~/.kubernetes_auth
-{
-  "User": "$KUBE_USER",
-  "Password": "$KUBE_PASSWORD",
-  "CAFile": "$HOME/$ca_cert",
-  "CertFile": "$HOME/$kube_cert",
-  "KeyFile": "$HOME/$kube_key"
-}
-EOF
-
-    chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \
-      "${HOME}/${kube_key}" "${HOME}/${ca_cert}"
+    create-kubeconfig
   )
 
   # Wait for salt on the minions
@@ -482,7 +461,7 @@ EOF
   echo
   echo "  https://${KUBE_MASTER_IP}"
   echo
-  echo "The user name and password to use is located in ~/.kubernetes_auth."
+  echo "The user name and password to use is located in ${KUBECONFIG}."
   echo
 }
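The Azure change above is the same get-password rewrite as AWS, and the
remaining providers below repeat it. Restated outside diff context for
readability (get-kubeconfig-basicauth is called by this patch but defined in
cluster/common.sh, outside this diff):

  function get-password {
    # Populate KUBE_USER/KUBE_PASSWORD from the current kubeconfig
    # context, if one exists.
    get-kubeconfig-basicauth
    if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
      # First run: mint credentials. The one-liner is Python 2 (print
      # statement); SystemRandom draws from os.urandom and yields 16
      # random alphanumeric characters.
      KUBE_USER=admin
      KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
    fi
  }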
diff --git a/cluster/common.sh b/cluster/common.sh
index 56d7d804ac6..076b69325f2 100644
--- a/cluster/common.sh
+++ b/cluster/common.sh
@@ -30,11 +30,12 @@ DEFAULT_KUBECONFIG="${HOME}/.kube/config"
 #   KUBE_PASSWORD
 #   KUBE_MASTER_IP
 #   KUBECONFIG
+#   CONTEXT
 #
+# The following can be omitted for --insecure-skip-tls-verify
 #   KUBE_CERT
 #   KUBE_KEY
 #   CA_CERT
-#   CONTEXT
 function create-kubeconfig() {
   local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
 
@@ -44,14 +45,31 @@ function create-kubeconfig() {
     mkdir -p $(dirname "${KUBECONFIG}")
     touch "${KUBECONFIG}"
   fi
-  "${kubectl}" config set-cluster "${CONTEXT}" --server="https://${KUBE_MASTER_IP}" \
-    --certificate-authority="${CA_CERT}" \
-    --embed-certs=true
-  "${kubectl}" config set-credentials "${CONTEXT}" --username="${KUBE_USER}" \
-    --password="${KUBE_PASSWORD}" \
-    --client-certificate="${KUBE_CERT}" \
-    --client-key="${KUBE_KEY}" \
-    --embed-certs=true
+  local cluster_args=(
+      "--server=https://${KUBE_MASTER_IP}"
+  )
+  if [[ -z "${CA_CERT:-}" ]]; then
+    cluster_args+=("--insecure-skip-tls-verify=true")
+  else
+    cluster_args+=(
+      "--certificate-authority=${CA_CERT}"
+      "--embed-certs=true"
+    )
+  fi
+  local user_args=(
+      "--username=${KUBE_USER}"
+      "--password=${KUBE_PASSWORD}"
+  )
+  if [[ ! -z "${KUBE_CERT:-}" && ! -z "${KUBE_KEY:-}" ]]; then
+    user_args+=(
+      "--client-certificate=${KUBE_CERT}"
+      "--client-key=${KUBE_KEY}"
+      "--embed-certs=true"
+    )
+  fi
+
+  "${kubectl}" config set-cluster "${CONTEXT}" "${cluster_args[@]}"
+  "${kubectl}" config set-credentials "${CONTEXT}" "${user_args[@]}"
   "${kubectl}" config set-context "${CONTEXT}" --cluster="${CONTEXT}" --user="${CONTEXT}"
   "${kubectl}" config use-context "${CONTEXT}" --cluster="${CONTEXT}"
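create-kubeconfig is the core of the patch: the two argument arrays let one
function serve both the TLS-verified providers (GCE, AWS, Azure, Vagrant,
vSphere) and the cert-less Rackspace path below. When all three cert files
are supplied, the four kubectl invocations expand to the following (context
name, IP, paths, and password are illustrative placeholders):

  kubectl config set-cluster aws_kubernetes \
    --server=https://1.2.3.4 \
    --certificate-authority=/tmp/12345-kubernetes.ca.crt \
    --embed-certs=true
  kubectl config set-credentials aws_kubernetes \
    --username=admin \
    --password=sEcReTpAsSwOrD16 \
    --client-certificate=/tmp/12345-kubecfg.crt \
    --client-key=/tmp/12345-kubecfg.key \
    --embed-certs=true
  kubectl config set-context aws_kubernetes \
    --cluster=aws_kubernetes --user=aws_kubernetes
  kubectl config use-context aws_kubernetes --cluster=aws_kubernetes

--embed-certs=true copies the file contents into the kubeconfig instead of
recording the /tmp paths, which is what makes the throwaway staging files
safe to lose.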
-z "${KUBE_KEY:-}" ]]; then + user_args+=( + "--client-certificate=${KUBE_CERT}" + "--client-key=${KUBE_KEY}" + "--embed-certs=true" + ) + fi + + "${kubectl}" config set-cluster "${CONTEXT}" "${cluster_args[@]}" + "${kubectl}" config set-credentials "${CONTEXT}" "${user_args[@]}" "${kubectl}" config set-context "${CONTEXT}" --cluster="${CONTEXT}" --user="${CONTEXT}" "${kubectl}" config use-context "${CONTEXT}" --cluster="${CONTEXT}" diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index d023924f437..cddb03f26ea 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -679,9 +679,9 @@ function kube-up { echo "Kubernetes cluster created." # TODO use token instead of basic auth - export KUBE_CERT="/tmp/kubecfg.crt" - export KUBE_KEY="/tmp/kubecfg.key" - export CA_CERT="/tmp/kubernetes.ca.crt" + export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt" + export KUBE_KEY="/tmp/$RANDOM-kubecfg.key" + export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt" export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}" # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's diff --git a/cluster/libvirt-coreos/util.sh b/cluster/libvirt-coreos/util.sh index d532232ef0c..c8b4116051b 100644 --- a/cluster/libvirt-coreos/util.sh +++ b/cluster/libvirt-coreos/util.sh @@ -235,7 +235,7 @@ function kube-up { rm $domain_xml done - export KUBECONFIG="${HOME}/.kube/.kubeconfig" + export KUBECONFIG="${KUBECONFIG:-${HOME}/.kube/.kubeconfig}" local kubectl="${KUBE_ROOT}/cluster/kubectl.sh" "${kubectl}" config set-cluster libvirt-coreos --server=http://${KUBE_MASTER_IP-}:8080 diff --git a/cluster/rackspace/util.sh b/cluster/rackspace/util.sh index 72a9aab0e12..01ce8528466 100644 --- a/cluster/rackspace/util.sh +++ b/cluster/rackspace/util.sh @@ -20,6 +20,7 @@ # config-default.sh. KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"} +source "${KUBE_ROOT}/cluster/common.sh" verify-prereqs() { # Make sure that prerequisites are installed. @@ -50,29 +51,17 @@ verify-prereqs() { } # Ensure that we have a password created for validating to the master. Will -# read from $HOME/.kubernetres_auth if available. +# read from kubeconfig current-context if available. # # Vars set: # KUBE_USER # KUBE_PASSWORD get-password() { - local file="$HOME/.kubernetes_auth" - if [[ -r "$file" ]]; then - KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]') - KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]') - return + get-kubeconfig-basicauth + if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then + KUBE_USER=admin + KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') fi - KUBE_USER=admin - KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') - - # Store password for reuse. - cat << EOF > "$file" -{ - "User": "$KUBE_USER", - "Password": "$KUBE_PASSWORD" -} -EOF - chmod 0600 "$file" } rax-ssh-key() { @@ -329,6 +318,14 @@ kube-up() { echo "Kubernetes cluster created." + export KUBECONFIG="${KUBECONFIG-${HOME}/.kube/.kubeconfig}" + export KUBE_CERT="" + export KUBE_KEY="" + export CA_CERT="" + export CONTEXT="rackspace_${INSTANCE_PREFIX}" + + create-kubeconfig + # Don't bail on errors, we want to be able to print some info. 
diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh
index 212cd7bc988..db0db1c6cb2 100644
--- a/cluster/vagrant/util.sh
+++ b/cluster/vagrant/util.sh
@@ -18,6 +18,7 @@
 
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 source "${KUBE_ROOT}/cluster/vagrant/${KUBE_CONFIG_FILE-"config-default.sh"}"
+source "${KUBE_ROOT}/cluster/common.sh"
 
 function detect-master () {
   KUBE_MASTER_IP=$MASTER_IP
@@ -252,49 +253,19 @@ function kube-up {
 
   vagrant up
 
-  local kube_cert=".kubecfg.vagrant.crt"
-  local kube_key=".kubecfg.vagrant.key"
-  local ca_cert=".kubernetes.vagrant.ca.crt"
+  export KUBECONFIG="${KUBECONFIG:-${HOME}/.kube/.kubeconfig}"
+  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
+  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
+  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
+  export CONTEXT="vagrant"
 
-  (umask 077
-   vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
-   vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
-   vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
+  (
+   umask 077
+   vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
+   vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
+   vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null
 
-   cat <<EOF >"${HOME}/.kubernetes_vagrant_auth"
-{
-  "User": "$KUBE_USER",
-  "Password": "$KUBE_PASSWORD",
-  "CAFile": "$HOME/$ca_cert",
-  "CertFile": "$HOME/$kube_cert",
-  "KeyFile": "$HOME/$kube_key"
-}
-EOF
-
-   cat <<EOF >"${HOME}/.kubernetes_vagrant_kubeconfig"
-apiVersion: v1
-clusters:
-- cluster:
-    certificate-authority: ${HOME}/$ca_cert
-    server: https://${MASTER_IP}:443
-  name: vagrant
-contexts:
-- context:
-    cluster: vagrant
-    namespace: default
-    user: vagrant
-  name: vagrant
-current-context: "vagrant"
-kind: Config
-preferences: {}
-users:
-- name: vagrant
-  user:
-    auth-path: ${HOME}/.kubernetes_vagrant_auth
-EOF
-
-   chmod 0600 ~/.kubernetes_vagrant_auth "${HOME}/${kube_cert}" \
-     "${HOME}/${kube_key}" "${HOME}/${ca_cert}"
+   create-kubeconfig
   )
 
   verify-cluster
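For Vagrant the patch also retires the hand-written
~/.kubernetes_vagrant_kubeconfig YAML and its auth-path companion file; the
same information now flows through create-kubeconfig. Every provider guards
the default with ${KUBECONFIG:-...}, so a per-provider config file can still
be kept by pre-setting KUBECONFIG before bringing the cluster up, for example
(the path is illustrative):

  # Write the vagrant cluster's entries to a dedicated kubeconfig
  # rather than the shared ~/.kube/.kubeconfig default.
  KUBERNETES_PROVIDER=vagrant \
  KUBECONFIG="${HOME}/.kube/vagrant.kubeconfig" \
    cluster/kube-up.sh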
diff --git a/cluster/vsphere/util.sh b/cluster/vsphere/util.sh
index c866963cd8a..92b2d191d57 100755
--- a/cluster/vsphere/util.sh
+++ b/cluster/vsphere/util.sh
@@ -21,6 +21,7 @@
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 source "${KUBE_ROOT}/cluster/vsphere/config-common.sh"
 source "${KUBE_ROOT}/cluster/vsphere/${KUBE_CONFIG_FILE-"config-default.sh"}"
+source "${KUBE_ROOT}/cluster/common.sh"
 
 # Detect the IP for the master
 #
@@ -169,29 +170,17 @@ function upload-server-tars {
 }
 
 # Ensure that we have a password created for validating to the master. Will
-# read from $HOME/.kubernetes_auth if available.
+# read from kubeconfig if available.
 #
 # Vars set:
 #   KUBE_USER
 #   KUBE_PASSWORD
 function get-password {
-  local file="$HOME/.kubernetes_auth"
-  if [[ -r "$file" ]]; then
-    KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
-    KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
-    return
+  get-kubeconfig-basicauth
+  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
+    KUBE_USER=admin
+    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
   fi
-  KUBE_USER=admin
-  KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
-
-  # Store password for reuse.
-  cat << EOF > "$file"
-{
-  "User": "$KUBE_USER",
-  "Password": "$KUBE_PASSWORD"
-}
-EOF
-  chmod 0600 "$file"
 }
 
 # Run command over ssh
@@ -372,6 +361,25 @@ function kube-up {
     printf " OK\n"
   done
 
+  echo "Kubernetes cluster created."
+
+  # TODO use token instead of basic auth
+  export KUBECONFIG="${KUBECONFIG:-"${HOME}/.kube/.kubeconfig"}"
+  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
+  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
+  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
+  export CONTEXT="vsphere_${INSTANCE_PREFIX}"
+
+  (
+    umask 077
+
+    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
+    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
+    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null
+
+    create-kubeconfig
+  )
+
   echo
   echo "Sanity checking cluster..."
 
@@ -394,33 +402,8 @@ function kube-up {
   echo
   echo "  https://${KUBE_MASTER_IP}"
   echo
-  echo "The user name and password to use is located in ~/.kubernetes_auth."
+  echo "The user name and password to use is located in ${KUBECONFIG}"
   echo
-
-  local kube_cert=".kubecfg.crt"
-  local kube_key=".kubecfg.key"
-  local ca_cert=".kubernetes.ca.crt"
-
-  (
-    umask 077
-
-    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
-    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
-    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
-
-    cat << EOF > ~/.kubernetes_auth
-    {
-      "User": "$KUBE_USER",
-      "Password": "$KUBE_PASSWORD",
-      "CAFile": "$HOME/$ca_cert",
-      "CertFile": "$HOME/$kube_cert",
-      "KeyFile": "$HOME/$kube_key"
-    }
-EOF
-
-    chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \
-      "${HOME}/${kube_key}" "${HOME}/${ca_cert}"
-  )
 }
 
 # Delete a kubernetes cluster
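After kube-up completes on any of these providers, the generated entries can
be inspected with the same kubectl wrapper the scripts use (the context name
varies by provider and INSTANCE_PREFIX):

  # Show the merged config: cluster, context, and user entries, with the
  # embedded certificates and the basic-auth credentials.
  cluster/kubectl.sh config view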