From 438216844f89b286a59a223c40388498e03d611c Mon Sep 17 00:00:00 2001
From: Jeff Lowdermilk
Date: Fri, 21 Aug 2015 18:47:31 -0700
Subject: [PATCH] Stop reusing basic auth on cluster create

---
 cluster/aws/util.sh            | 24 ++++---------------
 cluster/azure/util.sh          | 20 ++++------------
 cluster/common.sh              | 18 +++++++++++++++
 cluster/gce/util.sh            | 42 +++++-----------------------------
 cluster/gke/util.sh            | 19 ---------------
 cluster/juju/util.sh           |  4 ----
 cluster/kube-util.sh           |  5 ----
 cluster/libvirt-coreos/util.sh |  2 +-
 cluster/rackspace/util.sh      | 18 +++------------
 cluster/ubuntu/util.sh         | 16 +------------
 cluster/vagrant/util.sh        | 15 ++++--------
 cluster/vsphere/util.sh        | 20 ++++------------
 12 files changed, 46 insertions(+), 157 deletions(-)

diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh
index c7e4cb717a6..3165813f873 100644
--- a/cluster/aws/util.sh
+++ b/cluster/aws/util.sh
@@ -544,24 +544,6 @@ function upload-server-tars() {
   SALT_TAR_URL="${s3_url_base}/${AWS_S3_BUCKET}/${salt_tar_path}"
 }
 
-
-# Ensure that we have a password created for validating to the master. Will
-# read from kubeconfig for the current context if available.
-#
-# Assumed vars
-#   KUBE_ROOT
-#
-# Vars set:
-#   KUBE_USER
-#   KUBE_PASSWORD
-function get-password {
-  get-kubeconfig-basicauth
-  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
-    KUBE_USER=admin
-    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
-  fi
-}
-
 # Adds a tag to an AWS resource
 # usage: add-tag
 function add-tag {
@@ -681,7 +663,7 @@ function kube-up {
 
   ensure-iam-profiles
 
-  get-password
+  gen-kube-basicauth
 
   if [[ ! -f "$AWS_SSH_KEY" ]]; then
     ssh-keygen -f "$AWS_SSH_KEY" -N ''
@@ -1082,6 +1064,8 @@ function kube-up {
     done
   done
 
+  # ensures KUBECONFIG is set
+  get-kubeconfig-basicauth
   echo
   echo -e "${color_green}Kubernetes cluster is running.  The master is running at:"
   echo
@@ -1241,7 +1225,7 @@ function kube-push {
     echo "sudo salt --force-color '*' state.highstate"
   ) | ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ${SSH_USER}@${KUBE_MASTER_IP} sudo bash
 
-  get-password
+  get-kubeconfig-basicauth
 
   echo
   echo "Kubernetes cluster is running.  The master is running at:"
diff --git a/cluster/azure/util.sh b/cluster/azure/util.sh
index 849799f6585..23fe542c20b 100644
--- a/cluster/azure/util.sh
+++ b/cluster/azure/util.sh
@@ -242,20 +242,6 @@ function detect-master () {
   echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
 }
 
-# Ensure that we have a password created for validating to the master. Will
-# read from kubeconfig current-context if available.
-#
-# Vars set:
-#   KUBE_USER
-#   KUBE_PASSWORD
-function get-password {
-  get-kubeconfig-basicauth
-  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
-    KUBE_USER=admin
-    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
-  fi
-}
-
 # Instantiate a kubernetes cluster
 #
 # Assumed vars
@@ -268,7 +254,7 @@ function kube-up {
 
   ensure-temp-dir
 
-  get-password
+  gen-kube-basicauth
   python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
     -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
   local htpasswd
@@ -442,6 +428,8 @@ function kube-up {
     done
   done
 
+  # ensures KUBECONFIG is set
+  get-kubeconfig-basicauth
   echo
   echo "Kubernetes cluster is running.  The master is running at:"
   echo
@@ -486,7 +474,7 @@ function kube-down {
 #   echo "sudo salt --force-color '*' state.highstate"
 # ) | gcutil ssh --project "$PROJECT" --zone "$ZONE" "$KUBE_MASTER" sudo bash
 
-#  get-password
+#  get-kubeconfig-basicauth
 
 #  echo
 #  echo "Kubernetes cluster is running.  The master is running at:"
diff --git a/cluster/common.sh b/cluster/common.sh
index 8dd146e1ffe..7a08180a8e2 100755
--- a/cluster/common.sh
+++ b/cluster/common.sh
@@ -146,6 +146,16 @@ function get-kubeconfig-basicauth() {
   fi
 }
 
+# Generate basic auth user and password.
+
+# Vars set:
+#   KUBE_USER
+#   KUBE_PASSWORD
+function gen-kube-basicauth() {
+  KUBE_USER=admin
+  KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
+}
+
 # Get the bearer token for the current-context in kubeconfig if one exists.
 # Assumed vars:
 #   KUBECONFIG  # if unset, defaults to global
@@ -171,6 +181,14 @@ function get-kubeconfig-bearertoken() {
   fi
 }
 
+# Generate bearer token.
+#
+# Vars set:
+#   KUBE_BEARER_TOKEN
+function gen-kube-bearertoken() {
+  KUBE_BEARER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
+}
+
 # Get the master IP for the current-context in kubeconfig if one exists.
 #
 # Assumed vars:
diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh
index bb035a52861..de4983d62f3 100755
--- a/cluster/gce/util.sh
+++ b/cluster/gce/util.sh
@@ -304,38 +304,6 @@ function detect-master () {
   echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
 }
 
-# Ensure that we have a password created for validating to the master. Will
-# read from kubeconfig for the current context if available.
-#
-# Assumed vars
-#   KUBE_ROOT
-#
-# Vars set:
-#   KUBE_USER
-#   KUBE_PASSWORD
-function get-password {
-  get-kubeconfig-basicauth
-  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
-    KUBE_USER=admin
-    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
-  fi
-}
-
-# Ensure that we have a bearer token created for validating to the master.
-# Will read from kubeconfig for the current context if available.
-#
-# Assumed vars
-#   KUBE_ROOT
-#
-# Vars set:
-#   KUBE_BEARER_TOKEN
-function get-bearer-token() {
-  get-kubeconfig-bearertoken
-  if [[ -z "${KUBE_BEARER_TOKEN:-}" ]]; then
-    KUBE_BEARER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
-  fi
-}
-
 # Wait for background jobs to finish. Exit with
 # an error status if any of the jobs failed.
 function wait-for-jobs {
@@ -577,8 +545,8 @@ function kube-up {
   ensure-temp-dir
   detect-project
 
-  get-password
-  get-bearer-token
+  gen-kube-basicauth
+  gen-kube-bearertoken
 
   # Make sure we have the tar files staged on Google Storage
   find-release-tars
@@ -758,6 +726,8 @@ function kube-up {
    create-kubeconfig
   )
 
+  # ensures KUBECONFIG is set
+  get-kubeconfig-basicauth
   echo
   echo -e "${color_green}Kubernetes cluster is running.  The master is running at:"
   echo
@@ -1035,8 +1005,8 @@ function prepare-push() {
   detect-project
   detect-master
   detect-minion-names
-  get-password
-  get-bearer-token
+  get-kubeconfig-basicauth
+  get-kubeconfig-bearertoken
 
   # Make sure we have the tar files staged on Google Storage
   tars_from_version
diff --git a/cluster/gke/util.sh b/cluster/gke/util.sh
index db0adf9b2b5..b2812adae0f 100755
--- a/cluster/gke/util.sh
+++ b/cluster/gke/util.sh
@@ -189,25 +189,6 @@ function test-setup() {
     --network="${NETWORK}"
 }
 
-# Ensure that we have a password created for validating to the master.
-#
-# Assumed vars:
-#   ZONE
-#   CLUSTER_NAME
-# Vars set:
-#   KUBE_USER
-#   KUBE_PASSWORD
-function get-password() {
-  echo "... in gke:get-password()" >&2
-  detect-project >&2
-  KUBE_USER=$("${GCLOUD}" "${CMD_GROUP}" container clusters describe \
-    --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
-    | grep user | cut -f 4 -d ' ')
-  KUBE_PASSWORD=$("${GCLOUD}" "${CMD_GROUP}" container clusters describe \
-    --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
-    | grep password | cut -f 4 -d ' ')
-}
-
 # Detect the IP for the master. Note that on GKE, we don't know the name of the
 # master, so KUBE_MASTER is not set.
 #
diff --git a/cluster/juju/util.sh b/cluster/juju/util.sh
index 20c024856b5..9407b44004f 100755
--- a/cluster/juju/util.sh
+++ b/cluster/juju/util.sh
@@ -41,10 +41,6 @@ function build-local() {
     cp -v $OUTPUT_DIR/* cluster/juju/charms/trusty/kubernetes-master/files/output
 }
 
-function get-password() {
-    echo "TODO: Assign username/password security"
-}
-
 function kube-up() {
   build-local
   if [[ -d "~/.juju/current-env" ]]; then
diff --git a/cluster/kube-util.sh b/cluster/kube-util.sh
index 04b32e59b4d..8bbb3b97ca2 100644
--- a/cluster/kube-util.sh
+++ b/cluster/kube-util.sh
@@ -91,11 +91,6 @@ function test-teardown {
   echo "TODO: test-teardown" 1>&2
 }
 
-# Set the {KUBE_USER} and {KUBE_PASSWORD} environment values required to interact with provider
-function get-password {
-  echo "TODO: get-password" 1>&2
-}
-
 # Providers util.sh scripts should define functions that override the above default functions impls
 if [ -n "${KUBERNETES_PROVIDER}" ]; then
   PROVIDER_UTILS="${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
diff --git a/cluster/libvirt-coreos/util.sh b/cluster/libvirt-coreos/util.sh
index f424d89ddbf..dda855a1763 100644
--- a/cluster/libvirt-coreos/util.sh
+++ b/cluster/libvirt-coreos/util.sh
@@ -183,7 +183,7 @@ function wait-cluster-readiness {
 function kube-up {
   detect-master
   detect-minions
-  get-kubeconfig-bearertoken
+  gen-kube-bearertoken
   initialize-pool keep_base_image
   initialize-network
diff --git a/cluster/rackspace/util.sh b/cluster/rackspace/util.sh
index 0f94dce686e..55e0cf8b536 100644
--- a/cluster/rackspace/util.sh
+++ b/cluster/rackspace/util.sh
@@ -51,20 +51,6 @@ verify-prereqs() {
   fi
 }
 
-# Ensure that we have a password created for validating to the master. Will
-# read from kubeconfig current-context if available.
-#
-# Vars set:
-#   KUBE_USER
-#   KUBE_PASSWORD
-get-password() {
-  get-kubeconfig-basicauth
-  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
-    KUBE_USER=admin
-    KUBE_PASSWORD=$(python2.7 -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
-  fi
-}
-
 rax-ssh-key() {
   if [ ! -f $HOME/.ssh/${SSH_KEY_NAME} ]; then
     echo "cluster/rackspace/util.sh: Generating SSH KEY ${HOME}/.ssh/${SSH_KEY_NAME}"
@@ -301,7 +287,7 @@ kube-up() {
   KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
   trap "rm -rf ${KUBE_TEMP}" EXIT
 
-  get-password
+  gen-kube-basicauth
   python2.7 $(dirname $0)/../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $KUBE_USER $KUBE_PASSWORD
   HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd)
 
@@ -365,6 +351,8 @@ kube-up() {
 
   detect-minions
 
+  # ensures KUBECONFIG is set
+  get-kubeconfig-basicauth
   echo "All minions may not be online yet, this is okay."
   echo
   echo "Kubernetes cluster is running.  The master is running at:"
diff --git a/cluster/ubuntu/util.sh b/cluster/ubuntu/util.sh
index 3a7f11f970e..07bcb80c255 100755
--- a/cluster/ubuntu/util.sh
+++ b/cluster/ubuntu/util.sh
@@ -260,20 +260,6 @@ FLANNEL_OPTS=""
 EOF
 }
 
-# Ensure that we have a password created for validating to the master. Will
-# read from ${KUBECONFIG:-$DEFAULT_KUBECONFIG} if available.
-#
-# Vars set:
-#   KUBE_USER
-#   KUBE_PASSWORD
-function get-password {
-  get-kubeconfig-basicauth
-  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
-    KUBE_USER=admin
-    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
-  fi
-}
-
 # Detect the IP for the master
 #
 # Assumed vars:
@@ -360,7 +346,7 @@ function kube-up() {
   source "${KUBE_ROOT}/cluster/common.sh"
 
   # set kubernetes user and password
-  get-password
+  gen-kube-basicauth
 
   create-kubeconfig
 }
diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh
index 6902c51ab65..15bd0e1a3ea 100644
--- a/cluster/vagrant/util.sh
+++ b/cluster/vagrant/util.sh
@@ -253,19 +253,21 @@ function verify-cluster {
   }
 
   (
+    # ensures KUBECONFIG is set
+    get-kubeconfig-basicauth
     echo
     echo "Kubernetes cluster is running.  The master is running at:"
     echo
     echo "  https://${MASTER_IP}"
     echo
-    echo "The user name and password to use is located in ~/.kubernetes_vagrant_auth."
+    echo "The user name and password to use is located in ${KUBECONFIG}"
     echo
   )
 }
 
 # Instantiate a kubernetes cluster
 function kube-up {
-  get-password
+  gen-kube-basicauth
   get-tokens
 
   create-provision-scripts
@@ -295,7 +297,7 @@ function kube-down {
 
 # Update a kubernetes cluster with latest source
 function kube-push {
-  get-password
+  get-kubeconfig-basicauth
   create-provision-scripts
   vagrant provision
 }
@@ -316,13 +318,6 @@ function test-teardown {
   kube-down
 }
 
-# Set the {user} and {password} environment values required to interact with provider
-function get-password {
-  export KUBE_USER=vagrant
-  export KUBE_PASSWORD=vagrant
-  echo "Using credentials: $KUBE_USER:$KUBE_PASSWORD" 1>&2
-}
-
 # Find the minion name based on the IP address
 function find-vagrant-name-by-ip {
   local ip="$1"
diff --git a/cluster/vsphere/util.sh b/cluster/vsphere/util.sh
index 95c8fac035b..5d3ebc56f27 100755
--- a/cluster/vsphere/util.sh
+++ b/cluster/vsphere/util.sh
@@ -169,20 +169,6 @@ function upload-server-tars {
   done
 }
 
-# Ensure that we have a password created for validating to the master. Will
-# read from kubeconfig if available.
-#
-# Vars set:
-#   KUBE_USER
-#   KUBE_PASSWORD
-function get-password {
-  get-kubeconfig-basicauth
-  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
-    KUBE_USER=admin
-    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
-  fi
-}
-
 # Run command over ssh
 function kube-ssh {
   local host="$1"
@@ -264,7 +250,7 @@ function kube-up {
 
   ensure-temp-dir
 
-  get-password
+  gen-kube-basicauth
   python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
     -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
   local htpasswd
@@ -395,6 +381,8 @@ function kube-up {
     }
   done
 
+  # ensures KUBECONFIG is set
+  get-kubeconfig-basicauth
   echo
   echo "Kubernetes cluster is running.  The master is running at:"
   echo
@@ -434,7 +422,7 @@ function kube-push {
     echo "sudo salt --force-color '*' state.highstate"
   ) | kube-ssh "${KUBE_MASTER_IP}"
 
-  get-password
+  get-kubeconfig-basicauth
 
   echo
   echo "Kubernetes cluster is running.  The master is running at:"
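
For context, a minimal sketch of the credential flow this patch establishes for a provider's kube-up. It is not part of the patch; it only strings together the helpers visible above (gen-kube-basicauth, gen-kube-bearertoken, create-kubeconfig, get-kubeconfig-basicauth) and assumes the provider script has KUBE_ROOT set and sources cluster/common.sh, as the ubuntu provider does:

    # Sketch only: credentials are generated fresh on every cluster create,
    # then read back from kubeconfig rather than reused from a previous cluster.
    source "${KUBE_ROOT}/cluster/common.sh"

    gen-kube-basicauth         # sets KUBE_USER / KUBE_PASSWORD to new random values
    gen-kube-bearertoken       # sets KUBE_BEARER_TOKEN to a new random token

    # ... provider-specific steps bring the master up with these credentials ...

    create-kubeconfig          # records the new credentials in kubeconfig
    get-kubeconfig-basicauth   # reads them back; ensures KUBECONFIG is set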