Stop reusing basic auth on cluster create

Jeff Lowdermilk 2015-08-21 18:47:31 -07:00
parent 185b5af26f
commit 438216844f
12 changed files with 46 additions and 157 deletions

View File

@@ -544,24 +544,6 @@ function upload-server-tars() {
   SALT_TAR_URL="${s3_url_base}/${AWS_S3_BUCKET}/${salt_tar_path}"
 }
-# Ensure that we have a password created for validating to the master. Will
-# read from kubeconfig for the current context if available.
-#
-# Assumed vars
-#   KUBE_ROOT
-#
-# Vars set:
-#   KUBE_USER
-#   KUBE_PASSWORD
-function get-password {
-  get-kubeconfig-basicauth
-  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
-    KUBE_USER=admin
-    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
-  fi
-}
 # Adds a tag to an AWS resource
 # usage: add-tag <resource-id> <tag-name> <tag-value>
 function add-tag {
@@ -681,7 +663,7 @@ function kube-up {
   ensure-iam-profiles
-  get-password
+  gen-kube-basicauth
   if [[ ! -f "$AWS_SSH_KEY" ]]; then
     ssh-keygen -f "$AWS_SSH_KEY" -N ''
@@ -1082,6 +1064,8 @@ function kube-up {
     done
   done
+  # ensures KUBECONFIG is set
+  get-kubeconfig-basicauth
   echo
   echo -e "${color_green}Kubernetes cluster is running. The master is running at:"
   echo
@@ -1241,7 +1225,7 @@ function kube-push {
     echo "sudo salt --force-color '*' state.highstate"
   ) | ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ${SSH_USER}@${KUBE_MASTER_IP} sudo bash
-  get-password
+  get-kubeconfig-basicauth
  echo
  echo "Kubernetes cluster is running. The master is running at:"

View File

@@ -242,20 +242,6 @@ function detect-master () {
   echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
 }
-# Ensure that we have a password created for validating to the master. Will
-# read from kubeconfig current-context if available.
-#
-# Vars set:
-#   KUBE_USER
-#   KUBE_PASSWORD
-function get-password {
-  get-kubeconfig-basicauth
-  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
-    KUBE_USER=admin
-    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
-  fi
-}
 # Instantiate a kubernetes cluster
 #
 # Assumed vars
@@ -268,7 +254,7 @@ function kube-up {
   ensure-temp-dir
-  get-password
+  gen-kube-basicauth
   python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
     -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
   local htpasswd
@@ -442,6 +428,8 @@ function kube-up {
     done
   done
+  # ensures KUBECONFIG is set
+  get-kubeconfig-basicauth
   echo
   echo "Kubernetes cluster is running. The master is running at:"
   echo
@@ -486,7 +474,7 @@ function kube-down {
 #   echo "sudo salt --force-color '*' state.highstate"
 # ) | gcutil ssh --project "$PROJECT" --zone "$ZONE" "$KUBE_MASTER" sudo bash
-#   get-password
+#   get-kubeconfig-basicauth
 #   echo
 #   echo "Kubernetes cluster is running. The master is running at:"

View File

@@ -146,6 +146,16 @@ function get-kubeconfig-basicauth() {
   fi
 }
+# Generate basic auth user and password.
+# Vars set:
+#   KUBE_USER
+#   KUBE_PASSWORD
+function gen-kube-basicauth() {
+  KUBE_USER=admin
+  KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
+}
 # Get the bearer token for the current-context in kubeconfig if one exists.
 # Assumed vars:
 #   KUBECONFIG  # if unset, defaults to global
@@ -171,6 +181,14 @@ function get-kubeconfig-bearertoken() {
   fi
 }
+# Generate bearer token.
+#
+# Vars set:
+#   KUBE_BEARER_TOKEN
+function gen-kube-bearertoken() {
+  KUBE_BEARER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
+}
 # Get the master IP for the current-context in kubeconfig if one exists.
 #
 # Assumed vars:
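
Both generators are simple by design. gen-kube-basicauth draws 16 alphanumeric characters from Python's random.SystemRandom (a CSPRNG; note the print statement makes the one-liner Python 2 specific), and gen-kube-bearertoken base64-encodes 128 random bytes, strips the non-alphanumeric =, + and / characters, and keeps the first 32 bytes. Since base64 wraps lines at 76 columns, those first 32 bytes land before any newline in practice, so the result is a 32-character alphanumeric token. A quick shape check (a sketch, not part of the commit):

# Reproduce the token pipeline and verify its shape.
token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "length: ${#token}"                                  # prints 32
[[ "$token" =~ ^[A-Za-z0-9]+$ ]] && echo "alphanumeric"   # no =, + or /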

View File

@@ -304,38 +304,6 @@ function detect-master () {
   echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
 }
-# Ensure that we have a password created for validating to the master. Will
-# read from kubeconfig for the current context if available.
-#
-# Assumed vars
-#   KUBE_ROOT
-#
-# Vars set:
-#   KUBE_USER
-#   KUBE_PASSWORD
-function get-password {
-  get-kubeconfig-basicauth
-  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
-    KUBE_USER=admin
-    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
-  fi
-}
-# Ensure that we have a bearer token created for validating to the master.
-# Will read from kubeconfig for the current context if available.
-#
-# Assumed vars
-#   KUBE_ROOT
-#
-# Vars set:
-#   KUBE_BEARER_TOKEN
-function get-bearer-token() {
-  get-kubeconfig-bearertoken
-  if [[ -z "${KUBE_BEARER_TOKEN:-}" ]]; then
-    KUBE_BEARER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
-  fi
-}
 # Wait for background jobs to finish. Exit with
 # an error status if any of the jobs failed.
 function wait-for-jobs {
@@ -577,8 +545,8 @@ function kube-up {
   ensure-temp-dir
   detect-project
-  get-password
-  get-bearer-token
+  gen-kube-basicauth
+  gen-kube-bearertoken
   # Make sure we have the tar files staged on Google Storage
   find-release-tars
@@ -758,6 +726,8 @@ function kube-up {
     create-kubeconfig
   )
+  # ensures KUBECONFIG is set
+  get-kubeconfig-basicauth
   echo
   echo -e "${color_green}Kubernetes cluster is running. The master is running at:"
   echo
@@ -1035,8 +1005,8 @@ function prepare-push() {
   detect-project
   detect-master
   detect-minion-names
-  get-password
-  get-bearer-token
+  get-kubeconfig-basicauth
+  get-kubeconfig-bearertoken
   # Make sure we have the tar files staged on Google Storage
   tars_from_version
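
The gce hunks make the intended split explicit: kube-up mints new credentials with the gen-* helpers, while prepare-push re-reads the ones the cluster already has with the get-kubeconfig-* helpers. Purely for illustration, reading basic-auth credentials back out of a kubeconfig with a recent kubectl could look like the following; the real reader is get-kubeconfig-basicauth in the common helpers, which also handles the current context:

# Illustrative only: pull the first user's basic-auth credentials from
# kubeconfig (assumes a kubectl new enough for --raw and jsonpath).
KUBE_USER=$(kubectl config view --raw -o jsonpath='{.users[0].user.username}')
KUBE_PASSWORD=$(kubectl config view --raw -o jsonpath='{.users[0].user.password}')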

View File

@@ -189,25 +189,6 @@ function test-setup() {
     --network="${NETWORK}"
 }
-# Ensure that we have a password created for validating to the master.
-#
-# Assumed vars:
-#   ZONE
-#   CLUSTER_NAME
-# Vars set:
-#   KUBE_USER
-#   KUBE_PASSWORD
-function get-password() {
-  echo "... in gke:get-password()" >&2
-  detect-project >&2
-  KUBE_USER=$("${GCLOUD}" "${CMD_GROUP}" container clusters describe \
-    --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
-    | grep user | cut -f 4 -d ' ')
-  KUBE_PASSWORD=$("${GCLOUD}" "${CMD_GROUP}" container clusters describe \
-    --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
-    | grep password | cut -f 4 -d ' ')
-}
 # Detect the IP for the master. Note that on GKE, we don't know the name of the
 # master, so KUBE_MASTER is not set.
 #
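
The deleted gke helper scraped human-readable gcloud output with grep and cut, which breaks as soon as the output format shifts. If you need those values today, asking gcloud for the fields directly is more robust; a sketch, assuming a gcloud release with --format=value() support and the same variables as above:

# Query the masterAuth fields instead of scraping text output.
KUBE_USER=$("${GCLOUD}" "${CMD_GROUP}" container clusters describe \
  "${CLUSTER_NAME}" --project="${PROJECT}" --zone="${ZONE}" \
  --format='value(masterAuth.username)')
KUBE_PASSWORD=$("${GCLOUD}" "${CMD_GROUP}" container clusters describe \
  "${CLUSTER_NAME}" --project="${PROJECT}" --zone="${ZONE}" \
  --format='value(masterAuth.password)')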

View File

@@ -41,10 +41,6 @@ function build-local() {
   cp -v $OUTPUT_DIR/* cluster/juju/charms/trusty/kubernetes-master/files/output
 }
-function get-password() {
-  echo "TODO: Assign username/password security"
-}
 function kube-up() {
   build-local
   if [[ -d "~/.juju/current-env" ]]; then

View File

@@ -91,11 +91,6 @@ function test-teardown {
   echo "TODO: test-teardown" 1>&2
 }
-# Set the {KUBE_USER} and {KUBE_PASSWORD} environment values required to interact with provider
-function get-password {
-  echo "TODO: get-password" 1>&2
-}
 # Providers util.sh scripts should define functions that override the above default functions impls
 if [ -n "${KUBERNETES_PROVIDER}" ]; then
   PROVIDER_UTILS="${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
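
Deleting get-password here removes it from the provider contract entirely, since this file defines the provider-neutral defaults. The override mechanism itself is plain shell, as the tail of the file shows: defaults are defined first, then the provider's util.sh is sourced, and bash keeps only the most recent definition of each function. A minimal sketch of that pattern (function name illustrative):

# Default implementation; a provider's util.sh may redefine it.
function detect-master {
  echo "TODO: detect-master" 1>&2
}
# Sourcing the provider script last lets its definitions win.
if [ -n "${KUBERNETES_PROVIDER}" ]; then
  PROVIDER_UTILS="${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
  [ -f "${PROVIDER_UTILS}" ] && source "${PROVIDER_UTILS}"
fi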

View File

@@ -183,7 +183,7 @@ function wait-cluster-readiness {
 function kube-up {
   detect-master
   detect-minions
-  get-kubeconfig-bearertoken
+  gen-kube-bearertoken
   initialize-pool keep_base_image
   initialize-network

View File

@@ -51,20 +51,6 @@ verify-prereqs() {
   fi
 }
-# Ensure that we have a password created for validating to the master. Will
-# read from kubeconfig current-context if available.
-#
-# Vars set:
-#   KUBE_USER
-#   KUBE_PASSWORD
-get-password() {
-  get-kubeconfig-basicauth
-  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
-    KUBE_USER=admin
-    KUBE_PASSWORD=$(python2.7 -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
-  fi
-}
 rax-ssh-key() {
   if [ ! -f $HOME/.ssh/${SSH_KEY_NAME} ]; then
     echo "cluster/rackspace/util.sh: Generating SSH KEY ${HOME}/.ssh/${SSH_KEY_NAME}"
@@ -301,7 +287,7 @@ kube-up() {
   KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
   trap "rm -rf ${KUBE_TEMP}" EXIT
-  get-password
+  gen-kube-basicauth
   python2.7 $(dirname $0)/../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $KUBE_USER $KUBE_PASSWORD
   HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd)
@@ -365,6 +351,8 @@ kube-up() {
   detect-minions
+  # ensures KUBECONFIG is set
+  get-kubeconfig-basicauth
   echo "All minions may not be online yet, this is okay."
   echo
   echo "Kubernetes cluster is running. The master is running at:"

View File

@@ -260,20 +260,6 @@ FLANNEL_OPTS=""
 EOF
 }
-# Ensure that we have a password created for validating to the master. Will
-# read from ${KUBECONFIG:-$DEFAULT_KUBECONFIG} if available.
-#
-# Vars set:
-#   KUBE_USER
-#   KUBE_PASSWORD
-function get-password {
-  get-kubeconfig-basicauth
-  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
-    KUBE_USER=admin
-    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
-  fi
-}
 # Detect the IP for the master
 #
 # Assumed vars:
@@ -360,7 +346,7 @@ function kube-up() {
   source "${KUBE_ROOT}/cluster/common.sh"
   # set kubernetes user and password
-  get-password
+  gen-kube-basicauth
   create-kubeconfig
 }

View File

@@ -253,19 +253,21 @@ function verify-cluster {
 }
   (
+    # ensures KUBECONFIG is set
+    get-kubeconfig-basicauth
     echo
     echo "Kubernetes cluster is running. The master is running at:"
     echo
     echo "  https://${MASTER_IP}"
     echo
-    echo "The user name and password to use is located in ~/.kubernetes_vagrant_auth."
+    echo "The user name and password to use is located in ${KUBECONFIG}."
     echo
   )
 }
 # Instantiate a kubernetes cluster
 function kube-up {
-  get-password
+  gen-kube-basicauth
   get-tokens
   create-provision-scripts
@@ -295,7 +297,7 @@ function kube-down {
 # Update a kubernetes cluster with latest source
 function kube-push {
-  get-password
+  get-kubeconfig-basicauth
   create-provision-scripts
   vagrant provision
 }
@@ -316,13 +318,6 @@ function test-teardown {
   kube-down
 }
-# Set the {user} and {password} environment values required to interact with provider
-function get-password {
-  export KUBE_USER=vagrant
-  export KUBE_PASSWORD=vagrant
-  echo "Using credentials: $KUBE_USER:$KUBE_PASSWORD" 1>&2
-}
 # Find the minion name based on the IP address
 function find-vagrant-name-by-ip {
   local ip="$1"
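
Vagrant was the one provider shipping fixed, well-known credentials; with its get-password gone, vagrant clusters get per-cluster generated credentials like every other provider. A quick way to confirm the old defaults no longer authenticate against a freshly created cluster (a sketch; MASTER_IP as used above):

# Expect 401 now that credentials are generated per cluster instead of
# being hardcoded to vagrant:vagrant.
curl -k -o /dev/null -w '%{http_code}\n' \
  -u vagrant:vagrant "https://${MASTER_IP}/api/v1/nodes"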

View File

@@ -169,20 +169,6 @@ function upload-server-tars {
   done
 }
-# Ensure that we have a password created for validating to the master. Will
-# read from kubeconfig if available.
-#
-# Vars set:
-#   KUBE_USER
-#   KUBE_PASSWORD
-function get-password {
-  get-kubeconfig-basicauth
-  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
-    KUBE_USER=admin
-    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
-  fi
-}
 # Run command over ssh
 function kube-ssh {
   local host="$1"
@@ -264,7 +250,7 @@ function kube-up {
   ensure-temp-dir
-  get-password
+  gen-kube-basicauth
   python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
     -b -c "${KUBE_TEMP}/htpasswd" "$KUBE_USER" "$KUBE_PASSWORD"
   local htpasswd
@@ -395,6 +381,8 @@ function kube-up {
     }
   done
+  # ensures KUBECONFIG is set
+  get-kubeconfig-basicauth
   echo
   echo "Kubernetes cluster is running. The master is running at:"
   echo
@@ -434,7 +422,7 @@ function kube-push {
     echo "sudo salt --force-color '*' state.highstate"
   ) | kube-ssh "${KUBE_MASTER_IP}"
-  get-password
+  get-kubeconfig-basicauth
   echo
   echo "Kubernetes cluster is running. The master is running at:"