Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-30 06:54:01 +00:00

Merge pull request #6691 from jlowdermilk/gen-kubeconfig

Generate kubeconfig for all providers in cluster/ that use auth

Commit 7f75c982ce
@@ -20,6 +20,7 @@
 # config-default.sh.
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 source "${KUBE_ROOT}/cluster/aws/${KUBE_CONFIG_FILE-"config-default.sh"}"
+source "${KUBE_ROOT}/cluster/common.sh"
 
 # This removes the final character in bash (somehow)
 AWS_REGION=${ZONE%?}
@@ -265,7 +266,7 @@ function upload-server-tars() {
 
 
 # Ensure that we have a password created for validating to the master. Will
-# read from the kubernetes auth-file for the current context if available.
+# read from kubeconfig for the current context if available.
 #
 # Assumed vars
 #   KUBE_ROOT
@@ -274,17 +275,11 @@ function upload-server-tars() {
 #   KUBE_USER
 #   KUBE_PASSWORD
 function get-password {
-  # go template to extract the auth-path of the current-context user
-  # Note: we save dot ('.') to $dot because the 'with' action overrides dot
-  local template='{{$dot := .}}{{with $ctx := index $dot "current-context"}}{{range $element := (index $dot "contexts")}}{{ if eq .name $ctx }}{{ with $user := .context.user }}{{range $element := (index $dot "users")}}{{ if eq .name $user }}{{ index . "user" "auth-path" }}{{end}}{{end}}{{end}}{{end}}{{end}}{{end}}'
-  local file=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o template --template="${template}")
-  if [[ ! -z "$file" && -r "$file" ]]; then
-    KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
-    KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
-    return
+  get-kubeconfig-basicauth
+  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
+    KUBE_USER=admin
+    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
   fi
-  KUBE_USER=admin
-  KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
 }
 
 # Adds a tag to an AWS resource
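
The get-kubeconfig-basicauth helper the new code calls lives in cluster/common.sh and is not shown in this diff. A minimal sketch of what such a helper does, assuming basic-auth credentials are stored on the current-context user in kubeconfig; the template strings below are illustrative, not the exact ones in common.sh:

# Sketch only: pull username/password for the current-context user out of
# kubeconfig via `kubectl config view`. Empty results mean no basic auth is
# configured, which get-password treats as a signal to generate credentials.
function get-kubeconfig-basicauth() {
  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
  # Illustrative go templates; the real helper resolves the user bound to the
  # current-context rather than assuming the user is named "${CONTEXT}".
  KUBE_USER=$("${kubectl}" config view -o template \
    --template='{{range .users}}{{if eq .name "'${CONTEXT}'"}}{{.user.username}}{{end}}{{end}}' 2>/dev/null || true)
  KUBE_PASSWORD=$("${kubectl}" config view -o template \
    --template='{{range .users}}{{if eq .name "'${CONTEXT}'"}}{{.user.password}}{{end}}{{end}}' 2>/dev/null || true)
}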
@@ -609,44 +604,24 @@ function kube-up {
 
   echo "Kubernetes cluster created."
 
-  local kube_cert="kubecfg.crt"
-  local kube_key="kubecfg.key"
-  local ca_cert="kubernetes.ca.crt"
-  # TODO use token instead of kube_auth
-  local kube_auth="kubernetes_auth"
+  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
+  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
+  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
+  export CONTEXT="aws_${INSTANCE_PREFIX}"
 
-  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
-  local context="${INSTANCE_PREFIX}"
-  local user="${INSTANCE_PREFIX}-admin"
-  local config_dir="${HOME}/.kube/${context}"
-
   # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
   # config file. Distribute the same way the htpasswd is done.
   (
-    mkdir -p "${config_dir}"
     umask 077
-    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@${KUBE_MASTER_IP} sudo cat /srv/kubernetes/kubecfg.crt >"${config_dir}/${kube_cert}" 2>$LOG
-    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@${KUBE_MASTER_IP} sudo cat /srv/kubernetes/kubecfg.key >"${config_dir}/${kube_key}" 2>$LOG
-    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@${KUBE_MASTER_IP} sudo cat /srv/kubernetes/ca.crt >"${config_dir}/${ca_cert}" 2>$LOG
+    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "ubuntu@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>"$LOG"
+    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "ubuntu@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>"$LOG"
+    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "ubuntu@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>"$LOG"
 
-    "${kubectl}" config set-cluster "${context}" --server="https://${KUBE_MASTER_IP}" --certificate-authority="${config_dir}/${ca_cert}" --global
-    "${kubectl}" config set-credentials "${user}" --auth-path="${config_dir}/${kube_auth}" --global
-    "${kubectl}" config set-context "${context}" --cluster="${context}" --user="${user}" --global
-    "${kubectl}" config use-context "${context}" --global
-
-    cat << EOF > "${config_dir}/${kube_auth}"
-{
-  "User": "$KUBE_USER",
-  "Password": "$KUBE_PASSWORD",
-  "CAFile": "${config_dir}/${ca_cert}",
-  "CertFile": "${config_dir}/${kube_cert}",
-  "KeyFile": "${config_dir}/${kube_key}"
-}
-EOF
-
-    chmod 0600 "${config_dir}/${kube_auth}" "${config_dir}/$kube_cert" \
-      "${config_dir}/${kube_key}" "${config_dir}/${ca_cert}"
-    echo "Wrote ${config_dir}/${kube_auth}"
+    create-kubeconfig
   )
 
   echo "Sanity checking cluster..."
@@ -700,7 +675,7 @@ EOF
   echo
   echo -e "${color_yellow}  https://${KUBE_MASTER_IP}"
   echo
-  echo -e "${color_green}The user name and password to use is located in ${config_dir}/${kube_auth}${color_norm}"
+  echo -e "${color_green}The user name and password to use is located in ${KUBECONFIG}.${color_norm}"
   echo
 }
 
@@ -21,6 +21,7 @@
 
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 source "${KUBE_ROOT}/cluster/azure/${KUBE_CONFIG_FILE-"config-default.sh"}"
+source "${KUBE_ROOT}/cluster/common.sh"
 
 function azure_call {
   local -a params=()
@@ -242,30 +243,17 @@ function detect-master () {
 }
 
 # Ensure that we have a password created for validating to the master. Will
-# read from $HOME/.kubernetres_auth if available.
+# read from kubeconfig current-context if available.
 #
 # Vars set:
 #   KUBE_USER
 #   KUBE_PASSWORD
 function get-password {
-  local file="$HOME/.kubernetes_auth"
-  if [[ -r "$file" ]]; then
-    KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
-    KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
-    return
-  fi
+  get-kubeconfig-basicauth
+  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
     KUBE_USER=admin
     KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
-
-  # Remove this code, since in all use cases I can see, we are overwriting this
-  # at cluster creation time.
-  cat << EOF > "$file"
-{
-  "User": "$KUBE_USER",
-  "Password": "$KUBE_PASSWORD"
-}
-EOF
-  chmod 0600 "$file"
+  fi
 }
 
 # Generate authentication token for admin user. Will
@@ -432,32 +420,22 @@ function kube-up {
   printf "\n"
   echo "Kubernetes cluster created."
 
-  local kube_cert=".kubecfg.crt"
-  local kube_key=".kubecfg.key"
-  local ca_cert=".kubernetes.ca.crt"
+  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
+  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
+  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
+  export CONTEXT="azure_${INSTANCE_PREFIX}"
 
   # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
   # config file. Distribute the same way the htpasswd is done.
   (umask 077
    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
-     sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
+     sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
-     sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
+     sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
    ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \
-     sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
+     sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null
 
-   cat << EOF > ~/.kubernetes_auth
-{
-  "User": "$KUBE_USER",
-  "Password": "$KUBE_PASSWORD",
-  "CAFile": "$HOME/$ca_cert",
-  "CertFile": "$HOME/$kube_cert",
-  "KeyFile": "$HOME/$kube_key"
-}
-EOF
-
-   chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \
-     "${HOME}/${kube_key}" "${HOME}/${ca_cert}"
+   create-kubeconfig
   )
 
   # Wait for salt on the minions
@@ -482,7 +460,7 @@ EOF
   echo
   echo "  https://${KUBE_MASTER_IP}"
   echo
-  echo "The user name and password to use is located in ~/.kubernetes_auth."
+  echo "The user name and password to use is located in ${KUBECONFIG}."
   echo
 }
 
@@ -30,11 +30,12 @@ DEFAULT_KUBECONFIG="${HOME}/.kube/config"
 #   KUBE_PASSWORD
 #   KUBE_MASTER_IP
 #   KUBECONFIG
+#   CONTEXT
 #
+# The following can be omitted for --insecure-skip-tls-verify
 #   KUBE_CERT
 #   KUBE_KEY
 #   CA_CERT
-#   CONTEXT
 function create-kubeconfig() {
   local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
 
@@ -44,14 +45,31 @@ function create-kubeconfig() {
     mkdir -p $(dirname "${KUBECONFIG}")
     touch "${KUBECONFIG}"
   fi
-  "${kubectl}" config set-cluster "${CONTEXT}" --server="https://${KUBE_MASTER_IP}" \
-    --certificate-authority="${CA_CERT}" \
-    --embed-certs=true
-  "${kubectl}" config set-credentials "${CONTEXT}" --username="${KUBE_USER}" \
-    --password="${KUBE_PASSWORD}" \
-    --client-certificate="${KUBE_CERT}" \
-    --client-key="${KUBE_KEY}" \
-    --embed-certs=true
+  local cluster_args=(
+    "--server=${KUBE_SERVER:-https://${KUBE_MASTER_IP}}"
+  )
+  if [[ -z "${CA_CERT:-}" ]]; then
+    cluster_args+=("--insecure-skip-tls-verify=true")
+  else
+    cluster_args+=(
+      "--certificate-authority=${CA_CERT}"
+      "--embed-certs=true"
+    )
+  fi
+  local user_args=(
+    "--username=${KUBE_USER}"
+    "--password=${KUBE_PASSWORD}"
+  )
+  if [[ ! -z "${KUBE_CERT:-}" && ! -z "${KUBE_KEY:-}" ]]; then
+    user_args+=(
+      "--client-certificate=${KUBE_CERT}"
+      "--client-key=${KUBE_KEY}"
+      "--embed-certs=true"
+    )
+  fi
 
+  "${kubectl}" config set-cluster "${CONTEXT}" "${cluster_args[@]}"
+  "${kubectl}" config set-credentials "${CONTEXT}" "${user_args[@]}"
   "${kubectl}" config set-context "${CONTEXT}" --cluster="${CONTEXT}" --user="${CONTEXT}"
   "${kubectl}" config use-context "${CONTEXT}" --cluster="${CONTEXT}"
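
Taken together, each provider now drives this one helper by exporting the assumed vars before calling it. A minimal sketch of the calling convention (values illustrative, not from any real cluster):

# Illustrative driver, mirroring what the per-provider kube-up functions do.
export KUBE_USER=admin
export KUBE_PASSWORD=secret                      # normally generated; see get-password
export KUBE_MASTER_IP=1.2.3.4                    # hypothetical address
export CONTEXT="aws_kubernetes"                  # hypothetical context name
export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"      # leave the cert vars empty to
export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"       # fall back to basic auth plus
export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"  # --insecure-skip-tls-verify
create-kubeconfig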
@@ -679,9 +679,9 @@ function kube-up {
   echo "Kubernetes cluster created."
 
   # TODO use token instead of basic auth
-  export KUBE_CERT="/tmp/kubecfg.crt"
-  export KUBE_KEY="/tmp/kubecfg.key"
-  export CA_CERT="/tmp/kubernetes.ca.crt"
+  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
+  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
+  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
   export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}"
 
   # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's
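
The switch from fixed /tmp paths to /tmp/$RANDOM-* keeps repeated or concurrent turnups from clobbering each other's scratch files. A quick illustration of the expansion:

# $RANDOM yields a fresh value in 0..32767 on each expansion, so the paths
# below differ between runs (collision-avoidant, not guaranteed unique).
echo "/tmp/$RANDOM-kubecfg.crt" "/tmp/$RANDOM-kubecfg.key" "/tmp/$RANDOM-kubernetes.ca.crt"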
@@ -120,11 +120,6 @@ elif [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
   config=(
     "--kubeconfig=$HOME/.kubernetes_vagrant_kubeconfig"
   )
-elif [[ "$KUBERNETES_PROVIDER" == "libvirt-coreos" ]]; then
-  detect-master > /dev/null
-  config=(
-    "--server=http://${KUBE_MASTER_IP}:8080"
-  )
 fi
 
 echo "current-context: \"$(${kubectl} "${config[@]:+${config[@]}}" config view -o template --template='{{index . "current-context"}}')\"" >&2
@@ -18,7 +18,8 @@
 
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 readonly ROOT=$(dirname "${BASH_SOURCE}")
-source $ROOT/${KUBE_CONFIG_FILE:-"config-default.sh"}
+source "$ROOT/${KUBE_CONFIG_FILE:-"config-default.sh"}"
+source "$KUBE_ROOT/cluster/common.sh"
 
 export LIBVIRT_DEFAULT_URI=qemu:///system
 
@@ -199,6 +200,7 @@ function wait-cluster-readiness {
 function kube-up {
   detect-master
   detect-minions
+  get-password
   initialize-pool keep_base_image
   initialize-network
 
@@ -235,12 +237,9 @@ function kube-up {
     rm $domain_xml
   done
 
-  export KUBECONFIG="${HOME}/.kube/.kubeconfig"
-  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
-
-  "${kubectl}" config set-cluster libvirt-coreos --server=http://${KUBE_MASTER_IP-}:8080
-  "${kubectl}" config set-context libvirt-coreos --cluster=libvirt-coreos
-  "${kubectl}" config use-context libvirt-coreos --cluster=libvirt-coreos
+  export KUBE_SERVER="http://192.168.10.1:8080"
+  export CONTEXT="libvirt-coreos"
+  create-kubeconfig
 
   wait-cluster-readiness
 
@@ -331,8 +330,8 @@ function test-teardown {
 
 # Set the {KUBE_USER} and {KUBE_PASSWORD} environment values required to interact with provider
 function get-password {
-  export KUBE_USER=core
-  echo "TODO get-password"
+  export KUBE_USER=''
+  export KUBE_PASSWORD=''
 }
 
 # SSH to a node by name or IP ($1) and run a command ($2).
@@ -20,6 +20,7 @@
 # config-default.sh.
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"}
+source "${KUBE_ROOT}/cluster/common.sh"
 
 verify-prereqs() {
   # Make sure that prerequisites are installed.
@@ -50,29 +51,17 @@ verify-prereqs() {
 }
 
 # Ensure that we have a password created for validating to the master. Will
-# read from $HOME/.kubernetres_auth if available.
+# read from kubeconfig current-context if available.
 #
 # Vars set:
 #   KUBE_USER
 #   KUBE_PASSWORD
 get-password() {
-  local file="$HOME/.kubernetes_auth"
-  if [[ -r "$file" ]]; then
-    KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
-    KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
-    return
-  fi
-  KUBE_USER=admin
-  KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
-
-  # Store password for reuse.
-  cat << EOF > "$file"
-{
-  "User": "$KUBE_USER",
-  "Password": "$KUBE_PASSWORD"
-}
-EOF
-  chmod 0600 "$file"
+  get-kubeconfig-basicauth
+  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
+    KUBE_USER=admin
+    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
+  fi
 }
 
 rax-ssh-key() {
@@ -329,6 +318,13 @@ kube-up() {
 
   echo "Kubernetes cluster created."
 
+  export KUBE_CERT=""
+  export KUBE_KEY=""
+  export CA_CERT=""
+  export CONTEXT="rackspace_${INSTANCE_PREFIX}"
+
+  create-kubeconfig
+
   # Don't bail on errors, we want to be able to print some info.
   set +e
 
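
Rackspace exports the cert vars as empty strings on purpose: with CA_CERT empty, create-kubeconfig takes the branch added in common.sh above and writes a basic-auth-only cluster entry. A tiny illustration of the check it hits:

# The empty exports steer create-kubeconfig's cluster_args branch:
export CA_CERT=""
if [[ -z "${CA_CERT:-}" ]]; then
  echo "cluster entry will use --insecure-skip-tls-verify=true"
fi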
@@ -18,6 +18,7 @@
 
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 source "${KUBE_ROOT}/cluster/vagrant/${KUBE_CONFIG_FILE-"config-default.sh"}"
+source "${KUBE_ROOT}/cluster/common.sh"
 
 function detect-master () {
   KUBE_MASTER_IP=$MASTER_IP
@@ -252,49 +253,18 @@ function kube-up {
 
   vagrant up
 
-  local kube_cert=".kubecfg.vagrant.crt"
-  local kube_key=".kubecfg.vagrant.key"
-  local ca_cert=".kubernetes.vagrant.ca.crt"
+  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
+  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
+  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
+  export CONTEXT="vagrant"
 
-  (umask 077
-   vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
-   vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
-   vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
+  (
+    umask 077
+    vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
+    vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
+    vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null
 
-   cat <<EOF >"${HOME}/.kubernetes_vagrant_auth"
-{
-  "User": "$KUBE_USER",
-  "Password": "$KUBE_PASSWORD",
-  "CAFile": "$HOME/$ca_cert",
-  "CertFile": "$HOME/$kube_cert",
-  "KeyFile": "$HOME/$kube_key"
-}
-EOF
-
-   cat <<EOF >"${HOME}/.kubernetes_vagrant_kubeconfig"
-apiVersion: v1
-clusters:
-- cluster:
-    certificate-authority: ${HOME}/$ca_cert
-    server: https://${MASTER_IP}:443
-  name: vagrant
-contexts:
-- context:
-    cluster: vagrant
-    namespace: default
-    user: vagrant
-  name: vagrant
-current-context: "vagrant"
-kind: Config
-preferences: {}
-users:
-- name: vagrant
-  user:
-    auth-path: ${HOME}/.kubernetes_vagrant_auth
-EOF
-
-   chmod 0600 ~/.kubernetes_vagrant_auth "${HOME}/${kube_cert}" \
-     "${HOME}/${kube_key}" "${HOME}/${ca_cert}"
+    create-kubeconfig
   )
 
   verify-cluster
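
With the hand-written heredocs gone, the file vagrant users end up with is whatever the kubectl config set-* calls emit. Assuming basic auth plus embedded certs, its shape should be roughly the following (field values illustrative, base64 data elided):

apiVersion: v1
kind: Config
clusters:
- cluster:
    certificate-authority-data: <base64 CA data>   # from --embed-certs=true
    server: https://10.245.1.2:443                 # illustrative master address
  name: vagrant
contexts:
- context:
    cluster: vagrant
    user: vagrant
  name: vagrant
current-context: vagrant
preferences: {}
users:
- name: vagrant
  user:
    username: admin              # KUBE_USER
    password: <generated>        # KUBE_PASSWORD
    client-certificate-data: <base64 cert data>
    client-key-data: <base64 key data>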
@@ -21,6 +21,7 @@
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 source "${KUBE_ROOT}/cluster/vsphere/config-common.sh"
 source "${KUBE_ROOT}/cluster/vsphere/${KUBE_CONFIG_FILE-"config-default.sh"}"
+source "${KUBE_ROOT}/cluster/common.sh"
 
 # Detect the IP for the master
 #
@@ -169,29 +170,17 @@ function upload-server-tars {
 }
 
 # Ensure that we have a password created for validating to the master. Will
-# read from $HOME/.kubernetes_auth if available.
+# read from kubeconfig if available.
 #
 # Vars set:
 #   KUBE_USER
 #   KUBE_PASSWORD
 function get-password {
-  local file="$HOME/.kubernetes_auth"
-  if [[ -r "$file" ]]; then
-    KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
-    KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
-    return
-  fi
-  KUBE_USER=admin
-  KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
-
-  # Store password for reuse.
-  cat << EOF > "$file"
-{
-  "User": "$KUBE_USER",
-  "Password": "$KUBE_PASSWORD"
-}
-EOF
-  chmod 0600 "$file"
+  get-kubeconfig-basicauth
+  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
+    KUBE_USER=admin
+    KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
+  fi
 }
 
 # Run command over ssh
@@ -372,6 +361,24 @@ function kube-up {
     printf " OK\n"
   done
 
   echo "Kubernetes cluster created."
 
+  # TODO use token instead of basic auth
+  export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
+  export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
+  export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
+  export CONTEXT="vsphere_${INSTANCE_PREFIX}"
+
+  (
+    umask 077
+
+    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
+    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
+    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null
+
+    create-kubeconfig
+  )
+
+  echo
   echo "Sanity checking cluster..."
 
@@ -394,33 +401,8 @@ function kube-up {
   echo
   echo "  https://${KUBE_MASTER_IP}"
   echo
-  echo "The user name and password to use is located in ~/.kubernetes_auth."
+  echo "The user name and password to use is located in ${KUBECONFIG}"
   echo
-
-  local kube_cert=".kubecfg.crt"
-  local kube_key=".kubecfg.key"
-  local ca_cert=".kubernetes.ca.crt"
-
-  (
-    umask 077
-
-    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
-    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
-    kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
-
-    cat << EOF > ~/.kubernetes_auth
-{
-  "User": "$KUBE_USER",
-  "Password": "$KUBE_PASSWORD",
-  "CAFile": "$HOME/$ca_cert",
-  "CertFile": "$HOME/$kube_cert",
-  "KeyFile": "$HOME/$kube_key"
-}
-EOF
-
-    chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \
-      "${HOME}/${kube_key}" "${HOME}/${ca_cert}"
-  )
 }
 
 # Delete a kubernetes cluster
@@ -87,15 +87,7 @@ if [[ -z "${AUTH_CONFIG:-}" ]]; then
 
   detect-master >/dev/null
 
-
-  if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
-    # When we are using vagrant it has hard coded auth. We repeat that here so that
-    # we don't clobber auth that might be used for a publicly facing cluster.
-    auth_config=(
-      "--auth_config=${HOME}/.kubernetes_vagrant_auth"
-      "--kubeconfig=${HOME}/.kubernetes_vagrant_kubeconfig"
-    )
-  elif [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
+  if [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
     # GKE stores its own kubeconfig in gcloud's config directory.
     detect-project &> /dev/null
     auth_config=(
@@ -103,25 +95,15 @@ if [[ -z "${AUTH_CONFIG:-}" ]]; then
       # gcloud doesn't set the current-context, so we have to set it
       "--context=gke_${PROJECT}_${ZONE}_${CLUSTER_NAME}"
     )
-  elif [[ "${KUBERNETES_PROVIDER}" == "gce" ]]; then
-    auth_config=(
-      "--kubeconfig=${KUBECONFIG:-$DEFAULT_KUBECONFIG}"
-    )
-  elif [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
-    auth_config=(
-      "--auth_config=${HOME}/.kube/${INSTANCE_PREFIX}/kubernetes_auth"
-    )
-  elif [[ "${KUBERNETES_PROVIDER}" == "libvirt-coreos" ]]; then
-    auth_config=(
-      "--kubeconfig=${HOME}/.kube/.kubeconfig"
-    )
   elif [[ "${KUBERNETES_PROVIDER}" == "conformance_test" ]]; then
     auth_config=(
       "--auth_config=${KUBERNETES_CONFORMANCE_TEST_AUTH_CONFIG:-}"
       "--cert_dir=${KUBERNETES_CONFORMANCE_TEST_CERT_DIR:-}"
     )
   else
-    auth_config=()
+    auth_config=(
+      "--kubeconfig=${KUBECONFIG:-$DEFAULT_KUBECONFIG}"
+    )
   fi
 else
   echo "Conformance Test. No cloud-provider-specific preparation."
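
The practical effect for non-GKE, non-conformance providers is that the e2e harness now hands the test binary a kubeconfig path instead of a kubernetes_auth file. A hypothetical look at what the new default branch passes through:

# Hypothetical: the flags the default branch forwards to the e2e test binary.
auth_config=(
  "--kubeconfig=${KUBECONFIG:-$DEFAULT_KUBECONFIG}"
)
echo "would run e2e with: ${auth_config[@]:+${auth_config[@]}}"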