federation: aws compatibility for cluster and e2e frameworks

Colin Hom 2016-09-01 13:02:10 -07:00
parent 4a9f402808
commit 0c562bac48
5 changed files with 102 additions and 39 deletions

View File

@@ -56,8 +56,8 @@ CONFIG_CONTEXT="${KUBE_CONFIG_CONTEXT:-aws_${INSTANCE_PREFIX}}"
CLUSTER_ID=${INSTANCE_PREFIX}
VPC_NAME=${VPC_NAME:-kubernetes-vpc}
AWS_SSH_KEY=${AWS_SSH_KEY:-$HOME/.ssh/kube_aws_rsa}
IAM_PROFILE_MASTER="kubernetes-master"
IAM_PROFILE_NODE="kubernetes-minion"
IAM_PROFILE_MASTER="kubernetes-master-${INSTANCE_PREFIX}"
IAM_PROFILE_NODE="kubernetes-minion-${INSTANCE_PREFIX}"
LOG="/dev/null"
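Suffixing the profile names with ${INSTANCE_PREFIX} gives each cluster its own IAM role and instance profile, so multiple federation clusters under one AWS account no longer collide on the shared kubernetes-master/kubernetes-minion names. A minimal sketch of the resulting names, with hypothetical prefixes:

# hypothetical prefixes; each cluster now owns distinct IAM resources
INSTANCE_PREFIX=e2e-us-west-2a   # -> kubernetes-master-e2e-us-west-2a, kubernetes-minion-e2e-us-west-2a
INSTANCE_PREFIX=e2e-us-east-1b   # -> kubernetes-master-e2e-us-east-1b, kubernetes-minion-e2e-us-east-1b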

View File

@@ -701,16 +701,18 @@ function add-tag {
}
# Creates the IAM profile, based on configuration files in templates/iam
# usage: create-iam-profile kubernetes-master-us-west-1a-chom kubernetes-master
function create-iam-profile {
local key=$1
local role=$2
local conf_dir=file://${KUBE_ROOT}/cluster/aws/templates/iam
echo "Creating IAM role: ${key}"
aws iam create-role --role-name ${key} --assume-role-policy-document ${conf_dir}/${key}-role.json > $LOG
aws iam create-role --role-name ${key} --assume-role-policy-document ${conf_dir}/${role}-role.json > $LOG
echo "Creating IAM role-policy: ${key}"
aws iam put-role-policy --role-name ${key} --policy-name ${key} --policy-document ${conf_dir}/${key}-policy.json > $LOG
aws iam put-role-policy --role-name ${key} --policy-name ${key} --policy-document ${conf_dir}/${role}-policy.json > $LOG
echo "Creating IAM instance-policy: ${key}"
aws iam create-instance-profile --instance-profile-name ${key} > $LOG
@@ -721,14 +723,11 @@ function create-iam-profile
# Creates the IAM roles (if they do not already exist)
function ensure-iam-profiles {
aws iam get-instance-profile --instance-profile-name ${IAM_PROFILE_MASTER} || {
echo "Creating master IAM profile: ${IAM_PROFILE_MASTER}"
create-iam-profile ${IAM_PROFILE_MASTER}
create-iam-profile ${IAM_PROFILE_MASTER} kubernetes-master
}
aws iam get-instance-profile --instance-profile-name ${IAM_PROFILE_NODE} || {
echo "Creating minion IAM profile: ${IAM_PROFILE_NODE}"
create-iam-profile ${IAM_PROFILE_NODE}
create-iam-profile ${IAM_PROFILE_NODE} kubernetes-minion
}
}
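In the new two-argument form, the first argument names the AWS-side resources while the second selects which template documents to read, so uniquely named roles can share the stock policy files. The call pattern, with a hypothetical prefix:

# key = AWS resource name, role = template name under cluster/aws/templates/iam
create-iam-profile "kubernetes-master-e2e-us-west-2a" kubernetes-master
# reads kubernetes-master-role.json and kubernetes-master-policy.json, then creates
# a role, role-policy and instance profile all named kubernetes-master-e2e-us-west-2a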
# Wait for instance to be in specified state
@@ -785,7 +784,7 @@ function delete_security_group {
echo "Deleting security group: ${sg_id}"
# We retry in case there's a dependent resource - typically an ELB
n=0
local n=0
until [ $n -ge 20 ]; do
$AWS_CMD delete-security-group --group-id ${sg_id} > $LOG && return
n=$[$n+1]
@@ -795,6 +794,46 @@ function delete_security_group {
exit 1
}
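Declaring n local matters because delete_security_group is called from kube-down; without it, the retry counter would clobber any n in the caller's scope. A self-contained illustration:

# without 'local', the helper would overwrite the caller's n
f() { local n=0; n=19; }
n=1; f; echo "$n"   # prints 1; dropping 'local' would print 19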
# Deletes master and minion IAM roles and instance profiles
# usage: delete-iam-instance-profiles
function delete-iam-profiles {
for iam_profile_name in ${IAM_PROFILE_MASTER} ${IAM_PROFILE_NODE};do
echo "Removing role from instance profile: ${iam_profile_name}"
conceal-no-such-entity-response aws iam remove-role-from-instance-profile --instance-profile-name "${iam_profile_name}" --role-name "${iam_profile_name}"
echo "Deleting IAM Instance-Profile: ${iam_profile_name}"
conceal-no-such-entity-response aws iam delete-instance-profile --instance-profile-name "${iam_profile_name}"
echo "Delete IAM role policy: ${iam_profile_name}"
conceal-no-such-entity-response aws iam delete-role-policy --role-name "${iam_profile_name}" --policy-name "${iam_profile_name}"
echo "Deleting IAM Role: ${iam_profile_name}"
conceal-no-such-entity-response aws iam delete-role --role-name "${iam_profile_name}"
done
}
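The deletion order above is load-bearing: IAM refuses to delete a role that is still attached to an instance profile or still carries an inline policy, so the role is detached and stripped first. For example (role name hypothetical):

# running this before the detach and policy-delete steps would fail with DeleteConflict
aws iam delete-role --role-name "kubernetes-master-e2e-us-west-2a" \
  || echo "DeleteConflict: remove the inline policy and profile attachment first"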
# Detects NoSuchEntity response from AWS cli stderr output and conceals error
# Otherwise the error is treated as fatal
# usage: conceal-no-such-entity-response ...args
function conceal-no-such-entity-response {
# in plain English: redirect stderr to stdout, and stdout to the log file
local -r errMsg=$("$@" 2>&1 > $LOG)
if [[ "$errMsg" == "" ]];then
return
fi
echo $errMsg
if [[ "$errMsg" =~ " (NoSuchEntity) " ]];then
echo " -> no such entity response detected. will assume operation is not necessary due to prior incomplete teardown"
return
fi
echo "Error message is fatal. Will exit"
exit 1
}
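Usage sketch: because only NoSuchEntity is swallowed, kube-down stays idempotent across partial teardowns while any other AWS failure still aborts the script:

# safe to re-run after an earlier, interrupted teardown
conceal-no-such-entity-response aws iam delete-role --role-name "already-deleted-role"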
function ssh-key-setup {
if [[ ! -f "$AWS_SSH_KEY" ]]; then
ssh-keygen -f "$AWS_SSH_KEY" -N ''
@@ -1446,6 +1485,9 @@ function kube-down {
echo "Note: You may be seeing this message may be because the cluster was already deleted, or" >&2
echo "has a name other than '${CLUSTER_ID}'." >&2
fi
echo "Deleting IAM Instance profiles"
delete-iam-profiles
}
# Update a kubernetes cluster with latest source

View File

@@ -57,6 +57,13 @@ function set-federation-zone-vars {
export KUBE_AWS_ZONE="$zone"
export KUBE_AWS_INSTANCE_PREFIX="${USER}-${zone}"
# WARNING: This is a hack.
# After KUBE_AWS_INSTANCE_PREFIX is changed,
# we need to make sure the config-xxx.sh file is
# re-sourced so the change propagates to dependent computed values
# (e.g. MASTER_SG_NAME, NODE_SG_NAME, etc.)
source "${KUBE_ROOT}/cluster/aws/util.sh"
else
echo "Provider \"${KUBERNETES_PROVIDER}\" is not supported"
exit 1
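The re-source is needed because the config scripts compute derived names once, at source time; later changes to the inputs do not flow through. A self-contained illustration of the pitfall:

# computed values do not track later edits to their inputs
PREFIX=a; SG_NAME="kubernetes-master-${PREFIX}"
PREFIX=b
echo "$SG_NAME"   # still kubernetes-master-a; must recompute (re-source) after changing PREFIX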

View File

@@ -109,6 +109,7 @@ function create-federation-api-objects {
export FEDERATION_API_HOST=""
export KUBE_MASTER_IP=""
local is_dns_name="false"
if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]];then
# The vagrant approach is to use a nodeport service, and point kubectl at one of the nodes
$template "${manifests_root}/federation-apiserver-nodeport-service.yaml" | $host_kubectl create -f -
@@ -116,6 +117,12 @@ function create-federation-api-objects {
FEDERATION_API_HOST=`printf "$node_addresses" | cut -d " " -f1`
KUBE_MASTER_IP="${FEDERATION_API_HOST}:${FEDERATION_API_NODEPORT}"
elif [[ "$KUBERNETES_PROVIDER" == "gce" || "$KUBERNETES_PROVIDER" == "gke" || "$KUBERNETES_PROVIDER" == "aws" ]];then
# Any provider whose ingress is a DNS name should tick this box.
# TODO(chom): attempt to do this automatically
if [[ "$KUBERNETES_PROVIDER" == "aws" ]];then
is_dns_name="true"
fi
# any capable providers should use a loadbalancer service
# we check for ingress.ip and ingress.hostname, so should work for any loadbalancer-providing provider
# allows 30x5 = 150 seconds for loadbalancer creation
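The wait loop itself falls outside this hunk; a hedged sketch of its shape, checking both ingress fields (service name assumed):

# poll up to 30 times, 5s apart, for either an ingress IP or hostname
for _ in $(seq 1 30); do
  ingress=$($host_kubectl get svc federation-apiserver --namespace="${FEDERATION_NAMESPACE}" \
    -o jsonpath='{.status.loadBalancer.ingress[0].ip}{.status.loadBalancer.ingress[0].hostname}')
  [[ -n "${ingress}" ]] && { FEDERATION_API_HOST="${ingress}"; break; }
  sleep 5
done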
@@ -179,8 +186,8 @@ function create-federation-api-objects {
# Create server certificates.
ensure-temp-dir
echo "Creating federation apiserver certs for IP: $FEDERATION_API_HOST"
MASTER_NAME="federation-apiserver" create-federation-apiserver-certs ${FEDERATION_API_HOST}
echo "Creating federation apiserver certs for federation api host: ${FEDERATION_API_HOST} ( is this a dns name?: ${is_dns_name} )"
MASTER_NAME="federation-apiserver" IS_DNS_NAME="${is_dns_name}" create-federation-apiserver-certs ${FEDERATION_API_HOST}
export FEDERATION_APISERVER_CA_CERT_BASE64="${FEDERATION_APISERVER_CA_CERT_BASE64}"
export FEDERATION_APISERVER_CERT_BASE64="${FEDERATION_APISERVER_CERT_BASE64}"
export FEDERATION_APISERVER_KEY_BASE64="${FEDERATION_APISERVER_KEY_BASE64}"
@@ -239,15 +246,23 @@ function create-federation-api-objects {
}
# Creates the required certificates for federation apiserver.
# $1: The public IP for the master.
# $1: The public IP or DNS name for the master.
#
# Assumed vars
# KUBE_TEMP
# MASTER_NAME
#
# IS_DNS_NAME=true|false
function create-federation-apiserver-certs {
local -r primary_cn="${1}"
local sans="IP:${1},DNS:${MASTER_NAME}"
local primary_cn
local sans
if [[ "${IS_DNS_NAME:-}" == "true" ]];then
primary_cn="$(printf "${1}" | sha1sum | tr " -" " ")"
sans="DNS:${1},DNS:${MASTER_NAME}"
else
primary_cn="${1}"
sans="IP:${1},DNS:${MASTER_NAME}"
fi
echo "Generating certs for alternate-names: ${sans}"
@@ -332,5 +347,5 @@ function cleanup-federation-api-objects {
# Delete all resources with the federated-cluster label.
$host_kubectl delete pods,svc,rc,deployment,secret -lapp=federated-cluster
# Delete all resources in FEDERATION_NAMESPACE.
$host_kubectl delete pods,svc,rc,deployment,secret --namespace=${FEDERATION_NAMESPACE} --all
$host_kubectl delete pvc,pv,pods,svc,rc,deployment,secret --namespace=${FEDERATION_NAMESPACE} --all
}
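Adding pvc,pv to the namespace sweep matters on AWS in particular (assumed rationale): if the federation control plane claims a persistent volume, an undeleted claim would leave the backing EBS volume behind after teardown. A quick way to spot such leftovers:

# list claims and volumes that would otherwise orphan cloud storage
$host_kubectl get pvc,pv --namespace="${FEDERATION_NAMESPACE}"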

View File

@@ -23,7 +23,6 @@ spec:
- --etcd-servers=http://localhost:2379
- --service-cluster-ip-range={{.FEDERATION_SERVICE_CIDR}}
- --secure-port=443
- --advertise-address={{.FEDERATION_API_HOST}}
- --client-ca-file=/srv/kubernetes/ca.crt
- --basic-auth-file=/srv/kubernetes/basic-auth.csv
- --tls-cert-file=/srv/kubernetes/server.cert
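Dropping --advertise-address is consistent with the DNS-name support above (assumed rationale): kube-apiserver parses the flag as an IP address, so an ELB hostname would be rejected, and omitting it lets the server fall back to its detected interface address. The flag's type is visible in the help text:

# hypothetical check on a host with the binary available
kube-apiserver --help 2>&1 | grep -- --advertise-address   # documented as an IP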