federation: aws compatibility for cluster and e2e frameworks

Colin Hom 2016-09-01 13:02:10 -07:00
parent 4a9f402808
commit 0c562bac48
5 changed files with 102 additions and 39 deletions

@@ -56,8 +56,8 @@ CONFIG_CONTEXT="${KUBE_CONFIG_CONTEXT:-aws_${INSTANCE_PREFIX}}"
 CLUSTER_ID=${INSTANCE_PREFIX}
 VPC_NAME=${VPC_NAME:-kubernetes-vpc}
 AWS_SSH_KEY=${AWS_SSH_KEY:-$HOME/.ssh/kube_aws_rsa}
-IAM_PROFILE_MASTER="kubernetes-master"
-IAM_PROFILE_NODE="kubernetes-minion"
+IAM_PROFILE_MASTER="kubernetes-master-${INSTANCE_PREFIX}"
+IAM_PROFILE_NODE="kubernetes-minion-${INSTANCE_PREFIX}"
 LOG="/dev/null"
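Parameterizing the profile names by ${INSTANCE_PREFIX} lets multiple clusters coexist in one AWS account without colliding on shared IAM objects. A quick illustration of the expansion; the prefix below is a made-up example value, not taken from the diff:

    # Illustration only - hypothetical prefix.
    INSTANCE_PREFIX=e2e-us-west-2a
    IAM_PROFILE_MASTER="kubernetes-master-${INSTANCE_PREFIX}"  # -> kubernetes-master-e2e-us-west-2a
    IAM_PROFILE_NODE="kubernetes-minion-${INSTANCE_PREFIX}"    # -> kubernetes-minion-e2e-us-west-2a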

@@ -701,16 +701,18 @@ function add-tag {
 }

 # Creates the IAM profile, based on configuration files in templates/iam
+# usage: create-iam-profile kubernetes-master-us-west-1a-chom kubernetes-master
 function create-iam-profile {
   local key=$1
+  local role=$2

   local conf_dir=file://${KUBE_ROOT}/cluster/aws/templates/iam

   echo "Creating IAM role: ${key}"
-  aws iam create-role --role-name ${key} --assume-role-policy-document ${conf_dir}/${key}-role.json > $LOG
+  aws iam create-role --role-name ${key} --assume-role-policy-document ${conf_dir}/${role}-role.json > $LOG

   echo "Creating IAM role-policy: ${key}"
-  aws iam put-role-policy --role-name ${key} --policy-name ${key} --policy-document ${conf_dir}/${key}-policy.json > $LOG
+  aws iam put-role-policy --role-name ${key} --policy-name ${key} --policy-document ${conf_dir}/${role}-policy.json > $LOG

   echo "Creating IAM instance-policy: ${key}"
   aws iam create-instance-profile --instance-profile-name ${key} > $LOG
@@ -721,14 +723,11 @@ function create-iam-profile {

 # Creates the IAM roles (if they do not already exist)
 function ensure-iam-profiles {
-  aws iam get-instance-profile --instance-profile-name ${IAM_PROFILE_MASTER} || {
-    echo "Creating master IAM profile: ${IAM_PROFILE_MASTER}"
-    create-iam-profile ${IAM_PROFILE_MASTER}
-  }
-  aws iam get-instance-profile --instance-profile-name ${IAM_PROFILE_NODE} || {
-    echo "Creating minion IAM profile: ${IAM_PROFILE_NODE}"
-    create-iam-profile ${IAM_PROFILE_NODE}
-  }
+  echo "Creating master IAM profile: ${IAM_PROFILE_MASTER}"
+  create-iam-profile ${IAM_PROFILE_MASTER} kubernetes-master
+
+  echo "Creating minion IAM profile: ${IAM_PROFILE_NODE}"
+  create-iam-profile ${IAM_PROFILE_NODE} kubernetes-minion
 }

 # Wait for instance to be in specified state
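The key/role split above keeps one pair of policy templates while giving every cluster uniquely named IAM objects: ${key} names the per-cluster role and profile, and ${role} selects the template files. A sketch of the resulting calls, reusing the hypothetical names from the usage comment in the diff:

    # Illustration only - the prefix and zone in the names are example values.
    create-iam-profile kubernetes-master-us-west-1a-chom kubernetes-master
    #   reads templates/iam/kubernetes-master-role.json
    #   and   templates/iam/kubernetes-master-policy.json
    create-iam-profile kubernetes-minion-us-west-1a-chom kubernetes-minion
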
@@ -785,7 +784,7 @@ function delete_security_group {
   echo "Deleting security group: ${sg_id}"

   # We retry in case there's a dependent resource - typically an ELB
-  n=0
+  local n=0
   until [ $n -ge 20 ]; do
     $AWS_CMD delete-security-group --group-id ${sg_id} > $LOG && return
     n=$[$n+1]
@@ -795,6 +794,46 @@ function delete_security_group {
   exit 1
 }

+# Deletes master and minion IAM roles and instance profiles
+# usage: delete-iam-profiles
+function delete-iam-profiles {
+  for iam_profile_name in ${IAM_PROFILE_MASTER} ${IAM_PROFILE_NODE};do
+    echo "Removing role from instance profile: ${iam_profile_name}"
+    conceal-no-such-entity-response aws iam remove-role-from-instance-profile --instance-profile-name "${iam_profile_name}" --role-name "${iam_profile_name}"
+
+    echo "Deleting IAM Instance-Profile: ${iam_profile_name}"
+    conceal-no-such-entity-response aws iam delete-instance-profile --instance-profile-name "${iam_profile_name}"
+
+    echo "Deleting IAM role policy: ${iam_profile_name}"
+    conceal-no-such-entity-response aws iam delete-role-policy --role-name "${iam_profile_name}" --policy-name "${iam_profile_name}"
+
+    echo "Deleting IAM Role: ${iam_profile_name}"
+    conceal-no-such-entity-response aws iam delete-role --role-name "${iam_profile_name}"
+  done
+}
+
+# Detects a NoSuchEntity response in the AWS CLI's stderr output and conceals the error.
+# Any other error is treated as fatal.
+# usage: conceal-no-such-entity-response ...args
+function conceal-no-such-entity-response {
+  # in plain english: redirect stderr to stdout, and stdout to the log file
+  local -r errMsg=$($@ 2>&1 > $LOG)
+  if [[ "$errMsg" == "" ]];then
+    return
+  fi
+
+  echo $errMsg
+  if [[ "$errMsg" =~ " (NoSuchEntity) " ]];then
+    echo " -> no such entity response detected. will assume operation is not necessary due to prior incomplete teardown"
+    return
+  fi
+
+  echo "Error message is fatal. Will exit"
+  exit 1
+}
+
 function ssh-key-setup {
   if [[ ! -f "$AWS_SSH_KEY" ]]; then
     ssh-keygen -f "$AWS_SSH_KEY" -N ''
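Because kube-down may run against a cluster that was only partially created (or partially torn down), each IAM deletion is wrapped so that a NoSuchEntity error from the AWS CLI means "already gone" and is swallowed, while anything else stays fatal. A minimal sketch of the behavior, assuming the role name below does not exist:

    # Illustration only - 'no-such-role' is a made-up name.
    # stderr contains "An error occurred (NoSuchEntity) ..." -> logged, then ignored:
    conceal-no-such-entity-response aws iam delete-role --role-name no-such-role
    # any other error message (e.g. AccessDenied) is printed and the script exits 1
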
@@ -1446,6 +1485,9 @@ function kube-down {
     echo "Note: You may be seeing this message because the cluster was already deleted, or" >&2
     echo "has a name other than '${CLUSTER_ID}'." >&2
   fi
+
+  echo "Deleting IAM Instance profiles"
+  delete-iam-profiles
 }

 # Update a kubernetes cluster with latest source

@@ -38,27 +38,34 @@ fi
 # Should NOT be called within the global scope, unless setting the desired global zone vars
 # This function is currently NOT USED in the global scope
 function set-federation-zone-vars {
   zone="$1"
   export OVERRIDE_CONTEXT="federation-e2e-${KUBERNETES_PROVIDER}-$zone"
   echo "Setting zone vars to: $OVERRIDE_CONTEXT"
   if [[ "$KUBERNETES_PROVIDER" == "gce" ]];then
     export KUBE_GCE_ZONE="$zone"
     # gcloud has a 61 character limit, and for firewall rules this
     # prefix gets appended to itself, with some extra information
     # need to keep it short
     export KUBE_GCE_INSTANCE_PREFIX="${USER}-${zone}"
   elif [[ "$KUBERNETES_PROVIDER" == "gke" ]];then
     export CLUSTER_NAME="${USER}-${zone}"
   elif [[ "$KUBERNETES_PROVIDER" == "aws" ]];then
     export KUBE_AWS_ZONE="$zone"
     export KUBE_AWS_INSTANCE_PREFIX="${USER}-${zone}"
+
+    # WARNING: This is a hack.
+    # After KUBE_AWS_INSTANCE_PREFIX is changed, we need to make sure the
+    # config-xxx.sh file is re-sourced so the change propagates to dependent
+    # computed values (eg: MASTER_SG_NAME, NODE_SG_NAME, etc)
+    source "${KUBE_ROOT}/cluster/aws/util.sh"
   else
     echo "Provider \"${KUBERNETES_PROVIDER}\" is not supported"
     exit 1
   fi
 }
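The re-source is needed because cluster/aws/util.sh derives names from the instance prefix at source time; values computed before the prefix changed would still point at the previous zone's cluster. A sketch of the effect, with shapes based on the config shown earlier (CLUSTER_ID=${INSTANCE_PREFIX}); the exact derivations live in cluster/aws/util.sh:

    # Illustration only - the zone is a hypothetical example.
    export KUBE_AWS_INSTANCE_PREFIX="${USER}-us-west-2a"
    source "${KUBE_ROOT}/cluster/aws/util.sh"
    # computed values such as MASTER_SG_NAME / NODE_SG_NAME now embed the new
    # prefix instead of whatever zone was configured before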

@@ -109,6 +109,7 @@ function create-federation-api-objects {
   export FEDERATION_API_HOST=""
   export KUBE_MASTER_IP=""
+  local is_dns_name="false"
   if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]];then
     # The vagrant approach is to use a nodeport service, and point kubectl at one of the nodes
     $template "${manifests_root}/federation-apiserver-nodeport-service.yaml" | $host_kubectl create -f -
@@ -116,6 +117,12 @@ function create-federation-api-objects {
     FEDERATION_API_HOST=`printf "$node_addresses" | cut -d " " -f1`
     KUBE_MASTER_IP="${FEDERATION_API_HOST}:${FEDERATION_API_NODEPORT}"
   elif [[ "$KUBERNETES_PROVIDER" == "gce" || "$KUBERNETES_PROVIDER" == "gke" || "$KUBERNETES_PROVIDER" == "aws" ]];then
+    # Any providers where ingress is a DNS name should tick this box.
+    # TODO(chom): attempt to do this automatically
+    if [[ "$KUBERNETES_PROVIDER" == "aws" ]];then
+      is_dns_name="true"
+    fi
+
     # any capable providers should use a loadbalancer service
     # we check for ingress.ip and ingress.hostname, so should work for any loadbalancer-providing provider
     # allows 30x5 = 150 seconds for loadbalancer creation
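Checking both ingress fields keeps a single code path across providers: GCE/GKE load balancers publish status.loadBalancer.ingress[0].ip, while AWS ELBs publish ingress[0].hostname. A hedged sketch of the kind of probe such a loop performs; the service name is a stand-in, not taken from the diff:

    # Illustration only - 'federation-apiserver' is an assumed service name.
    $host_kubectl get svc federation-apiserver --namespace=${FEDERATION_NAMESPACE} \
      -o jsonpath='{.status.loadBalancer.ingress[0].ip}{.status.loadBalancer.ingress[0].hostname}'
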
@@ -179,8 +186,8 @@ function create-federation-api-objects {
   # Create server certificates.
   ensure-temp-dir
-  echo "Creating federation apiserver certs for IP: $FEDERATION_API_HOST"
-  MASTER_NAME="federation-apiserver" create-federation-apiserver-certs ${FEDERATION_API_HOST}
+  echo "Creating federation apiserver certs for federation api host: ${FEDERATION_API_HOST} ( is this a dns name?: ${is_dns_name} )"
+  MASTER_NAME="federation-apiserver" IS_DNS_NAME="${is_dns_name}" create-federation-apiserver-certs ${FEDERATION_API_HOST}
   export FEDERATION_APISERVER_CA_CERT_BASE64="${FEDERATION_APISERVER_CA_CERT_BASE64}"
   export FEDERATION_APISERVER_CERT_BASE64="${FEDERATION_APISERVER_CERT_BASE64}"
   export FEDERATION_APISERVER_KEY_BASE64="${FEDERATION_APISERVER_KEY_BASE64}"
@@ -239,15 +246,23 @@ function create-federation-api-objects {
 }

 # Creates the required certificates for federation apiserver.
-# $1: The public IP for the master.
+# $1: The public IP or DNS name for the master.
 #
 # Assumed vars
 #   KUBE_TEMP
 #   MASTER_NAME
+#   IS_DNS_NAME=true|false
 function create-federation-apiserver-certs {
-  local -r primary_cn="${1}"
-  local sans="IP:${1},DNS:${MASTER_NAME}"
+  local primary_cn
+  local sans
+  if [[ "${IS_DNS_NAME:-}" == "true" ]];then
+    primary_cn="$(printf "${1}" | sha1sum | tr " -" " ")"
+    sans="DNS:${1},DNS:${MASTER_NAME}"
+  else
+    primary_cn="${1}"
+    sans="IP:${1},DNS:${MASTER_NAME}"
+  fi

   echo "Generating certs for alternate-names: ${sans}"
@@ -332,5 +347,5 @@ function cleanup-federation-api-objects {
   # Delete all resources with the federated-cluster label.
   $host_kubectl delete pods,svc,rc,deployment,secret -lapp=federated-cluster
   # Delete all resources in FEDERATION_NAMESPACE.
-  $host_kubectl delete pods,svc,rc,deployment,secret --namespace=${FEDERATION_NAMESPACE} --all
+  $host_kubectl delete pvc,pv,pods,svc,rc,deployment,secret --namespace=${FEDERATION_NAMESPACE} --all
 }

@@ -23,7 +23,6 @@ spec:
         - --etcd-servers=http://localhost:2379
         - --service-cluster-ip-range={{.FEDERATION_SERVICE_CIDR}}
         - --secure-port=443
-        - --advertise-address={{.FEDERATION_API_HOST}}
         - --client-ca-file=/srv/kubernetes/ca.crt
         - --basic-auth-file=/srv/kubernetes/basic-auth.csv
         - --tls-cert-file=/srv/kubernetes/server.cert