Merge pull request #31908 from colhom/aws-compat
Automatic merge from submit-queue

[WIP] AWS compatibility for federation cluster and e2e

I've been testing this and have reached a point where the e2e tests run, and some test failures are popping up which are not overtly related to AWS-specific things.

```sh
SSSSSSSSSSSSSSSS

Summarizing 5 Failures:

[Fail] [k8s.io] [Feature:Federation] Federated Services DNS [BeforeEach] should be able to discover a federated service
/go/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/federation-util.go:233

[Fail] [k8s.io] [Feature:Federation] Federated Services Service creation [It] should create matching services in underlying clusters
/go/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/federation-util.go:233

[Fail] [k8s.io] Federated ingresses [Feature:Federation] Federated Ingresses [It] should create and update matching ingresses in underlying clusters
/go/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/federated-ingress.go:289

[Fail] [k8s.io] [Feature:Federation] Federated Services DNS [BeforeEach] non-local federated service [Slow] missing local service should never find DNS entries for a missing local service
/go/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/federation-util.go:233

[Fail] [k8s.io] [Feature:Federation] Federated Services DNS [BeforeEach] non-local federated service should be able to discover a non-local federated service
/go/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/federation-util.go:233

Ran 16 of 383 Specs in 519.872 seconds
FAIL! -- 11 Passed | 5 Failed | 1 Pending | 366 Skipped
--- FAIL: TestE2E (519.89s)
```

/cc @quinton-hoole @madhusudancs for advice. Should I investigate further?
Commit: d27076c18b
```diff
@@ -68,8 +68,6 @@ CLUSTER_ID=${INSTANCE_PREFIX}
 VPC_NAME=${VPC_NAME:-kubernetes-vpc}
 AWS_SSH_KEY=${AWS_SSH_KEY:-$HOME/.ssh/kube_aws_rsa}
 CONFIG_CONTEXT="${KUBE_CONFIG_CONTEXT:-aws_${INSTANCE_PREFIX}}"
-IAM_PROFILE_MASTER="kubernetes-master"
-IAM_PROFILE_NODE="kubernetes-minion"
 
 LOG="/dev/null"
 
```
```diff
@@ -56,8 +56,6 @@ CONFIG_CONTEXT="${KUBE_CONFIG_CONTEXT:-aws_${INSTANCE_PREFIX}}"
 CLUSTER_ID=${INSTANCE_PREFIX}
 VPC_NAME=${VPC_NAME:-kubernetes-vpc}
 AWS_SSH_KEY=${AWS_SSH_KEY:-$HOME/.ssh/kube_aws_rsa}
-IAM_PROFILE_MASTER="kubernetes-master"
-IAM_PROFILE_NODE="kubernetes-minion"
 
 LOG="/dev/null"
 
```
```diff
@@ -120,6 +120,9 @@ fi
 MASTER_SG_NAME="kubernetes-master-${CLUSTER_ID}"
 NODE_SG_NAME="kubernetes-minion-${CLUSTER_ID}"
 
+IAM_PROFILE_MASTER="kubernetes-master-${CLUSTER_ID}-${VPC_NAME}"
+IAM_PROFILE_NODE="kubernetes-minion-${CLUSTER_ID}-${VPC_NAME}"
+
 # Be sure to map all the ephemeral drives. We can specify more than we actually have.
 # TODO: Actually mount the correct number (especially if we have more), though this is non-trivial, and
 #  only affects the big storage instance types, which aren't a typical use case right now.
```
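The effect of the new computed names is that the IAM entities become unique per cluster and VPC, instead of the fixed `kubernetes-master`/`kubernetes-minion` names removed from the config defaults above. A minimal sketch of the expansion (the `CLUSTER_ID` and `VPC_NAME` values are illustrative, not taken from the diff):

```sh
# Illustrative expansion only; the concrete values are made up.
CLUSTER_ID="e2e-test"
VPC_NAME="kubernetes-vpc"
IAM_PROFILE_MASTER="kubernetes-master-${CLUSTER_ID}-${VPC_NAME}"
echo "${IAM_PROFILE_MASTER}"   # kubernetes-master-e2e-test-kubernetes-vpc
```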
```diff
@@ -701,16 +704,18 @@ function add-tag {
 }
 
 # Creates the IAM profile, based on configuration files in templates/iam
+# usage: create-iam-profile kubernetes-master-us-west-1a-chom kubernetes-master
 function create-iam-profile {
   local key=$1
+  local role=$2
 
   local conf_dir=file://${KUBE_ROOT}/cluster/aws/templates/iam
 
   echo "Creating IAM role: ${key}"
-  aws iam create-role --role-name ${key} --assume-role-policy-document ${conf_dir}/${key}-role.json > $LOG
+  aws iam create-role --role-name ${key} --assume-role-policy-document ${conf_dir}/${role}-role.json > $LOG
 
   echo "Creating IAM role-policy: ${key}"
-  aws iam put-role-policy --role-name ${key} --policy-name ${key} --policy-document ${conf_dir}/${key}-policy.json > $LOG
+  aws iam put-role-policy --role-name ${key} --policy-name ${key} --policy-document ${conf_dir}/${role}-policy.json > $LOG
 
   echo "Creating IAM instance-policy: ${key}"
   aws iam create-instance-profile --instance-profile-name ${key} > $LOG
```
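The new `role` argument separates the IAM entity name (`key`, now unique per cluster) from the policy template to load (`role`), so the same `kubernetes-master`/`kubernetes-minion` JSON documents under `templates/iam` can back any number of cluster-specific roles. A hedged sketch of an invocation, mirroring the usage comment above (the concrete profile name is illustrative):

```sh
# Sketch only: creates an IAM role/instance profile named after the cluster and
# VPC, but reads the shared kubernetes-master templates:
#   templates/iam/kubernetes-master-role.json
#   templates/iam/kubernetes-master-policy.json
create-iam-profile "kubernetes-master-e2e-test-kubernetes-vpc" "kubernetes-master"
```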
```diff
@@ -721,14 +726,11 @@ function create-iam-profile {
 
 # Creates the IAM roles (if they do not already exist)
 function ensure-iam-profiles {
-  aws iam get-instance-profile --instance-profile-name ${IAM_PROFILE_MASTER} || {
-    echo "Creating master IAM profile: ${IAM_PROFILE_MASTER}"
-    create-iam-profile ${IAM_PROFILE_MASTER}
-  }
-  aws iam get-instance-profile --instance-profile-name ${IAM_PROFILE_NODE} || {
-    echo "Creating minion IAM profile: ${IAM_PROFILE_NODE}"
-    create-iam-profile ${IAM_PROFILE_NODE}
-  }
+  echo "Creating master IAM profile: ${IAM_PROFILE_MASTER}"
+  create-iam-profile ${IAM_PROFILE_MASTER} kubernetes-master
+
+  echo "Creating minion IAM profile: ${IAM_PROFILE_NODE}"
+  create-iam-profile ${IAM_PROFILE_NODE} kubernetes-minion
 }
 
 # Wait for instance to be in specified state
```
```diff
@@ -785,7 +787,7 @@ function delete_security_group {
   echo "Deleting security group: ${sg_id}"
 
   # We retry in case there's a dependent resource - typically an ELB
-  n=0
+  local n=0
  until [ $n -ge 20 ]; do
    $AWS_CMD delete-security-group --group-id ${sg_id} > $LOG && return
    n=$[$n+1]
```
```diff
@@ -795,6 +797,46 @@ function delete_security_group {
   exit 1
 }
 
+
+
+# Deletes master and minion IAM roles and instance profiles
+# usage: delete-iam-instance-profiles
+function delete-iam-profiles {
+  for iam_profile_name in ${IAM_PROFILE_MASTER} ${IAM_PROFILE_NODE};do
+    echo "Removing role from instance profile: ${iam_profile_name}"
+    conceal-no-such-entity-response aws iam remove-role-from-instance-profile --instance-profile-name "${iam_profile_name}" --role-name "${iam_profile_name}"
+
+    echo "Deleting IAM Instance-Profile: ${iam_profile_name}"
+    conceal-no-such-entity-response aws iam delete-instance-profile --instance-profile-name "${iam_profile_name}"
+
+    echo "Delete IAM role policy: ${iam_profile_name}"
+    conceal-no-such-entity-response aws iam delete-role-policy --role-name "${iam_profile_name}" --policy-name "${iam_profile_name}"
+
+    echo "Deleting IAM Role: ${iam_profile_name}"
+    conceal-no-such-entity-response aws iam delete-role --role-name "${iam_profile_name}"
+  done
+}
+
+# Detects NoSuchEntity response from AWS cli stderr output and conceals error
+# Otherwise the error is treated as fatal
+# usage: conceal-no-such-entity-response ...args
+function conceal-no-such-entity-response {
+  # in plain english: redirect stderr to stdout, and stdout to the log file
+  local -r errMsg=$($@ 2>&1 > $LOG)
+  if [[ "$errMsg" == "" ]];then
+    return
+  fi
+
+  echo $errMsg
+  if [[ "$errMsg" =~ " (NoSuchEntity) " ]];then
+    echo " -> no such entity response detected. will assume operation is not necessary due to prior incomplete teardown"
+    return
+  fi
+
+  echo "Error message is fatal. Will exit"
+  exit 1
+}
+
 function ssh-key-setup {
   if [[ ! -f "$AWS_SSH_KEY" ]]; then
     ssh-keygen -f "$AWS_SSH_KEY" -N ''
```
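`conceal-no-such-entity-response` is what makes the teardown idempotent: only a `NoSuchEntity` error from the AWS CLI is swallowed, and anything else stays fatal. A hedged sketch of the behaviour (the role name and log path below are made up):

```sh
LOG="/tmp/kube-aws-teardown.log"

# Deleting a role that was already removed by a previous, partial teardown:
conceal-no-such-entity-response aws iam delete-role --role-name "already-gone-role"
# The AWS CLI prints something like
#   An error occurred (NoSuchEntity) when calling the DeleteRole operation: ...
# which matches the " (NoSuchEntity) " pattern, so the function logs it and returns 0.
# Any other stderr output (e.g. a DeleteConflict) is echoed and the script exits 1.
```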
```diff
@@ -1446,6 +1488,9 @@ function kube-down {
     echo "Note: You may be seeing this message may be because the cluster was already deleted, or" >&2
     echo "has a name other than '${CLUSTER_ID}'." >&2
   fi
+
+  echo "Deleting IAM Instance profiles"
+  delete-iam-profiles
 }
 
 # Update a kubernetes cluster with latest source
```
```diff
@@ -38,27 +38,34 @@ fi
 # Should NOT be called within the global scope, unless setting the desired global zone vars
 # This function is currently NOT USED in the global scope
 function set-federation-zone-vars {
   zone="$1"
   export OVERRIDE_CONTEXT="federation-e2e-${KUBERNETES_PROVIDER}-$zone"
   echo "Setting zone vars to: $OVERRIDE_CONTEXT"
   if [[ "$KUBERNETES_PROVIDER" == "gce" ]];then
 
     export KUBE_GCE_ZONE="$zone"
     # gcloud has a 61 character limit, and for firewall rules this
     # prefix gets appended to itself, with some extra information
     # need tot keep it short
     export KUBE_GCE_INSTANCE_PREFIX="${USER}-${zone}"
 
   elif [[ "$KUBERNETES_PROVIDER" == "gke" ]];then
 
     export CLUSTER_NAME="${USER}-${zone}"
 
   elif [[ "$KUBERNETES_PROVIDER" == "aws" ]];then
 
     export KUBE_AWS_ZONE="$zone"
     export KUBE_AWS_INSTANCE_PREFIX="${USER}-${zone}"
+
+    # WARNING: This is hack
+    # After KUBE_AWS_INSTANCE_PREFIX is changed,
+    # we need to make sure the config-xxx.sh file is
+    # re-sourced so the change propogates to dependent computed values
+    # (eg: MASTER_SG_NAME, NODE_SG_NAME, etc)
+    source "${KUBE_ROOT}/cluster/aws/util.sh"
   else
     echo "Provider \"${KUBERNETES_PROVIDER}\" is not supported"
     exit 1
   fi
 }
```
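The re-source exists because names such as `MASTER_SG_NAME`, `NODE_SG_NAME` and the IAM profile names are computed from `CLUSTER_ID` (and hence from the instance prefix) at the moment the AWS config/util scripts are sourced; exporting a new `KUBE_AWS_INSTANCE_PREFIX` alone does not refresh them. A rough sketch of the ordering problem, with illustrative values:

```sh
# Values are illustrative; only the ordering matters.
export KUBE_AWS_INSTANCE_PREFIX="alice-us-west-2a"
echo "${MASTER_SG_NAME}"    # still derived from the OLD prefix (stale CLUSTER_ID)

source "${KUBE_ROOT}/cluster/aws/util.sh"   # re-sources the config, recomputing CLUSTER_ID
echo "${MASTER_SG_NAME}"    # now e.g. kubernetes-master-alice-us-west-2a
```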
```diff
@@ -109,6 +109,7 @@ function create-federation-api-objects {
 
   export FEDERATION_API_HOST=""
   export KUBE_MASTER_IP=""
+  export IS_DNS_NAME="false"
   if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]];then
     # The vagrant approach is to use a nodeport service, and point kubectl at one of the nodes
     $template "${manifests_root}/federation-apiserver-nodeport-service.yaml" | $host_kubectl create -f -
```
```diff
@@ -116,6 +117,12 @@
     FEDERATION_API_HOST=`printf "$node_addresses" | cut -d " " -f1`
     KUBE_MASTER_IP="${FEDERATION_API_HOST}:${FEDERATION_API_NODEPORT}"
   elif [[ "$KUBERNETES_PROVIDER" == "gce" || "$KUBERNETES_PROVIDER" == "gke" || "$KUBERNETES_PROVIDER" == "aws" ]];then
+
+    # Any providers where ingress is a DNS name should tick this box.
+    # TODO(chom): attempt to do this automatically
+    if [[ "$KUBERNETES_PROVIDER" == "aws" ]];then
+      IS_DNS_NAME="true"
+    fi
     # any capable providers should use a loadbalancer service
     # we check for ingress.ip and ingress.hostname, so should work for any loadbalancer-providing provider
     # allows 30x5 = 150 seconds for loadbalancer creation
```
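The comments above describe polling the federation API service until the cloud provider populates a LoadBalancer ingress, accepting either an IP (GCE/GKE) or a hostname (AWS ELB). The script's actual loop is not shown in this hunk; the sketch below is only an approximation under assumed names (the `federation-apiserver` service name and the 30x5s loop structure are assumptions):

```sh
# Approximate sketch of the polling described above; not the script's literal code.
for attempt in $(seq 1 30); do
  ingress_ip=$($host_kubectl get svc federation-apiserver --namespace="${FEDERATION_NAMESPACE}" \
    -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  ingress_hostname=$($host_kubectl get svc federation-apiserver --namespace="${FEDERATION_NAMESPACE}" \
    -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
  if [[ -n "${ingress_ip}" || -n "${ingress_hostname}" ]]; then
    # A hostname here corresponds to the IS_DNS_NAME=true path (AWS ELB).
    FEDERATION_API_HOST="${ingress_ip:-${ingress_hostname}}"
    break
  fi
  sleep 5
done
```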
```diff
@@ -179,7 +186,7 @@
 
   # Create server certificates.
   ensure-temp-dir
-  echo "Creating federation apiserver certs for IP: $FEDERATION_API_HOST"
+  echo "Creating federation apiserver certs for federation api host: ${FEDERATION_API_HOST} ( is this a dns name?: ${IS_DNS_NAME} )"
   MASTER_NAME="federation-apiserver" create-federation-apiserver-certs ${FEDERATION_API_HOST}
   export FEDERATION_APISERVER_CA_CERT_BASE64="${FEDERATION_APISERVER_CA_CERT_BASE64}"
   export FEDERATION_APISERVER_CERT_BASE64="${FEDERATION_APISERVER_CERT_BASE64}"
```
```diff
@@ -239,15 +246,23 @@
 }
 
 # Creates the required certificates for federation apiserver.
-# $1: The public IP for the master.
+# $1: The public IP or DNS name for the master.
 #
 # Assumed vars
 #   KUBE_TEMP
 #   MASTER_NAME
 #
+#   IS_DNS_NAME=true|false
 function create-federation-apiserver-certs {
-  local -r primary_cn="${1}"
-  local sans="IP:${1},DNS:${MASTER_NAME}"
-
+  local primary_cn
+  local sans
+
+  if [[ "${IS_DNS_NAME:-}" == "true" ]];then
+    primary_cn="$(printf "${1}" | sha1sum | tr " -" " ")"
+    sans="DNS:${1},DNS:${MASTER_NAME}"
+  else
+    primary_cn="${1}"
+    sans="IP:${1},DNS:${MASTER_NAME}"
+  fi
 
   echo "Generating certs for alternate-names: ${sans}"
 
```
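For AWS the federation API host is an ELB DNS name, which is generally too long to be a certificate commonName (X.509 caps the CN at 64 characters), so the new code hashes the name for the CN and carries the real name only in the subjectAltNames. A small illustration of what `primary_cn` ends up holding (the hostname below is made up):

```sh
# Made-up ELB-style hostname, used only for illustration.
host="federation-apiserver-1234567890.us-west-2.elb.amazonaws.com"

# Same pipeline as above: sha1sum prints "<digest>  -", and tr maps the
# spaces/dash to spaces, leaving the 40-hex-char digest (plus trailing blanks).
printf "${host}" | sha1sum | tr " -" " "

# The SANs still carry the real names that clients actually verify:
#   sans="DNS:${host},DNS:federation-apiserver"
```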
```diff
@@ -332,5 +347,5 @@ function cleanup-federation-api-objects {
   # Delete all resources with the federated-cluster label.
   $host_kubectl delete pods,svc,rc,deployment,secret -lapp=federated-cluster
   # Delete all resources in FEDERATION_NAMESPACE.
-  $host_kubectl delete pods,svc,rc,deployment,secret --namespace=${FEDERATION_NAMESPACE} --all
+  $host_kubectl delete pvc,pv,pods,svc,rc,deployment,secret --namespace=${FEDERATION_NAMESPACE} --all
 }
```
```diff
@@ -23,7 +23,9 @@ spec:
         - --etcd-servers=http://localhost:2379
         - --service-cluster-ip-range={{.FEDERATION_SERVICE_CIDR}}
         - --secure-port=443
+{{if eq .IS_DNS_NAME "false"}}
         - --advertise-address={{.FEDERATION_API_HOST}}
+{{end}}
         - --client-ca-file=/srv/kubernetes/ca.crt
         - --basic-auth-file=/srv/kubernetes/basic-auth.csv
         - --tls-cert-file=/srv/kubernetes/server.cert
```