From 2cb22008477c7107cae3afbb119254e0dc4d0294 Mon Sep 17 00:00:00 2001
From: "Madhusudan.C.S"
Date: Thu, 16 Feb 2017 13:27:43 -0800
Subject: [PATCH] Move kube-dns ConfigMap creation/deletion out of federated
 services e2e tests to federation-up.sh/federation-down.sh where the clusters
 are joined/unjoined.

---
 cluster/kube-util.sh                     |  8 +++-
 federation/cluster/common.sh             | 60 ++++++++++++++++++++++--
 federation/cluster/federation-down.sh    | 44 +++++++++++++++--
 federation/cluster/federation-up.sh      | 38 ++++++++-------
 test/e2e_federation/federated-service.go | 27 -----------
 5 files changed, 124 insertions(+), 53 deletions(-)

diff --git a/cluster/kube-util.sh b/cluster/kube-util.sh
index 48a8202ba85..1a01d5ee833 100644
--- a/cluster/kube-util.sh
+++ b/cluster/kube-util.sh
@@ -36,11 +36,17 @@ fi
 # Federation utils
 
 # Sets the kubeconfig context value for the current cluster.
+# Args:
+#   $1: zone (required)
 #
 # Vars set:
 #   CLUSTER_CONTEXT
 function kubeconfig-federation-context() {
-  CLUSTER_CONTEXT="federation-e2e-${KUBERNETES_PROVIDER}-$zone"
+  if [[ -z "${1:-}" ]]; then
+    echo "zone parameter is required"
+    exit 1
+  fi
+  CLUSTER_CONTEXT="federation-e2e-${KUBERNETES_PROVIDER}-${1}"
 }
 
 
diff --git a/federation/cluster/common.sh b/federation/cluster/common.sh
index 1cc840f6a10..8a23b6ac8c4 100644
--- a/federation/cluster/common.sh
+++ b/federation/cluster/common.sh
@@ -15,6 +15,60 @@
 # required:
 # KUBE_ROOT: path of the root of the Kubernetes reposiitory
 
+: "${KUBE_ROOT?Must set KUBE_ROOT env var}"
+
+# Provides the $KUBERNETES_PROVIDER, kubeconfig-federation-context()
+# and detect-project function
+source "${KUBE_ROOT}/cluster/kube-util.sh"
+
+# kubefed configuration
+FEDERATION_NAME="${FEDERATION_NAME:-e2e-federation}"
+FEDERATION_NAMESPACE=${FEDERATION_NAMESPACE:-federation-system}
+FEDERATION_KUBE_CONTEXT="${FEDERATION_KUBE_CONTEXT:-e2e-federation}"
+HOST_CLUSTER_ZONE="${FEDERATION_HOST_CLUSTER_ZONE:-}"
+# If $HOST_CLUSTER_ZONE isn't specified, arbitrarily choose
+# last zone as the host cluster zone.
+if [[ -z "${HOST_CLUSTER_ZONE}" ]]; then
+  E2E_ZONES_ARR=($E2E_ZONES)
+  HOST_CLUSTER_ZONE=${E2E_ZONES_ARR[-1]}
+fi
+
+HOST_CLUSTER_CONTEXT="${FEDERATION_HOST_CLUSTER_CONTEXT:-}"
+if [[ -z "${HOST_CLUSTER_CONTEXT}" ]]; then
+  # Sets ${CLUSTER_CONTEXT}
+  kubeconfig-federation-context "${HOST_CLUSTER_ZONE:-}"
+  HOST_CLUSTER_CONTEXT="${CLUSTER_CONTEXT}"
+fi
+
+# kube-dns configuration.
+KUBEDNS_CONFIGMAP_NAME="kube-dns"
+KUBEDNS_CONFIGMAP_NAMESPACE="kube-system"
+KUBEDNS_FEDERATION_FLAG="federations"
+
+function federation_cluster_contexts() {
+  local -r contexts=$("${KUBE_ROOT}/cluster/kubectl.sh" config get-contexts -o name)
+  federation_contexts=()
+  for context in ${contexts}; do
+    # Skip federation context
+    if [[ "${context}" == "${FEDERATION_NAME}" ]]; then
+      continue
+    fi
+    # Skip contexts not beginning with "federation"
+    if [[ "${context}" != federation* ]]; then
+      continue
+    fi
+    federation_contexts+=("${context}")
+  done
+  echo ${federation_contexts[@]}
+}
+
+
+#-----------------------------------------------------------------#
+# NOTE:                                                            #
+# Everything below this line is deprecated. It will be removed    #
+# once we have sufficient confidence in kubefed based testing.    #
+#-----------------------------------------------------------------#
+
 # optional override
 # FEDERATION_IMAGE_REPO_BASE: repo which federated images are tagged under (default gcr.io/google_containers)
 # FEDERATION_NAMESPACE: name of the namespace will created for the federated components in the underlying cluster.
@@ -22,10 +76,9 @@
 # KUBE_ARCH
 # KUBE_BUILD_STAGE
 
-: "${KUBE_ROOT?Must set KUBE_ROOT env var}"
+source "${KUBE_ROOT}/cluster/common.sh"
 
-# Provides the $KUBERNETES_PROVIDER variable and detect-project function
-source "${KUBE_ROOT}/cluster/kube-util.sh"
+host_kubectl="${KUBE_ROOT}/cluster/kubectl.sh --namespace=${FEDERATION_NAMESPACE}"
 
 # If $FEDERATION_PUSH_REPO_BASE isn't set, then set the GCR registry name
 # based on the detected project name for gce and gke providers.
@@ -46,7 +99,6 @@ if [[ -z "${FEDERATION_PUSH_REPO_BASE}" ]]; then
 fi
 
 FEDERATION_IMAGE_REPO_BASE=${FEDERATION_IMAGE_REPO_BASE:-'gcr.io/google_containers'}
-FEDERATION_NAMESPACE=${FEDERATION_NAMESPACE:-federation-system}
 
 KUBE_PLATFORM=${KUBE_PLATFORM:-linux}
 KUBE_ARCH=${KUBE_ARCH:-amd64}
diff --git a/federation/cluster/federation-down.sh b/federation/cluster/federation-down.sh
index 7eeef61ec93..d74472d0728 100755
--- a/federation/cluster/federation-down.sh
+++ b/federation/cluster/federation-down.sh
@@ -20,8 +20,46 @@ set -o pipefail
 
 KUBE_ROOT=$(readlink -m $(dirname "${BASH_SOURCE}")/../../)
 
-. ${KUBE_ROOT}/federation/cluster/common.sh
+# For $FEDERATION_NAME, $FEDERATION_KUBE_CONTEXT, $HOST_CLUSTER_CONTEXT,
+# $KUBEDNS_CONFIGMAP_NAME and $KUBEDNS_CONFIGMAP_NAMESPACE.
+source "${KUBE_ROOT}/federation/cluster/common.sh"
 
-cleanup-federation-api-objects
+# unjoin_clusters unjoins all the clusters from federation.
+function unjoin_clusters() {
+  for context in $(federation_cluster_contexts); do
+    kube::log::status "Unjoining cluster \"${context}\" from federation \"${FEDERATION_NAME}\""
 
-$host_kubectl delete ns/${FEDERATION_NAMESPACE}
+    "${KUBE_ROOT}/federation/develop/kubefed.sh" unjoin \
+        "${context}" \
+        --context="${FEDERATION_KUBE_CONTEXT}" \
+        --host-cluster-context="${HOST_CLUSTER_CONTEXT}"
+
+    # Delete kube-dns configmap that contains federation
+    # configuration from each cluster.
+    # TODO: This shouldn't be required after
+    # https://github.com/kubernetes/kubernetes/pull/39338.
+    # Remove this after the PR is merged.
+    kube::log::status "Deleting \"kube-dns\" ConfigMap from \"kube-system\" namespace in cluster \"${context}\""
+    "${KUBE_ROOT}/cluster/kubectl.sh" delete configmap \
+        --context="${context}" \
+        --namespace="${KUBEDNS_CONFIGMAP_NAMESPACE}" \
+        "${KUBEDNS_CONFIGMAP_NAME}"
+  done
+}
+
+unjoin_clusters
+
+cleanup-federation-api-objects || echo "Couldn't cleanup federation api objects"
+
+"${KUBE_ROOT}/cluster/kubectl.sh" delete namespace \
+    --context="${HOST_CLUSTER_CONTEXT}" \
+    "${FEDERATION_NAMESPACE}"
+
+# TODO(madhusudancs): This is an arbitrary amount of sleep to give Kubernetes
+# clusters enough time to delete the underlying cloud provider resources
+# corresponding to the Kubernetes resources we deleted as part of the test
+# teardowns. It is shameful that we are doing this, but this is just a bandage
+# to stop the bleeding. Please don't use this pattern anywhere. Remove this
+# when proper cloud provider cleanups are implemented in the individual test
+# `AfterEach` blocks.
+sleep 2m
diff --git a/federation/cluster/federation-up.sh b/federation/cluster/federation-up.sh
index 22668978496..b5e4884f209 100755
--- a/federation/cluster/federation-up.sh
+++ b/federation/cluster/federation-up.sh
@@ -29,13 +29,11 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 # For `kube::log::status` function since it already sources
 # "${KUBE_ROOT}/cluster/lib/logging.sh" and DEFAULT_KUBECONFIG
 source "${KUBE_ROOT}/cluster/common.sh"
-# For $FEDERATION_KUBE_CONTEXT, $HOST_CLUSTER_CONTEXT,
+# For $FEDERATION_NAME, $FEDERATION_KUBE_CONTEXT, $HOST_CLUSTER_CONTEXT,
 # $KUBEDNS_CONFIGMAP_NAME, $KUBEDNS_CONFIGMAP_NAMESPACE and
 # $KUBEDNS_FEDERATION_FLAG.
 source "${KUBE_ROOT}/federation/cluster/common.sh"
 
-FEDERATION_NAME="${FEDERATION_NAME:-e2e-federation}"
-
 DNS_ZONE_NAME="${FEDERATION_DNS_ZONE_NAME:-}"
 FEDERATIONS_DOMAIN_MAP="${FEDERATIONS_DOMAIN_MAP:-}"
 
@@ -90,23 +88,27 @@ function init() {
 # join_clusters joins the clusters in the local kubeconfig to federation. The clusters
 # and their kubeconfig entries in the local kubeconfig are created while deploying clusters, i.e. when kube-up is run.
 function join_clusters() {
-  for cluster in $("${KUBE_ROOT}/cluster/kubectl.sh" config get-clusters |sed -n '1!p'); do
-    # Skip federation context
-    if [[ "${cluster}" == "${FEDERATION_NAME}" ]]; then
-      continue
-    fi
-    # Skip contexts not beginning with "federation"
-    if [[ "${cluster}" != federation* ]]; then
-      continue
-    fi
+  for context in $(federation_cluster_contexts); do
+    kube::log::status "Joining cluster with name '${context}' to federation with name '${FEDERATION_NAME}'"
 
-    kube::log::status "Joining cluster with name '${cluster}' to federation with name '${FEDERATION_NAME}'"
+    "${KUBE_ROOT}/federation/develop/kubefed.sh" join \
+        "${context}" \
+        --host-cluster-context="${HOST_CLUSTER_CONTEXT}" \
+        --context="${FEDERATION_NAME}" \
+        --secret-name="${context//_/-}"    # Replace "_" by "-"
 
-    "${KUBE_ROOT}/federation/develop/kubefed.sh" join \
-        "${cluster}" \
-        --host-cluster-context="${HOST_CLUSTER_CONTEXT}" \
-        --context="${FEDERATION_NAME}" \
-        --secret-name="${cluster//_/-}"    # Replace "_" by "-"
+
+    # Create kube-dns configmap in each cluster for kube-dns to accept
+    # federation queries.
+    # TODO: This shouldn't be required after
+    # https://github.com/kubernetes/kubernetes/pull/39338.
+    # Remove this after the PR is merged.
+    kube::log::status "Creating \"kube-dns\" ConfigMap in \"kube-system\" namespace in cluster \"${context}\""
+    "${KUBE_ROOT}/cluster/kubectl.sh" create configmap \
+        --context="${context}" \
+        --namespace="${KUBEDNS_CONFIGMAP_NAMESPACE}" \
+        "${KUBEDNS_CONFIGMAP_NAME}" \
+        --from-literal="${KUBEDNS_FEDERATION_FLAG}"="${FEDERATIONS_DOMAIN_MAP}"
   done
 }
 
diff --git a/test/e2e_federation/federated-service.go b/test/e2e_federation/federated-service.go
index f7a42aad192..15aa172f6fa 100644
--- a/test/e2e_federation/federated-service.go
+++ b/test/e2e_federation/federated-service.go
@@ -157,26 +157,6 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
 				fedframework.SkipUnlessFederated(f.ClientSet)
 				nsName := f.FederationNamespace.Name
 
-				// Create kube-dns configmap for kube-dns to accept federation queries.
-				federationsDomainMap := os.Getenv("FEDERATIONS_DOMAIN_MAP")
-				if federationsDomainMap == "" {
-					framework.Failf("missing required env var FEDERATIONS_DOMAIN_MAP")
-				}
-				kubeDNSConfigMap := v1.ConfigMap{
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      KubeDNSConfigMapName,
-						Namespace: KubeDNSConfigMapNamespace,
-					},
-					Data: map[string]string{
-						"federations": federationsDomainMap,
-					},
-				}
-				// Create this configmap in all clusters.
-				for clusterName, cluster := range clusters {
-					By(fmt.Sprintf("Creating kube dns config map in cluster: %s", clusterName))
-					_, err := cluster.Clientset.Core().ConfigMaps(KubeDNSConfigMapNamespace).Create(&kubeDNSConfigMap)
-					framework.ExpectNoError(err, fmt.Sprintf("Error in creating config map in cluster %s", clusterName))
-				}
 
 				createBackendPodsOrFail(clusters, nsName, FederatedServicePodName)
 
@@ -232,13 +212,6 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
 				} else {
 					By("No service shards to delete. `serviceShard` is nil")
 				}
-
-				// Delete the kube-dns config map from all clusters.
-				for clusterName, cluster := range clusters {
-					By(fmt.Sprintf("Deleting kube dns config map from cluster: %s", clusterName))
-					err := cluster.Clientset.Core().ConfigMaps(KubeDNSConfigMapNamespace).Delete(KubeDNSConfigMapName, nil)
-					framework.ExpectNoError(err, fmt.Sprintf("Error in deleting config map from cluster %s", clusterName))
-				}
 			})
 
 			It("should be able to discover a federated service", func() {
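
Note: with this change, the per-cluster "kube-dns" ConfigMap is created by federation-up.sh as each cluster is joined and deleted by federation-down.sh as it is unjoined, instead of being managed inside the federated-service e2e test. The sketch below is an illustrative way to check the result after running federation-up.sh; the "federation-e2e-gce-us-central1-f" context name and the "e2e-federation=example.com." domain map are made-up example values, not values defined by this patch:

    # List the cluster contexts the scripts iterate over
    # (roughly the same filter as federation_cluster_contexts in common.sh).
    ./cluster/kubectl.sh config get-contexts -o name | grep '^federation'

    # Inspect the ConfigMap that join_clusters creates in one of those clusters;
    # the context name here is a hypothetical example.
    ./cluster/kubectl.sh get configmap kube-dns \
        --context="federation-e2e-gce-us-central1-f" \
        --namespace="kube-system" \
        -o yaml
    # Expected data key: federations=${FEDERATIONS_DOMAIN_MAP}, e.g. "e2e-federation=example.com."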