Merge pull request #44072 from marun/fed-moar-e2e-cleanup

Automatic merge from submit-queue (batch tested with PRs 44191, 44117, 44072)

[Federation] Cleanup e2e framework

This PR is intended to simplify maintenance of the federation e2e tests:

 - refactor cluster helpers into framework so they can be reused by upgrade tests
 - remove unused functions
 - move backend pod management to the tests that require it rather than relying on the common cluster object
 - use a slice instead of a map to manage cluster objects to make it simpler for tests that need it to find the primary cluster themselves

Commit ``fed: Refactor e2e cluster functions into framework for reuse`` was originally included in #43500, but is included here because it formed the basis for this cleanup.

cc: @kubernetes/sig-federation-pr-reviews @perotinus
This commit is contained in:
Kubernetes Submit Queue 2017-04-06 22:16:22 -07:00 committed by GitHub
commit ca1f844bc8
15 changed files with 299 additions and 283 deletions

View File

@ -49,9 +49,7 @@ go_library(
"//vendor:k8s.io/apimachinery/pkg/util/rand", "//vendor:k8s.io/apimachinery/pkg/util/rand",
"//vendor:k8s.io/apimachinery/pkg/util/uuid", "//vendor:k8s.io/apimachinery/pkg/util/uuid",
"//vendor:k8s.io/apimachinery/pkg/util/wait", "//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/client-go/rest",
"//vendor:k8s.io/client-go/tools/clientcmd", "//vendor:k8s.io/client-go/tools/clientcmd",
"//vendor:k8s.io/client-go/tools/clientcmd/api",
], ],
) )

View File

@ -18,14 +18,10 @@ package e2e_federation
import ( import (
"fmt" "fmt"
"time"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
"k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
fedframework "k8s.io/kubernetes/test/e2e_federation/framework" fedframework "k8s.io/kubernetes/test/e2e_federation/framework"
@ -61,7 +57,7 @@ var _ = framework.KubeDescribe("Federation apiserver [Feature:Federation]", func
framework.Logf("Checking that %d clusters are Ready", len(contexts)) framework.Logf("Checking that %d clusters are Ready", len(contexts))
for _, context := range contexts { for _, context := range contexts {
clusterIsReadyOrFail(f, &context) fedframework.ClusterIsReadyOrFail(f, &context)
} }
framework.Logf("%d clusters are Ready", len(contexts)) framework.Logf("%d clusters are Ready", len(contexts))
@ -122,19 +118,3 @@ func newService(name, namespace string) *v1.Service {
}, },
} }
} }
// Verify that the cluster is marked ready.
func isReady(clusterName string, clientset *federation_clientset.Clientset) error {
return wait.PollImmediate(time.Second, 5*time.Minute, func() (bool, error) {
c, err := clientset.Federation().Clusters().Get(clusterName, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, condition := range c.Status.Conditions {
if condition.Type == federationapi.ClusterReady && condition.Status == v1.ConditionTrue {
return true, nil
}
}
return false, nil
})
}

View File

@ -43,7 +43,7 @@ const (
// Create/delete daemonset api objects // Create/delete daemonset api objects
var _ = framework.KubeDescribe("Federation daemonsets [Feature:Federation]", func() { var _ = framework.KubeDescribe("Federation daemonsets [Feature:Federation]", func() {
var clusters map[string]*cluster // All clusters, keyed by cluster name var clusters fedframework.ClusterSlice
f := fedframework.NewDefaultFederatedFramework("federated-daemonset") f := fedframework.NewDefaultFederatedFramework("federated-daemonset")
@ -51,7 +51,7 @@ var _ = framework.KubeDescribe("Federation daemonsets [Feature:Federation]", fun
BeforeEach(func() { BeforeEach(func() {
fedframework.SkipUnlessFederated(f.ClientSet) fedframework.SkipUnlessFederated(f.ClientSet)
clusters, _ = getRegisteredClusters(UserAgentName, f) clusters = f.GetRegisteredClusters()
}) })
AfterEach(func() { AfterEach(func() {
@ -114,7 +114,7 @@ func deleteAllDaemonSetsOrFail(clientset *fedclientset.Clientset, nsName string)
// verifyCascadingDeletionForDS verifies that daemonsets are deleted from // verifyCascadingDeletionForDS verifies that daemonsets are deleted from
// underlying clusters when orphan dependents is false and they are not // underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true. // deleted when orphan dependents is true.
func verifyCascadingDeletionForDS(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) { func verifyCascadingDeletionForDS(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName string) {
daemonset := createDaemonSetOrFail(clientset, nsName) daemonset := createDaemonSetOrFail(clientset, nsName)
daemonsetName := daemonset.Name daemonsetName := daemonset.Name
// Check subclusters if the daemonset was created there. // Check subclusters if the daemonset was created there.
@ -140,8 +140,9 @@ func verifyCascadingDeletionForDS(clientset *fedclientset.Clientset, clusters ma
errMessages := []string{} errMessages := []string{}
// daemon set should be present in underlying clusters unless orphanDependents is false. // daemon set should be present in underlying clusters unless orphanDependents is false.
shouldExist := orphanDependents == nil || *orphanDependents == true shouldExist := orphanDependents == nil || *orphanDependents == true
for clusterName, clusterClientset := range clusters { for _, cluster := range clusters {
_, err := clusterClientset.Extensions().DaemonSets(nsName).Get(daemonsetName, metav1.GetOptions{}) clusterName := cluster.Name
_, err := cluster.Extensions().DaemonSets(nsName).Get(daemonsetName, metav1.GetOptions{})
if shouldExist && errors.IsNotFound(err) { if shouldExist && errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for daemonset %s in cluster %s, expected daemonset to exist", daemonsetName, clusterName)) errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for daemonset %s in cluster %s, expected daemonset to exist", daemonsetName, clusterName))
} else if !shouldExist && !errors.IsNotFound(err) { } else if !shouldExist && !errors.IsNotFound(err) {
@ -233,7 +234,7 @@ func updateDaemonSetOrFail(clientset *fedclientset.Clientset, namespace string)
return newDaemonSet return newDaemonSet
} }
func waitForDaemonSetShardsOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters map[string]*cluster) { func waitForDaemonSetShardsOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters fedframework.ClusterSlice) {
framework.Logf("Waiting for daemonset %q in %d clusters", daemonset.Name, len(clusters)) framework.Logf("Waiting for daemonset %q in %d clusters", daemonset.Name, len(clusters))
for _, c := range clusters { for _, c := range clusters {
waitForDaemonSetOrFail(c.Clientset, namespace, daemonset, true, FederatedDaemonSetTimeout) waitForDaemonSetOrFail(c.Clientset, namespace, daemonset, true, FederatedDaemonSetTimeout)
@ -263,7 +264,7 @@ func waitForDaemonSetOrFail(clientset *kubeclientset.Clientset, namespace string
} }
} }
func waitForDaemonSetShardsUpdatedOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters map[string]*cluster) { func waitForDaemonSetShardsUpdatedOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters fedframework.ClusterSlice) {
framework.Logf("Waiting for daemonset %q in %d clusters", daemonset.Name, len(clusters)) framework.Logf("Waiting for daemonset %q in %d clusters", daemonset.Name, len(clusters))
for _, c := range clusters { for _, c := range clusters {
waitForDaemonSetUpdateOrFail(c.Clientset, namespace, daemonset, FederatedDaemonSetTimeout) waitForDaemonSetUpdateOrFail(c.Clientset, namespace, daemonset, FederatedDaemonSetTimeout)

View File

@ -69,11 +69,11 @@ var _ = framework.KubeDescribe("Federation deployments [Feature:Federation]", fu
// e2e cases for federated deployment controller // e2e cases for federated deployment controller
Describe("Federated Deployment", func() { Describe("Federated Deployment", func() {
var ( var (
clusters map[string]*cluster clusters fedframework.ClusterSlice
) )
BeforeEach(func() { BeforeEach(func() {
fedframework.SkipUnlessFederated(f.ClientSet) fedframework.SkipUnlessFederated(f.ClientSet)
clusters, _ = getRegisteredClusters(UserAgentName, f) clusters = f.GetRegisteredClusters()
}) })
AfterEach(func() { AfterEach(func() {
@ -140,7 +140,7 @@ func deleteAllDeploymentsOrFail(clientset *fedclientset.Clientset, nsName string
// verifyCascadingDeletionForDeployment verifies that deployments are deleted // verifyCascadingDeletionForDeployment verifies that deployments are deleted
// from underlying clusters when orphan dependents is false and they are not // from underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true. // deleted when orphan dependents is true.
func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) { func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName string) {
deployment := createDeploymentOrFail(clientset, nsName) deployment := createDeploymentOrFail(clientset, nsName)
deploymentName := deployment.Name deploymentName := deployment.Name
// Check subclusters if the deployment was created there. // Check subclusters if the deployment was created there.
@ -166,8 +166,9 @@ func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clu
errMessages := []string{} errMessages := []string{}
// deployment should be present in underlying clusters unless orphanDependents is false. // deployment should be present in underlying clusters unless orphanDependents is false.
shouldExist := orphanDependents == nil || *orphanDependents == true shouldExist := orphanDependents == nil || *orphanDependents == true
for clusterName, clusterClientset := range clusters { for _, cluster := range clusters {
_, err := clusterClientset.Extensions().Deployments(nsName).Get(deploymentName, metav1.GetOptions{}) clusterName := cluster.Name
_, err := cluster.Extensions().Deployments(nsName).Get(deploymentName, metav1.GetOptions{})
if shouldExist && errors.IsNotFound(err) { if shouldExist && errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for deployment %s in cluster %s, expected deployment to exist", deploymentName, clusterName)) errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for deployment %s in cluster %s, expected deployment to exist", deploymentName, clusterName))
} else if !shouldExist && !errors.IsNotFound(err) { } else if !shouldExist && !errors.IsNotFound(err) {
@ -179,13 +180,13 @@ func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clu
} }
} }
func waitForDeploymentOrFail(c *fedclientset.Clientset, namespace string, deploymentName string, clusters map[string]*cluster) { func waitForDeploymentOrFail(c *fedclientset.Clientset, namespace string, deploymentName string, clusters fedframework.ClusterSlice) {
err := waitForDeployment(c, namespace, deploymentName, clusters) err := waitForDeployment(c, namespace, deploymentName, clusters)
framework.ExpectNoError(err, "Failed to verify deployment %q/%q, err: %v", namespace, deploymentName, err) framework.ExpectNoError(err, "Failed to verify deployment %q/%q, err: %v", namespace, deploymentName, err)
} }
func waitForDeployment(c *fedclientset.Clientset, namespace string, deploymentName string, clusters map[string]*cluster) error { func waitForDeployment(c *fedclientset.Clientset, namespace string, deploymentName string, clusters fedframework.ClusterSlice) error {
err := wait.Poll(10*time.Second, federatedDefaultTestTimeout, func() (bool, error) { err := wait.Poll(10*time.Second, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
fdep, err := c.Deployments(namespace).Get(deploymentName, metav1.GetOptions{}) fdep, err := c.Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
@ -194,12 +195,12 @@ func waitForDeployment(c *fedclientset.Clientset, namespace string, deploymentNa
for _, cluster := range clusters { for _, cluster := range clusters {
dep, err := cluster.Extensions().Deployments(namespace).Get(deploymentName, metav1.GetOptions{}) dep, err := cluster.Extensions().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) { if err != nil && !errors.IsNotFound(err) {
By(fmt.Sprintf("Failed getting deployment: %q/%q/%q, err: %v", cluster.name, namespace, deploymentName, err)) By(fmt.Sprintf("Failed getting deployment: %q/%q/%q, err: %v", cluster.Name, namespace, deploymentName, err))
return false, err return false, err
} }
if err == nil { if err == nil {
if !verifyDeployment(fdep, dep) { if !verifyDeployment(fdep, dep) {
By(fmt.Sprintf("Deployment meta or spec not match for cluster %q:\n federation: %v\n cluster: %v", cluster.name, fdep, dep)) By(fmt.Sprintf("Deployment meta or spec not match for cluster %q:\n federation: %v\n cluster: %v", cluster.Name, fdep, dep))
return false, nil return false, nil
} }
specReplicas += *dep.Spec.Replicas specReplicas += *dep.Spec.Replicas
@ -259,7 +260,7 @@ func deleteDeploymentOrFail(clientset *fedclientset.Clientset, nsName string, de
} }
// Wait for the deployment to be deleted. // Wait for the deployment to be deleted.
err = wait.Poll(10*time.Second, federatedDefaultTestTimeout, func() (bool, error) { err = wait.Poll(10*time.Second, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
_, err := clientset.Extensions().Deployments(nsName).Get(deploymentName, metav1.GetOptions{}) _, err := clientset.Extensions().Deployments(nsName).Get(deploymentName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) { if err != nil && errors.IsNotFound(err) {
return true, nil return true, nil

View File

@ -10,6 +10,7 @@ load(
go_library( go_library(
name = "go_default_library", name = "go_default_library",
srcs = [ srcs = [
"cluster.go",
"framework.go", "framework.go",
"util.go", "util.go",
], ],
@ -31,6 +32,7 @@ go_library(
"//vendor:k8s.io/apimachinery/pkg/util/wait", "//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/client-go/rest", "//vendor:k8s.io/client-go/rest",
"//vendor:k8s.io/client-go/tools/clientcmd", "//vendor:k8s.io/client-go/tools/clientcmd",
"//vendor:k8s.io/client-go/tools/clientcmd/api",
], ],
) )

View File

@ -0,0 +1,164 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
"k8s.io/kubernetes/pkg/api/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
	// KubeAPIQPS is the queries-per-second limit applied to per-cluster clientsets.
	KubeAPIQPS float32 = 20.0
	// KubeAPIBurst is the burst limit applied to per-cluster clientsets.
	KubeAPIBurst = 30

	// userAgentName identifies federation e2e traffic in apiserver logs.
	userAgentName = "federation-e2e"

	// federatedNamespaceTimeout bounds the wait for the federated namespace
	// to appear in every member cluster.
	federatedNamespaceTimeout = 5 * time.Minute
	// federatedClustersWaitTimeout bounds the wait for all expected clusters
	// to register with the federation apiserver.
	federatedClustersWaitTimeout = 1 * time.Minute
)

// ClusterSlice is a slice of clusters.
type ClusterSlice []*Cluster

// Cluster keeps track of the name and client of a cluster in the federation.
// The embedded Clientset allows a Cluster to be used directly as a Kubernetes
// client for that member cluster.
type Cluster struct {
	Name string
	*kubeclientset.Clientset
}
// getRegisteredClusters waits for every cluster named in the e2e context to
// register and become Ready, builds a clientset for each, and waits for the
// framework's federated namespace to exist in all of them before returning.
func getRegisteredClusters(f *Framework) ClusterSlice {
	e2eContexts := f.GetUnderlyingFederatedContexts()

	By("Obtaining a list of all the clusters")
	registered := waitForAllRegisteredClusters(f, len(e2eContexts))

	framework.Logf("Checking that %d clusters are Ready", len(e2eContexts))
	for i := range e2eContexts {
		ctx := e2eContexts[i]
		ClusterIsReadyOrFail(f, &ctx)
	}
	framework.Logf("%d clusters are Ready", len(e2eContexts))

	var result ClusterSlice
	for idx, item := range registered.Items {
		framework.Logf("Creating a clientset for the cluster %s", item.Name)
		// A kubeconfig is required to derive per-cluster client configs.
		Expect(framework.TestContext.KubeConfig).ToNot(Equal(""), "KubeConfig must be specified to load clusters' client config")
		entry := &Cluster{
			Name:      item.Name,
			Clientset: createClientsetForCluster(item, idx, userAgentName),
		}
		result = append(result, entry)
	}

	waitForNamespaceInFederatedClusters(result, f.FederationNamespace.Name, federatedNamespaceTimeout)

	return result
}
// waitForAllRegisteredClusters waits for all clusters defined in e2e context to be created
// return ClusterList until the listed cluster items equals clusterCount
// waitForAllRegisteredClusters polls the federation apiserver until exactly
// clusterCount clusters are registered (or the wait times out, which fails
// the test), and returns the final ClusterList.
func waitForAllRegisteredClusters(f *Framework, clusterCount int) *federationapi.ClusterList {
	var result *federationapi.ClusterList

	poll := func() (bool, error) {
		list, err := f.FederationClientset.Federation().Clusters().List(metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		result = list
		framework.Logf("%d clusters registered, waiting for %d", len(list.Items), clusterCount)
		// Keep polling until the registered count matches expectations.
		return len(list.Items) == clusterCount, nil
	}

	if err := wait.PollImmediate(framework.Poll, federatedClustersWaitTimeout, poll); err != nil {
		framework.Failf("Failed to list registered clusters: %+v", err)
	}
	return result
}
// createClientsetForCluster builds a Kubernetes clientset for the given
// federated cluster by loading the test kubeconfig and overriding the server
// address with the cluster's advertised endpoint. i is only used for error
// reporting. Fails the test on any error.
func createClientsetForCluster(c federationapi.Cluster, i int, userAgentName string) *kubeclientset.Clientset {
	kubeConfig, err := clientcmd.LoadFromFile(framework.TestContext.KubeConfig)
	framework.ExpectNoError(err, "error loading KubeConfig: %v", err)

	// Point the client at the cluster's first advertised server address.
	overrides := &clientcmd.ConfigOverrides{
		ClusterInfo: clientcmdapi.Cluster{
			Server: c.Spec.ServerAddressByClientCIDRs[0].ServerAddress,
		},
	}
	clientConfig := clientcmd.NewNonInteractiveClientConfig(*kubeConfig, c.Name, overrides, clientcmd.NewDefaultClientConfigLoadingRules())
	restConfig, err := clientConfig.ClientConfig()
	framework.ExpectNoError(err, "Error creating client config in cluster #%d (%q)", i, c.Name)

	// Apply the shared e2e rate limits before constructing the clientset.
	restConfig.QPS = KubeAPIQPS
	restConfig.Burst = KubeAPIBurst
	return kubeclientset.NewForConfigOrDie(restclient.AddUserAgent(restConfig, userAgentName))
}
// waitForNamespaceInFederatedClusters waits for the federated namespace to be created in federated clusters
// waitForNamespaceInFederatedClusters waits for the federated namespace to be
// created in every member cluster, failing the test if any cluster does not
// have it within the timeout.
func waitForNamespaceInFederatedClusters(clusters ClusterSlice, nsName string, timeout time.Duration) {
	for _, member := range clusters {
		clusterName := member.Name
		check := func() (bool, error) {
			if _, err := member.Clientset.Core().Namespaces().Get(nsName, metav1.GetOptions{}); err != nil {
				// Not ready yet (or transient error); keep polling.
				By(fmt.Sprintf("Waiting for namespace %q to be created in cluster %q, err: %v", nsName, clusterName, err))
				return false, nil
			}
			By(fmt.Sprintf("Namespace %q exists in cluster %q", nsName, clusterName))
			return true, nil
		}
		err := wait.PollImmediate(framework.Poll, timeout, check)
		framework.ExpectNoError(err, "Failed to verify federated namespace %q creation in cluster %q", nsName, clusterName)
	}
}
// ClusterIsReadyOrFail checks whether the federated cluster of the provided context is ready
func ClusterIsReadyOrFail(f *Framework, context *E2EContext) {
c, err := f.FederationClientset.Federation().Clusters().Get(context.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("get cluster: %+v", err))
if c.ObjectMeta.Name != context.Name {
framework.Failf("cluster name does not match input context: actual=%+v, expected=%+v", c, context)
}
err = isReady(context.Name, f.FederationClientset)
framework.ExpectNoError(err, fmt.Sprintf("unexpected error in verifying if cluster %s is ready: %+v", context.Name, err))
framework.Logf("Cluster %s is Ready", context.Name)
}
// Verify that the cluster is marked ready.
func isReady(clusterName string, clientset *fedclientset.Clientset) error {
return wait.PollImmediate(time.Second, 5*time.Minute, func() (bool, error) {
c, err := clientset.Federation().Clusters().Get(clusterName, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, condition := range c.Status.Conditions {
if condition.Type == federationapi.ClusterReady && condition.Status == v1.ConditionTrue {
return true, nil
}
}
return false, nil
})
}

View File

@ -231,3 +231,7 @@ func (f *Framework) GetUnderlyingFederatedContexts() []E2EContext {
return e2eContexts return e2eContexts
} }
// GetRegisteredClusters waits for all clusters defined in the e2e context to
// be registered and Ready, and returns a ClusterSlice with a clientset for
// each member cluster.
func (f *Framework) GetRegisteredClusters() ClusterSlice {
	return getRegisteredClusters(f)
}

View File

@ -37,6 +37,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
) )
// FederatedDefaultTestTimeout is the default timeout used by federation e2e
// wait loops.
const FederatedDefaultTestTimeout = 5 * time.Minute
// Detects whether the federation namespace exists in the underlying cluster // Detects whether the federation namespace exists in the underlying cluster
func SkipUnlessFederated(c clientset.Interface) { func SkipUnlessFederated(c clientset.Interface) {
federationNS := framework.FederationSystemNamespace() federationNS := framework.FederationSystemNamespace()

View File

@ -142,11 +142,11 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
// e2e cases for federation ingress controller // e2e cases for federation ingress controller
var _ = Describe("Federated Ingresses", func() { var _ = Describe("Federated Ingresses", func() {
var ( var (
clusters map[string]*cluster // All clusters, keyed by cluster name clusters fedframework.ClusterSlice
primaryClusterName, federationName, ns string federationName, ns string
jig *federationTestJig jig *federationTestJig
service *v1.Service service *v1.Service
secret *v1.Secret secret *v1.Secret
) )
// register clusters in federation apiserver // register clusters in federation apiserver
@ -157,7 +157,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
federationName = DefaultFederationName federationName = DefaultFederationName
} }
jig = newFederationTestJig(f.FederationClientset) jig = newFederationTestJig(f.FederationClientset)
clusters, primaryClusterName = getRegisteredClusters(UserAgentName, f) clusters = f.GetRegisteredClusters()
ns = f.FederationNamespace.Name ns = f.FederationNamespace.Name
// create backend service // create backend service
service = createServiceOrFail(f.FederationClientset, ns, FederatedIngressServiceName) service = createServiceOrFail(f.FederationClientset, ns, FederatedIngressServiceName)
@ -221,10 +221,12 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
var _ = Describe("Ingress connectivity and DNS", func() { var _ = Describe("Ingress connectivity and DNS", func() {
var backendPods BackendPodMap
BeforeEach(func() { BeforeEach(func() {
fedframework.SkipUnlessFederated(f.ClientSet) fedframework.SkipUnlessFederated(f.ClientSet)
// create backend pod // create backend pod
createBackendPodsOrFail(clusters, ns, FederatedIngressServicePodName) backendPods = createBackendPodsOrFail(clusters, ns, FederatedIngressServicePodName)
// create ingress object // create ingress object
jig.ing = createIngressOrFail(f.FederationClientset, ns, service.Name, FederatedIngressTLSSecretName) jig.ing = createIngressOrFail(f.FederationClientset, ns, service.Name, FederatedIngressTLSSecretName)
// wait for ingress objects sync // wait for ingress objects sync
@ -233,7 +235,9 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
}) })
AfterEach(func() { AfterEach(func() {
deleteBackendPodsOrFail(clusters, ns) deleteBackendPodsOrFail(clusters, backendPods)
backendPods = nil
if jig.ing != nil { if jig.ing != nil {
By(fmt.Sprintf("Deleting ingress %v on all clusters", jig.ing.Name)) By(fmt.Sprintf("Deleting ingress %v on all clusters", jig.ing.Name))
orphanDependents := false orphanDependents := false
@ -285,7 +289,7 @@ func equivalentIngress(federatedIngress, clusterIngress v1beta1.Ingress) bool {
// verifyCascadingDeletionForIngress verifies that ingresses are deleted from // verifyCascadingDeletionForIngress verifies that ingresses are deleted from
// underlying clusters when orphan dependents is false and they are not deleted // underlying clusters when orphan dependents is false and they are not deleted
// when orphan dependents is true. // when orphan dependents is true.
func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) { func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName string) {
ingress := createIngressOrFail(clientset, nsName, FederatedIngressServiceName, FederatedIngressTLSSecretName) ingress := createIngressOrFail(clientset, nsName, FederatedIngressServiceName, FederatedIngressTLSSecretName)
ingressName := ingress.Name ingressName := ingress.Name
// Check subclusters if the ingress was created there. // Check subclusters if the ingress was created there.
@ -299,8 +303,9 @@ func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, cluste
errMessages := []string{} errMessages := []string{}
// ingress should be present in underlying clusters unless orphanDependents is false. // ingress should be present in underlying clusters unless orphanDependents is false.
shouldExist := orphanDependents == nil || *orphanDependents == true shouldExist := orphanDependents == nil || *orphanDependents == true
for clusterName, clusterClientset := range clusters { for _, cluster := range clusters {
_, err := clusterClientset.Extensions().Ingresses(nsName).Get(ingressName, metav1.GetOptions{}) clusterName := cluster.Name
_, err := cluster.Extensions().Ingresses(nsName).Get(ingressName, metav1.GetOptions{})
if shouldExist && errors.IsNotFound(err) { if shouldExist && errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for ingress %s in cluster %s, expected ingress to exist", ingressName, clusterName)) errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for ingress %s in cluster %s, expected ingress to exist", ingressName, clusterName))
} else if !shouldExist && !errors.IsNotFound(err) { } else if !shouldExist && !errors.IsNotFound(err) {
@ -338,7 +343,7 @@ func waitForIngressOrFail(clientset *kubeclientset.Clientset, namespace string,
} }
// waitForIngressShardsOrFail waits for the ingress to appear in all clusters // waitForIngressShardsOrFail waits for the ingress to appear in all clusters
func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) { func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterSlice) {
framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters)) framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
for _, c := range clusters { for _, c := range clusters {
waitForIngressOrFail(c.Clientset, namespace, ingress, true, FederatedIngressTimeout) waitForIngressOrFail(c.Clientset, namespace, ingress, true, FederatedIngressTimeout)
@ -346,7 +351,7 @@ func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clus
} }
// waitForIngressShardsUpdatedOrFail waits for the ingress to be updated in all clusters // waitForIngressShardsUpdatedOrFail waits for the ingress to be updated in all clusters
func waitForIngressShardsUpdatedOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) { func waitForIngressShardsUpdatedOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterSlice) {
framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters)) framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
for _, c := range clusters { for _, c := range clusters {
waitForIngressUpdateOrFail(c.Clientset, namespace, ingress, FederatedIngressTimeout) waitForIngressUpdateOrFail(c.Clientset, namespace, ingress, FederatedIngressTimeout)
@ -374,7 +379,7 @@ func waitForIngressUpdateOrFail(clientset *kubeclientset.Clientset, namespace st
} }
// waitForIngressShardsGoneOrFail waits for the ingress to disappear in all clusters // waitForIngressShardsGoneOrFail waits for the ingress to disappear in all clusters
func waitForIngressShardsGoneOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) { func waitForIngressShardsGoneOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterSlice) {
framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters)) framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
for _, c := range clusters { for _, c := range clusters {
waitForIngressOrFail(c.Clientset, namespace, ingress, false, FederatedIngressTimeout) waitForIngressOrFail(c.Clientset, namespace, ingress, false, FederatedIngressTimeout)

View File

@ -44,13 +44,13 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func
f := fedframework.NewDefaultFederatedFramework("federation-namespace") f := fedframework.NewDefaultFederatedFramework("federation-namespace")
Describe("Namespace objects", func() { Describe("Namespace objects", func() {
var clusters map[string]*cluster // All clusters, keyed by cluster name var clusters fedframework.ClusterSlice
var nsName string var nsName string
BeforeEach(func() { BeforeEach(func() {
fedframework.SkipUnlessFederated(f.ClientSet) fedframework.SkipUnlessFederated(f.ClientSet)
clusters, _ = getRegisteredClusters(UserAgentName, f) clusters = f.GetRegisteredClusters()
}) })
AfterEach(func() { AfterEach(func() {
@ -188,7 +188,7 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func
// verifyNsCascadingDeletion verifies that namespaces are deleted from // verifyNsCascadingDeletion verifies that namespaces are deleted from
// underlying clusters when orphan dependents is false and they are not // underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true. // deleted when orphan dependents is true.
func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters map[string]*cluster, orphanDependents *bool) string { func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters fedframework.ClusterSlice, orphanDependents *bool) string {
nsName := createNamespace(nsClient) nsName := createNamespace(nsClient)
// Check subclusters if the namespace was created there. // Check subclusters if the namespace was created there.
By(fmt.Sprintf("Waiting for namespace %s to be created in all underlying clusters", nsName)) By(fmt.Sprintf("Waiting for namespace %s to be created in all underlying clusters", nsName))
@ -213,8 +213,9 @@ func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters m
errMessages := []string{} errMessages := []string{}
// namespace should be present in underlying clusters unless orphanDependents is false. // namespace should be present in underlying clusters unless orphanDependents is false.
shouldExist := orphanDependents == nil || *orphanDependents == true shouldExist := orphanDependents == nil || *orphanDependents == true
for clusterName, clusterClientset := range clusters { for _, cluster := range clusters {
_, err := clusterClientset.Core().Namespaces().Get(nsName, metav1.GetOptions{}) clusterName := cluster.Name
_, err := cluster.Core().Namespaces().Get(nsName, metav1.GetOptions{})
if shouldExist && errors.IsNotFound(err) { if shouldExist && errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for namespace %s in cluster %s, expected namespace to exist", nsName, clusterName)) errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for namespace %s in cluster %s, expected namespace to exist", nsName, clusterName))
} else if !shouldExist && !errors.IsNotFound(err) { } else if !shouldExist && !errors.IsNotFound(err) {

View File

@ -74,12 +74,12 @@ var _ = framework.KubeDescribe("Federated ReplicaSet [Feature:Federation]", func
// e2e cases for federated replicaset controller // e2e cases for federated replicaset controller
Describe("Features", func() { Describe("Features", func() {
var ( var (
clusters map[string]*cluster clusters fedframework.ClusterSlice
) )
BeforeEach(func() { BeforeEach(func() {
fedframework.SkipUnlessFederated(f.ClientSet) fedframework.SkipUnlessFederated(f.ClientSet)
clusters, _ = getRegisteredClusters(UserAgentName, f) clusters = f.GetRegisteredClusters()
}) })
// e2e cases for federated replicaset controller // e2e cases for federated replicaset controller
@ -200,7 +200,7 @@ var _ = framework.KubeDescribe("Federated ReplicaSet [Feature:Federation]", func
}) })
}) })
func createAndWaitForReplicasetOrFail(clientset *fedclientset.Clientset, nsName string, clusters map[string]*cluster) *v1beta1.ReplicaSet { func createAndWaitForReplicasetOrFail(clientset *fedclientset.Clientset, nsName string, clusters fedframework.ClusterSlice) *v1beta1.ReplicaSet {
rs := createReplicaSetOrFail(clientset, newReplicaSet(nsName, FederationReplicaSetPrefix, 5, nil)) rs := createReplicaSetOrFail(clientset, newReplicaSet(nsName, FederationReplicaSetPrefix, 5, nil))
// Check subclusters if the replicaSet was created there. // Check subclusters if the replicaSet was created there.
By(fmt.Sprintf("Waiting for replica sets %s to be created in all underlying clusters", rs.Name)) By(fmt.Sprintf("Waiting for replica sets %s to be created in all underlying clusters", rs.Name))
@ -220,7 +220,7 @@ func createAndWaitForReplicasetOrFail(clientset *fedclientset.Clientset, nsName
return rs return rs
} }
func createAndUpdateFedRSWithPref(clientset *fedclientset.Clientset, nsName string, clusters map[string]*cluster, pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) *v1beta1.ReplicaSet { func createAndUpdateFedRSWithPref(clientset *fedclientset.Clientset, nsName string, clusters fedframework.ClusterSlice, pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) *v1beta1.ReplicaSet {
framework.Logf("Replicas: %d, Preference: %#v", replicas, pref) framework.Logf("Replicas: %d, Preference: %#v", replicas, pref)
rs := newReplicaSet(nsName, FederationReplicaSetPrefix, replicas, pref) rs := newReplicaSet(nsName, FederationReplicaSetPrefix, replicas, pref)
rs = createReplicaSetOrFail(clientset, rs) rs = createReplicaSetOrFail(clientset, rs)
@ -254,14 +254,15 @@ func deleteAllReplicaSetsOrFail(clientset *fedclientset.Clientset, nsName string
// verifyCascadingDeletionForReplicaSet verifies that replicaSets are deleted // verifyCascadingDeletionForReplicaSet verifies that replicaSets are deleted
// from underlying clusters when orphan dependents is false and they are not // from underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true. // deleted when orphan dependents is true.
func verifyCascadingDeletionForReplicaSet(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName, rsName string) { func verifyCascadingDeletionForReplicaSet(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName, rsName string) {
By(fmt.Sprintf("Deleting replica set %s", rsName)) By(fmt.Sprintf("Deleting replica set %s", rsName))
deleteReplicaSetOrFail(clientset, nsName, rsName, orphanDependents) deleteReplicaSetOrFail(clientset, nsName, rsName, orphanDependents)
By(fmt.Sprintf("Verifying replica sets %s in underlying clusters", rsName)) By(fmt.Sprintf("Verifying replica sets %s in underlying clusters", rsName))
errMessages := []string{} errMessages := []string{}
for clusterName, clusterClientset := range clusters { for _, cluster := range clusters {
_, err := clusterClientset.Extensions().ReplicaSets(nsName).Get(rsName, metav1.GetOptions{}) clusterName := cluster.Name
_, err := cluster.Extensions().ReplicaSets(nsName).Get(rsName, metav1.GetOptions{})
if (orphanDependents == nil || *orphanDependents == true) && errors.IsNotFound(err) { if (orphanDependents == nil || *orphanDependents == true) && errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for replica set %s in cluster %s, expected replica set to exist", rsName, clusterName)) errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for replica set %s in cluster %s, expected replica set to exist", rsName, clusterName))
} else if (orphanDependents != nil && *orphanDependents == false) && (err == nil || !errors.IsNotFound(err)) { } else if (orphanDependents != nil && *orphanDependents == false) && (err == nil || !errors.IsNotFound(err)) {
@ -273,7 +274,7 @@ func verifyCascadingDeletionForReplicaSet(clientset *fedclientset.Clientset, clu
} }
} }
func generateFedRSPrefsWithWeight(clusters map[string]*cluster) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) { func generateFedRSPrefsWithWeight(clusters fedframework.ClusterSlice) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
By("Generating replicaset preferences with weights") By("Generating replicaset preferences with weights")
clusterNames := extractClusterNames(clusters) clusterNames := extractClusterNames(clusters)
pref = &federation.FederatedReplicaSetPreferences{ pref = &federation.FederatedReplicaSetPreferences{
@ -294,7 +295,7 @@ func generateFedRSPrefsWithWeight(clusters map[string]*cluster) (pref *federatio
return return
} }
func generateFedRSPrefsWithMin(clusters map[string]*cluster) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) { func generateFedRSPrefsWithMin(clusters fedframework.ClusterSlice) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
By("Generating replicaset preferences with min replicas") By("Generating replicaset preferences with min replicas")
clusterNames := extractClusterNames(clusters) clusterNames := extractClusterNames(clusters)
pref = &federation.FederatedReplicaSetPreferences{ pref = &federation.FederatedReplicaSetPreferences{
@ -321,7 +322,7 @@ func generateFedRSPrefsWithMin(clusters map[string]*cluster) (pref *federation.F
return return
} }
func generateFedRSPrefsWithMax(clusters map[string]*cluster) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) { func generateFedRSPrefsWithMax(clusters fedframework.ClusterSlice) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
By("Generating replicaset preferences with max replicas") By("Generating replicaset preferences with max replicas")
clusterNames := extractClusterNames(clusters) clusterNames := extractClusterNames(clusters)
pref = &federation.FederatedReplicaSetPreferences{ pref = &federation.FederatedReplicaSetPreferences{
@ -354,7 +355,7 @@ func updateFedRSPrefsRebalance(pref *federation.FederatedReplicaSetPreferences,
return pref return pref
} }
func generateFedRSPrefsForRebalancing(clusters map[string]*cluster) (pref1, pref2 *federation.FederatedReplicaSetPreferences, replicas int32, expect1, expect2 map[string]int32) { func generateFedRSPrefsForRebalancing(clusters fedframework.ClusterSlice) (pref1, pref2 *federation.FederatedReplicaSetPreferences, replicas int32, expect1, expect2 map[string]int32) {
By("Generating replicaset for rebalancing") By("Generating replicaset for rebalancing")
clusterNames := extractClusterNames(clusters) clusterNames := extractClusterNames(clusters)
replicas = 3 replicas = 3
@ -382,14 +383,14 @@ func generateFedRSPrefsForRebalancing(clusters map[string]*cluster) (pref1, pref
return return
} }
func waitForReplicaSetOrFail(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters map[string]*cluster, expect map[string]int32) { func waitForReplicaSetOrFail(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters fedframework.ClusterSlice, expect map[string]int32) {
err := waitForReplicaSet(c, namespace, replicaSetName, clusters, expect) err := waitForReplicaSet(c, namespace, replicaSetName, clusters, expect)
framework.ExpectNoError(err, "Failed to verify replica set \"%s/%s\", err: %v", namespace, replicaSetName, err) framework.ExpectNoError(err, "Failed to verify replica set \"%s/%s\", err: %v", namespace, replicaSetName, err)
} }
func waitForReplicaSet(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters map[string]*cluster, expect map[string]int32) error { func waitForReplicaSet(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters fedframework.ClusterSlice, expect map[string]int32) error {
framework.Logf("waitForReplicaSet: %s/%s; clusters: %v; expect: %v", namespace, replicaSetName, clusters, expect) framework.Logf("waitForReplicaSet: %s/%s; clusters: %v; expect: %v", namespace, replicaSetName, clusters, expect)
err := wait.Poll(10*time.Second, federatedDefaultTestTimeout, func() (bool, error) { err := wait.Poll(10*time.Second, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
frs, err := c.ReplicaSets(namespace).Get(replicaSetName, metav1.GetOptions{}) frs, err := c.ReplicaSets(namespace).Get(replicaSetName, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
@ -398,21 +399,21 @@ func waitForReplicaSet(c *fedclientset.Clientset, namespace string, replicaSetNa
for _, cluster := range clusters { for _, cluster := range clusters {
rs, err := cluster.ReplicaSets(namespace).Get(replicaSetName, metav1.GetOptions{}) rs, err := cluster.ReplicaSets(namespace).Get(replicaSetName, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) { if err != nil && !errors.IsNotFound(err) {
framework.Logf("Failed getting replicaset: \"%s/%s/%s\", err: %v", cluster.name, namespace, replicaSetName, err) framework.Logf("Failed getting replicaset: \"%s/%s/%s\", err: %v", cluster.Name, namespace, replicaSetName, err)
return false, err return false, err
} }
if errors.IsNotFound(err) { if errors.IsNotFound(err) {
if expect != nil && expect[cluster.name] > 0 { if expect != nil && expect[cluster.Name] > 0 {
framework.Logf("Replicaset \"%s/%s/%s\" with replica count %d does not exist", cluster.name, namespace, replicaSetName, expect[cluster.name]) framework.Logf("Replicaset \"%s/%s/%s\" with replica count %d does not exist", cluster.Name, namespace, replicaSetName, expect[cluster.Name])
return false, nil return false, nil
} }
} else { } else {
if !equivalentReplicaSet(frs, rs) { if !equivalentReplicaSet(frs, rs) {
framework.Logf("Replicaset meta or spec does not match for cluster %q:\n federation: %v\n cluster: %v", cluster.name, frs, rs) framework.Logf("Replicaset meta or spec does not match for cluster %q:\n federation: %v\n cluster: %v", cluster.Name, frs, rs)
return false, nil return false, nil
} }
if expect != nil && *rs.Spec.Replicas < expect[cluster.name] { if expect != nil && *rs.Spec.Replicas < expect[cluster.Name] {
framework.Logf("Replicas do not match for \"%s/%s/%s\": expected: >= %v, actual: %v", cluster.name, namespace, replicaSetName, expect[cluster.name], *rs.Spec.Replicas) framework.Logf("Replicas do not match for \"%s/%s/%s\": expected: >= %v, actual: %v", cluster.Name, namespace, replicaSetName, expect[cluster.Name], *rs.Spec.Replicas)
return false, nil return false, nil
} }
specReplicas += *rs.Spec.Replicas specReplicas += *rs.Spec.Replicas
@ -530,10 +531,10 @@ func newReplicaSetWithName(namespace string, name string, replicas int32, pref *
return rs return rs
} }
func extractClusterNames(clusters map[string]*cluster) []string { func extractClusterNames(clusters fedframework.ClusterSlice) []string {
clusterNames := make([]string, 0, len(clusters)) clusterNames := make([]string, 0, len(clusters))
for clusterName := range clusters { for _, cluster := range clusters {
clusterNames = append(clusterNames, clusterName) clusterNames = append(clusterNames, cluster.Name)
} }
return clusterNames return clusterNames
} }

View File

@ -42,7 +42,7 @@ const (
// Create/delete secret api objects // Create/delete secret api objects
var _ = framework.KubeDescribe("Federation secrets [Feature:Federation]", func() { var _ = framework.KubeDescribe("Federation secrets [Feature:Federation]", func() {
var clusters map[string]*cluster // All clusters, keyed by cluster name var clusters fedframework.ClusterSlice
f := fedframework.NewDefaultFederatedFramework("federated-secret") f := fedframework.NewDefaultFederatedFramework("federated-secret")
@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("Federation secrets [Feature:Federation]", func()
BeforeEach(func() { BeforeEach(func() {
fedframework.SkipUnlessFederated(f.ClientSet) fedframework.SkipUnlessFederated(f.ClientSet)
clusters, _ = getRegisteredClusters(UserAgentName, f) clusters = f.GetRegisteredClusters()
}) })
AfterEach(func() { AfterEach(func() {
@ -108,7 +108,7 @@ func deleteAllSecretsOrFail(clientset *fedclientset.Clientset, nsName string) {
// verifyCascadingDeletionForSecret verifies that secrets are deleted from // verifyCascadingDeletionForSecret verifies that secrets are deleted from
// underlying clusters when orphan dependents is false and they are not // underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true. // deleted when orphan dependents is true.
func verifyCascadingDeletionForSecret(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) { func verifyCascadingDeletionForSecret(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName string) {
secret := createSecretOrFail(clientset, nsName) secret := createSecretOrFail(clientset, nsName)
secretName := secret.Name secretName := secret.Name
// Check subclusters if the secret was created there. // Check subclusters if the secret was created there.
@ -134,8 +134,9 @@ func verifyCascadingDeletionForSecret(clientset *fedclientset.Clientset, cluster
errMessages := []string{} errMessages := []string{}
// secret should be present in underlying clusters unless orphanDependents is false. // secret should be present in underlying clusters unless orphanDependents is false.
shouldExist := orphanDependents == nil || *orphanDependents == true shouldExist := orphanDependents == nil || *orphanDependents == true
for clusterName, clusterClientset := range clusters { for _, cluster := range clusters {
_, err := clusterClientset.Core().Secrets(nsName).Get(secretName, metav1.GetOptions{}) clusterName := cluster.Name
_, err := cluster.Core().Secrets(nsName).Get(secretName, metav1.GetOptions{})
if shouldExist && errors.IsNotFound(err) { if shouldExist && errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for secret %s in cluster %s, expected secret to exist", secretName, clusterName)) errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for secret %s in cluster %s, expected secret to exist", secretName, clusterName))
} else if !shouldExist && !errors.IsNotFound(err) { } else if !shouldExist && !errors.IsNotFound(err) {
@ -213,7 +214,7 @@ func updateSecretOrFail(clientset *fedclientset.Clientset, nsName string, secret
return newSecret return newSecret
} }
func waitForSecretShardsOrFail(nsName string, secret *v1.Secret, clusters map[string]*cluster) { func waitForSecretShardsOrFail(nsName string, secret *v1.Secret, clusters fedframework.ClusterSlice) {
framework.Logf("Waiting for secret %q in %d clusters", secret.Name, len(clusters)) framework.Logf("Waiting for secret %q in %d clusters", secret.Name, len(clusters))
for _, c := range clusters { for _, c := range clusters {
waitForSecretOrFail(c.Clientset, nsName, secret, true, FederatedSecretTimeout) waitForSecretOrFail(c.Clientset, nsName, secret, true, FederatedSecretTimeout)
@ -243,7 +244,7 @@ func waitForSecretOrFail(clientset *kubeclientset.Clientset, nsName string, secr
} }
} }
func waitForSecretShardsUpdatedOrFail(nsName string, secret *v1.Secret, clusters map[string]*cluster) { func waitForSecretShardsUpdatedOrFail(nsName string, secret *v1.Secret, clusters fedframework.ClusterSlice) {
framework.Logf("Waiting for secret %q in %d clusters", secret.Name, len(clusters)) framework.Logf("Waiting for secret %q in %d clusters", secret.Name, len(clusters))
for _, c := range clusters { for _, c := range clusters {
waitForSecretUpdateOrFail(c.Clientset, nsName, secret, FederatedSecretTimeout) waitForSecretUpdateOrFail(c.Clientset, nsName, secret, FederatedSecretTimeout)

View File

@ -47,9 +47,8 @@ var FederatedServiceLabels = map[string]string{
var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func() { var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func() {
f := fedframework.NewDefaultFederatedFramework("federated-service") f := fedframework.NewDefaultFederatedFramework("federated-service")
var clusters map[string]*cluster // All clusters, keyed by cluster name var clusters fedframework.ClusterSlice
var federationName string var federationName string
var primaryClusterName string // The name of the "primary" cluster
var _ = Describe("Without Clusters [NoCluster]", func() { var _ = Describe("Without Clusters [NoCluster]", func() {
BeforeEach(func() { BeforeEach(func() {
@ -84,7 +83,7 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
federationName = DefaultFederationName federationName = DefaultFederationName
} }
clusters, primaryClusterName = getRegisteredClusters(UserAgentName, f) clusters = f.GetRegisteredClusters()
}) })
Describe("Federated Service", func() { Describe("Federated Service", func() {
@ -146,6 +145,7 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
var ( var (
service *v1.Service service *v1.Service
serviceShard *v1.Service serviceShard *v1.Service
backendPods BackendPodMap
) )
BeforeEach(func() { BeforeEach(func() {
@ -153,7 +153,7 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
nsName := f.FederationNamespace.Name nsName := f.FederationNamespace.Name
createBackendPodsOrFail(clusters, nsName, FederatedServicePodName) backendPods = createBackendPodsOrFail(clusters, nsName, FederatedServicePodName)
service = createServiceOrFail(f.FederationClientset, nsName, FederatedServiceName) service = createServiceOrFail(f.FederationClientset, nsName, FederatedServiceName)
obj, err := api.Scheme.DeepCopy(service) obj, err := api.Scheme.DeepCopy(service)
@ -191,7 +191,8 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
fedframework.SkipUnlessFederated(f.ClientSet) fedframework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name nsName := f.FederationNamespace.Name
deleteBackendPodsOrFail(clusters, nsName) deleteBackendPodsOrFail(clusters, backendPods)
backendPods = nil
if service != nil { if service != nil {
deleteServiceOrFail(f.FederationClientset, nsName, service.Name, nil) deleteServiceOrFail(f.FederationClientset, nsName, service.Name, nil)
@ -244,8 +245,10 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
BeforeEach(func() { BeforeEach(func() {
fedframework.SkipUnlessFederated(f.ClientSet) fedframework.SkipUnlessFederated(f.ClientSet)
// Delete all the backend pods from the shard which is local to the discovery pod. // Delete the backend pod from the shard which is local to the discovery pod.
deleteOneBackendPodOrFail(clusters[primaryClusterName]) primaryCluster := clusters[0]
backendPod := backendPods[primaryCluster.Name]
deleteOneBackendPodOrFail(primaryCluster, backendPod)
}) })
@ -287,7 +290,7 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
// verifyCascadingDeletionForService verifies that services are deleted from // verifyCascadingDeletionForService verifies that services are deleted from
// underlying clusters when orphan dependents is false and they are not // underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true. // deleted when orphan dependents is true.
func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) { func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName string) {
service := createServiceOrFail(clientset, nsName, FederatedServiceName) service := createServiceOrFail(clientset, nsName, FederatedServiceName)
serviceName := service.Name serviceName := service.Name
// Check subclusters if the service was created there. // Check subclusters if the service was created there.
@ -313,8 +316,9 @@ func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, cluste
errMessages := []string{} errMessages := []string{}
// service should be present in underlying clusters unless orphanDependents is false. // service should be present in underlying clusters unless orphanDependents is false.
shouldExist := orphanDependents == nil || *orphanDependents == true shouldExist := orphanDependents == nil || *orphanDependents == true
for clusterName, clusterClientset := range clusters { for _, cluster := range clusters {
_, err := clusterClientset.Core().Services(nsName).Get(serviceName, metav1.GetOptions{}) clusterName := cluster.Name
_, err := cluster.Core().Services(nsName).Get(serviceName, metav1.GetOptions{})
if shouldExist && errors.IsNotFound(err) { if shouldExist && errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for service %s in cluster %s, expected service to exist", serviceName, clusterName)) errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for service %s in cluster %s, expected service to exist", serviceName, clusterName))
} else if !shouldExist && !errors.IsNotFound(err) { } else if !shouldExist && !errors.IsNotFound(err) {

View File

@ -129,9 +129,9 @@ func federationControlPlaneUpgrade(f *fedframework.Framework) {
func federatedClustersUpgrade(f *fedframework.Framework) { func federatedClustersUpgrade(f *fedframework.Framework) {
k8sVersion, err := framework.RealVersion(framework.TestContext.UpgradeTarget) k8sVersion, err := framework.RealVersion(framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err) framework.ExpectNoError(err)
clusters, _ := getRegisteredClusters(UserAgentName, f) clusters := f.GetRegisteredClusters()
for _, cluster := range clusters { for _, cluster := range clusters {
framework.ExpectNoError(fedframework.MasterUpgrade(cluster.name, k8sVersion)) framework.ExpectNoError(fedframework.MasterUpgrade(cluster.Name, k8sVersion))
framework.ExpectNoError(framework.CheckMasterVersion(cluster.Clientset, k8sVersion)) framework.ExpectNoError(framework.CheckMasterVersion(cluster.Clientset, k8sVersion))
// TODO: Need to add Node upgrade. Add once this framework is stable // TODO: Need to add Node upgrade. Add once this framework is stable

View File

@ -25,9 +25,6 @@ import (
"k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1" federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
@ -42,18 +39,12 @@ import (
) )
var ( var (
KubeAPIQPS float32 = 20.0 DefaultFederationName = "e2e-federation"
KubeAPIBurst = 30
DefaultFederationName = "e2e-federation"
UserAgentName = "federation-e2e"
// We use this to decide how long to wait for our DNS probes to succeed. // We use this to decide how long to wait for our DNS probes to succeed.
DNSTTL = 180 * time.Second // TODO: make k8s.io/kubernetes/federation/pkg/federation-controller/service.minDnsTtl exported, and import it here. DNSTTL = 180 * time.Second // TODO: make k8s.io/kubernetes/federation/pkg/federation-controller/service.minDnsTtl exported, and import it here.
) )
const ( const (
federatedDefaultTestTimeout = 5 * time.Minute
federatedClustersWaitTimeout = 1 * time.Minute
// [30000, 32767] is the allowed default service nodeport range and our // [30000, 32767] is the allowed default service nodeport range and our
// tests just use the defaults. // tests just use the defaults.
FederatedSvcNodePortFirst = 30000 FederatedSvcNodePortFirst = 30000
@ -62,15 +53,6 @@ const (
var FederationSuite common.Suite var FederationSuite common.Suite
// cluster keeps track of the assorted objects and state related to each cluster
// in the federation
type cluster struct {
	// name is the cluster's registered name in the federation API server.
	name string
	// Embedded clientset gives direct API access to this member cluster
	// (e.g. c.Core().Namespaces()).
	*kubeclientset.Clientset
	namespaceCreated bool // Did we need to create a new namespace in this cluster? If so, we should delete it.
	backendPod *v1.Pod // The backend pod, if one's been created.
}
func createClusterObjectOrFail(f *fedframework.Framework, context *fedframework.E2EContext) { func createClusterObjectOrFail(f *fedframework.Framework, context *fedframework.E2EContext) {
framework.Logf("Creating cluster object: %s (%s, secret: %s)", context.Name, context.Cluster.Cluster.Server, context.Name) framework.Logf("Creating cluster object: %s (%s, secret: %s)", context.Name, context.Cluster.Cluster.Server, context.Name)
cluster := federationapi.Cluster{ cluster := federationapi.Cluster{
@ -97,144 +79,6 @@ func createClusterObjectOrFail(f *fedframework.Framework, context *fedframework.
framework.Logf("Successfully created cluster object: %s (%s, secret: %s)", context.Name, context.Cluster.Cluster.Server, context.Name) framework.Logf("Successfully created cluster object: %s (%s, secret: %s)", context.Name, context.Cluster.Cluster.Server, context.Name)
} }
// clusterIsReadyOrFail asserts that the cluster described by the given e2e
// context is registered with the federation API server under the expected
// name and has reached Ready condition, failing the test otherwise.
func clusterIsReadyOrFail(f *fedframework.Framework, context *fedframework.E2EContext) {
	name := context.Name
	fetched, err := f.FederationClientset.Federation().Clusters().Get(name, metav1.GetOptions{})
	framework.ExpectNoError(err, fmt.Sprintf("get cluster: %+v", err))
	// The returned object must correspond to the context we asked about.
	if fetched.ObjectMeta.Name != name {
		framework.Failf("cluster name does not match input context: actual=%+v, expected=%+v", fetched, context)
	}
	err = isReady(name, f.FederationClientset)
	framework.ExpectNoError(err, fmt.Sprintf("unexpected error in verifying if cluster %s is ready: %+v", name, err))
	framework.Logf("Cluster %s is Ready", name)
}
// waitForAllRegisteredClusters polls the federation API server until exactly
// clusterCount clusters are registered, then returns the resulting list.
// The test is failed if the count is not reached within
// federatedClustersWaitTimeout or listing errors out.
func waitForAllRegisteredClusters(f *fedframework.Framework, clusterCount int) *federationapi.ClusterList {
	var clusterList *federationapi.ClusterList
	pollFn := func() (bool, error) {
		list, err := f.FederationClientset.Federation().Clusters().List(metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		clusterList = list
		framework.Logf("%d clusters registered, waiting for %d", len(clusterList.Items), clusterCount)
		return len(clusterList.Items) == clusterCount, nil
	}
	if err := wait.PollImmediate(framework.Poll, federatedClustersWaitTimeout, pollFn); err != nil {
		framework.Failf("Failed to list registered clusters: %+v", err)
	}
	return clusterList
}
// createClientsetForCluster builds a Kubernetes clientset that talks directly
// to the member cluster described by c, reusing the credentials of the shared
// kubeconfig while overriding the server address. The index i is used only in
// error messages.
func createClientsetForCluster(c federationapi.Cluster, i int, userAgentName string) *kubeclientset.Clientset {
	kubecfg, err := clientcmd.LoadFromFile(framework.TestContext.KubeConfig)
	framework.ExpectNoError(err, "error loading KubeConfig: %v", err)

	// Point the client at the cluster's advertised server address.
	overrides := &clientcmd.ConfigOverrides{
		ClusterInfo: clientcmdapi.Cluster{
			Server: c.Spec.ServerAddressByClientCIDRs[0].ServerAddress,
		},
	}
	clientConfig := clientcmd.NewNonInteractiveClientConfig(*kubecfg, c.Name, overrides, clientcmd.NewDefaultClientConfigLoadingRules())
	restCfg, err := clientConfig.ClientConfig()
	framework.ExpectNoError(err, "Error creating client config in cluster #%d (%q)", i, c.Name)

	// Rate limits for e2e traffic against the member cluster.
	restCfg.QPS = KubeAPIQPS
	restCfg.Burst = KubeAPIBurst
	return kubeclientset.NewForConfigOrDie(restclient.AddUserAgent(restCfg, userAgentName))
}
// Creates the federation namespace in all underlying clusters.
// The e2e Framework already created the namespace in the federation control
// plane; this propagates it to each member cluster that does not yet have it,
// and records on the cluster object whether we created it so that teardown
// (unregisterClusters) knows to delete it.
// TODO(nikhiljindal): remove this once we have the namespace controller working as expected.
func createNamespaceInClusters(clusters map[string]*cluster, f *fedframework.Framework) {
	nsName := f.FederationNamespace.Name
	for name, c := range clusters {
		// The e2e Framework created the required namespace in federation control plane, but we need to create it in all the others, if it doesn't yet exist.
		if _, err := c.Clientset.Core().Namespaces().Get(nsName, metav1.GetOptions{}); errors.IsNotFound(err) {
			ns := &v1.Namespace{
				ObjectMeta: metav1.ObjectMeta{
					Name: nsName,
				},
			}
			_, err := c.Clientset.Core().Namespaces().Create(ns)
			if err == nil {
				// Mark for deletion during cluster unregistration.
				c.namespaceCreated = true
			}
			framework.ExpectNoError(err, "Couldn't create the namespace %s in cluster %q", nsName, name)
			framework.Logf("Namespace %s created in cluster %q", nsName, name)
		} else if err != nil {
			// This branch is reached when the existence check (Get) fails with
			// an error other than NotFound; the previous message wrongly
			// claimed the create had failed.
			framework.Logf("Couldn't check for the namespace %s in cluster %q: %v", nsName, name, err)
		}
	}
}
// Unregisters the given clusters from federation control plane.
// Also deletes the federation namespace from each cluster.
// Namespaces are removed first (only from clusters where
// createNamespaceInClusters created them), then every registered cluster
// object is deleted from the federation API server.
func unregisterClusters(clusters map[string]*cluster, f *fedframework.Framework) {
	nsName := f.FederationNamespace.Name
	for name, c := range clusters {
		// Only clean up namespaces this test run created.
		if c.namespaceCreated {
			// Re-check existence so a namespace already gone doesn't fail teardown.
			if _, err := c.Clientset.Core().Namespaces().Get(nsName, metav1.GetOptions{}); !errors.IsNotFound(err) {
				err := c.Clientset.Core().Namespaces().Delete(nsName, &metav1.DeleteOptions{})
				framework.ExpectNoError(err, "Couldn't delete the namespace %s in cluster %q: %v", nsName, name, err)
			}
			framework.Logf("Namespace %s deleted in cluster %q", nsName, name)
		}
	}

	// Delete the registered clusters in the federation API server.
	clusterList, err := f.FederationClientset.Federation().Clusters().List(metav1.ListOptions{})
	framework.ExpectNoError(err, "Error listing clusters")
	for _, cluster := range clusterList.Items {
		err := f.FederationClientset.Federation().Clusters().Delete(cluster.Name, &metav1.DeleteOptions{})
		framework.ExpectNoError(err, "Error deleting cluster %q", cluster.Name)
	}
}
// waitForNamespaceInFederatedClusters waits, for each member cluster in turn,
// until the federated namespace nsName exists there, failing the test if any
// cluster does not see it within the given timeout.
func waitForNamespaceInFederatedClusters(clusters map[string]*cluster, nsName string, timeout time.Duration) {
	for clusterName, c := range clusters {
		clientset := c.Clientset
		poll := func() (bool, error) {
			if _, err := clientset.Core().Namespaces().Get(nsName, metav1.GetOptions{}); err != nil {
				By(fmt.Sprintf("Waiting for namespace %q to be created in cluster %q, err: %v", nsName, clusterName, err))
				return false, nil
			}
			By(fmt.Sprintf("Namespace %q exists in cluster %q", nsName, clusterName))
			return true, nil
		}
		err := wait.PollImmediate(framework.Poll, timeout, poll)
		framework.ExpectNoError(err, "Failed to verify federated namespace %q creation in cluster %q", nsName, clusterName)
	}
}
// can not be moved to util, as By and Expect must be put in Ginkgo test unit
//
// getRegisteredClusters waits for all underlying federated clusters to be
// registered and Ready, builds a clientset for each, and returns the cluster
// map keyed by cluster name together with the name of the primary (first)
// cluster in the registered list.
func getRegisteredClusters(userAgentName string, f *fedframework.Framework) (map[string]*cluster, string) {
	clusters := make(map[string]*cluster)
	contexts := f.GetUnderlyingFederatedContexts()

	By("Obtaining a list of all the clusters")
	clusterList := waitForAllRegisteredClusters(f, len(contexts))

	framework.Logf("Checking that %d clusters are Ready", len(contexts))
	// Index the slice instead of taking the address of a range variable
	// named `context`: the original shadowed the common package name and
	// tripped go vet's "implicit memory aliasing in for loop" check.
	for i := range contexts {
		clusterIsReadyOrFail(f, &contexts[i])
	}
	framework.Logf("%d clusters are Ready", len(contexts))

	// The first registered cluster is treated as the primary cluster.
	primaryClusterName := clusterList.Items[0].Name
	By(fmt.Sprintf("Labeling %q as the first cluster", primaryClusterName))
	for i, c := range clusterList.Items {
		framework.Logf("Creating a clientset for the cluster %s", c.Name)
		Expect(framework.TestContext.KubeConfig).ToNot(Equal(""), "KubeConfig must be specified to load clusters' client config")
		clusters[c.Name] = &cluster{c.Name, createClientsetForCluster(c, i, userAgentName), false, nil}
	}
	// The federation namespace must exist in every member cluster before
	// tests can create federated resources in it.
	waitForNamespaceInFederatedClusters(clusters, f.FederationNamespace.Name, federatedDefaultTestTimeout)
	return clusters, primaryClusterName
}
// waitForServiceOrFail waits until a service is either present or absent in the cluster specified by clientset. // waitForServiceOrFail waits until a service is either present or absent in the cluster specified by clientset.
// If the condition is not met within timout, it fails the calling test. // If the condition is not met within timout, it fails the calling test.
func waitForServiceOrFail(clientset *kubeclientset.Clientset, namespace string, service *v1.Service, present bool, timeout time.Duration) { func waitForServiceOrFail(clientset *kubeclientset.Clientset, namespace string, service *v1.Service, present bool, timeout time.Duration) {
@ -259,10 +103,10 @@ func waitForServiceOrFail(clientset *kubeclientset.Clientset, namespace string,
} }
// waitForServiceShardsOrFail waits for the service to appear in all clusters // waitForServiceShardsOrFail waits for the service to appear in all clusters
func waitForServiceShardsOrFail(namespace string, service *v1.Service, clusters map[string]*cluster) { func waitForServiceShardsOrFail(namespace string, service *v1.Service, clusters fedframework.ClusterSlice) {
framework.Logf("Waiting for service %q in %d clusters", service.Name, len(clusters)) framework.Logf("Waiting for service %q in %d clusters", service.Name, len(clusters))
for _, c := range clusters { for _, c := range clusters {
waitForServiceOrFail(c.Clientset, namespace, service, true, federatedDefaultTestTimeout) waitForServiceOrFail(c.Clientset, namespace, service, true, fedframework.FederatedDefaultTestTimeout)
} }
} }
@ -317,7 +161,7 @@ func deleteServiceOrFail(clientset *fedclientset.Clientset, namespace string, se
err := clientset.Services(namespace).Delete(serviceName, &metav1.DeleteOptions{OrphanDependents: orphanDependents}) err := clientset.Services(namespace).Delete(serviceName, &metav1.DeleteOptions{OrphanDependents: orphanDependents})
framework.ExpectNoError(err, "Error deleting service %q from namespace %q", serviceName, namespace) framework.ExpectNoError(err, "Error deleting service %q from namespace %q", serviceName, namespace)
// Wait for the service to be deleted. // Wait for the service to be deleted.
err = wait.Poll(5*time.Second, federatedDefaultTestTimeout, func() (bool, error) { err = wait.Poll(5*time.Second, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
_, err := clientset.Core().Services(namespace).Get(serviceName, metav1.GetOptions{}) _, err := clientset.Core().Services(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) { if err != nil && errors.IsNotFound(err) {
return true, nil return true, nil
@ -330,12 +174,13 @@ func deleteServiceOrFail(clientset *fedclientset.Clientset, namespace string, se
} }
} }
func cleanupServiceShardsAndProviderResources(namespace string, service *v1.Service, clusters map[string]*cluster) { func cleanupServiceShardsAndProviderResources(namespace string, service *v1.Service, clusters fedframework.ClusterSlice) {
framework.Logf("Deleting service %q in %d clusters", service.Name, len(clusters)) framework.Logf("Deleting service %q in %d clusters", service.Name, len(clusters))
for name, c := range clusters { for _, c := range clusters {
name := c.Name
var cSvc *v1.Service var cSvc *v1.Service
err := wait.PollImmediate(framework.Poll, federatedDefaultTestTimeout, func() (bool, error) { err := wait.PollImmediate(framework.Poll, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
var err error var err error
cSvc, err = c.Clientset.Services(namespace).Get(service.Name, metav1.GetOptions{}) cSvc, err = c.Clientset.Services(namespace).Get(service.Name, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) { if err != nil && !errors.IsNotFound(err) {
@ -352,15 +197,15 @@ func cleanupServiceShardsAndProviderResources(namespace string, service *v1.Serv
}) })
if err != nil || cSvc == nil { if err != nil || cSvc == nil {
By(fmt.Sprintf("Failed to find service %q in namespace %q, in cluster %q in %s", service.Name, namespace, name, federatedDefaultTestTimeout)) By(fmt.Sprintf("Failed to find service %q in namespace %q, in cluster %q in %s", service.Name, namespace, name, fedframework.FederatedDefaultTestTimeout))
continue continue
} }
err = cleanupServiceShard(c.Clientset, name, namespace, cSvc, federatedDefaultTestTimeout) err = cleanupServiceShard(c.Clientset, name, namespace, cSvc, fedframework.FederatedDefaultTestTimeout)
if err != nil { if err != nil {
framework.Logf("Failed to delete service %q in namespace %q, in cluster %q: %v", service.Name, namespace, name, err) framework.Logf("Failed to delete service %q in namespace %q, in cluster %q: %v", service.Name, namespace, name, err)
} }
err = cleanupServiceShardLoadBalancer(name, cSvc, federatedDefaultTestTimeout) err = cleanupServiceShardLoadBalancer(name, cSvc, fedframework.FederatedDefaultTestTimeout)
if err != nil { if err != nil {
framework.Logf("Failed to delete cloud provider resources for service %q in namespace %q, in cluster %q", service.Name, namespace, name) framework.Logf("Failed to delete cloud provider resources for service %q in namespace %q, in cluster %q", service.Name, namespace, name)
} }
@ -493,9 +338,12 @@ func discoverService(f *fedframework.Framework, name string, exists bool, podNam
} }
} }
// createBackendPodsOrFail creates one pod in each cluster, and returns the created pods (in the same order as clusterClientSets). // BackendPodMap maps a cluster name to a backend pod created in that cluster
// If creation of any pod fails, the test fails (possibly with a partially created set of pods). No retries are attempted. type BackendPodMap map[string]*v1.Pod
func createBackendPodsOrFail(clusters map[string]*cluster, namespace string, name string) {
// createBackendPodsOrFail creates one pod in each cluster, and returns the created pods. If creation of any pod fails,
// the test fails (possibly with a partially created set of pods). No retries are attempted.
func createBackendPodsOrFail(clusters fedframework.ClusterSlice, namespace string, name string) BackendPodMap {
pod := &v1.Pod{ pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: name, Name: name,
@ -512,22 +360,24 @@ func createBackendPodsOrFail(clusters map[string]*cluster, namespace string, nam
RestartPolicy: v1.RestartPolicyAlways, RestartPolicy: v1.RestartPolicyAlways,
}, },
} }
for name, c := range clusters { podMap := make(BackendPodMap)
for _, c := range clusters {
name := c.Name
By(fmt.Sprintf("Creating pod %q in namespace %q in cluster %q", pod.Name, namespace, name)) By(fmt.Sprintf("Creating pod %q in namespace %q in cluster %q", pod.Name, namespace, name))
createdPod, err := c.Clientset.Core().Pods(namespace).Create(pod) createdPod, err := c.Clientset.Core().Pods(namespace).Create(pod)
framework.ExpectNoError(err, "Creating pod %q in namespace %q in cluster %q", name, namespace, name) framework.ExpectNoError(err, "Creating pod %q in namespace %q in cluster %q", name, namespace, name)
By(fmt.Sprintf("Successfully created pod %q in namespace %q in cluster %q: %v", pod.Name, namespace, name, *createdPod)) By(fmt.Sprintf("Successfully created pod %q in namespace %q in cluster %q: %v", pod.Name, namespace, name, *createdPod))
c.backendPod = createdPod podMap[name] = createdPod
} }
return podMap
} }
// deleteOneBackendPodOrFail deletes exactly one backend pod which must not be nil // deleteOneBackendPodOrFail deletes exactly one backend pod which must not be nil
// The test fails if there are any errors. // The test fails if there are any errors.
func deleteOneBackendPodOrFail(c *cluster) { func deleteOneBackendPodOrFail(c *fedframework.Cluster, pod *v1.Pod) {
pod := c.backendPod
Expect(pod).ToNot(BeNil()) Expect(pod).ToNot(BeNil())
err := c.Clientset.Core().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0)) err := c.Clientset.Core().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
msgFmt := fmt.Sprintf("Deleting Pod %q in namespace %q in cluster %q %%v", pod.Name, pod.Namespace, c.name) msgFmt := fmt.Sprintf("Deleting Pod %q in namespace %q in cluster %q %%v", pod.Name, pod.Namespace, c.Name)
if errors.IsNotFound(err) { if errors.IsNotFound(err) {
framework.Logf(msgFmt, "does not exist. No need to delete it.") framework.Logf(msgFmt, "does not exist. No need to delete it.")
return return
@ -538,13 +388,15 @@ func deleteOneBackendPodOrFail(c *cluster) {
// deleteBackendPodsOrFail deletes one pod from each cluster that has one. // deleteBackendPodsOrFail deletes one pod from each cluster that has one.
// If deletion of any pod fails, the test fails (possibly with a partially deleted set of pods). No retries are attempted. // If deletion of any pod fails, the test fails (possibly with a partially deleted set of pods). No retries are attempted.
func deleteBackendPodsOrFail(clusters map[string]*cluster, namespace string) { func deleteBackendPodsOrFail(clusters fedframework.ClusterSlice, backendPods BackendPodMap) {
for name, c := range clusters { if backendPods == nil {
if c.backendPod != nil { return
deleteOneBackendPodOrFail(c) }
c.backendPod = nil for _, c := range clusters {
if pod, ok := backendPods[c.Name]; ok {
deleteOneBackendPodOrFail(c, pod)
} else { } else {
By(fmt.Sprintf("No backend pod to delete for cluster %q", name)) By(fmt.Sprintf("No backend pod to delete for cluster %q", c.Name))
} }
} }
} }
@ -552,7 +404,7 @@ func deleteBackendPodsOrFail(clusters map[string]*cluster, namespace string) {
// waitForReplicatSetToBeDeletedOrFail waits for the named ReplicaSet in namespace to be deleted. // waitForReplicatSetToBeDeletedOrFail waits for the named ReplicaSet in namespace to be deleted.
// If the deletion fails, the enclosing test fails. // If the deletion fails, the enclosing test fails.
func waitForReplicaSetToBeDeletedOrFail(clientset *fedclientset.Clientset, namespace string, replicaSet string) { func waitForReplicaSetToBeDeletedOrFail(clientset *fedclientset.Clientset, namespace string, replicaSet string) {
err := wait.Poll(5*time.Second, federatedDefaultTestTimeout, func() (bool, error) { err := wait.Poll(5*time.Second, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
_, err := clientset.Extensions().ReplicaSets(namespace).Get(replicaSet, metav1.GetOptions{}) _, err := clientset.Extensions().ReplicaSets(namespace).Get(replicaSet, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) { if err != nil && errors.IsNotFound(err) {
return true, nil return true, nil