diff --git a/test/e2e_federation/daemonset.go b/test/e2e_federation/daemonset.go
index b8d5e74356d..eeaf2a2dad9 100644
--- a/test/e2e_federation/daemonset.go
+++ b/test/e2e_federation/daemonset.go
@@ -43,7 +43,7 @@ const (
 
 // Create/delete daemonset api objects
 var _ = framework.KubeDescribe("Federation daemonsets [Feature:Federation]", func() {
-	var clusters fedframework.ClusterMap // All clusters, keyed by cluster name
+	var clusters fedframework.ClusterSlice
 
 	f := fedframework.NewDefaultFederatedFramework("federated-daemonset")
 
@@ -51,7 +51,7 @@ var _ = framework.KubeDescribe("Federation daemonsets [Feature:Federation]", fun
 
 	BeforeEach(func() {
 		fedframework.SkipUnlessFederated(f.ClientSet)
-		clusters, _ = f.GetRegisteredClusters()
+		clusters = f.GetRegisteredClusters()
 	})
 
 	AfterEach(func() {
@@ -114,7 +114,7 @@ func deleteAllDaemonSetsOrFail(clientset *fedclientset.Clientset, nsName string)
 // verifyCascadingDeletionForDS verifies that daemonsets are deleted from
 // underlying clusters when orphan dependents is false and they are not
 // deleted when orphan dependents is true.
-func verifyCascadingDeletionForDS(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName string) {
+func verifyCascadingDeletionForDS(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName string) {
 	daemonset := createDaemonSetOrFail(clientset, nsName)
 	daemonsetName := daemonset.Name
 	// Check subclusters if the daemonset was created there.
@@ -140,8 +140,9 @@ func verifyCascadingDeletionForDS(clientset *fedclientset.Clientset, clusters fe
 	errMessages := []string{}
 	// daemon set should be present in underlying clusters unless orphanDependents is false.
 	shouldExist := orphanDependents == nil || *orphanDependents == true
-	for clusterName, clusterClientset := range clusters {
-		_, err := clusterClientset.Extensions().DaemonSets(nsName).Get(daemonsetName, metav1.GetOptions{})
+	for _, cluster := range clusters {
+		clusterName := cluster.Name
+		_, err := cluster.Extensions().DaemonSets(nsName).Get(daemonsetName, metav1.GetOptions{})
 		if shouldExist && errors.IsNotFound(err) {
 			errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for daemonset %s in cluster %s, expected daemonset to exist", daemonsetName, clusterName))
 		} else if !shouldExist && !errors.IsNotFound(err) {
@@ -233,7 +234,7 @@ func updateDaemonSetOrFail(clientset *fedclientset.Clientset, namespace string)
 	return newDaemonSet
 }
 
-func waitForDaemonSetShardsOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters fedframework.ClusterMap) {
+func waitForDaemonSetShardsOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters fedframework.ClusterSlice) {
 	framework.Logf("Waiting for daemonset %q in %d clusters", daemonset.Name, len(clusters))
 	for _, c := range clusters {
 		waitForDaemonSetOrFail(c.Clientset, namespace, daemonset, true, FederatedDaemonSetTimeout)
@@ -263,7 +264,7 @@ func waitForDaemonSetOrFail(clientset *kubeclientset.Clientset, namespace string
 	}
 }
 
-func waitForDaemonSetShardsUpdatedOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters fedframework.ClusterMap) {
+func waitForDaemonSetShardsUpdatedOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters fedframework.ClusterSlice) {
 	framework.Logf("Waiting for daemonset %q in %d clusters", daemonset.Name, len(clusters))
 	for _, c := range clusters {
 		waitForDaemonSetUpdateOrFail(c.Clientset, namespace, daemonset, FederatedDaemonSetTimeout)
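The hunks above show the loop rewrite this patch repeats in every test file: ranging over a `ClusterMap` yielded the cluster name as the map key, while ranging over a `ClusterSlice` yields `*Cluster` values, so the name is read from `cluster.Name` and client calls go through the methods promoted from the embedded clientset. A minimal sketch of the resulting pattern; the helper `checkDaemonSetShards` is illustrative, not part of the patch, and the import paths assume the kubernetes tree this patch targets:

```go
package fedsketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	fedframework "k8s.io/kubernetes/test/e2e_federation/framework"
)

// checkDaemonSetShards reports, for each cluster, whether the named
// daemonset exists there. cluster.Name replaces the old map key, and
// cluster.Extensions() is promoted from the embedded *Clientset.
func checkDaemonSetShards(clusters fedframework.ClusterSlice, ns, name string) map[string]bool {
	found := make(map[string]bool, len(clusters))
	for _, cluster := range clusters {
		_, err := cluster.Extensions().DaemonSets(ns).Get(name, metav1.GetOptions{})
		found[cluster.Name] = err == nil
	}
	return found
}
```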
diff --git a/test/e2e_federation/deployment.go b/test/e2e_federation/deployment.go
index 72ab625588d..e873dcd8986 100644
--- a/test/e2e_federation/deployment.go
+++ b/test/e2e_federation/deployment.go
@@ -69,11 +69,11 @@ var _ = framework.KubeDescribe("Federation deployments [Feature:Federation]", fu
 	// e2e cases for federated deployment controller
 	Describe("Federated Deployment", func() {
 		var (
-			clusters fedframework.ClusterMap
+			clusters fedframework.ClusterSlice
 		)
 		BeforeEach(func() {
 			fedframework.SkipUnlessFederated(f.ClientSet)
-			clusters, _ = f.GetRegisteredClusters()
+			clusters = f.GetRegisteredClusters()
 		})
 
 		AfterEach(func() {
@@ -140,7 +140,7 @@ func deleteAllDeploymentsOrFail(clientset *fedclientset.Clientset, nsName string
 // verifyCascadingDeletionForDeployment verifies that deployments are deleted
 // from underlying clusters when orphan dependents is false and they are not
 // deleted when orphan dependents is true.
-func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName string) {
+func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName string) {
 	deployment := createDeploymentOrFail(clientset, nsName)
 	deploymentName := deployment.Name
 	// Check subclusters if the deployment was created there.
@@ -166,8 +166,9 @@ func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clu
 	errMessages := []string{}
 	// deployment should be present in underlying clusters unless orphanDependents is false.
 	shouldExist := orphanDependents == nil || *orphanDependents == true
-	for clusterName, clusterClientset := range clusters {
-		_, err := clusterClientset.Extensions().Deployments(nsName).Get(deploymentName, metav1.GetOptions{})
+	for _, cluster := range clusters {
+		clusterName := cluster.Name
+		_, err := cluster.Extensions().Deployments(nsName).Get(deploymentName, metav1.GetOptions{})
 		if shouldExist && errors.IsNotFound(err) {
 			errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for deployment %s in cluster %s, expected deployment to exist", deploymentName, clusterName))
 		} else if !shouldExist && !errors.IsNotFound(err) {
@@ -179,12 +180,12 @@ func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clu
 	}
 }
 
-func waitForDeploymentOrFail(c *fedclientset.Clientset, namespace string, deploymentName string, clusters fedframework.ClusterMap) {
+func waitForDeploymentOrFail(c *fedclientset.Clientset, namespace string, deploymentName string, clusters fedframework.ClusterSlice) {
 	err := waitForDeployment(c, namespace, deploymentName, clusters)
 	framework.ExpectNoError(err, "Failed to verify deployment %q/%q, err: %v", namespace, deploymentName, err)
 }
 
-func waitForDeployment(c *fedclientset.Clientset, namespace string, deploymentName string, clusters fedframework.ClusterMap) error {
+func waitForDeployment(c *fedclientset.Clientset, namespace string, deploymentName string, clusters fedframework.ClusterSlice) error {
 	err := wait.Poll(10*time.Second, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
 		fdep, err := c.Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
 		if err != nil {
diff --git a/test/e2e_federation/framework/cluster.go b/test/e2e_federation/framework/cluster.go
index dd073e12912..d5dba98dd3b 100644
--- a/test/e2e_federation/framework/cluster.go
+++ b/test/e2e_federation/framework/cluster.go
@@ -45,19 +45,16 @@ const (
 	federatedClustersWaitTimeout = 1 * time.Minute
 )
 
-// ClusterMap is a map of Cluster instances keyed by cluster name
-type ClusterMap map[string]*Cluster
+// ClusterSlice is a slice of clusters
+type ClusterSlice []*Cluster
 
-// Cluster keeps track of the assorted objects and state related to each cluster
-// in the federation
+// Cluster keeps track of the name and client of a cluster in the federation
 type Cluster struct {
 	Name string
 	*kubeclientset.Clientset
 }
 
-// can not be moved to util, as By and Expect must be put in Ginkgo test unit
-func getRegisteredClusters(f *Framework) (ClusterMap, string) {
-	clusters := make(ClusterMap)
+func getRegisteredClusters(f *Framework) ClusterSlice {
 	contexts := f.GetUnderlyingFederatedContexts()
 
 	By("Obtaining a list of all the clusters")
@@ -69,15 +66,17 @@ func getRegisteredClusters(f *Framework) (ClusterMap, string) {
 	}
 	framework.Logf("%d clusters are Ready", len(contexts))
 
-	primaryClusterName := clusterList.Items[0].Name
-	By(fmt.Sprintf("Labeling %q as the first cluster", primaryClusterName))
+	clusters := ClusterSlice{}
 	for i, c := range clusterList.Items {
 		framework.Logf("Creating a clientset for the cluster %s", c.Name)
 		Expect(framework.TestContext.KubeConfig).ToNot(Equal(""), "KubeConfig must be specified to load clusters' client config")
-		clusters[c.Name] = &Cluster{c.Name, createClientsetForCluster(c, i, userAgentName), false, nil}
+		clusters = append(clusters, &Cluster{
+			Name:      c.Name,
+			Clientset: createClientsetForCluster(c, i, userAgentName),
+		})
 	}
 	waitForNamespaceInFederatedClusters(clusters, f.FederationNamespace.Name, federatedNamespaceTimeout)
-	return clusters, primaryClusterName
+	return clusters
 }
 
 // waitForAllRegisteredClusters waits for all clusters defined in e2e context to be created
@@ -120,8 +119,9 @@ func createClientsetForCluster(c federationapi.Cluster, i int, userAgentName str
 }
 
 // waitForNamespaceInFederatedClusters waits for the federated namespace to be created in federated clusters
-func waitForNamespaceInFederatedClusters(clusters ClusterMap, nsName string, timeout time.Duration) {
-	for name, c := range clusters {
+func waitForNamespaceInFederatedClusters(clusters ClusterSlice, nsName string, timeout time.Duration) {
+	for _, c := range clusters {
+		name := c.Name
 		err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
 			_, err := c.Clientset.Core().Namespaces().Get(nsName, metav1.GetOptions{})
 			if err != nil {
diff --git a/test/e2e_federation/framework/framework.go b/test/e2e_federation/framework/framework.go
index 7fe24a6a7a6..38c5fb20560 100644
--- a/test/e2e_federation/framework/framework.go
+++ b/test/e2e_federation/framework/framework.go
@@ -232,6 +232,6 @@ func (f *Framework) GetUnderlyingFederatedContexts() []E2EContext {
 	return e2eContexts
 }
 
-func (f *Framework) GetRegisteredClusters() (ClusterMap, string) {
+func (f *Framework) GetRegisteredClusters() ClusterSlice {
 	return getRegisteredClusters(f)
 }
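The type change above is the heart of the patch. Go randomizes map iteration order, so a `ClusterMap` gave tests no stable notion of a "first" cluster, which is why `getRegisteredClusters` had to return `primaryClusterName` separately; a slice preserves the order of `clusterList.Items`, so callers can simply index element 0. A condensed sketch of the new shape (the package name and the import path for the `kubeclientset` alias are assumptions matching the tree this patch targets):

```go
package fedsketch

// kubeclientset mirrors the alias used in cluster.go; the path is the
// generated clientset in the kubernetes tree this patch targets.
import kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"

// ClusterSlice keeps clusters in the order the federation API server
// listed them, so clusters[0] is a stable "primary" cluster.
type ClusterSlice []*Cluster

// Cluster pairs a cluster's name with a client for it. Embedding
// *kubeclientset.Clientset promotes methods such as Core() and
// Extensions() onto Cluster, so cluster.Extensions() compiles directly.
type Cluster struct {
	Name string
	*kubeclientset.Clientset
}
```

As a side note, the old positional literal `&Cluster{c.Name, createClientsetForCluster(c, i, userAgentName), false, nil}` implies the struct previously carried two further fields that this patch drops; the new field-named literal would also fail to compile if the struct drifted again.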
diff --git a/test/e2e_federation/ingress.go b/test/e2e_federation/ingress.go
index f035efa9845..701dd2583f5 100644
--- a/test/e2e_federation/ingress.go
+++ b/test/e2e_federation/ingress.go
@@ -142,11 +142,11 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
 	// e2e cases for federation ingress controller
 	var _ = Describe("Federated Ingresses", func() {
 		var (
-			clusters fedframework.ClusterMap // All clusters, keyed by cluster name
-			primaryClusterName, federationName, ns string
-			jig                                    *federationTestJig
-			service                                *v1.Service
-			secret                                 *v1.Secret
+			clusters           fedframework.ClusterSlice
+			federationName, ns string
+			jig                *federationTestJig
+			service            *v1.Service
+			secret             *v1.Secret
 		)
 
 		// register clusters in federation apiserver
@@ -157,7 +157,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
 				federationName = DefaultFederationName
 			}
 			jig = newFederationTestJig(f.FederationClientset)
-			clusters, primaryClusterName = f.GetRegisteredClusters()
+			clusters = f.GetRegisteredClusters()
 			ns = f.FederationNamespace.Name
 			// create backend service
 			service = createServiceOrFail(f.FederationClientset, ns, FederatedIngressServiceName)
@@ -289,7 +289,7 @@ func equivalentIngress(federatedIngress, clusterIngress v1beta1.Ingress) bool {
 // verifyCascadingDeletionForIngress verifies that ingresses are deleted from
 // underlying clusters when orphan dependents is false and they are not deleted
 // when orphan dependents is true.
-func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName string) {
+func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName string) {
 	ingress := createIngressOrFail(clientset, nsName, FederatedIngressServiceName, FederatedIngressTLSSecretName)
 	ingressName := ingress.Name
 	// Check subclusters if the ingress was created there.
@@ -303,8 +303,9 @@ func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, cluste
 	errMessages := []string{}
 	// ingress should be present in underlying clusters unless orphanDependents is false.
 	shouldExist := orphanDependents == nil || *orphanDependents == true
-	for clusterName, clusterClientset := range clusters {
-		_, err := clusterClientset.Extensions().Ingresses(nsName).Get(ingressName, metav1.GetOptions{})
+	for _, cluster := range clusters {
+		clusterName := cluster.Name
+		_, err := cluster.Extensions().Ingresses(nsName).Get(ingressName, metav1.GetOptions{})
 		if shouldExist && errors.IsNotFound(err) {
 			errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for ingress %s in cluster %s, expected ingress to exist", ingressName, clusterName))
 		} else if !shouldExist && !errors.IsNotFound(err) {
@@ -342,7 +343,7 @@ func waitForIngressOrFail(clientset *kubeclientset.Clientset, namespace string,
 }
 
 // waitForIngressShardsOrFail waits for the ingress to appear in all clusters
-func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterMap) {
+func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterSlice) {
 	framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
 	for _, c := range clusters {
 		waitForIngressOrFail(c.Clientset, namespace, ingress, true, FederatedIngressTimeout)
@@ -350,7 +351,7 @@ func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clus
 }
 
 // waitForIngressShardsUpdatedOrFail waits for the ingress to be updated in all clusters
-func waitForIngressShardsUpdatedOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterMap) {
+func waitForIngressShardsUpdatedOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterSlice) {
 	framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
 	for _, c := range clusters {
 		waitForIngressUpdateOrFail(c.Clientset, namespace, ingress, FederatedIngressTimeout)
@@ -378,7 +379,7 @@ func waitForIngressUpdateOrFail(clientset *kubeclientset.Clientset, namespace st
 	}
 }
 
 // waitForIngressShardsGoneOrFail waits for the ingress to disappear in all clusters
-func waitForIngressShardsGoneOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterMap) {
+func waitForIngressShardsGoneOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterSlice) {
 	framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
 	for _, c := range clusters {
 		waitForIngressOrFail(c.Clientset, namespace, ingress, false, FederatedIngressTimeout)
diff --git a/test/e2e_federation/namespace.go b/test/e2e_federation/namespace.go
index 199aa7c1720..69569d78820 100644
--- a/test/e2e_federation/namespace.go
+++ b/test/e2e_federation/namespace.go
@@ -44,13 +44,13 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func
 	f := fedframework.NewDefaultFederatedFramework("federation-namespace")
 
 	Describe("Namespace objects", func() {
-		var clusters fedframework.ClusterMap // All clusters, keyed by cluster name
+		var clusters fedframework.ClusterSlice
 
 		var nsName string
 
 		BeforeEach(func() {
 			fedframework.SkipUnlessFederated(f.ClientSet)
-			clusters, _ = f.GetRegisteredClusters()
+			clusters = f.GetRegisteredClusters()
 		})
 
 		AfterEach(func() {
@@ -188,7 +188,7 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func
 // verifyNsCascadingDeletion verifies that namespaces are deleted from
 // underlying clusters when orphan dependents is false and they are not
 // deleted when orphan dependents is true.
-func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters fedframework.ClusterMap, orphanDependents *bool) string {
+func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters fedframework.ClusterSlice, orphanDependents *bool) string {
 	nsName := createNamespace(nsClient)
 	// Check subclusters if the namespace was created there.
 	By(fmt.Sprintf("Waiting for namespace %s to be created in all underlying clusters", nsName))
@@ -213,8 +213,9 @@ func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters f
 	errMessages := []string{}
 	// namespace should be present in underlying clusters unless orphanDependents is false.
 	shouldExist := orphanDependents == nil || *orphanDependents == true
-	for clusterName, clusterClientset := range clusters {
-		_, err := clusterClientset.Core().Namespaces().Get(nsName, metav1.GetOptions{})
+	for _, cluster := range clusters {
+		clusterName := cluster.Name
+		_, err := cluster.Core().Namespaces().Get(nsName, metav1.GetOptions{})
 		if shouldExist && errors.IsNotFound(err) {
 			errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for namespace %s in cluster %s, expected namespace to exist", nsName, clusterName))
 		} else if !shouldExist && !errors.IsNotFound(err) {
diff --git a/test/e2e_federation/replicaset.go b/test/e2e_federation/replicaset.go
index 6fa50d62152..f542449d9c2 100644
--- a/test/e2e_federation/replicaset.go
+++ b/test/e2e_federation/replicaset.go
@@ -74,12 +74,12 @@ var _ = framework.KubeDescribe("Federated ReplicaSet [Feature:Federation]", func
 	// e2e cases for federated replicaset controller
 	Describe("Features", func() {
 		var (
-			clusters fedframework.ClusterMap
+			clusters fedframework.ClusterSlice
 		)
 
 		BeforeEach(func() {
 			fedframework.SkipUnlessFederated(f.ClientSet)
-			clusters, _ = f.GetRegisteredClusters()
+			clusters = f.GetRegisteredClusters()
 		})
 
 		// e2e cases for federated replicaset controller
@@ -200,7 +200,7 @@ var _ = framework.KubeDescribe("Federated ReplicaSet [Feature:Federation]", func
 	})
 })
 
-func createAndWaitForReplicasetOrFail(clientset *fedclientset.Clientset, nsName string, clusters fedframework.ClusterMap) *v1beta1.ReplicaSet {
+func createAndWaitForReplicasetOrFail(clientset *fedclientset.Clientset, nsName string, clusters fedframework.ClusterSlice) *v1beta1.ReplicaSet {
 	rs := createReplicaSetOrFail(clientset, newReplicaSet(nsName, FederationReplicaSetPrefix, 5, nil))
 	// Check subclusters if the replicaSet was created there.
 	By(fmt.Sprintf("Waiting for replica sets %s to be created in all underlying clusters", rs.Name))
@@ -220,7 +220,7 @@ func createAndWaitForReplicasetOrFail(clientset *fedclientset.Clientset, nsName
 	return rs
 }
 
-func createAndUpdateFedRSWithPref(clientset *fedclientset.Clientset, nsName string, clusters fedframework.ClusterMap, pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) *v1beta1.ReplicaSet {
+func createAndUpdateFedRSWithPref(clientset *fedclientset.Clientset, nsName string, clusters fedframework.ClusterSlice, pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) *v1beta1.ReplicaSet {
 	framework.Logf("Replicas: %d, Preference: %#v", replicas, pref)
 	rs := newReplicaSet(nsName, FederationReplicaSetPrefix, replicas, pref)
 	rs = createReplicaSetOrFail(clientset, rs)
@@ -254,14 +254,15 @@ func deleteAllReplicaSetsOrFail(clientset *fedclientset.Clientset, nsName string
 // verifyCascadingDeletionForReplicaSet verifies that replicaSets are deleted
 // from underlying clusters when orphan dependents is false and they are not
 // deleted when orphan dependents is true.
-func verifyCascadingDeletionForReplicaSet(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName, rsName string) {
+func verifyCascadingDeletionForReplicaSet(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName, rsName string) {
 	By(fmt.Sprintf("Deleting replica set %s", rsName))
 	deleteReplicaSetOrFail(clientset, nsName, rsName, orphanDependents)
 
 	By(fmt.Sprintf("Verifying replica sets %s in underlying clusters", rsName))
 	errMessages := []string{}
-	for clusterName, clusterClientset := range clusters {
-		_, err := clusterClientset.Extensions().ReplicaSets(nsName).Get(rsName, metav1.GetOptions{})
+	for _, cluster := range clusters {
+		clusterName := cluster.Name
+		_, err := cluster.Extensions().ReplicaSets(nsName).Get(rsName, metav1.GetOptions{})
 		if (orphanDependents == nil || *orphanDependents == true) && errors.IsNotFound(err) {
 			errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for replica set %s in cluster %s, expected replica set to exist", rsName, clusterName))
 		} else if (orphanDependents != nil && *orphanDependents == false) && (err == nil || !errors.IsNotFound(err)) {
@@ -273,7 +274,7 @@ func verifyCascadingDeletionForReplicaSet(clientset *fedclientset.Clientset, clu
 	}
 }
 
-func generateFedRSPrefsWithWeight(clusters fedframework.ClusterMap) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
+func generateFedRSPrefsWithWeight(clusters fedframework.ClusterSlice) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
 	By("Generating replicaset preferences with weights")
 	clusterNames := extractClusterNames(clusters)
 	pref = &federation.FederatedReplicaSetPreferences{
@@ -294,7 +295,7 @@ func generateFedRSPrefsWithWeight(clusters fedframework.ClusterMap) (pref *feder
 	return
 }
 
-func generateFedRSPrefsWithMin(clusters fedframework.ClusterMap) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
+func generateFedRSPrefsWithMin(clusters fedframework.ClusterSlice) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
 	By("Generating replicaset preferences with min replicas")
 	clusterNames := extractClusterNames(clusters)
 	pref = &federation.FederatedReplicaSetPreferences{
@@ -321,7 +322,7 @@ func generateFedRSPrefsWithMin(clusters fedframework.ClusterMap) (pref *federati
 	return
 }
 
-func generateFedRSPrefsWithMax(clusters fedframework.ClusterMap) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
+func generateFedRSPrefsWithMax(clusters fedframework.ClusterSlice) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
 	By("Generating replicaset preferences with max replicas")
 	clusterNames := extractClusterNames(clusters)
 	pref = &federation.FederatedReplicaSetPreferences{
@@ -354,7 +355,7 @@ func updateFedRSPrefsRebalance(pref *federation.FederatedReplicaSetPreferences,
 	return pref
 }
 
-func generateFedRSPrefsForRebalancing(clusters fedframework.ClusterMap) (pref1, pref2 *federation.FederatedReplicaSetPreferences, replicas int32, expect1, expect2 map[string]int32) {
+func generateFedRSPrefsForRebalancing(clusters fedframework.ClusterSlice) (pref1, pref2 *federation.FederatedReplicaSetPreferences, replicas int32, expect1, expect2 map[string]int32) {
 	By("Generating replicaset for rebalancing")
 	clusterNames := extractClusterNames(clusters)
 	replicas = 3
@@ -382,12 +383,12 @@ func generateFedRSPrefsForRebalancing(clusters fedframework.ClusterMap) (pref1,
 	return
 }
 
-func waitForReplicaSetOrFail(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters fedframework.ClusterMap, expect map[string]int32) {
+func waitForReplicaSetOrFail(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters fedframework.ClusterSlice, expect map[string]int32) {
 	err := waitForReplicaSet(c, namespace, replicaSetName, clusters, expect)
 	framework.ExpectNoError(err, "Failed to verify replica set \"%s/%s\", err: %v", namespace, replicaSetName, err)
 }
 
-func waitForReplicaSet(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters fedframework.ClusterMap, expect map[string]int32) error {
+func waitForReplicaSet(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters fedframework.ClusterSlice, expect map[string]int32) error {
 	framework.Logf("waitForReplicaSet: %s/%s; clusters: %v; expect: %v", namespace, replicaSetName, clusters, expect)
 	err := wait.Poll(10*time.Second, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
 		frs, err := c.ReplicaSets(namespace).Get(replicaSetName, metav1.GetOptions{})
@@ -530,10 +531,10 @@ func newReplicaSetWithName(namespace string, name string, replicas int32, pref *
 	return rs
 }
 
-func extractClusterNames(clusters fedframework.ClusterMap) []string {
+func extractClusterNames(clusters fedframework.ClusterSlice) []string {
 	clusterNames := make([]string, 0, len(clusters))
-	for clusterName := range clusters {
-		clusterNames = append(clusterNames, clusterName)
+	for _, cluster := range clusters {
+		clusterNames = append(clusterNames, cluster.Name)
 	}
 	return clusterNames
 }
diff --git a/test/e2e_federation/secret.go b/test/e2e_federation/secret.go
index c26c31f14b1..40e8c414835 100644
--- a/test/e2e_federation/secret.go
+++ b/test/e2e_federation/secret.go
@@ -42,7 +42,7 @@ const (
 
 // Create/delete secret api objects
 var _ = framework.KubeDescribe("Federation secrets [Feature:Federation]", func() {
-	var clusters fedframework.ClusterMap // All clusters, keyed by cluster name
+	var clusters fedframework.ClusterSlice
 
 	f := fedframework.NewDefaultFederatedFramework("federated-secret")
 
@@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("Federation secrets [Feature:Federation]", func()
 
 	BeforeEach(func() {
 		fedframework.SkipUnlessFederated(f.ClientSet)
-		clusters, _ = f.GetRegisteredClusters()
+		clusters = f.GetRegisteredClusters()
 	})
 
 	AfterEach(func() {
@@ -108,7 +108,7 @@ func deleteAllSecretsOrFail(clientset *fedclientset.Clientset, nsName string) {
 // verifyCascadingDeletionForSecret verifies that secrets are deleted from
 // underlying clusters when orphan dependents is false and they are not
 // deleted when orphan dependents is true.
-func verifyCascadingDeletionForSecret(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName string) {
+func verifyCascadingDeletionForSecret(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName string) {
 	secret := createSecretOrFail(clientset, nsName)
 	secretName := secret.Name
 	// Check subclusters if the secret was created there.
@@ -134,8 +134,9 @@ func verifyCascadingDeletionForSecret(clientset *fedclientset.Clientset, cluster
 	errMessages := []string{}
 	// secret should be present in underlying clusters unless orphanDependents is false.
 	shouldExist := orphanDependents == nil || *orphanDependents == true
-	for clusterName, clusterClientset := range clusters {
-		_, err := clusterClientset.Core().Secrets(nsName).Get(secretName, metav1.GetOptions{})
+	for _, cluster := range clusters {
+		clusterName := cluster.Name
+		_, err := cluster.Core().Secrets(nsName).Get(secretName, metav1.GetOptions{})
 		if shouldExist && errors.IsNotFound(err) {
 			errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for secret %s in cluster %s, expected secret to exist", secretName, clusterName))
 		} else if !shouldExist && !errors.IsNotFound(err) {
@@ -213,7 +214,7 @@ func updateSecretOrFail(clientset *fedclientset.Clientset, nsName string, secret
 	return newSecret
 }
 
-func waitForSecretShardsOrFail(nsName string, secret *v1.Secret, clusters fedframework.ClusterMap) {
+func waitForSecretShardsOrFail(nsName string, secret *v1.Secret, clusters fedframework.ClusterSlice) {
 	framework.Logf("Waiting for secret %q in %d clusters", secret.Name, len(clusters))
 	for _, c := range clusters {
 		waitForSecretOrFail(c.Clientset, nsName, secret, true, FederatedSecretTimeout)
@@ -243,7 +244,7 @@ func waitForSecretOrFail(clientset *kubeclientset.Clientset, nsName string, secr
 	}
 }
 
-func waitForSecretShardsUpdatedOrFail(nsName string, secret *v1.Secret, clusters fedframework.ClusterMap) {
+func waitForSecretShardsUpdatedOrFail(nsName string, secret *v1.Secret, clusters fedframework.ClusterSlice) {
 	framework.Logf("Waiting for secret %q in %d clusters", secret.Name, len(clusters))
 	for _, c := range clusters {
 		waitForSecretUpdateOrFail(c.Clientset, nsName, secret, FederatedSecretTimeout)
diff --git a/test/e2e_federation/service.go b/test/e2e_federation/service.go
index ecd745400e0..479a5fd7878 100644
--- a/test/e2e_federation/service.go
+++ b/test/e2e_federation/service.go
@@ -47,9 +47,8 @@ var FederatedServiceLabels = map[string]string{
 var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func() {
 	f := fedframework.NewDefaultFederatedFramework("federated-service")
 
-	var clusters fedframework.ClusterMap // All clusters, keyed by cluster name
+	var clusters fedframework.ClusterSlice
 	var federationName string
-	var primaryClusterName string // The name of the "primary" cluster
 
 	var _ = Describe("Without Clusters [NoCluster]", func() {
 		BeforeEach(func() {
@@ -84,7 +83,7 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
 			federationName = DefaultFederationName
 		}
 
-		clusters, primaryClusterName = f.GetRegisteredClusters()
+		clusters = f.GetRegisteredClusters()
 	})
 
 	Describe("Federated Service", func() {
@@ -246,8 +245,10 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
 				BeforeEach(func() {
 					fedframework.SkipUnlessFederated(f.ClientSet)
 
-					// Delete all the backend pods from the shard which is local to the discovery pod.
-					deleteOneBackendPodOrFail(clusters[primaryClusterName], backendPods[primaryClusterName])
+					// Delete the backend pod from the shard which is local to the discovery pod.
+					primaryCluster := clusters[0]
+					backendPod := backendPods[primaryCluster.Name]
+					deleteOneBackendPodOrFail(primaryCluster, backendPod)
 				})
 
@@ -289,7 +290,7 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
 // verifyCascadingDeletionForService verifies that services are deleted from
 // underlying clusters when orphan dependents is false and they are not
 // deleted when orphan dependents is true.
-func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName string) {
+func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName string) {
 	service := createServiceOrFail(clientset, nsName, FederatedServiceName)
 	serviceName := service.Name
 	// Check subclusters if the service was created there.
@@ -315,8 +316,9 @@ func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, cluste
 	errMessages := []string{}
 	// service should be present in underlying clusters unless orphanDependents is false.
 	shouldExist := orphanDependents == nil || *orphanDependents == true
-	for clusterName, clusterClientset := range clusters {
-		_, err := clusterClientset.Core().Services(nsName).Get(serviceName, metav1.GetOptions{})
+	for _, cluster := range clusters {
+		clusterName := cluster.Name
+		_, err := cluster.Core().Services(nsName).Get(serviceName, metav1.GetOptions{})
 		if shouldExist && errors.IsNotFound(err) {
 			errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for service %s in cluster %s, expected service to exist", serviceName, clusterName))
 		} else if !shouldExist && !errors.IsNotFound(err) {
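With the map gone, service.go no longer threads `primaryClusterName` around: the discovery tests simply treat `clusters[0]` as the shard local to the discovery pod. A hypothetical helper equivalent to the new `BeforeEach` body, as it would look inside package e2e_federation next to util.go, where `BackendPodMap` and `deleteOneBackendPodOrFail` already live:

```go
// deletePrimaryBackendPod deletes the backend pod in the primary cluster,
// i.e. the first registered cluster; clusters[0] is well defined now that
// registration order is preserved by the slice.
func deletePrimaryBackendPod(clusters fedframework.ClusterSlice, backendPods BackendPodMap) {
	primaryCluster := clusters[0]
	if pod, ok := backendPods[primaryCluster.Name]; ok {
		deleteOneBackendPodOrFail(primaryCluster, pod)
	}
}
```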
diff --git a/test/e2e_federation/upgrade.go b/test/e2e_federation/upgrade.go
index 15007a1f8d4..6b893067e1a 100644
--- a/test/e2e_federation/upgrade.go
+++ b/test/e2e_federation/upgrade.go
@@ -129,7 +129,7 @@ func federationControlPlaneUpgrade(f *fedframework.Framework) {
 func federatedClustersUpgrade(f *fedframework.Framework) {
 	k8sVersion, err := framework.RealVersion(framework.TestContext.UpgradeTarget)
 	framework.ExpectNoError(err)
-	clusters, _ := f.GetRegisteredClusters()
+	clusters := f.GetRegisteredClusters()
 	for _, cluster := range clusters {
 		framework.ExpectNoError(fedframework.MasterUpgrade(cluster.Name, k8sVersion))
 		framework.ExpectNoError(framework.CheckMasterVersion(cluster.Clientset, k8sVersion))
diff --git a/test/e2e_federation/util.go b/test/e2e_federation/util.go
index 3ba14101754..97743e2fd62 100644
--- a/test/e2e_federation/util.go
+++ b/test/e2e_federation/util.go
@@ -103,7 +103,7 @@ func waitForServiceOrFail(clientset *kubeclientset.Clientset, namespace string,
 }
 
 // waitForServiceShardsOrFail waits for the service to appear in all clusters
-func waitForServiceShardsOrFail(namespace string, service *v1.Service, clusters fedframework.ClusterMap) {
+func waitForServiceShardsOrFail(namespace string, service *v1.Service, clusters fedframework.ClusterSlice) {
 	framework.Logf("Waiting for service %q in %d clusters", service.Name, len(clusters))
 	for _, c := range clusters {
 		waitForServiceOrFail(c.Clientset, namespace, service, true, fedframework.FederatedDefaultTestTimeout)
@@ -174,9 +174,10 @@ func deleteServiceOrFail(clientset *fedclientset.Clientset, namespace string, se
 	}
 }
 
-func cleanupServiceShardsAndProviderResources(namespace string, service *v1.Service, clusters fedframework.ClusterMap) {
+func cleanupServiceShardsAndProviderResources(namespace string, service *v1.Service, clusters fedframework.ClusterSlice) {
 	framework.Logf("Deleting service %q in %d clusters", service.Name, len(clusters))
-	for name, c := range clusters {
+	for _, c := range clusters {
+		name := c.Name
 		var cSvc *v1.Service
 
 		err := wait.PollImmediate(framework.Poll, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
@@ -342,7 +343,7 @@ type BackendPodMap map[string]*v1.Pod
 
 // createBackendPodsOrFail creates one pod in each cluster, and returns the created pods. If creation of any pod fails,
 // the test fails (possibly with a partially created set of pods). No retries are attempted.
-func createBackendPodsOrFail(clusters fedframework.ClusterMap, namespace string, name string) BackendPodMap {
+func createBackendPodsOrFail(clusters fedframework.ClusterSlice, namespace string, name string) BackendPodMap {
 	pod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
@@ -360,7 +361,8 @@ func createBackendPodsOrFail(clusters fedframework.ClusterMap, namespace string,
 		},
 	}
 	podMap := make(BackendPodMap)
-	for name, c := range clusters {
+	for _, c := range clusters {
+		name := c.Name
 		By(fmt.Sprintf("Creating pod %q in namespace %q in cluster %q", pod.Name, namespace, name))
 		createdPod, err := c.Clientset.Core().Pods(namespace).Create(pod)
 		framework.ExpectNoError(err, "Creating pod %q in namespace %q in cluster %q", name, namespace, name)
@@ -386,15 +388,15 @@ func deleteOneBackendPodOrFail(c *fedframework.Cluster, pod *v1.Pod) {
 
 // deleteBackendPodsOrFail deletes one pod from each cluster that has one.
 // If deletion of any pod fails, the test fails (possibly with a partially deleted set of pods). No retries are attempted.
-func deleteBackendPodsOrFail(clusters fedframework.ClusterMap, backendPods BackendPodMap) {
+func deleteBackendPodsOrFail(clusters fedframework.ClusterSlice, backendPods BackendPodMap) {
 	if backendPods == nil {
 		return
 	}
-	for name, c := range clusters {
-		if pod, ok := backendPods[name]; ok {
+	for _, c := range clusters {
+		if pod, ok := backendPods[c.Name]; ok {
 			deleteOneBackendPodOrFail(c, pod)
 		} else {
-			By(fmt.Sprintf("No backend pod to delete for cluster %q", name))
+			By(fmt.Sprintf("No backend pod to delete for cluster %q", c.Name))
 		}
 	}
 }
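Per-cluster bookkeeping such as `BackendPodMap` stays keyed by cluster name, so code that used to pair a map key with its value now joins the slice and the map through `c.Name`, as the last two hunks do. A small sketch of that join; `countBackendPods` is a hypothetical helper written as if it lived in package e2e_federation alongside util.go:

```go
// countBackendPods reports how many of the given clusters have a backend
// pod recorded, joining ClusterSlice and BackendPodMap on the cluster name.
func countBackendPods(clusters fedframework.ClusterSlice, backendPods BackendPodMap) int {
	n := 0
	for _, c := range clusters {
		if _, ok := backendPods[c.Name]; ok {
			n++
		}
	}
	return n
}
```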