fed: Manage e2e clusters with a slice instead of a map

There was no code relying on a map, and using a slice eliminates the
need to select the primary cluster when retrieving registered
clusters.
Maru Newby 2017-04-04 09:21:38 -07:00
parent b2bdc9235e
commit ea825085b3
11 changed files with 96 additions and 86 deletions
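
The change in a nutshell: ClusterMap (a map of clusters keyed by name, returned together with a primary cluster name) becomes ClusterSlice, and the "primary" cluster is just the first element. Below is a minimal, self-contained sketch of what this means for callers; the types are trimmed copies of the framework types in the diff (the embedded *kubeclientset.Clientset is omitted), and the cluster names are made up:

package main

import "fmt"

// Cluster mirrors the framework type in the diff below; the embedded
// *kubeclientset.Clientset is dropped to keep the sketch self-contained.
type Cluster struct {
	Name string
}

// ClusterSlice replaces the old ClusterMap (a map[string]*Cluster keyed by name).
type ClusterSlice []*Cluster

func main() {
	// Hypothetical stand-in for what f.GetRegisteredClusters() now returns.
	clusters := ClusterSlice{{Name: "cluster-a"}, {Name: "cluster-b"}}

	// Old callers iterated "for name, c := range clusters"; the name now
	// comes from the element itself.
	for _, c := range clusters {
		fmt.Println("checking cluster", c.Name)
	}

	// Old callers received (clusters, primaryClusterName); the primary
	// cluster is now simply the first element of the slice.
	primary := clusters[0]
	fmt.Println("primary cluster:", primary.Name)
}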

View File

@@ -43,7 +43,7 @@ const (
 // Create/delete daemonset api objects
 var _ = framework.KubeDescribe("Federation daemonsets [Feature:Federation]", func() {
-    var clusters fedframework.ClusterMap // All clusters, keyed by cluster name
+    var clusters fedframework.ClusterSlice
     f := fedframework.NewDefaultFederatedFramework("federated-daemonset")
@@ -51,7 +51,7 @@ var _ = framework.KubeDescribe("Federation daemonsets [Feature:Federation]", fun
     BeforeEach(func() {
         fedframework.SkipUnlessFederated(f.ClientSet)
-        clusters, _ = f.GetRegisteredClusters()
+        clusters = f.GetRegisteredClusters()
     })
     AfterEach(func() {
@@ -114,7 +114,7 @@ func deleteAllDaemonSetsOrFail(clientset *fedclientset.Clientset, nsName string)
 // verifyCascadingDeletionForDS verifies that daemonsets are deleted from
 // underlying clusters when orphan dependents is false and they are not
 // deleted when orphan dependents is true.
-func verifyCascadingDeletionForDS(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName string) {
+func verifyCascadingDeletionForDS(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName string) {
     daemonset := createDaemonSetOrFail(clientset, nsName)
     daemonsetName := daemonset.Name
     // Check subclusters if the daemonset was created there.
@@ -140,8 +140,9 @@ func verifyCascadingDeletionForDS(clientset *fedclientset.Clientset, clusters fe
     errMessages := []string{}
     // daemon set should be present in underlying clusters unless orphanDependents is false.
     shouldExist := orphanDependents == nil || *orphanDependents == true
-    for clusterName, clusterClientset := range clusters {
-        _, err := clusterClientset.Extensions().DaemonSets(nsName).Get(daemonsetName, metav1.GetOptions{})
+    for _, cluster := range clusters {
+        clusterName := cluster.Name
+        _, err := cluster.Extensions().DaemonSets(nsName).Get(daemonsetName, metav1.GetOptions{})
         if shouldExist && errors.IsNotFound(err) {
             errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for daemonset %s in cluster %s, expected daemonset to exist", daemonsetName, clusterName))
         } else if !shouldExist && !errors.IsNotFound(err) {
@@ -233,7 +234,7 @@ func updateDaemonSetOrFail(clientset *fedclientset.Clientset, namespace string)
     return newDaemonSet
 }
-func waitForDaemonSetShardsOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters fedframework.ClusterMap) {
+func waitForDaemonSetShardsOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters fedframework.ClusterSlice) {
     framework.Logf("Waiting for daemonset %q in %d clusters", daemonset.Name, len(clusters))
     for _, c := range clusters {
         waitForDaemonSetOrFail(c.Clientset, namespace, daemonset, true, FederatedDaemonSetTimeout)
@@ -263,7 +264,7 @@ func waitForDaemonSetOrFail(clientset *kubeclientset.Clientset, namespace string
     }
 }
-func waitForDaemonSetShardsUpdatedOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters fedframework.ClusterMap) {
+func waitForDaemonSetShardsUpdatedOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters fedframework.ClusterSlice) {
     framework.Logf("Waiting for daemonset %q in %d clusters", daemonset.Name, len(clusters))
     for _, c := range clusters {
         waitForDaemonSetUpdateOrFail(c.Clientset, namespace, daemonset, FederatedDaemonSetTimeout)

View File

@@ -69,11 +69,11 @@ var _ = framework.KubeDescribe("Federation deployments [Feature:Federation]", fu
     // e2e cases for federated deployment controller
     Describe("Federated Deployment", func() {
         var (
-            clusters fedframework.ClusterMap
+            clusters fedframework.ClusterSlice
         )
         BeforeEach(func() {
             fedframework.SkipUnlessFederated(f.ClientSet)
-            clusters, _ = f.GetRegisteredClusters()
+            clusters = f.GetRegisteredClusters()
         })
         AfterEach(func() {
@@ -140,7 +140,7 @@ func deleteAllDeploymentsOrFail(clientset *fedclientset.Clientset, nsName string
 // verifyCascadingDeletionForDeployment verifies that deployments are deleted
 // from underlying clusters when orphan dependents is false and they are not
 // deleted when orphan dependents is true.
-func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName string) {
+func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName string) {
     deployment := createDeploymentOrFail(clientset, nsName)
     deploymentName := deployment.Name
     // Check subclusters if the deployment was created there.
@@ -166,8 +166,9 @@ func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clu
     errMessages := []string{}
     // deployment should be present in underlying clusters unless orphanDependents is false.
     shouldExist := orphanDependents == nil || *orphanDependents == true
-    for clusterName, clusterClientset := range clusters {
-        _, err := clusterClientset.Extensions().Deployments(nsName).Get(deploymentName, metav1.GetOptions{})
+    for _, cluster := range clusters {
+        clusterName := cluster.Name
+        _, err := cluster.Extensions().Deployments(nsName).Get(deploymentName, metav1.GetOptions{})
         if shouldExist && errors.IsNotFound(err) {
             errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for deployment %s in cluster %s, expected deployment to exist", deploymentName, clusterName))
         } else if !shouldExist && !errors.IsNotFound(err) {
@@ -179,12 +180,12 @@ func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clu
     }
 }
-func waitForDeploymentOrFail(c *fedclientset.Clientset, namespace string, deploymentName string, clusters fedframework.ClusterMap) {
+func waitForDeploymentOrFail(c *fedclientset.Clientset, namespace string, deploymentName string, clusters fedframework.ClusterSlice) {
     err := waitForDeployment(c, namespace, deploymentName, clusters)
     framework.ExpectNoError(err, "Failed to verify deployment %q/%q, err: %v", namespace, deploymentName, err)
 }
-func waitForDeployment(c *fedclientset.Clientset, namespace string, deploymentName string, clusters fedframework.ClusterMap) error {
+func waitForDeployment(c *fedclientset.Clientset, namespace string, deploymentName string, clusters fedframework.ClusterSlice) error {
     err := wait.Poll(10*time.Second, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
         fdep, err := c.Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
         if err != nil {

View File

@@ -45,19 +45,16 @@ const (
     federatedClustersWaitTimeout = 1 * time.Minute
 )
-// ClusterMap is a map of Cluster instances keyed by cluster name
-type ClusterMap map[string]*Cluster
+// ClusterSlice is a slice of clusters
+type ClusterSlice []*Cluster
-// Cluster keeps track of the assorted objects and state related to each cluster
-// in the federation
+// Cluster keeps track of the name and client of a cluster in the federation
 type Cluster struct {
     Name string
     *kubeclientset.Clientset
 }
-// can not be moved to util, as By and Expect must be put in Ginkgo test unit
-func getRegisteredClusters(f *Framework) (ClusterMap, string) {
-    clusters := make(ClusterMap)
+func getRegisteredClusters(f *Framework) ClusterSlice {
     contexts := f.GetUnderlyingFederatedContexts()
     By("Obtaining a list of all the clusters")
@@ -69,15 +66,17 @@ func getRegisteredClusters(f *Framework) (ClusterMap, string) {
     }
     framework.Logf("%d clusters are Ready", len(contexts))
-    primaryClusterName := clusterList.Items[0].Name
-    By(fmt.Sprintf("Labeling %q as the first cluster", primaryClusterName))
+    clusters := ClusterSlice{}
     for i, c := range clusterList.Items {
         framework.Logf("Creating a clientset for the cluster %s", c.Name)
         Expect(framework.TestContext.KubeConfig).ToNot(Equal(""), "KubeConfig must be specified to load clusters' client config")
-        clusters[c.Name] = &Cluster{c.Name, createClientsetForCluster(c, i, userAgentName), false, nil}
+        clusters = append(clusters, &Cluster{
+            Name:      c.Name,
+            Clientset: createClientsetForCluster(c, i, userAgentName),
+        })
     }
     waitForNamespaceInFederatedClusters(clusters, f.FederationNamespace.Name, federatedNamespaceTimeout)
-    return clusters, primaryClusterName
+    return clusters
 }
 // waitForAllRegisteredClusters waits for all clusters defined in e2e context to be created
@@ -120,8 +119,9 @@ func createClientsetForCluster(c federationapi.Cluster, i int, userAgentName str
 }
 // waitForNamespaceInFederatedClusters waits for the federated namespace to be created in federated clusters
-func waitForNamespaceInFederatedClusters(clusters ClusterMap, nsName string, timeout time.Duration) {
-    for name, c := range clusters {
+func waitForNamespaceInFederatedClusters(clusters ClusterSlice, nsName string, timeout time.Duration) {
+    for _, c := range clusters {
+        name := c.Name
         err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
             _, err := c.Clientset.Core().Namespaces().Get(nsName, metav1.GetOptions{})
             if err != nil {

View File

@@ -232,6 +232,6 @@ func (f *Framework) GetUnderlyingFederatedContexts() []E2EContext {
     return e2eContexts
 }
-func (f *Framework) GetRegisteredClusters() (ClusterMap, string) {
+func (f *Framework) GetRegisteredClusters() ClusterSlice {
     return getRegisteredClusters(f)
 }

View File

@@ -142,11 +142,11 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
     // e2e cases for federation ingress controller
     var _ = Describe("Federated Ingresses", func() {
         var (
-            clusters fedframework.ClusterMap // All clusters, keyed by cluster name
-            primaryClusterName, federationName, ns string
+            clusters fedframework.ClusterSlice
+            federationName, ns string
             jig *federationTestJig
             service *v1.Service
             secret *v1.Secret
         )
         // register clusters in federation apiserver
@@ -157,7 +157,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
                 federationName = DefaultFederationName
             }
             jig = newFederationTestJig(f.FederationClientset)
-            clusters, primaryClusterName = f.GetRegisteredClusters()
+            clusters = f.GetRegisteredClusters()
             ns = f.FederationNamespace.Name
             // create backend service
             service = createServiceOrFail(f.FederationClientset, ns, FederatedIngressServiceName)
@@ -289,7 +289,7 @@ func equivalentIngress(federatedIngress, clusterIngress v1beta1.Ingress) bool {
 // verifyCascadingDeletionForIngress verifies that ingresses are deleted from
 // underlying clusters when orphan dependents is false and they are not deleted
 // when orphan dependents is true.
-func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName string) {
+func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName string) {
     ingress := createIngressOrFail(clientset, nsName, FederatedIngressServiceName, FederatedIngressTLSSecretName)
     ingressName := ingress.Name
     // Check subclusters if the ingress was created there.
@@ -303,8 +303,9 @@ func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, cluste
     errMessages := []string{}
     // ingress should be present in underlying clusters unless orphanDependents is false.
     shouldExist := orphanDependents == nil || *orphanDependents == true
-    for clusterName, clusterClientset := range clusters {
-        _, err := clusterClientset.Extensions().Ingresses(nsName).Get(ingressName, metav1.GetOptions{})
+    for _, cluster := range clusters {
+        clusterName := cluster.Name
+        _, err := cluster.Extensions().Ingresses(nsName).Get(ingressName, metav1.GetOptions{})
         if shouldExist && errors.IsNotFound(err) {
             errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for ingress %s in cluster %s, expected ingress to exist", ingressName, clusterName))
         } else if !shouldExist && !errors.IsNotFound(err) {
@@ -342,7 +343,7 @@ func waitForIngressOrFail(clientset *kubeclientset.Clientset, namespace string,
 }
 // waitForIngressShardsOrFail waits for the ingress to appear in all clusters
-func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterMap) {
+func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterSlice) {
     framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
     for _, c := range clusters {
         waitForIngressOrFail(c.Clientset, namespace, ingress, true, FederatedIngressTimeout)
@@ -350,7 +351,7 @@ func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clus
 }
 // waitForIngressShardsUpdatedOrFail waits for the ingress to be updated in all clusters
-func waitForIngressShardsUpdatedOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterMap) {
+func waitForIngressShardsUpdatedOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterSlice) {
     framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
     for _, c := range clusters {
         waitForIngressUpdateOrFail(c.Clientset, namespace, ingress, FederatedIngressTimeout)
@@ -378,7 +379,7 @@ func waitForIngressUpdateOrFail(clientset *kubeclientset.Clientset, namespace st
 }
 // waitForIngressShardsGoneOrFail waits for the ingress to disappear in all clusters
-func waitForIngressShardsGoneOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterMap) {
+func waitForIngressShardsGoneOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterSlice) {
     framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
     for _, c := range clusters {
         waitForIngressOrFail(c.Clientset, namespace, ingress, false, FederatedIngressTimeout)

View File

@@ -44,13 +44,13 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func
     f := fedframework.NewDefaultFederatedFramework("federation-namespace")
     Describe("Namespace objects", func() {
-        var clusters fedframework.ClusterMap // All clusters, keyed by cluster name
+        var clusters fedframework.ClusterSlice
         var nsName string
         BeforeEach(func() {
             fedframework.SkipUnlessFederated(f.ClientSet)
-            clusters, _ = f.GetRegisteredClusters()
+            clusters = f.GetRegisteredClusters()
         })
         AfterEach(func() {
@@ -188,7 +188,7 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func
 // verifyNsCascadingDeletion verifies that namespaces are deleted from
 // underlying clusters when orphan dependents is false and they are not
 // deleted when orphan dependents is true.
-func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters fedframework.ClusterMap, orphanDependents *bool) string {
+func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters fedframework.ClusterSlice, orphanDependents *bool) string {
     nsName := createNamespace(nsClient)
     // Check subclusters if the namespace was created there.
     By(fmt.Sprintf("Waiting for namespace %s to be created in all underlying clusters", nsName))
@@ -213,8 +213,9 @@ func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters f
     errMessages := []string{}
     // namespace should be present in underlying clusters unless orphanDependents is false.
     shouldExist := orphanDependents == nil || *orphanDependents == true
-    for clusterName, clusterClientset := range clusters {
-        _, err := clusterClientset.Core().Namespaces().Get(nsName, metav1.GetOptions{})
+    for _, cluster := range clusters {
+        clusterName := cluster.Name
+        _, err := cluster.Core().Namespaces().Get(nsName, metav1.GetOptions{})
         if shouldExist && errors.IsNotFound(err) {
             errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for namespace %s in cluster %s, expected namespace to exist", nsName, clusterName))
         } else if !shouldExist && !errors.IsNotFound(err) {

View File

@@ -74,12 +74,12 @@ var _ = framework.KubeDescribe("Federated ReplicaSet [Feature:Federation]", func
     // e2e cases for federated replicaset controller
     Describe("Features", func() {
         var (
-            clusters fedframework.ClusterMap
+            clusters fedframework.ClusterSlice
         )
         BeforeEach(func() {
            fedframework.SkipUnlessFederated(f.ClientSet)
-            clusters, _ = f.GetRegisteredClusters()
+            clusters = f.GetRegisteredClusters()
         })
         // e2e cases for federated replicaset controller
@@ -200,7 +200,7 @@ var _ = framework.KubeDescribe("Federated ReplicaSet [Feature:Federation]", func
     })
 })
-func createAndWaitForReplicasetOrFail(clientset *fedclientset.Clientset, nsName string, clusters fedframework.ClusterMap) *v1beta1.ReplicaSet {
+func createAndWaitForReplicasetOrFail(clientset *fedclientset.Clientset, nsName string, clusters fedframework.ClusterSlice) *v1beta1.ReplicaSet {
     rs := createReplicaSetOrFail(clientset, newReplicaSet(nsName, FederationReplicaSetPrefix, 5, nil))
     // Check subclusters if the replicaSet was created there.
     By(fmt.Sprintf("Waiting for replica sets %s to be created in all underlying clusters", rs.Name))
@@ -220,7 +220,7 @@ func createAndWaitForReplicasetOrFail(clientset *fedclientset.Clientset, nsName
     return rs
 }
-func createAndUpdateFedRSWithPref(clientset *fedclientset.Clientset, nsName string, clusters fedframework.ClusterMap, pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) *v1beta1.ReplicaSet {
+func createAndUpdateFedRSWithPref(clientset *fedclientset.Clientset, nsName string, clusters fedframework.ClusterSlice, pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) *v1beta1.ReplicaSet {
     framework.Logf("Replicas: %d, Preference: %#v", replicas, pref)
     rs := newReplicaSet(nsName, FederationReplicaSetPrefix, replicas, pref)
     rs = createReplicaSetOrFail(clientset, rs)
@@ -254,14 +254,15 @@ func deleteAllReplicaSetsOrFail(clientset *fedclientset.Clientset, nsName string
 // verifyCascadingDeletionForReplicaSet verifies that replicaSets are deleted
 // from underlying clusters when orphan dependents is false and they are not
 // deleted when orphan dependents is true.
-func verifyCascadingDeletionForReplicaSet(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName, rsName string) {
+func verifyCascadingDeletionForReplicaSet(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName, rsName string) {
     By(fmt.Sprintf("Deleting replica set %s", rsName))
     deleteReplicaSetOrFail(clientset, nsName, rsName, orphanDependents)
     By(fmt.Sprintf("Verifying replica sets %s in underlying clusters", rsName))
     errMessages := []string{}
-    for clusterName, clusterClientset := range clusters {
-        _, err := clusterClientset.Extensions().ReplicaSets(nsName).Get(rsName, metav1.GetOptions{})
+    for _, cluster := range clusters {
+        clusterName := cluster.Name
+        _, err := cluster.Extensions().ReplicaSets(nsName).Get(rsName, metav1.GetOptions{})
         if (orphanDependents == nil || *orphanDependents == true) && errors.IsNotFound(err) {
             errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for replica set %s in cluster %s, expected replica set to exist", rsName, clusterName))
         } else if (orphanDependents != nil && *orphanDependents == false) && (err == nil || !errors.IsNotFound(err)) {
@@ -273,7 +274,7 @@ func verifyCascadingDeletionForReplicaSet(clientset *fedclientset.Clientset, clu
     }
 }
-func generateFedRSPrefsWithWeight(clusters fedframework.ClusterMap) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
+func generateFedRSPrefsWithWeight(clusters fedframework.ClusterSlice) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
     By("Generating replicaset preferences with weights")
     clusterNames := extractClusterNames(clusters)
     pref = &federation.FederatedReplicaSetPreferences{
@@ -294,7 +295,7 @@ func generateFedRSPrefsWithWeight(clusters fedframework.ClusterMap) (pref *feder
     return
 }
-func generateFedRSPrefsWithMin(clusters fedframework.ClusterMap) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
+func generateFedRSPrefsWithMin(clusters fedframework.ClusterSlice) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
     By("Generating replicaset preferences with min replicas")
     clusterNames := extractClusterNames(clusters)
     pref = &federation.FederatedReplicaSetPreferences{
@@ -321,7 +322,7 @@ func generateFedRSPrefsWithMin(clusters fedframework.ClusterMap) (pref *federati
     return
 }
-func generateFedRSPrefsWithMax(clusters fedframework.ClusterMap) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
+func generateFedRSPrefsWithMax(clusters fedframework.ClusterSlice) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
     By("Generating replicaset preferences with max replicas")
     clusterNames := extractClusterNames(clusters)
     pref = &federation.FederatedReplicaSetPreferences{
@@ -354,7 +355,7 @@ func updateFedRSPrefsRebalance(pref *federation.FederatedReplicaSetPreferences,
     return pref
 }
-func generateFedRSPrefsForRebalancing(clusters fedframework.ClusterMap) (pref1, pref2 *federation.FederatedReplicaSetPreferences, replicas int32, expect1, expect2 map[string]int32) {
+func generateFedRSPrefsForRebalancing(clusters fedframework.ClusterSlice) (pref1, pref2 *federation.FederatedReplicaSetPreferences, replicas int32, expect1, expect2 map[string]int32) {
     By("Generating replicaset for rebalancing")
     clusterNames := extractClusterNames(clusters)
     replicas = 3
@@ -382,12 +383,12 @@ func generateFedRSPrefsForRebalancing(clusters fedframework.ClusterMap) (pref1,
     return
 }
-func waitForReplicaSetOrFail(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters fedframework.ClusterMap, expect map[string]int32) {
+func waitForReplicaSetOrFail(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters fedframework.ClusterSlice, expect map[string]int32) {
     err := waitForReplicaSet(c, namespace, replicaSetName, clusters, expect)
     framework.ExpectNoError(err, "Failed to verify replica set \"%s/%s\", err: %v", namespace, replicaSetName, err)
 }
-func waitForReplicaSet(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters fedframework.ClusterMap, expect map[string]int32) error {
+func waitForReplicaSet(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters fedframework.ClusterSlice, expect map[string]int32) error {
     framework.Logf("waitForReplicaSet: %s/%s; clusters: %v; expect: %v", namespace, replicaSetName, clusters, expect)
     err := wait.Poll(10*time.Second, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
         frs, err := c.ReplicaSets(namespace).Get(replicaSetName, metav1.GetOptions{})
@@ -530,10 +531,10 @@ func newReplicaSetWithName(namespace string, name string, replicas int32, pref *
     return rs
 }
-func extractClusterNames(clusters fedframework.ClusterMap) []string {
+func extractClusterNames(clusters fedframework.ClusterSlice) []string {
     clusterNames := make([]string, 0, len(clusters))
-    for clusterName := range clusters {
-        clusterNames = append(clusterNames, clusterName)
+    for _, cluster := range clusters {
+        clusterNames = append(clusterNames, cluster.Name)
     }
     return clusterNames
 }

View File

@@ -42,7 +42,7 @@ const (
 // Create/delete secret api objects
 var _ = framework.KubeDescribe("Federation secrets [Feature:Federation]", func() {
-    var clusters fedframework.ClusterMap // All clusters, keyed by cluster name
+    var clusters fedframework.ClusterSlice
     f := fedframework.NewDefaultFederatedFramework("federated-secret")
@@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("Federation secrets [Feature:Federation]", func()
     BeforeEach(func() {
         fedframework.SkipUnlessFederated(f.ClientSet)
-        clusters, _ = f.GetRegisteredClusters()
+        clusters = f.GetRegisteredClusters()
     })
     AfterEach(func() {
@@ -108,7 +108,7 @@ func deleteAllSecretsOrFail(clientset *fedclientset.Clientset, nsName string) {
 // verifyCascadingDeletionForSecret verifies that secrets are deleted from
 // underlying clusters when orphan dependents is false and they are not
 // deleted when orphan dependents is true.
-func verifyCascadingDeletionForSecret(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName string) {
+func verifyCascadingDeletionForSecret(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName string) {
     secret := createSecretOrFail(clientset, nsName)
     secretName := secret.Name
     // Check subclusters if the secret was created there.
@@ -134,8 +134,9 @@ func verifyCascadingDeletionForSecret(clientset *fedclientset.Clientset, cluster
     errMessages := []string{}
     // secret should be present in underlying clusters unless orphanDependents is false.
     shouldExist := orphanDependents == nil || *orphanDependents == true
-    for clusterName, clusterClientset := range clusters {
-        _, err := clusterClientset.Core().Secrets(nsName).Get(secretName, metav1.GetOptions{})
+    for _, cluster := range clusters {
+        clusterName := cluster.Name
+        _, err := cluster.Core().Secrets(nsName).Get(secretName, metav1.GetOptions{})
         if shouldExist && errors.IsNotFound(err) {
             errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for secret %s in cluster %s, expected secret to exist", secretName, clusterName))
         } else if !shouldExist && !errors.IsNotFound(err) {
@@ -213,7 +214,7 @@ func updateSecretOrFail(clientset *fedclientset.Clientset, nsName string, secret
     return newSecret
 }
-func waitForSecretShardsOrFail(nsName string, secret *v1.Secret, clusters fedframework.ClusterMap) {
+func waitForSecretShardsOrFail(nsName string, secret *v1.Secret, clusters fedframework.ClusterSlice) {
     framework.Logf("Waiting for secret %q in %d clusters", secret.Name, len(clusters))
     for _, c := range clusters {
         waitForSecretOrFail(c.Clientset, nsName, secret, true, FederatedSecretTimeout)
@@ -243,7 +244,7 @@ func waitForSecretOrFail(clientset *kubeclientset.Clientset, nsName string, secr
     }
 }
-func waitForSecretShardsUpdatedOrFail(nsName string, secret *v1.Secret, clusters fedframework.ClusterMap) {
+func waitForSecretShardsUpdatedOrFail(nsName string, secret *v1.Secret, clusters fedframework.ClusterSlice) {
     framework.Logf("Waiting for secret %q in %d clusters", secret.Name, len(clusters))
     for _, c := range clusters {
         waitForSecretUpdateOrFail(c.Clientset, nsName, secret, FederatedSecretTimeout)

View File

@@ -47,9 +47,8 @@ var FederatedServiceLabels = map[string]string{
 var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func() {
     f := fedframework.NewDefaultFederatedFramework("federated-service")
-    var clusters fedframework.ClusterMap // All clusters, keyed by cluster name
+    var clusters fedframework.ClusterSlice
     var federationName string
-    var primaryClusterName string // The name of the "primary" cluster
     var _ = Describe("Without Clusters [NoCluster]", func() {
         BeforeEach(func() {
@@ -84,7 +83,7 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
             federationName = DefaultFederationName
         }
-        clusters, primaryClusterName = f.GetRegisteredClusters()
+        clusters = f.GetRegisteredClusters()
     })
     Describe("Federated Service", func() {
@@ -246,8 +245,10 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
         BeforeEach(func() {
             fedframework.SkipUnlessFederated(f.ClientSet)
-            // Delete all the backend pods from the shard which is local to the discovery pod.
-            deleteOneBackendPodOrFail(clusters[primaryClusterName], backendPods[primaryClusterName])
+            // Delete the backend pod from the shard which is local to the discovery pod.
+            primaryCluster := clusters[0]
+            backendPod := backendPods[primaryCluster.Name]
+            deleteOneBackendPodOrFail(primaryCluster, backendPod)
         })
@@ -289,7 +290,7 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
 // verifyCascadingDeletionForService verifies that services are deleted from
 // underlying clusters when orphan dependents is false and they are not
 // deleted when orphan dependents is true.
-func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName string) {
+func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, clusters fedframework.ClusterSlice, orphanDependents *bool, nsName string) {
     service := createServiceOrFail(clientset, nsName, FederatedServiceName)
     serviceName := service.Name
     // Check subclusters if the service was created there.
@@ -315,8 +316,9 @@ func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, cluste
     errMessages := []string{}
     // service should be present in underlying clusters unless orphanDependents is false.
     shouldExist := orphanDependents == nil || *orphanDependents == true
-    for clusterName, clusterClientset := range clusters {
-        _, err := clusterClientset.Core().Services(nsName).Get(serviceName, metav1.GetOptions{})
+    for _, cluster := range clusters {
+        clusterName := cluster.Name
+        _, err := cluster.Core().Services(nsName).Get(serviceName, metav1.GetOptions{})
         if shouldExist && errors.IsNotFound(err) {
             errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for service %s in cluster %s, expected service to exist", serviceName, clusterName))
         } else if !shouldExist && !errors.IsNotFound(err) {

View File

@@ -129,7 +129,7 @@ func federationControlPlaneUpgrade(f *fedframework.Framework) {
 func federatedClustersUpgrade(f *fedframework.Framework) {
     k8sVersion, err := framework.RealVersion(framework.TestContext.UpgradeTarget)
     framework.ExpectNoError(err)
-    clusters, _ := f.GetRegisteredClusters()
+    clusters := f.GetRegisteredClusters()
     for _, cluster := range clusters {
         framework.ExpectNoError(fedframework.MasterUpgrade(cluster.Name, k8sVersion))
         framework.ExpectNoError(framework.CheckMasterVersion(cluster.Clientset, k8sVersion))

View File

@@ -103,7 +103,7 @@ func waitForServiceOrFail(clientset *kubeclientset.Clientset, namespace string,
 }
 // waitForServiceShardsOrFail waits for the service to appear in all clusters
-func waitForServiceShardsOrFail(namespace string, service *v1.Service, clusters fedframework.ClusterMap) {
+func waitForServiceShardsOrFail(namespace string, service *v1.Service, clusters fedframework.ClusterSlice) {
     framework.Logf("Waiting for service %q in %d clusters", service.Name, len(clusters))
     for _, c := range clusters {
         waitForServiceOrFail(c.Clientset, namespace, service, true, fedframework.FederatedDefaultTestTimeout)
@@ -174,9 +174,10 @@ func deleteServiceOrFail(clientset *fedclientset.Clientset, namespace string, se
     }
 }
-func cleanupServiceShardsAndProviderResources(namespace string, service *v1.Service, clusters fedframework.ClusterMap) {
+func cleanupServiceShardsAndProviderResources(namespace string, service *v1.Service, clusters fedframework.ClusterSlice) {
     framework.Logf("Deleting service %q in %d clusters", service.Name, len(clusters))
-    for name, c := range clusters {
+    for _, c := range clusters {
+        name := c.Name
         var cSvc *v1.Service
         err := wait.PollImmediate(framework.Poll, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
@@ -342,7 +343,7 @@ type BackendPodMap map[string]*v1.Pod
 // createBackendPodsOrFail creates one pod in each cluster, and returns the created pods. If creation of any pod fails,
 // the test fails (possibly with a partially created set of pods). No retries are attempted.
-func createBackendPodsOrFail(clusters fedframework.ClusterMap, namespace string, name string) BackendPodMap {
+func createBackendPodsOrFail(clusters fedframework.ClusterSlice, namespace string, name string) BackendPodMap {
     pod := &v1.Pod{
         ObjectMeta: metav1.ObjectMeta{
             Name: name,
@@ -360,7 +361,8 @@ func createBackendPodsOrFail(clusters fedframework.ClusterMap, namespace string,
         },
     }
     podMap := make(BackendPodMap)
-    for name, c := range clusters {
+    for _, c := range clusters {
+        name := c.Name
         By(fmt.Sprintf("Creating pod %q in namespace %q in cluster %q", pod.Name, namespace, name))
         createdPod, err := c.Clientset.Core().Pods(namespace).Create(pod)
         framework.ExpectNoError(err, "Creating pod %q in namespace %q in cluster %q", name, namespace, name)
@@ -386,15 +388,15 @@ func deleteOneBackendPodOrFail(c *fedframework.Cluster, pod *v1.Pod) {
 // deleteBackendPodsOrFail deletes one pod from each cluster that has one.
 // If deletion of any pod fails, the test fails (possibly with a partially deleted set of pods). No retries are attempted.
-func deleteBackendPodsOrFail(clusters fedframework.ClusterMap, backendPods BackendPodMap) {
+func deleteBackendPodsOrFail(clusters fedframework.ClusterSlice, backendPods BackendPodMap) {
     if backendPods == nil {
         return
     }
-    for name, c := range clusters {
-        if pod, ok := backendPods[name]; ok {
+    for _, c := range clusters {
+        if pod, ok := backendPods[c.Name]; ok {
             deleteOneBackendPodOrFail(c, pod)
         } else {
-            By(fmt.Sprintf("No backend pod to delete for cluster %q", name))
+            By(fmt.Sprintf("No backend pod to delete for cluster %q", c.Name))
         }
     }
 }