Switch to a map.
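
Track per-cluster test state in a map keyed by cluster name instead of a positional slice. Each cluster entry now carries its own name and the backend pod created in it (if any), the first cluster returned by the federation API is recorded as the primary cluster, and log and error messages refer to clusters by name rather than by index.

Below is a minimal, self-contained sketch of the new bookkeeping shape. The simplified types and the pointer-valued map are assumptions of the sketch only (pointer values let field updates persist across map lookups); the change itself stores cluster values directly and uses the real release_1_3 clientset.

package main

import "fmt"

// Stand-ins for the clientset and pod types used by the e2e test (illustrative only).
type Clientset struct{}
type Pod struct{ Name string }

// cluster mirrors the per-cluster bookkeeping: its name, its clientset,
// whether the test created its namespace, and the backend pod created in it.
type cluster struct {
	name             string
	clientset        *Clientset
	namespaceCreated bool
	backendPod       *Pod
}

func main() {
	names := []string{"cluster-a", "cluster-b", "cluster-c"}

	// Keyed by cluster name rather than by position in a slice.
	clusters := map[string]*cluster{}
	for _, n := range names {
		clusters[n] = &cluster{name: n, clientset: &Clientset{}}
	}

	// The first cluster listed is treated as the "primary" cluster.
	primaryClusterName := names[0]

	// Per-cluster state can now be looked up and updated by name.
	clusters[primaryClusterName].backendPod = &Pod{Name: "federated-service-backend"}
	for name, c := range clusters {
		fmt.Printf("cluster %q: has backend pod: %v\n", name, c.backendPod != nil)
	}
}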

Author: Matt Liggett
Date: 2016-07-01 14:54:29 -07:00
parent 49a69b17f7
commit 088b871729


@@ -67,14 +67,17 @@ type cluster keeps track of the assorted objects and state related to each
 cluster in the federation
 */
 type cluster struct {
+	name string
 	*release_1_3.Clientset
 	namespaceCreated bool // Did we need to create a new namespace in this cluster? If so, we should delete it.
+	backendPod *v1.Pod // The backend pod, if one's been created.
 }
 
 var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 	f := framework.NewDefaultFederatedFramework("federated-service")
-	var clusters []cluster
+	var clusters map[string]cluster
 	var federationName string
+	var primaryClusterName string // The name of the "primary" cluster
 
 	var _ = Describe("Federated Services", func() {
 		BeforeEach(func() {
@@ -114,9 +117,12 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 			}
 			framework.Logf("%d clusters are Ready", len(contexts))
 
-			clusters = make([]cluster, len(clusterList.Items))
-			for i, cluster := range clusterList.Items {
-				framework.Logf("Creating a clientset for the cluster %s", cluster.Name)
+			// clusters = make([]cluster, len(clusterList.Items))
+			clusters = map[string]cluster{}
+			primaryClusterName = clusterList.Items[0].Name
+			By(fmt.Sprintf("Labeling %q as the first cluster", primaryClusterName))
+			for i, c := range clusterList.Items {
+				framework.Logf("Creating a clientset for the cluster %s", c.Name)
 
 				Expect(framework.TestContext.KubeConfig).ToNot(Equal(""), "KubeConfig must be specified to load clusters' client config")
 				kubecfg, err := clientcmd.LoadFromFile(framework.TestContext.KubeConfig)
@@ -124,20 +130,20 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 
 				cfgOverride := &clientcmd.ConfigOverrides{
 					ClusterInfo: clientcmdapi.Cluster{
-						Server: cluster.Spec.ServerAddressByClientCIDRs[0].ServerAddress,
+						Server: c.Spec.ServerAddressByClientCIDRs[0].ServerAddress,
 					},
 				}
-				ccfg := clientcmd.NewNonInteractiveClientConfig(*kubecfg, cluster.Name, cfgOverride, clientcmd.NewDefaultClientConfigLoadingRules())
+				ccfg := clientcmd.NewNonInteractiveClientConfig(*kubecfg, c.Name, cfgOverride, clientcmd.NewDefaultClientConfigLoadingRules())
 				cfg, err := ccfg.ClientConfig()
-				framework.ExpectNoError(err, "Error creating client config in cluster #%d", i)
+				framework.ExpectNoError(err, "Error creating client config in cluster #%d (%q)", i, c.Name)
 
 				cfg.QPS = KubeAPIQPS
 				cfg.Burst = KubeAPIBurst
 				clset := release_1_3.NewForConfigOrDie(restclient.AddUserAgent(cfg, UserAgentName))
-				clusters[i].Clientset = clset
+				clusters[c.Name] = cluster{c.Name, clset, false, nil}
 			}
 
-			for i, c := range clusters {
+			for name, c := range clusters {
 				// The e2e Framework created the required namespace in one of the clusters, but we need to create it in all the others, if it doesn't yet exist.
 				if _, err := c.Clientset.Core().Namespaces().Get(f.Namespace.Name); errors.IsNotFound(err) {
 					ns := &v1.Namespace{
@@ -149,22 +155,22 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 					if err == nil {
 						c.namespaceCreated = true
 					}
-					framework.ExpectNoError(err, "Couldn't create the namespace %s in cluster [%d]", f.Namespace.Name, i)
-					framework.Logf("Namespace %s created in cluster [%d]", f.Namespace.Name, i)
+					framework.ExpectNoError(err, "Couldn't create the namespace %s in cluster %q", f.Namespace.Name, name)
+					framework.Logf("Namespace %s created in cluster %q", f.Namespace.Name, name)
 				} else if err != nil {
-					framework.Logf("Couldn't create the namespace %s in cluster [%d]: %v", f.Namespace.Name, i, err)
+					framework.Logf("Couldn't create the namespace %s in cluster %q: %v", f.Namespace.Name, name, err)
 				}
 			}
 		})
 
 		AfterEach(func() {
-			for i, c := range clusters {
+			for name, c := range clusters {
 				if c.namespaceCreated {
 					if _, err := c.Clientset.Core().Namespaces().Get(f.Namespace.Name); !errors.IsNotFound(err) {
 						err := c.Clientset.Core().Namespaces().Delete(f.Namespace.Name, &api.DeleteOptions{})
-						framework.ExpectNoError(err, "Couldn't delete the namespace %s in cluster [%d]: %v", f.Namespace.Name, i, err)
+						framework.ExpectNoError(err, "Couldn't delete the namespace %s in cluster %q: %v", f.Namespace.Name, name, err)
 					}
-					framework.Logf("Namespace %s deleted in cluster [%d]", f.Namespace.Name, i)
+					framework.Logf("Namespace %s deleted in cluster %q", f.Namespace.Name, name)
 				}
 			}
@@ -206,32 +212,26 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 				err := f.FederationClientset_1_3.Services(f.Namespace.Name).Delete(service.Name, &api.DeleteOptions{})
 				framework.ExpectNoError(err, "Error deleting service %q in namespace %q", service.Name, f.Namespace.Name)
 			}()
-			waitForServiceShardsOrFail(f.Namespace.Name, service, clusters, nil)
+			waitForServiceShardsOrFail(f.Namespace.Name, service, clusters)
 		})
 	})
 
 	var _ = Describe("DNS", func() {
 
 		var (
 			service *v1.Service
-			backendPods []*v1.Pod
 		)
 
 		BeforeEach(func() {
 			framework.SkipUnlessFederated(f.Client)
-			backendPods = createBackendPodsOrFail(clusters, f.Namespace.Name, FederatedServicePodName)
+			createBackendPodsOrFail(clusters, f.Namespace.Name, FederatedServicePodName)
 			service = createServiceOrFail(f.FederationClientset_1_3, f.Namespace.Name)
-			waitForServiceShardsOrFail(f.Namespace.Name, service, clusters, nil)
+			waitForServiceShardsOrFail(f.Namespace.Name, service, clusters)
 		})
 
 		AfterEach(func() {
 			framework.SkipUnlessFederated(f.Client)
-			if backendPods != nil {
-				deleteBackendPodsOrFail(clusters, f.Namespace.Name, backendPods)
-				backendPods = nil
-			} else {
-				By("No backend pods to delete. BackendPods is nil.")
-			}
+			deleteBackendPodsOrFail(clusters, f.Namespace.Name)
 
 			if service != nil {
 				deleteServiceOrFail(f.FederationClientset_1_3, f.Namespace.Name, service.Name)
@@ -264,8 +264,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 				framework.SkipUnlessFederated(f.Client)
 
 				// Delete all the backend pods from the shard which is local to the discovery pod.
-				// FIXME(mml): Use a function that deletes backends only in one cluster.
-				deleteBackendPodsOrFail([]cluster{{f.Clientset_1_3, false}}, f.Namespace.Name, []*v1.Pod{backendPods[0]})
+				deleteOneBackendPodOrFail(clusters[primaryClusterName])
 			})
@@ -349,23 +348,12 @@ func waitForServiceOrFail(clientset *release_1_3.Clientset, namespace string, se
 }
 
 /*
-waitForServiceShardsOrFail waits for the service to appear (or disappear) in the clientsets specifed in presentInCluster (or all if presentInCluster is nil).
-If presentInCluster[n] is true, then wait for service shard to exist in the cluster specifid in clientsets[n]
-If presentInCluster[n] is false, then wait for service shard to not exist in the cluster specifid in clientsets[n]
+waitForServiceShardsOrFail waits for the service to appear in all clusters
 */
-func waitForServiceShardsOrFail(namespace string, service *v1.Service, clusters []cluster, presentInCluster []bool) {
-	if presentInCluster != nil {
-		Expect(len(presentInCluster)).To(Equal(len(clusters)), "Internal error: Number of presence flags does not equal number of clients/clusters")
-	}
+func waitForServiceShardsOrFail(namespace string, service *v1.Service, clusters map[string]cluster) {
 	framework.Logf("Waiting for service %q in %d clusters", service.Name, len(clusters))
-	for i, c := range clusters {
-		var present bool // Should the service be present or absent in this cluster?
-		if presentInCluster == nil {
-			present = true
-		} else {
-			present = presentInCluster[i]
-		}
-		waitForServiceOrFail(c.Clientset, namespace, service, present, FederatedServiceTimeout)
+	for _, c := range clusters {
+		waitForServiceOrFail(c.Clientset, namespace, service, true, FederatedServiceTimeout)
 	}
 }
@@ -492,7 +480,7 @@ func discoverService(f *framework.Framework, name string, exists bool, podName s
 createBackendPodsOrFail creates one pod in each cluster, and returns the created pods (in the same order as clusterClientSets).
 If creation of any pod fails, the test fails (possibly with a partially created set of pods). No retries are attempted.
 */
-func createBackendPodsOrFail(clusters []cluster, namespace string, name string) []*v1.Pod {
+func createBackendPodsOrFail(clusters map[string]cluster, namespace string, name string) {
 	pod := &v1.Pod{
 		ObjectMeta: v1.ObjectMeta{
 			Name: name,
@@ -509,36 +497,41 @@ func createBackendPodsOrFail(clusters []cluster, namespace string, name string)
 			RestartPolicy: v1.RestartPolicyAlways,
 		},
 	}
-	pods := make([]*v1.Pod, len(clusters))
-	for i, c := range clusters {
-		By(fmt.Sprintf("Creating pod %q in namespace %q in cluster %d", pod.Name, namespace, i))
+	for name, c := range clusters {
+		By(fmt.Sprintf("Creating pod %q in namespace %q in cluster %q", pod.Name, namespace, name))
 		createdPod, err := c.Clientset.Core().Pods(namespace).Create(pod)
-		framework.ExpectNoError(err, "Creating pod %q in namespace %q in cluster %d", name, namespace, i)
-		By(fmt.Sprintf("Successfully created pod %q in namespace %q in cluster %d: %v", pod.Name, namespace, i, *createdPod))
-		pods[i] = createdPod
+		framework.ExpectNoError(err, "Creating pod %q in namespace %q in cluster %q", name, namespace, name)
+		By(fmt.Sprintf("Successfully created pod %q in namespace %q in cluster %q: %v", pod.Name, namespace, name, *createdPod))
+		c.backendPod = createdPod
 	}
-	return pods
 }
 
 /*
-deleteBackendPodsOrFail deletes one pod from each cluster (unless pods[n] is nil for that cluster)
-If deletion of any pod fails, the test fails (possibly with a partially deleted set of pods). No retries are attempted.
+deletes exactly one backend pod which must not be nil
+The test fails if there are any errors.
 */
-func deleteBackendPodsOrFail(clusters []cluster, namespace string, pods []*v1.Pod) {
-	if len(clusters) != len(pods) {
-		Fail(fmt.Sprintf("Internal error: number of clients (%d) does not equal number of pods (%d). One pod per client please.", len(clusters), len(pods)))
-	}
-	for i, c := range clusters {
-		if pods[i] != nil {
-			err := c.Clientset.Core().Pods(namespace).Delete(pods[i].Name, api.NewDeleteOptions(0))
-			if errors.IsNotFound(err) {
-				By(fmt.Sprintf("Pod %q in namespace %q in cluster %d does not exist. No need to delete it.", pods[i].Name, namespace, i))
-			} else {
-				framework.ExpectNoError(err, "Deleting pod %q in namespace %q from cluster %d", pods[i].Name, namespace, i)
-			}
-			By(fmt.Sprintf("Backend pod %q in namespace %q in cluster %d deleted or does not exist", pods[i].Name, namespace, i))
+func deleteOneBackendPodOrFail(c cluster) {
+	pod := c.backendPod
+	err := c.Clientset.Core().Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0))
+	if errors.IsNotFound(err) {
+		By(fmt.Sprintf("Pod %q in namespace %q in cluster %q does not exist. No need to delete it.", pod.Name, pod.Namespace, c.name))
+	} else {
+		framework.ExpectNoError(err, "Deleting pod %q in namespace %q from cluster %q", pod.Name, pod.Namespace, c.name)
+	}
+	By(fmt.Sprintf("Backend pod %q in namespace %q in cluster %q deleted or does not exist", pod.Name, pod.Namespace, c.name))
+}
+
+/*
+deleteBackendPodsOrFail deletes one pod from each cluster that has one.
+If deletion of any pod fails, the test fails (possibly with a partially deleted set of pods). No retries are attempted.
+*/
+func deleteBackendPodsOrFail(clusters map[string]cluster, namespace string) {
+	for name, c := range clusters {
+		if c.backendPod != nil {
+			deleteOneBackendPodOrFail(c)
+			c.backendPod = nil
 		} else {
-			By(fmt.Sprintf("No backend pod to delete for cluster %d", i))
+			By(fmt.Sprintf("No backend pod to delete for cluster %q", name))
 		}
 	}
 }
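
The backend-pod cleanup above is now split into a single-cluster helper (deleteOneBackendPodOrFail, used for the primary cluster) and a sweep over the whole map (deleteBackendPodsOrFail). A self-contained sketch of that shape follows; the types, the fake deleteFromAPI call, and the error handling are stand-ins for the e2e framework and clientset, not their real APIs.

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for the API server's "not found" error in this sketch.
var errNotFound = errors.New("pod not found")

type pod struct{ namespace, name string }

type cluster struct {
	name       string
	backendPod *pod
}

// deleteFromAPI pretends to call the cluster's API server; pods named "gone"
// are reported as already missing.
func deleteFromAPI(p *pod) error {
	if p.name == "gone" {
		return errNotFound
	}
	fmt.Printf("deleted %s/%s\n", p.namespace, p.name)
	return nil
}

// deleteOneBackendPod mirrors deleteOneBackendPodOrFail above: delete a single
// cluster's backend pod (which must not be nil) and treat "already gone" as success.
func deleteOneBackendPod(c *cluster) error {
	if err := deleteFromAPI(c.backendPod); err != nil && !errors.Is(err, errNotFound) {
		return fmt.Errorf("deleting backend pod in cluster %q: %w", c.name, err)
	}
	// The sketch clears the field here since the map holds pointers.
	c.backendPod = nil
	return nil
}

// deleteBackendPods mirrors deleteBackendPodsOrFail above: sweep every cluster
// that still has a backend pod recorded.
func deleteBackendPods(clusters map[string]*cluster) error {
	for name, c := range clusters {
		if c.backendPod == nil {
			fmt.Printf("no backend pod to delete for cluster %q\n", name)
			continue
		}
		if err := deleteOneBackendPod(c); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	clusters := map[string]*cluster{
		"cluster-a": {name: "cluster-a", backendPod: &pod{namespace: "e2e", name: "backend"}},
		"cluster-b": {name: "cluster-b", backendPod: &pod{namespace: "e2e", name: "gone"}},
		"cluster-c": {name: "cluster-c"},
	}
	// First remove only the primary cluster's pod, then sweep the rest.
	if err := deleteOneBackendPod(clusters["cluster-a"]); err != nil {
		panic(err)
	}
	if err := deleteBackendPods(clusters); err != nil {
		panic(err)
	}
}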