fed: Refactor e2e cluster functions into framework for reuse

Maru Newby 2017-03-21 22:28:59 -07:00
parent 27cf62ac29
commit d4552c52bf
15 changed files with 249 additions and 206 deletions
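The shape of the refactor, visible in the hunks below: the unexported cluster struct and the package-level getRegisteredClusters helper move out of the e2e_federation test utilities into the reusable fedframework package, becoming the exported Cluster, ClusterMap, and Framework.GetRegisteredClusters. A minimal before/after sketch of a typical call site (surrounding test body elided):

    // Before: tests depended on a package-level helper and an unexported type.
    var clusters map[string]*cluster
    clusters, _ = getRegisteredClusters(UserAgentName, f)

    // After: the framework owns cluster discovery and exposes exported types.
    var clusters fedframework.ClusterMap
    clusters, _ = f.GetRegisteredClusters()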

View File

@ -49,9 +49,7 @@ go_library(
"//vendor:k8s.io/apimachinery/pkg/util/rand",
"//vendor:k8s.io/apimachinery/pkg/util/uuid",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/client-go/rest",
"//vendor:k8s.io/client-go/tools/clientcmd",
"//vendor:k8s.io/client-go/tools/clientcmd/api",
],
)

View File

@ -18,14 +18,10 @@ package e2e_federation
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
"k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
fedframework "k8s.io/kubernetes/test/e2e_federation/framework"
@ -61,7 +57,7 @@ var _ = framework.KubeDescribe("Federation apiserver [Feature:Federation]", func
framework.Logf("Checking that %d clusters are Ready", len(contexts))
for _, context := range contexts {
clusterIsReadyOrFail(f, &context)
fedframework.ClusterIsReadyOrFail(f, &context)
}
framework.Logf("%d clusters are Ready", len(contexts))
@ -122,19 +118,3 @@ func newService(name, namespace string) *v1.Service {
},
}
}
// Verify that the cluster is marked ready.
func isReady(clusterName string, clientset *federation_clientset.Clientset) error {
return wait.PollImmediate(time.Second, 5*time.Minute, func() (bool, error) {
c, err := clientset.Federation().Clusters().Get(clusterName, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, condition := range c.Status.Conditions {
if condition.Type == federationapi.ClusterReady && condition.Status == v1.ConditionTrue {
return true, nil
}
}
return false, nil
})
}

View File

@ -43,7 +43,7 @@ const (
// Create/delete daemonset api objects
var _ = framework.KubeDescribe("Federation daemonsets [Feature:Federation]", func() {
var clusters map[string]*cluster // All clusters, keyed by cluster name
var clusters fedframework.ClusterMap // All clusters, keyed by cluster name
f := fedframework.NewDefaultFederatedFramework("federated-daemonset")
@ -51,7 +51,7 @@ var _ = framework.KubeDescribe("Federation daemonsets [Feature:Federation]", fun
BeforeEach(func() {
fedframework.SkipUnlessFederated(f.ClientSet)
clusters, _ = getRegisteredClusters(UserAgentName, f)
clusters, _ = f.GetRegisteredClusters()
})
AfterEach(func() {
@ -114,7 +114,7 @@ func deleteAllDaemonSetsOrFail(clientset *fedclientset.Clientset, nsName string)
// verifyCascadingDeletionForDS verifies that daemonsets are deleted from
// underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true.
func verifyCascadingDeletionForDS(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) {
func verifyCascadingDeletionForDS(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName string) {
daemonset := createDaemonSetOrFail(clientset, nsName)
daemonsetName := daemonset.Name
// Check subclusters if the daemonset was created there.
@ -233,7 +233,7 @@ func updateDaemonSetOrFail(clientset *fedclientset.Clientset, namespace string)
return newDaemonSet
}
func waitForDaemonSetShardsOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters map[string]*cluster) {
func waitForDaemonSetShardsOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters fedframework.ClusterMap) {
framework.Logf("Waiting for daemonset %q in %d clusters", daemonset.Name, len(clusters))
for _, c := range clusters {
waitForDaemonSetOrFail(c.Clientset, namespace, daemonset, true, FederatedDaemonSetTimeout)
@ -263,7 +263,7 @@ func waitForDaemonSetOrFail(clientset *kubeclientset.Clientset, namespace string
}
}
func waitForDaemonSetShardsUpdatedOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters map[string]*cluster) {
func waitForDaemonSetShardsUpdatedOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters fedframework.ClusterMap) {
framework.Logf("Waiting for daemonset %q in %d clusters", daemonset.Name, len(clusters))
for _, c := range clusters {
waitForDaemonSetUpdateOrFail(c.Clientset, namespace, daemonset, FederatedDaemonSetTimeout)

View File

@ -69,11 +69,11 @@ var _ = framework.KubeDescribe("Federation deployments [Feature:Federation]", fu
// e2e cases for federated deployment controller
Describe("Federated Deployment", func() {
var (
clusters map[string]*cluster
clusters fedframework.ClusterMap
)
BeforeEach(func() {
fedframework.SkipUnlessFederated(f.ClientSet)
clusters, _ = getRegisteredClusters(UserAgentName, f)
clusters, _ = f.GetRegisteredClusters()
})
AfterEach(func() {
@ -140,7 +140,7 @@ func deleteAllDeploymentsOrFail(clientset *fedclientset.Clientset, nsName string
// verifyCascadingDeletionForDeployment verifies that deployments are deleted
// from underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true.
func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) {
func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName string) {
deployment := createDeploymentOrFail(clientset, nsName)
deploymentName := deployment.Name
// Check subclusters if the deployment was created there.
@ -179,13 +179,13 @@ func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clu
}
}
func waitForDeploymentOrFail(c *fedclientset.Clientset, namespace string, deploymentName string, clusters map[string]*cluster) {
func waitForDeploymentOrFail(c *fedclientset.Clientset, namespace string, deploymentName string, clusters fedframework.ClusterMap) {
err := waitForDeployment(c, namespace, deploymentName, clusters)
framework.ExpectNoError(err, "Failed to verify deployment %q/%q, err: %v", namespace, deploymentName, err)
}
func waitForDeployment(c *fedclientset.Clientset, namespace string, deploymentName string, clusters map[string]*cluster) error {
err := wait.Poll(10*time.Second, federatedDefaultTestTimeout, func() (bool, error) {
func waitForDeployment(c *fedclientset.Clientset, namespace string, deploymentName string, clusters fedframework.ClusterMap) error {
err := wait.Poll(10*time.Second, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
fdep, err := c.Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
@ -194,12 +194,12 @@ func waitForDeployment(c *fedclientset.Clientset, namespace string, deploymentNa
for _, cluster := range clusters {
dep, err := cluster.Extensions().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
By(fmt.Sprintf("Failed getting deployment: %q/%q/%q, err: %v", cluster.name, namespace, deploymentName, err))
By(fmt.Sprintf("Failed getting deployment: %q/%q/%q, err: %v", cluster.Name, namespace, deploymentName, err))
return false, err
}
if err == nil {
if !verifyDeployment(fdep, dep) {
By(fmt.Sprintf("Deployment meta or spec not match for cluster %q:\n federation: %v\n cluster: %v", cluster.name, fdep, dep))
By(fmt.Sprintf("Deployment meta or spec not match for cluster %q:\n federation: %v\n cluster: %v", cluster.Name, fdep, dep))
return false, nil
}
specReplicas += *dep.Spec.Replicas
@ -259,7 +259,7 @@ func deleteDeploymentOrFail(clientset *fedclientset.Clientset, nsName string, de
}
// Wait for the deployment to be deleted.
err = wait.Poll(10*time.Second, federatedDefaultTestTimeout, func() (bool, error) {
err = wait.Poll(10*time.Second, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
_, err := clientset.Extensions().Deployments(nsName).Get(deploymentName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true, nil

View File

@ -10,6 +10,7 @@ load(
go_library(
name = "go_default_library",
srcs = [
"cluster.go",
"framework.go",
"util.go",
],
@ -31,6 +32,7 @@ go_library(
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/client-go/rest",
"//vendor:k8s.io/client-go/tools/clientcmd",
"//vendor:k8s.io/client-go/tools/clientcmd/api",
],
)

View File

@ -0,0 +1,166 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
"k8s.io/kubernetes/pkg/api/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
KubeAPIQPS float32 = 20.0
KubeAPIBurst = 30
userAgentName = "federation-e2e"
federatedNamespaceTimeout = 5 * time.Minute
federatedClustersWaitTimeout = 1 * time.Minute
)
// ClusterMap is a map of Cluster instances keyed by cluster name
type ClusterMap map[string]*Cluster
// Cluster keeps track of the assorted objects and state related to each cluster
// in the federation
type Cluster struct {
Name string
*kubeclientset.Clientset
NamespaceCreated bool // Did we need to create a new namespace in this cluster? If so, we should delete it.
BackendPod *v1.Pod // The backend pod, if one's been created.
}
// cannot be moved to util, since By and Expect must be called from within a Ginkgo test
func getRegisteredClusters(f *Framework) (ClusterMap, string) {
clusters := make(ClusterMap)
contexts := f.GetUnderlyingFederatedContexts()
By("Obtaining a list of all the clusters")
clusterList := waitForAllRegisteredClusters(f, len(contexts))
framework.Logf("Checking that %d clusters are Ready", len(contexts))
for _, context := range contexts {
ClusterIsReadyOrFail(f, &context)
}
framework.Logf("%d clusters are Ready", len(contexts))
primaryClusterName := clusterList.Items[0].Name
By(fmt.Sprintf("Labeling %q as the first cluster", primaryClusterName))
for i, c := range clusterList.Items {
framework.Logf("Creating a clientset for the cluster %s", c.Name)
Expect(framework.TestContext.KubeConfig).ToNot(Equal(""), "KubeConfig must be specified to load clusters' client config")
clusters[c.Name] = &Cluster{c.Name, createClientsetForCluster(c, i, userAgentName), false, nil}
}
waitForNamespaceInFederatedClusters(clusters, f.FederationNamespace.Name, federatedNamespaceTimeout)
return clusters, primaryClusterName
}
// waitForAllRegisteredClusters waits for all clusters defined in the e2e context to be registered,
// returning the ClusterList once the number of listed clusters equals clusterCount
func waitForAllRegisteredClusters(f *Framework, clusterCount int) *federationapi.ClusterList {
var clusterList *federationapi.ClusterList
if err := wait.PollImmediate(framework.Poll, federatedClustersWaitTimeout, func() (bool, error) {
var err error
clusterList, err = f.FederationClientset.Federation().Clusters().List(metav1.ListOptions{})
if err != nil {
return false, err
}
framework.Logf("%d clusters registered, waiting for %d", len(clusterList.Items), clusterCount)
if len(clusterList.Items) == clusterCount {
return true, nil
}
return false, nil
}); err != nil {
framework.Failf("Failed to list registered clusters: %+v", err)
}
return clusterList
}
func createClientsetForCluster(c federationapi.Cluster, i int, userAgentName string) *kubeclientset.Clientset {
kubecfg, err := clientcmd.LoadFromFile(framework.TestContext.KubeConfig)
framework.ExpectNoError(err, "error loading KubeConfig: %v", err)
cfgOverride := &clientcmd.ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
Server: c.Spec.ServerAddressByClientCIDRs[0].ServerAddress,
},
}
ccfg := clientcmd.NewNonInteractiveClientConfig(*kubecfg, c.Name, cfgOverride, clientcmd.NewDefaultClientConfigLoadingRules())
cfg, err := ccfg.ClientConfig()
framework.ExpectNoError(err, "Error creating client config in cluster #%d (%q)", i, c.Name)
cfg.QPS = KubeAPIQPS
cfg.Burst = KubeAPIBurst
return kubeclientset.NewForConfigOrDie(restclient.AddUserAgent(cfg, userAgentName))
}
// waitForNamespaceInFederatedClusters waits for the federated namespace to be created in all registered clusters
func waitForNamespaceInFederatedClusters(clusters ClusterMap, nsName string, timeout time.Duration) {
for name, c := range clusters {
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
_, err := c.Clientset.Core().Namespaces().Get(nsName, metav1.GetOptions{})
if err != nil {
By(fmt.Sprintf("Waiting for namespace %q to be created in cluster %q, err: %v", nsName, name, err))
return false, nil
}
By(fmt.Sprintf("Namespace %q exists in cluster %q", nsName, name))
return true, nil
})
framework.ExpectNoError(err, "Failed to verify federated namespace %q creation in cluster %q", nsName, name)
}
}
// ClusterIsReadyOrFail checks whether the federated cluster of the provided context is ready
func ClusterIsReadyOrFail(f *Framework, context *E2EContext) {
c, err := f.FederationClientset.Federation().Clusters().Get(context.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("get cluster: %+v", err))
if c.ObjectMeta.Name != context.Name {
framework.Failf("cluster name does not match input context: actual=%+v, expected=%+v", c, context)
}
err = isReady(context.Name, f.FederationClientset)
framework.ExpectNoError(err, fmt.Sprintf("unexpected error in verifying if cluster %s is ready: %+v", context.Name, err))
framework.Logf("Cluster %s is Ready", context.Name)
}
// Verify that the cluster is marked ready.
func isReady(clusterName string, clientset *fedclientset.Clientset) error {
return wait.PollImmediate(time.Second, 5*time.Minute, func() (bool, error) {
c, err := clientset.Federation().Clusters().Get(clusterName, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, condition := range c.Status.Conditions {
if condition.Type == federationapi.ClusterReady && condition.Status == v1.ConditionTrue {
return true, nil
}
}
return false, nil
})
}
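With cluster.go in place, a federated test needs only its framework handle to obtain ready clusters. A hedged usage sketch, assuming the same imports as the test files in this commit (ginkgo, gomega, framework, fedframework); the suite name and assertion are illustrative, not part of this commit:

    var _ = framework.KubeDescribe("Example suite [Feature:Federation]", func() {
        f := fedframework.NewDefaultFederatedFramework("example")
        var clusters fedframework.ClusterMap

        BeforeEach(func() {
            fedframework.SkipUnlessFederated(f.ClientSet)
            // Blocks until every cluster is registered and Ready and the
            // federated namespace exists in each, then returns the map
            // and the primary cluster's name.
            clusters, _ = f.GetRegisteredClusters()
        })

        It("builds a clientset per registered cluster", func() {
            for name, c := range clusters {
                Expect(c.Clientset).ToNot(BeNil(), "cluster %q should have a clientset", name)
            }
        })
    })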

View File

@ -231,3 +231,7 @@ func (f *Framework) GetUnderlyingFederatedContexts() []E2EContext {
return e2eContexts
}
// GetRegisteredClusters waits for all clusters in the federation to be
// registered and Ready, returning a ClusterMap keyed by cluster name and the
// name of the primary cluster.
func (f *Framework) GetRegisteredClusters() (ClusterMap, string) {
return getRegisteredClusters(f)
}

View File

@ -37,6 +37,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
)
// FederatedDefaultTestTimeout is the default timeout for federation e2e helpers.
const FederatedDefaultTestTimeout = 5 * time.Minute
// Detects whether the federation namespace exists in the underlying cluster
func SkipUnlessFederated(c clientset.Interface) {
federationNS := framework.FederationSystemNamespace()
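Exporting FederatedDefaultTestTimeout lets helpers outside the framework package share a single polling bound, as the hunks below show. A minimal sketch of that polling pattern as a standalone file (the helper name is hypothetical):

    package e2e_federation

    import (
        "time"

        "k8s.io/apimachinery/pkg/api/errors"
        "k8s.io/apimachinery/pkg/util/wait"

        fedframework "k8s.io/kubernetes/test/e2e_federation/framework"
    )

    // waitForObjectGone is a hypothetical helper mirroring the deletion waits
    // below: poll until a federated object is gone, bounded by the exported
    // default timeout.
    func waitForObjectGone(get func() error) error {
        return wait.Poll(5*time.Second, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
            err := get()
            if errors.IsNotFound(err) {
                return true, nil // object deleted
            }
            // Keep polling while err is nil; abort on unexpected errors.
            return false, err
        })
    }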

View File

@ -142,7 +142,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
// e2e cases for federation ingress controller
var _ = Describe("Federated Ingresses", func() {
var (
clusters map[string]*cluster // All clusters, keyed by cluster name
clusters fedframework.ClusterMap // All clusters, keyed by cluster name
primaryClusterName, federationName, ns string
jig *federationTestJig
service *v1.Service
@ -157,7 +157,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
federationName = DefaultFederationName
}
jig = newFederationTestJig(f.FederationClientset)
clusters, primaryClusterName = getRegisteredClusters(UserAgentName, f)
clusters, primaryClusterName = f.GetRegisteredClusters()
ns = f.FederationNamespace.Name
// create backend service
service = createServiceOrFail(f.FederationClientset, ns, FederatedIngressServiceName)
@ -285,7 +285,7 @@ func equivalentIngress(federatedIngress, clusterIngress v1beta1.Ingress) bool {
// verifyCascadingDeletionForIngress verifies that ingresses are deleted from
// underlying clusters when orphan dependents is false and they are not deleted
// when orphan dependents is true.
func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) {
func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName string) {
ingress := createIngressOrFail(clientset, nsName, FederatedIngressServiceName, FederatedIngressTLSSecretName)
ingressName := ingress.Name
// Check subclusters if the ingress was created there.
@ -338,7 +338,7 @@ func waitForIngressOrFail(clientset *kubeclientset.Clientset, namespace string,
}
// waitForIngressShardsOrFail waits for the ingress to appear in all clusters
func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) {
func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterMap) {
framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
for _, c := range clusters {
waitForIngressOrFail(c.Clientset, namespace, ingress, true, FederatedIngressTimeout)
@ -346,7 +346,7 @@ func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clus
}
// waitForIngressShardsUpdatedOrFail waits for the ingress to be updated in all clusters
func waitForIngressShardsUpdatedOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) {
func waitForIngressShardsUpdatedOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterMap) {
framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
for _, c := range clusters {
waitForIngressUpdateOrFail(c.Clientset, namespace, ingress, FederatedIngressTimeout)
@ -374,7 +374,7 @@ func waitForIngressUpdateOrFail(clientset *kubeclientset.Clientset, namespace st
}
// waitForIngressShardsGoneOrFail waits for the ingress to disappear in all clusters
func waitForIngressShardsGoneOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) {
func waitForIngressShardsGoneOrFail(namespace string, ingress *v1beta1.Ingress, clusters fedframework.ClusterMap) {
framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
for _, c := range clusters {
waitForIngressOrFail(c.Clientset, namespace, ingress, false, FederatedIngressTimeout)

View File

@ -44,13 +44,13 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func
f := fedframework.NewDefaultFederatedFramework("federation-namespace")
Describe("Namespace objects", func() {
var clusters map[string]*cluster // All clusters, keyed by cluster name
var clusters fedframework.ClusterMap // All clusters, keyed by cluster name
var nsName string
BeforeEach(func() {
fedframework.SkipUnlessFederated(f.ClientSet)
clusters, _ = getRegisteredClusters(UserAgentName, f)
clusters, _ = f.GetRegisteredClusters()
})
AfterEach(func() {
@ -188,7 +188,7 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func
// verifyNsCascadingDeletion verifies that namespaces are deleted from
// underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true.
func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters map[string]*cluster, orphanDependents *bool) string {
func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters fedframework.ClusterMap, orphanDependents *bool) string {
nsName := createNamespace(nsClient)
// Check subclusters if the namespace was created there.
By(fmt.Sprintf("Waiting for namespace %s to be created in all underlying clusters", nsName))

View File

@ -74,12 +74,12 @@ var _ = framework.KubeDescribe("Federated ReplicaSet [Feature:Federation]", func
// e2e cases for federated replicaset controller
Describe("Features", func() {
var (
clusters map[string]*cluster
clusters fedframework.ClusterMap
)
BeforeEach(func() {
fedframework.SkipUnlessFederated(f.ClientSet)
clusters, _ = getRegisteredClusters(UserAgentName, f)
clusters, _ = f.GetRegisteredClusters()
})
// e2e cases for federated replicaset controller
@ -200,7 +200,7 @@ var _ = framework.KubeDescribe("Federated ReplicaSet [Feature:Federation]", func
})
})
func createAndWaitForReplicasetOrFail(clientset *fedclientset.Clientset, nsName string, clusters map[string]*cluster) *v1beta1.ReplicaSet {
func createAndWaitForReplicasetOrFail(clientset *fedclientset.Clientset, nsName string, clusters fedframework.ClusterMap) *v1beta1.ReplicaSet {
rs := createReplicaSetOrFail(clientset, newReplicaSet(nsName, FederationReplicaSetPrefix, 5, nil))
// Check subclusters if the replicaSet was created there.
By(fmt.Sprintf("Waiting for replica sets %s to be created in all underlying clusters", rs.Name))
@ -220,7 +220,7 @@ func createAndWaitForReplicasetOrFail(clientset *fedclientset.Clientset, nsName
return rs
}
func createAndUpdateFedRSWithPref(clientset *fedclientset.Clientset, nsName string, clusters map[string]*cluster, pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) *v1beta1.ReplicaSet {
func createAndUpdateFedRSWithPref(clientset *fedclientset.Clientset, nsName string, clusters fedframework.ClusterMap, pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) *v1beta1.ReplicaSet {
framework.Logf("Replicas: %d, Preference: %#v", replicas, pref)
rs := newReplicaSet(nsName, FederationReplicaSetPrefix, replicas, pref)
rs = createReplicaSetOrFail(clientset, rs)
@ -254,7 +254,7 @@ func deleteAllReplicaSetsOrFail(clientset *fedclientset.Clientset, nsName string
// verifyCascadingDeletionForReplicaSet verifies that replicaSets are deleted
// from underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true.
func verifyCascadingDeletionForReplicaSet(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName, rsName string) {
func verifyCascadingDeletionForReplicaSet(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName, rsName string) {
By(fmt.Sprintf("Deleting replica set %s", rsName))
deleteReplicaSetOrFail(clientset, nsName, rsName, orphanDependents)
@ -273,7 +273,7 @@ func verifyCascadingDeletionForReplicaSet(clientset *fedclientset.Clientset, clu
}
}
func generateFedRSPrefsWithWeight(clusters map[string]*cluster) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
func generateFedRSPrefsWithWeight(clusters fedframework.ClusterMap) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
By("Generating replicaset preferences with weights")
clusterNames := extractClusterNames(clusters)
pref = &federation.FederatedReplicaSetPreferences{
@ -294,7 +294,7 @@ func generateFedRSPrefsWithWeight(clusters map[string]*cluster) (pref *federatio
return
}
func generateFedRSPrefsWithMin(clusters map[string]*cluster) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
func generateFedRSPrefsWithMin(clusters fedframework.ClusterMap) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
By("Generating replicaset preferences with min replicas")
clusterNames := extractClusterNames(clusters)
pref = &federation.FederatedReplicaSetPreferences{
@ -321,7 +321,7 @@ func generateFedRSPrefsWithMin(clusters map[string]*cluster) (pref *federation.F
return
}
func generateFedRSPrefsWithMax(clusters map[string]*cluster) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
func generateFedRSPrefsWithMax(clusters fedframework.ClusterMap) (pref *federation.FederatedReplicaSetPreferences, replicas int32, expect map[string]int32) {
By("Generating replicaset preferences with max replicas")
clusterNames := extractClusterNames(clusters)
pref = &federation.FederatedReplicaSetPreferences{
@ -354,7 +354,7 @@ func updateFedRSPrefsRebalance(pref *federation.FederatedReplicaSetPreferences,
return pref
}
func generateFedRSPrefsForRebalancing(clusters map[string]*cluster) (pref1, pref2 *federation.FederatedReplicaSetPreferences, replicas int32, expect1, expect2 map[string]int32) {
func generateFedRSPrefsForRebalancing(clusters fedframework.ClusterMap) (pref1, pref2 *federation.FederatedReplicaSetPreferences, replicas int32, expect1, expect2 map[string]int32) {
By("Generating replicaset for rebalancing")
clusterNames := extractClusterNames(clusters)
replicas = 3
@ -382,14 +382,14 @@ func generateFedRSPrefsForRebalancing(clusters map[string]*cluster) (pref1, pref
return
}
func waitForReplicaSetOrFail(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters map[string]*cluster, expect map[string]int32) {
func waitForReplicaSetOrFail(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters fedframework.ClusterMap, expect map[string]int32) {
err := waitForReplicaSet(c, namespace, replicaSetName, clusters, expect)
framework.ExpectNoError(err, "Failed to verify replica set \"%s/%s\", err: %v", namespace, replicaSetName, err)
}
func waitForReplicaSet(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters map[string]*cluster, expect map[string]int32) error {
func waitForReplicaSet(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters fedframework.ClusterMap, expect map[string]int32) error {
framework.Logf("waitForReplicaSet: %s/%s; clusters: %v; expect: %v", namespace, replicaSetName, clusters, expect)
err := wait.Poll(10*time.Second, federatedDefaultTestTimeout, func() (bool, error) {
err := wait.Poll(10*time.Second, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
frs, err := c.ReplicaSets(namespace).Get(replicaSetName, metav1.GetOptions{})
if err != nil {
return false, err
@ -398,21 +398,21 @@ func waitForReplicaSet(c *fedclientset.Clientset, namespace string, replicaSetNa
for _, cluster := range clusters {
rs, err := cluster.ReplicaSets(namespace).Get(replicaSetName, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
framework.Logf("Failed getting replicaset: \"%s/%s/%s\", err: %v", cluster.name, namespace, replicaSetName, err)
framework.Logf("Failed getting replicaset: \"%s/%s/%s\", err: %v", cluster.Name, namespace, replicaSetName, err)
return false, err
}
if errors.IsNotFound(err) {
if expect != nil && expect[cluster.name] > 0 {
framework.Logf("Replicaset \"%s/%s/%s\" with replica count %d does not exist", cluster.name, namespace, replicaSetName, expect[cluster.name])
if expect != nil && expect[cluster.Name] > 0 {
framework.Logf("Replicaset \"%s/%s/%s\" with replica count %d does not exist", cluster.Name, namespace, replicaSetName, expect[cluster.Name])
return false, nil
}
} else {
if !equivalentReplicaSet(frs, rs) {
framework.Logf("Replicaset meta or spec does not match for cluster %q:\n federation: %v\n cluster: %v", cluster.name, frs, rs)
framework.Logf("Replicaset meta or spec does not match for cluster %q:\n federation: %v\n cluster: %v", cluster.Name, frs, rs)
return false, nil
}
if expect != nil && *rs.Spec.Replicas < expect[cluster.name] {
framework.Logf("Replicas do not match for \"%s/%s/%s\": expected: >= %v, actual: %v", cluster.name, namespace, replicaSetName, expect[cluster.name], *rs.Spec.Replicas)
if expect != nil && *rs.Spec.Replicas < expect[cluster.Name] {
framework.Logf("Replicas do not match for \"%s/%s/%s\": expected: >= %v, actual: %v", cluster.Name, namespace, replicaSetName, expect[cluster.Name], *rs.Spec.Replicas)
return false, nil
}
specReplicas += *rs.Spec.Replicas
@ -530,7 +530,7 @@ func newReplicaSetWithName(namespace string, name string, replicas int32, pref *
return rs
}
func extractClusterNames(clusters map[string]*cluster) []string {
func extractClusterNames(clusters fedframework.ClusterMap) []string {
clusterNames := make([]string, 0, len(clusters))
for clusterName := range clusters {
clusterNames = append(clusterNames, clusterName)

View File

@ -42,7 +42,7 @@ const (
// Create/delete secret api objects
var _ = framework.KubeDescribe("Federation secrets [Feature:Federation]", func() {
var clusters map[string]*cluster // All clusters, keyed by cluster name
var clusters fedframework.ClusterMap // All clusters, keyed by cluster name
f := fedframework.NewDefaultFederatedFramework("federated-secret")
@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("Federation secrets [Feature:Federation]", func()
BeforeEach(func() {
fedframework.SkipUnlessFederated(f.ClientSet)
clusters, _ = getRegisteredClusters(UserAgentName, f)
clusters, _ = f.GetRegisteredClusters()
})
AfterEach(func() {
@ -108,7 +108,7 @@ func deleteAllSecretsOrFail(clientset *fedclientset.Clientset, nsName string) {
// verifyCascadingDeletionForSecret verifies that secrets are deleted from
// underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true.
func verifyCascadingDeletionForSecret(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) {
func verifyCascadingDeletionForSecret(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName string) {
secret := createSecretOrFail(clientset, nsName)
secretName := secret.Name
// Check subclusters if the secret was created there.
@ -213,7 +213,7 @@ func updateSecretOrFail(clientset *fedclientset.Clientset, nsName string, secret
return newSecret
}
func waitForSecretShardsOrFail(nsName string, secret *v1.Secret, clusters map[string]*cluster) {
func waitForSecretShardsOrFail(nsName string, secret *v1.Secret, clusters fedframework.ClusterMap) {
framework.Logf("Waiting for secret %q in %d clusters", secret.Name, len(clusters))
for _, c := range clusters {
waitForSecretOrFail(c.Clientset, nsName, secret, true, FederatedSecretTimeout)
@ -243,7 +243,7 @@ func waitForSecretOrFail(clientset *kubeclientset.Clientset, nsName string, secr
}
}
func waitForSecretShardsUpdatedOrFail(nsName string, secret *v1.Secret, clusters map[string]*cluster) {
func waitForSecretShardsUpdatedOrFail(nsName string, secret *v1.Secret, clusters fedframework.ClusterMap) {
framework.Logf("Waiting for secret %q in %d clusters", secret.Name, len(clusters))
for _, c := range clusters {
waitForSecretUpdateOrFail(c.Clientset, nsName, secret, FederatedSecretTimeout)

View File

@ -47,7 +47,7 @@ var FederatedServiceLabels = map[string]string{
var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func() {
f := fedframework.NewDefaultFederatedFramework("federated-service")
var clusters map[string]*cluster // All clusters, keyed by cluster name
var clusters fedframework.ClusterMap // All clusters, keyed by cluster name
var federationName string
var primaryClusterName string // The name of the "primary" cluster
@ -84,7 +84,7 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
federationName = DefaultFederationName
}
clusters, primaryClusterName = getRegisteredClusters(UserAgentName, f)
clusters, primaryClusterName = f.GetRegisteredClusters()
})
Describe("Federated Service", func() {
@ -287,7 +287,7 @@ var _ = framework.KubeDescribe("Federated Services [Feature:Federation]", func()
// verifyCascadingDeletionForService verifies that services are deleted from
// underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true.
func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) {
func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, clusters fedframework.ClusterMap, orphanDependents *bool, nsName string) {
service := createServiceOrFail(clientset, nsName, FederatedServiceName)
serviceName := service.Name
// Check subclusters if the service was created there.

View File

@ -129,9 +129,9 @@ func federationControlPlaneUpgrade(f *fedframework.Framework) {
func federatedClustersUpgrade(f *fedframework.Framework) {
k8sVersion, err := framework.RealVersion(framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
clusters, _ := getRegisteredClusters(UserAgentName, f)
clusters, _ := f.GetRegisteredClusters()
for _, cluster := range clusters {
framework.ExpectNoError(fedframework.MasterUpgrade(cluster.name, k8sVersion))
framework.ExpectNoError(fedframework.MasterUpgrade(cluster.Name, k8sVersion))
framework.ExpectNoError(framework.CheckMasterVersion(cluster.Clientset, k8sVersion))
// TODO: Need to add Node upgrade. Add once this framework is stable

View File

@ -25,9 +25,6 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
"k8s.io/kubernetes/pkg/api"
@ -42,18 +39,12 @@ import (
)
var (
KubeAPIQPS float32 = 20.0
KubeAPIBurst = 30
DefaultFederationName = "e2e-federation"
UserAgentName = "federation-e2e"
// We use this to decide how long to wait for our DNS probes to succeed.
DNSTTL = 180 * time.Second // TODO: make k8s.io/kubernetes/federation/pkg/federation-controller/service.minDnsTtl exported, and import it here.
)
const (
federatedDefaultTestTimeout = 5 * time.Minute
federatedClustersWaitTimeout = 1 * time.Minute
// [30000, 32767] is the allowed default service nodeport range and our
// tests just use the defaults.
FederatedSvcNodePortFirst = 30000
@ -62,15 +53,6 @@ const (
var FederationSuite common.Suite
// cluster keeps track of the assorted objects and state related to each cluster
// in the federation
type cluster struct {
name string
*kubeclientset.Clientset
namespaceCreated bool // Did we need to create a new namespace in this cluster? If so, we should delete it.
backendPod *v1.Pod // The backend pod, if one's been created.
}
func createClusterObjectOrFail(f *fedframework.Framework, context *fedframework.E2EContext) {
framework.Logf("Creating cluster object: %s (%s, secret: %s)", context.Name, context.Cluster.Cluster.Server, context.Name)
cluster := federationapi.Cluster{
@ -97,58 +79,8 @@ func createClusterObjectOrFail(f *fedframework.Framework, context *fedframework.
framework.Logf("Successfully created cluster object: %s (%s, secret: %s)", context.Name, context.Cluster.Cluster.Server, context.Name)
}
func clusterIsReadyOrFail(f *fedframework.Framework, context *fedframework.E2EContext) {
c, err := f.FederationClientset.Federation().Clusters().Get(context.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("get cluster: %+v", err))
if c.ObjectMeta.Name != context.Name {
framework.Failf("cluster name does not match input context: actual=%+v, expected=%+v", c, context)
}
err = isReady(context.Name, f.FederationClientset)
framework.ExpectNoError(err, fmt.Sprintf("unexpected error in verifying if cluster %s is ready: %+v", context.Name, err))
framework.Logf("Cluster %s is Ready", context.Name)
}
// waitForAllRegisteredClusters waits for all clusters defined in the e2e context to be registered,
// returning the ClusterList once the number of listed clusters equals clusterCount
func waitForAllRegisteredClusters(f *fedframework.Framework, clusterCount int) *federationapi.ClusterList {
var clusterList *federationapi.ClusterList
if err := wait.PollImmediate(framework.Poll, federatedClustersWaitTimeout, func() (bool, error) {
var err error
clusterList, err = f.FederationClientset.Federation().Clusters().List(metav1.ListOptions{})
if err != nil {
return false, err
}
framework.Logf("%d clusters registered, waiting for %d", len(clusterList.Items), clusterCount)
if len(clusterList.Items) == clusterCount {
return true, nil
}
return false, nil
}); err != nil {
framework.Failf("Failed to list registered clusters: %+v", err)
}
return clusterList
}
func createClientsetForCluster(c federationapi.Cluster, i int, userAgentName string) *kubeclientset.Clientset {
kubecfg, err := clientcmd.LoadFromFile(framework.TestContext.KubeConfig)
framework.ExpectNoError(err, "error loading KubeConfig: %v", err)
cfgOverride := &clientcmd.ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
Server: c.Spec.ServerAddressByClientCIDRs[0].ServerAddress,
},
}
ccfg := clientcmd.NewNonInteractiveClientConfig(*kubecfg, c.Name, cfgOverride, clientcmd.NewDefaultClientConfigLoadingRules())
cfg, err := ccfg.ClientConfig()
framework.ExpectNoError(err, "Error creating client config in cluster #%d (%q)", i, c.Name)
cfg.QPS = KubeAPIQPS
cfg.Burst = KubeAPIBurst
return kubeclientset.NewForConfigOrDie(restclient.AddUserAgent(cfg, userAgentName))
}
// Creates the federation namespace in all underlying clusters.
func createNamespaceInClusters(clusters map[string]*cluster, f *fedframework.Framework) {
func createNamespaceInClusters(clusters fedframework.ClusterMap, f *fedframework.Framework) {
nsName := f.FederationNamespace.Name
for name, c := range clusters {
// The e2e Framework created the required namespace in federation control plane, but we need to create it in all the others, if it doesn't yet exist.
@ -161,7 +93,7 @@ func createNamespaceInClusters(clusters map[string]*cluster, f *fedframework.Fra
}
_, err := c.Clientset.Core().Namespaces().Create(ns)
if err == nil {
c.namespaceCreated = true
c.NamespaceCreated = true
}
framework.ExpectNoError(err, "Couldn't create the namespace %s in cluster %q", nsName, name)
framework.Logf("Namespace %s created in cluster %q", nsName, name)
@ -173,10 +105,10 @@ func createNamespaceInClusters(clusters map[string]*cluster, f *fedframework.Fra
// Unregisters the given clusters from federation control plane.
// Also deletes the federation namespace from each cluster.
func unregisterClusters(clusters map[string]*cluster, f *fedframework.Framework) {
func unregisterClusters(clusters fedframework.ClusterMap, f *fedframework.Framework) {
nsName := f.FederationNamespace.Name
for name, c := range clusters {
if c.namespaceCreated {
if c.NamespaceCreated {
if _, err := c.Clientset.Core().Namespaces().Get(nsName, metav1.GetOptions{}); !errors.IsNotFound(err) {
err := c.Clientset.Core().Namespaces().Delete(nsName, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Couldn't delete the namespace %s in cluster %q: %v", nsName, name, err)
@ -194,47 +126,6 @@ func unregisterClusters(clusters map[string]*cluster, f *fedframework.Framework)
}
}
// waitForNamespaceInFederatedClusters waits for the federated namespace to be created in all registered clusters
func waitForNamespaceInFederatedClusters(clusters map[string]*cluster, nsName string, timeout time.Duration) {
for name, c := range clusters {
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
_, err := c.Clientset.Core().Namespaces().Get(nsName, metav1.GetOptions{})
if err != nil {
By(fmt.Sprintf("Waiting for namespace %q to be created in cluster %q, err: %v", nsName, name, err))
return false, nil
}
By(fmt.Sprintf("Namespace %q exists in cluster %q", nsName, name))
return true, nil
})
framework.ExpectNoError(err, "Failed to verify federated namespace %q creation in cluster %q", nsName, name)
}
}
// cannot be moved to util, since By and Expect must be called from within a Ginkgo test
func getRegisteredClusters(userAgentName string, f *fedframework.Framework) (map[string]*cluster, string) {
clusters := make(map[string]*cluster)
contexts := f.GetUnderlyingFederatedContexts()
By("Obtaining a list of all the clusters")
clusterList := waitForAllRegisteredClusters(f, len(contexts))
framework.Logf("Checking that %d clusters are Ready", len(contexts))
for _, context := range contexts {
clusterIsReadyOrFail(f, &context)
}
framework.Logf("%d clusters are Ready", len(contexts))
primaryClusterName := clusterList.Items[0].Name
By(fmt.Sprintf("Labeling %q as the first cluster", primaryClusterName))
for i, c := range clusterList.Items {
framework.Logf("Creating a clientset for the cluster %s", c.Name)
Expect(framework.TestContext.KubeConfig).ToNot(Equal(""), "KubeConfig must be specified to load clusters' client config")
clusters[c.Name] = &cluster{c.Name, createClientsetForCluster(c, i, userAgentName), false, nil}
}
waitForNamespaceInFederatedClusters(clusters, f.FederationNamespace.Name, federatedDefaultTestTimeout)
return clusters, primaryClusterName
}
// waitForServiceOrFail waits until a service is either present or absent in the cluster specified by clientset.
// If the condition is not met within timeout, it fails the calling test.
func waitForServiceOrFail(clientset *kubeclientset.Clientset, namespace string, service *v1.Service, present bool, timeout time.Duration) {
@ -259,10 +150,10 @@ func waitForServiceOrFail(clientset *kubeclientset.Clientset, namespace string,
}
// waitForServiceShardsOrFail waits for the service to appear in all clusters
func waitForServiceShardsOrFail(namespace string, service *v1.Service, clusters map[string]*cluster) {
func waitForServiceShardsOrFail(namespace string, service *v1.Service, clusters fedframework.ClusterMap) {
framework.Logf("Waiting for service %q in %d clusters", service.Name, len(clusters))
for _, c := range clusters {
waitForServiceOrFail(c.Clientset, namespace, service, true, federatedDefaultTestTimeout)
waitForServiceOrFail(c.Clientset, namespace, service, true, fedframework.FederatedDefaultTestTimeout)
}
}
@ -317,7 +208,7 @@ func deleteServiceOrFail(clientset *fedclientset.Clientset, namespace string, se
err := clientset.Services(namespace).Delete(serviceName, &metav1.DeleteOptions{OrphanDependents: orphanDependents})
framework.ExpectNoError(err, "Error deleting service %q from namespace %q", serviceName, namespace)
// Wait for the service to be deleted.
err = wait.Poll(5*time.Second, federatedDefaultTestTimeout, func() (bool, error) {
err = wait.Poll(5*time.Second, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
_, err := clientset.Core().Services(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true, nil
@ -330,12 +221,12 @@ func deleteServiceOrFail(clientset *fedclientset.Clientset, namespace string, se
}
}
func cleanupServiceShardsAndProviderResources(namespace string, service *v1.Service, clusters map[string]*cluster) {
func cleanupServiceShardsAndProviderResources(namespace string, service *v1.Service, clusters fedframework.ClusterMap) {
framework.Logf("Deleting service %q in %d clusters", service.Name, len(clusters))
for name, c := range clusters {
var cSvc *v1.Service
err := wait.PollImmediate(framework.Poll, federatedDefaultTestTimeout, func() (bool, error) {
err := wait.PollImmediate(framework.Poll, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
var err error
cSvc, err = c.Clientset.Services(namespace).Get(service.Name, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
@ -352,15 +243,15 @@ func cleanupServiceShardsAndProviderResources(namespace string, service *v1.Serv
})
if err != nil || cSvc == nil {
By(fmt.Sprintf("Failed to find service %q in namespace %q, in cluster %q in %s", service.Name, namespace, name, federatedDefaultTestTimeout))
By(fmt.Sprintf("Failed to find service %q in namespace %q, in cluster %q in %s", service.Name, namespace, name, fedframework.FederatedDefaultTestTimeout))
continue
}
err = cleanupServiceShard(c.Clientset, name, namespace, cSvc, federatedDefaultTestTimeout)
err = cleanupServiceShard(c.Clientset, name, namespace, cSvc, fedframework.FederatedDefaultTestTimeout)
if err != nil {
framework.Logf("Failed to delete service %q in namespace %q, in cluster %q: %v", service.Name, namespace, name, err)
}
err = cleanupServiceShardLoadBalancer(name, cSvc, federatedDefaultTestTimeout)
err = cleanupServiceShardLoadBalancer(name, cSvc, fedframework.FederatedDefaultTestTimeout)
if err != nil {
framework.Logf("Failed to delete cloud provider resources for service %q in namespace %q, in cluster %q", service.Name, namespace, name)
}
@ -495,7 +386,7 @@ func discoverService(f *fedframework.Framework, name string, exists bool, podNam
// createBackendPodsOrFail creates one backend pod in each cluster and records it in that cluster's BackendPod field.
// If creation of any pod fails, the test fails (possibly with a partially created set of pods). No retries are attempted.
func createBackendPodsOrFail(clusters map[string]*cluster, namespace string, name string) {
func createBackendPodsOrFail(clusters fedframework.ClusterMap, namespace string, name string) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@ -517,17 +408,17 @@ func createBackendPodsOrFail(clusters map[string]*cluster, namespace string, nam
createdPod, err := c.Clientset.Core().Pods(namespace).Create(pod)
framework.ExpectNoError(err, "Creating pod %q in namespace %q in cluster %q", name, namespace, name)
By(fmt.Sprintf("Successfully created pod %q in namespace %q in cluster %q: %v", pod.Name, namespace, name, *createdPod))
c.backendPod = createdPod
c.BackendPod = createdPod
}
}
// deleteOneBackendPodOrFail deletes exactly one backend pod which must not be nil
// The test fails if there are any errors.
func deleteOneBackendPodOrFail(c *cluster) {
pod := c.backendPod
func deleteOneBackendPodOrFail(c *fedframework.Cluster) {
pod := c.BackendPod
Expect(pod).ToNot(BeNil())
err := c.Clientset.Core().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
msgFmt := fmt.Sprintf("Deleting Pod %q in namespace %q in cluster %q %%v", pod.Name, pod.Namespace, c.name)
msgFmt := fmt.Sprintf("Deleting Pod %q in namespace %q in cluster %q %%v", pod.Name, pod.Namespace, c.Name)
if errors.IsNotFound(err) {
framework.Logf(msgFmt, "does not exist. No need to delete it.")
return
@ -538,11 +429,11 @@ func deleteOneBackendPodOrFail(c *cluster) {
// deleteBackendPodsOrFail deletes one pod from each cluster that has one.
// If deletion of any pod fails, the test fails (possibly with a partially deleted set of pods). No retries are attempted.
func deleteBackendPodsOrFail(clusters map[string]*cluster, namespace string) {
func deleteBackendPodsOrFail(clusters fedframework.ClusterMap, namespace string) {
for name, c := range clusters {
if c.backendPod != nil {
if c.BackendPod != nil {
deleteOneBackendPodOrFail(c)
c.backendPod = nil
c.BackendPod = nil
} else {
By(fmt.Sprintf("No backend pod to delete for cluster %q", name))
}
@ -552,7 +443,7 @@ func deleteBackendPodsOrFail(clusters map[string]*cluster, namespace string) {
// waitForReplicaSetToBeDeletedOrFail waits for the named ReplicaSet in namespace to be deleted.
// If the deletion fails, the enclosing test fails.
func waitForReplicaSetToBeDeletedOrFail(clientset *fedclientset.Clientset, namespace string, replicaSet string) {
err := wait.Poll(5*time.Second, federatedDefaultTestTimeout, func() (bool, error) {
err := wait.Poll(5*time.Second, fedframework.FederatedDefaultTestTimeout, func() (bool, error) {
_, err := clientset.Extensions().ReplicaSets(namespace).Get(replicaSet, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true, nil