Merge pull request #43087 from csbell/master

Automatic merge from submit-queue

[Federation][e2e] Provide less strict timeouts on destruction paths

The CI runs show that some of the destruction-path timeouts are too strict. This PR replaces several ad-hoc `wait.ForeverTestTimeout` waits with named timeout constants defined in one place. The goal is to reduce e2e flakiness.
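For reference, the destruction-path wait in question is the usual deletion poll: keep `Get`-ing the object until the apiserver returns NotFound, bounded by a timeout. Below is a minimal sketch of that pattern using the shared constants introduced here; the `waitForDeletion` helper and the toy `main` are illustrative stand-ins, not the actual e2e helpers, and only `wait.Poll`, `errors.IsNotFound`, and the timeout names/values come from this change.

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
)

// Per-resource timeouts kept in one place (values as in this change).
const (
	federatedServiceTimeout    = 5 * time.Minute
	federatedDeploymentTimeout = 5 * time.Minute
)

// waitForDeletion polls `get` every 5 seconds until it reports NotFound or the
// timeout expires. The real e2e helpers inline this logic with the federation
// clientset; the closure-based signature here is only for illustration.
func waitForDeletion(get func() error, timeout time.Duration) error {
	return wait.Poll(5*time.Second, timeout, func() (bool, error) {
		err := get()
		if err != nil && errors.IsNotFound(err) {
			return true, nil // object is gone: done waiting
		}
		if err != nil {
			return false, err // unexpected error: abort the wait
		}
		return false, nil // still present: poll again
	})
}

func main() {
	// Toy getter that reports NotFound immediately, just to exercise the helper.
	gone := func() error {
		return errors.NewNotFound(schema.GroupResource{Resource: "services"}, "federated-service")
	}
	fmt.Println("wait result:", waitForDeletion(gone, federatedServiceTimeout))
}
```

The substance of the change is only the bound: each deletion wait now reads a named, per-resource timeout from one shared `const` block instead of an ad-hoc `wait.ForeverTestTimeout` (or a multiple of it).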
Kubernetes Submit Queue, 2017-03-18 09:22:00 -07:00, committed by GitHub
commit 082c9a8fa5
3 changed files with 21 additions and 12 deletions


@@ -36,8 +36,7 @@ import (
)
const (
- FederationDeploymentName   = "federation-deployment"
- FederatedDeploymentTimeout = 120 * time.Second
+ FederationDeploymentName = "federation-deployment"
)
// Create/delete deployment api objects
@@ -186,7 +185,7 @@ func waitForDeploymentOrFail(c *fedclientset.Clientset, namespace string, deploy
}
func waitForDeployment(c *fedclientset.Clientset, namespace string, deploymentName string, clusters map[string]*cluster) error {
- err := wait.Poll(10*time.Second, FederatedDeploymentTimeout, func() (bool, error) {
+ err := wait.Poll(10*time.Second, federatedDeploymentTimeout, func() (bool, error) {
fdep, err := c.Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
@@ -260,7 +259,7 @@ func deleteDeploymentOrFail(clientset *fedclientset.Clientset, nsName string, de
}
// Wait for the deployment to be deleted.
- err = wait.Poll(5*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
+ err = wait.Poll(10*time.Second, federatedDeploymentTimeout, func() (bool, error) {
_, err := clientset.Extensions().Deployments(nsName).Get(deploymentName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true, nil


@@ -46,6 +46,7 @@ import (
const (
MaxRetriesOnFederatedApiserver = 3
FederatedIngressTimeout = 10 * time.Minute
+ FederatedIngressDeleteTimeout = 2 * time.Minute
FederatedIngressName = "federated-ingress"
FederatedIngressServiceName = "federated-ingress-service"
FederatedIngressTLSSecretName = "federated-ingress-tls-secret"
@@ -173,6 +174,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
nsName := f.FederationNamespace.Name
deleteAllIngressesOrFail(f.FederationClientset, nsName)
if secret != nil {
By("Deleting secret")
orphanDependents := false
deleteSecretOrFail(f.FederationClientset, ns, secret.Name, &orphanDependents)
secret = nil
@@ -180,7 +182,9 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
By("No secret to delete. Secret is nil")
}
if service != nil {
By("Deleting service")
deleteServiceOrFail(f.FederationClientset, ns, service.Name, nil)
By("Cleanup service shards and provider resources")
cleanupServiceShardsAndProviderResources(ns, service, clusters)
service = nil
} else {
@@ -229,11 +233,13 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
jig.ing = createIngressOrFail(f.FederationClientset, ns, service.Name, FederatedIngressTLSSecretName)
// wait for ingress objects sync
waitForIngressShardsOrFail(ns, jig.ing, clusters)
By(fmt.Sprintf("Ingress created as %v", jig.ing.Name))
})
AfterEach(func() {
deleteBackendPodsOrFail(clusters, ns)
if jig.ing != nil {
By(fmt.Sprintf("Deleting ingress %v on all clusters", jig.ing.Name))
deleteIngressOrFail(f.FederationClientset, ns, jig.ing.Name, nil)
for clusterName, cluster := range clusters {
deleteClusterIngressOrFail(clusterName, cluster.Clientset, ns, jig.ing.Name)
@@ -261,6 +267,7 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
})
It("should be able to connect to a federated ingress via its load balancer", func() {
By(fmt.Sprintf("Waiting for Federated Ingress on %v", jig.ing.Name))
// check the traffic on federation ingress
jig.waitForFederatedIngress()
})
@@ -387,7 +394,7 @@ func deleteIngressOrFail(clientset *fedclientset.Clientset, namespace string, in
err := clientset.Ingresses(namespace).Delete(ingressName, &metav1.DeleteOptions{OrphanDependents: orphanDependents})
framework.ExpectNoError(err, "Error deleting ingress %q from namespace %q", ingressName, namespace)
// Wait for the ingress to be deleted.
- err = wait.Poll(framework.Poll, wait.ForeverTestTimeout, func() (bool, error) {
+ err = wait.Poll(framework.Poll, FederatedIngressDeleteTimeout, func() (bool, error) {
_, err := clientset.Extensions().Ingresses(namespace).Get(ingressName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true, nil


@@ -52,7 +52,9 @@ var (
const (
federatedNamespaceTimeout = 5 * time.Minute
federatedReplicasetTimeout = 5 * time.Minute
+ federatedServiceTimeout = 5 * time.Minute
+ federatedDeploymentTimeout = 5 * time.Minute
federatedClustersWaitTimeout = 1 * time.Minute
// [30000, 32767] is the allowed default service nodeport range and our
@@ -318,7 +320,7 @@ func deleteServiceOrFail(clientset *fedclientset.Clientset, namespace string, se
err := clientset.Services(namespace).Delete(serviceName, &metav1.DeleteOptions{OrphanDependents: orphanDependents})
framework.ExpectNoError(err, "Error deleting service %q from namespace %q", serviceName, namespace)
// Wait for the service to be deleted.
- err = wait.Poll(5*time.Second, 10*wait.ForeverTestTimeout, func() (bool, error) {
+ err = wait.Poll(5*time.Second, federatedServiceTimeout, func() (bool, error) {
_, err := clientset.Core().Services(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true, nil
@@ -401,7 +403,7 @@ func cleanupServiceShardLoadBalancer(clusterName string, service *v1.Service, ti
err := lbi.EnsureLoadBalancerDeleted(clusterName, internalSvc)
if err != nil {
// Deletion failed with an error, try again.
framework.Logf("Failed to delete cloud provider resources for service %q in namespace %q, in cluster %q", service.Name, service.Namespace, clusterName)
framework.Logf("Failed to delete cloud provider resources for service %q in namespace %q, in cluster %q: %v", service.Name, service.Namespace, clusterName, err)
return false, nil
}
By(fmt.Sprintf("Cloud provider resources for Service %q in namespace %q in cluster %q deleted", service.Name, service.Namespace, clusterName))
@@ -527,12 +529,13 @@ func deleteOneBackendPodOrFail(c *cluster) {
pod := c.backendPod
Expect(pod).ToNot(BeNil())
err := c.Clientset.Core().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
+ msgFmt := fmt.Sprintf("Deleting Pod %q in namespace %q in cluster %q %%v", pod.Name, pod.Namespace, c.name)
if errors.IsNotFound(err) {
By(fmt.Sprintf("Pod %q in namespace %q in cluster %q does not exist. No need to delete it.", pod.Name, pod.Namespace, c.name))
} else {
framework.ExpectNoError(err, "Deleting pod %q in namespace %q from cluster %q", pod.Name, pod.Namespace, c.name)
framework.Logf(msgFmt, "does not exist. No need to delete it.")
return
}
By(fmt.Sprintf("Backend pod %q in namespace %q in cluster %q deleted or does not exist", pod.Name, pod.Namespace, c.name))
framework.ExpectNoError(err, msgFmt, "")
framework.Logf(msgFmt, "was deleted")
}
// deleteBackendPodsOrFail deletes one pod from each cluster that has one.
@@ -551,7 +554,7 @@ func deleteBackendPodsOrFail(clusters map[string]*cluster, namespace string) {
// waitForReplicaSetToBeDeletedOrFail waits for the named ReplicaSet in namespace to be deleted.
// If the deletion fails, the enclosing test fails.
func waitForReplicaSetToBeDeletedOrFail(clientset *fedclientset.Clientset, namespace string, replicaSet string) {
- err := wait.Poll(5*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
+ err := wait.Poll(5*time.Second, federatedReplicasetTimeout, func() (bool, error) {
_, err := clientset.Extensions().ReplicaSets(namespace).Get(replicaSet, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true, nil
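One detail worth calling out in the `deleteOneBackendPodOrFail` rewrite above is the doubled percent sign in `msgFmt`: the first `fmt.Sprintf` fills the `%q` verbs and collapses `%%v` into a literal `%v`, so the resulting string can be reused as a format string by the later log and assertion calls. A standalone sketch of the same trick follows, with plain `log.Printf` standing in for the `framework` helpers and made-up sample values:

```go
package main

import (
	"fmt"
	"log"
)

func main() {
	pod, namespace, cluster := "backend-pod", "federation-e2e", "cluster-1"

	// First pass: the %q verbs are filled in now; %%v survives as a literal %v placeholder.
	msgFmt := fmt.Sprintf("Deleting Pod %q in namespace %q in cluster %q %%v", pod, namespace, cluster)

	// Second pass: each call site supplies the outcome for the remaining %v.
	log.Printf(msgFmt, "does not exist. No need to delete it.")
	log.Printf(msgFmt, "was deleted")
}
```

This keeps the skip, failure, and success messages consistent without repeating the pod/namespace/cluster triple at every call site.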