From 3806d386df7a2d35db03a42175d916d3444f93ba Mon Sep 17 00:00:00 2001
From: David Ashpole
Date: Wed, 8 Mar 2017 14:39:51 -0800
Subject: [PATCH] use default timeout for deletion

---
 test/e2e/common/pods.go                 | 2 +-
 test/e2e/framework/pods.go              | 2 ++
 test/e2e/framework/util.go              | 9 +++------
 test/e2e/generated_clientset.go         | 2 +-
 test/e2e_node/critical_pod_test.go      | 8 ++++----
 test/e2e_node/disk_eviction_test.go     | 9 +++------
 test/e2e_node/garbage_collector_test.go | 2 +-
 test/e2e_node/gpus.go                   | 5 ++---
 test/e2e_node/inode_eviction_test.go    | 2 +-
 test/e2e_node/lifecycle_hook_test.go    | 4 ++--
 10 files changed, 20 insertions(+), 25 deletions(-)

diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go
index 1a3ef6b5257..0abf9b658a9 100644
--- a/test/e2e/common/pods.go
+++ b/test/e2e/common/pods.go
@@ -234,7 +234,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 		By("verifying pod deletion was observed")
 		deleted := false
 		var lastPod *v1.Pod
-		timer := time.After(2 * time.Minute)
+		timer := time.After(framework.DefaultPodDeletionTimeout)
 		for !deleted {
 			select {
 			case event, _ := <-w.ResultChan():
diff --git a/test/e2e/framework/pods.go b/test/e2e/framework/pods.go
index 17430ddef48..a6aecb3b396 100644
--- a/test/e2e/framework/pods.go
+++ b/test/e2e/framework/pods.go
@@ -37,6 +37,8 @@ import (
 	. "github.com/onsi/gomega"
 )
 
+const DefaultPodDeletionTimeout = 3 * time.Minute
+
 // ImageWhiteList is the images used in the current test suite. It should be initialized in test suite and
 // the images in the white list should be pre-pulled in the test suite. Currently, this is only used by
 // node e2e test.
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index 2416a94b47d..f7e3fc335c9 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -103,9 +103,6 @@ const (
 	// TODO: Make this 30 seconds once #4566 is resolved.
 	PodStartTimeout = 5 * time.Minute
 
-	// How long to wait for the pod to no longer be running
-	podNoLongerRunningTimeout = 30 * time.Second
-
 	// If there are any orphaned namespaces to clean up, this test is running
 	// on a long lived cluster. A long wait here is preferably to spurious test
 	// failures caused by leaked resources from a previous test run.
@@ -1252,10 +1249,10 @@ func podRunning(c clientset.Interface, podName, namespace string) wait.Condition
 	}
 }
 
-// Waits default amount of time (podNoLongerRunningTimeout) for the specified pod to stop running.
+// Waits default amount of time (DefaultPodDeletionTimeout) for the specified pod to stop running.
 // Returns an error if timeout occurs first.
 func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string) error {
-	return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, podNoLongerRunningTimeout)
+	return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, DefaultPodDeletionTimeout)
 }
 
 func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
@@ -2214,7 +2211,7 @@ func (f *Framework) MatchContainerOutput(
 	createdPod := podClient.Create(pod)
 	defer func() {
 		By("delete the pod")
-		podClient.DeleteSync(createdPod.Name, &metav1.DeleteOptions{}, podNoLongerRunningTimeout)
+		podClient.DeleteSync(createdPod.Name, &metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
 	}()
 
 	// Wait for client pod to complete.
diff --git a/test/e2e/generated_clientset.go b/test/e2e/generated_clientset.go
index c060a717f68..03ca5655f3f 100644
--- a/test/e2e/generated_clientset.go
+++ b/test/e2e/generated_clientset.go
@@ -102,7 +102,7 @@ func observeCreation(w watch.Interface) {
 func observeObjectDeletion(w watch.Interface) (obj runtime.Object) {
 	deleted := false
 	timeout := false
-	timer := time.After(60 * time.Second)
+	timer := time.After(framework.DefaultPodDeletionTimeout)
 	for !deleted && !timeout {
 		select {
 		case event, _ := <-w.ResultChan():
diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go
index bdbbdadc8dd..7dc79c83bc3 100644
--- a/test/e2e_node/critical_pod_test.go
+++ b/test/e2e_node/critical_pod_test.go
@@ -94,10 +94,10 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive]", func() {
 	})
 	AfterEach(func() {
 		// Delete Pods
-		f.PodClient().DeleteSync(guaranteedPodName, &metav1.DeleteOptions{}, podDisappearTimeout)
-		f.PodClient().DeleteSync(burstablePodName, &metav1.DeleteOptions{}, podDisappearTimeout)
-		f.PodClient().DeleteSync(bestEffortPodName, &metav1.DeleteOptions{}, podDisappearTimeout)
-		f.PodClientNS(kubeapi.NamespaceSystem).DeleteSyncInNamespace(criticalPodName, kubeapi.NamespaceSystem, &metav1.DeleteOptions{}, podDisappearTimeout)
+		f.PodClient().DeleteSync(guaranteedPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+		f.PodClient().DeleteSync(burstablePodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+		f.PodClient().DeleteSync(bestEffortPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+		f.PodClientNS(kubeapi.NamespaceSystem).DeleteSyncInNamespace(criticalPodName, kubeapi.NamespaceSystem, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 		// Log Events
 		logPodEvents(f)
 		logNodeEvents(f)
diff --git a/test/e2e_node/disk_eviction_test.go b/test/e2e_node/disk_eviction_test.go
index e92d9bf995a..0fd663106ee 100644
--- a/test/e2e_node/disk_eviction_test.go
+++ b/test/e2e_node/disk_eviction_test.go
@@ -35,9 +35,6 @@ const (
 	// podCheckInterval is the interval seconds between pod status checks.
 	podCheckInterval = time.Second * 2
 
-	// podDisappearTimeout is the timeout to wait node disappear.
-	podDisappearTimeout = time.Minute * 2
-
 	// containerGCPeriod is the period of container garbage collect loop. It should be the same
 	// with ContainerGCPeriod in kubelet.go. However we don't want to include kubelet package
 	// directly which will introduce a lot more dependencies.
@@ -97,9 +94,9 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
 		if !isImageSupported() || !evictionOptionIsSet() { // Skip the after each
 			return
 		}
-		podClient.DeleteSync(busyPodName, &metav1.DeleteOptions{}, podDisappearTimeout)
-		podClient.DeleteSync(idlePodName, &metav1.DeleteOptions{}, podDisappearTimeout)
-		podClient.DeleteSync(verifyPodName, &metav1.DeleteOptions{}, podDisappearTimeout)
+		podClient.DeleteSync(busyPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+		podClient.DeleteSync(idlePodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+		podClient.DeleteSync(verifyPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 
 		// Wait for 2 container gc loop to ensure that the containers are deleted. The containers
 		// created in this test consume a lot of disk, we don't want them to trigger disk eviction
diff --git a/test/e2e_node/garbage_collector_test.go b/test/e2e_node/garbage_collector_test.go
index 88536828a97..80fea84de10 100644
--- a/test/e2e_node/garbage_collector_test.go
+++ b/test/e2e_node/garbage_collector_test.go
@@ -231,7 +231,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
 		AfterEach(func() {
 			for _, pod := range test.testPods {
 				By(fmt.Sprintf("Deleting Pod %v", pod.podName))
-				f.PodClient().DeleteSync(pod.podName, &metav1.DeleteOptions{}, podDisappearTimeout)
+				f.PodClient().DeleteSync(pod.podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 			}
 
 			By("Making sure all containers get cleaned up")
diff --git a/test/e2e_node/gpus.go b/test/e2e_node/gpus.go
index d8c651f2e8c..73763237bb9 100644
--- a/test/e2e_node/gpus.go
+++ b/test/e2e_node/gpus.go
@@ -18,7 +18,6 @@ package e2e_node
 
 import (
 	"fmt"
-	"time"
 
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -98,10 +97,10 @@ var _ = framework.KubeDescribe("GPU [Serial]", func() {
 		deleteOptions := metav1.DeleteOptions{
 			GracePeriodSeconds: &gp,
 		}
-		f.PodClient().DeleteSync(podSuccess.Name, &deleteOptions, 30*time.Second)
+		f.PodClient().DeleteSync(podSuccess.Name, &deleteOptions, framework.DefaultPodDeletionTimeout)
 
 		By("attempting to start the failed pod again")
-		f.PodClient().DeleteSync(podFailure.Name, &deleteOptions, 10*time.Second)
+		f.PodClient().DeleteSync(podFailure.Name, &deleteOptions, framework.DefaultPodDeletionTimeout)
 		podFailure = f.PodClient().CreateSync(podFailure)
 
 		By("Checking if the pod outputted Success to its logs")
diff --git a/test/e2e_node/inode_eviction_test.go b/test/e2e_node/inode_eviction_test.go
index 5d262cf93dd..3528801d751 100644
--- a/test/e2e_node/inode_eviction_test.go
+++ b/test/e2e_node/inode_eviction_test.go
@@ -289,7 +289,7 @@ func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs
 		By("deleting pods")
 		for _, spec := range podTestSpecs {
 			By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
-			f.PodClient().DeleteSync(spec.pod.Name, &metav1.DeleteOptions{}, podDisappearTimeout)
+			f.PodClient().DeleteSync(spec.pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 		}
 
 		if CurrentGinkgoTestDescription().Failed {
diff --git a/test/e2e_node/lifecycle_hook_test.go b/test/e2e_node/lifecycle_hook_test.go
index c54cb4927e0..17dea852666 100644
--- a/test/e2e_node/lifecycle_hook_test.go
+++ b/test/e2e_node/lifecycle_hook_test.go
@@ -60,7 +60,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 				podClient.WaitForSuccess(podCheckHook.Name, postStartWaitTimeout)
 			}
 			By("delete the pod with lifecycle hook")
-			podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), podWaitTimeout)
+			podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), framework.DefaultPodDeletionTimeout)
 			if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
 				By("create the hook check pod")
 				podClient.Create(podCheckHook)
@@ -144,7 +144,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 				}, postStartWaitTimeout, podCheckInterval).Should(BeNil())
 			}
 			By("delete the pod with lifecycle hook")
-			podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), podWaitTimeout)
+			podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), framework.DefaultPodDeletionTimeout)
 			if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
 				By("check prestop hook")
 				Eventually(func() error {
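
Reviewer note, not part of the patch: the sketch below illustrates how a node e2e test would pick up the shared framework.DefaultPodDeletionTimeout (3 minutes, added in test/e2e/framework/pods.go above) for pod cleanup instead of a file-local constant such as podDisappearTimeout. The test name, pod name, and image are placeholders chosen for the example; only DeleteSync, CreateSync, and the new constant come from this change and the surrounding framework.

// Illustrative sketch only: minimal cleanup pattern using the shared timeout.
// Pod name and image below are placeholders, not values from the patch.
package e2e_node

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
)

var _ = framework.KubeDescribe("Example cleanup", func() {
	f := framework.NewDefaultFramework("example-cleanup")

	It("deletes its pod within the default deletion timeout", func() {
		pod := f.PodClient().CreateSync(&v1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "example-pod"}, // placeholder name
			Spec: v1.PodSpec{
				Containers: []v1.Container{{
					Name:    "sleeper",
					Image:   "gcr.io/google_containers/busybox:1.24", // placeholder image
					Command: []string{"sleep", "3600"},
				}},
			},
		})
		// DeleteSync issues the delete and blocks until the pod object is gone,
		// failing the test if framework.DefaultPodDeletionTimeout elapses first.
		f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
	})
})

Using the one framework-level default keeps every caller's deletion wait consistent and avoids the scattered 10s/30s/60s/2m values this patch removes.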