Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #42734 from dashpole/deletion_timeout
Automatic merge from submit-queue (batch tested with PRs 42734, 42745, 42758, 42814, 42694)

Create DefaultPodDeletionTimeout for e2e tests

In our e2e and e2e_node tests, we had a number of different timeouts for deletion. Recent changes to the way deletion works (#41644, #41456) have resulted in some timeouts in e2e tests. #42661 was the most recent fix for this.

Most of these tests are not meant to test pod deletion latency, but rather just to clean up pods after a test is finished. For this reason, we should change all these tests to use a standard, fairly high timeout for deletion.

cc @vishh @Random-Liu
This commit is contained in: commit 7c08e817a5
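For context, the diff below applies one pattern throughout: define a single, generous pod-deletion timeout and pass it to every DeleteSync-style cleanup call instead of the ad-hoc 10s/30s/60s/2m values the tests used before. The standalone Go sketch below illustrates only that pattern; deleteSync and podGone are hypothetical stand-ins, not the real framework.PodClient API.

// Standalone sketch of the "one shared deletion timeout" pattern.
// deleteSync and podGone are assumed names for illustration only.
package main

import (
	"fmt"
	"time"
)

// Mirrors the constant added in this PR: a single, fairly high timeout
// used by every test cleanup instead of scattered per-test values.
const DefaultPodDeletionTimeout = 3 * time.Minute

// deleteSync is a hypothetical stand-in for a DeleteSync helper: it would
// request deletion, then poll until the pod is gone or the timeout expires.
func deleteSync(podName string, timeout time.Duration, podGone func(string) bool) error {
	deadline := time.After(timeout)
	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-deadline:
			return fmt.Errorf("pod %q was not deleted within %v", podName, timeout)
		case <-ticker.C:
			if podGone(podName) {
				return nil
			}
		}
	}
}

func main() {
	// In a real test this would query the API server; here it is a stub.
	podGone := func(string) bool { return true }

	// Every cleanup site passes the same constant, so a slower deletion path
	// just takes longer to clean up instead of flaking tests that were never
	// meant to measure deletion latency.
	if err := deleteSync("test-pod", DefaultPodDeletionTimeout, podGone); err != nil {
		fmt.Println(err)
	}
}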
@@ -234,7 +234,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 		By("verifying pod deletion was observed")
 		deleted := false
 		var lastPod *v1.Pod
-		timer := time.After(2 * time.Minute)
+		timer := time.After(framework.DefaultPodDeletionTimeout)
 		for !deleted {
 			select {
 			case event, _ := <-w.ResultChan():
@@ -37,6 +37,8 @@ import (
 	. "github.com/onsi/gomega"
 )
 
+const DefaultPodDeletionTimeout = 3 * time.Minute
+
 // ImageWhiteList is the images used in the current test suite. It should be initialized in test suite and
 // the images in the white list should be pre-pulled in the test suite. Currently, this is only used by
 // node e2e test.
@@ -103,9 +103,6 @@ const (
 	// TODO: Make this 30 seconds once #4566 is resolved.
 	PodStartTimeout = 5 * time.Minute
 
-	// How long to wait for the pod to no longer be running
-	podNoLongerRunningTimeout = 30 * time.Second
-
 	// If there are any orphaned namespaces to clean up, this test is running
 	// on a long lived cluster. A long wait here is preferably to spurious test
 	// failures caused by leaked resources from a previous test run.
@@ -1252,10 +1249,10 @@ func podRunning(c clientset.Interface, podName, namespace string) wait.Condition
 	}
 }
 
-// Waits default amount of time (podNoLongerRunningTimeout) for the specified pod to stop running.
+// Waits default amount of time (DefaultPodDeletionTimeout) for the specified pod to stop running.
 // Returns an error if timeout occurs first.
 func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string) error {
-	return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, podNoLongerRunningTimeout)
+	return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, DefaultPodDeletionTimeout)
 }
 
 func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
@@ -2214,7 +2211,7 @@ func (f *Framework) MatchContainerOutput(
 	createdPod := podClient.Create(pod)
 	defer func() {
 		By("delete the pod")
-		podClient.DeleteSync(createdPod.Name, &metav1.DeleteOptions{}, podNoLongerRunningTimeout)
+		podClient.DeleteSync(createdPod.Name, &metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
 	}()
 
 	// Wait for client pod to complete.
@@ -105,7 +105,7 @@ func observeObjectDeletion(w watch.Interface) (obj runtime.Object) {
 	framework.Logf("Starting to observe pod deletion")
 	deleted := false
 	timeout := false
-	timer := time.After(60 * time.Second)
+	timer := time.After(framework.DefaultPodDeletionTimeout)
 	for !deleted && !timeout {
 		select {
 		case event, normal := <-w.ResultChan():
@@ -94,10 +94,10 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive]", func() {
 	})
 	AfterEach(func() {
 		// Delete Pods
-		f.PodClient().DeleteSync(guaranteedPodName, &metav1.DeleteOptions{}, podDisappearTimeout)
-		f.PodClient().DeleteSync(burstablePodName, &metav1.DeleteOptions{}, podDisappearTimeout)
-		f.PodClient().DeleteSync(bestEffortPodName, &metav1.DeleteOptions{}, podDisappearTimeout)
-		f.PodClientNS(kubeapi.NamespaceSystem).DeleteSyncInNamespace(criticalPodName, kubeapi.NamespaceSystem, &metav1.DeleteOptions{}, podDisappearTimeout)
+		f.PodClient().DeleteSync(guaranteedPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+		f.PodClient().DeleteSync(burstablePodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+		f.PodClient().DeleteSync(bestEffortPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+		f.PodClientNS(kubeapi.NamespaceSystem).DeleteSyncInNamespace(criticalPodName, kubeapi.NamespaceSystem, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 		// Log Events
 		logPodEvents(f)
 		logNodeEvents(f)
@@ -35,9 +35,6 @@ const (
 	// podCheckInterval is the interval seconds between pod status checks.
 	podCheckInterval = time.Second * 2
 
-	// podDisappearTimeout is the timeout to wait node disappear.
-	podDisappearTimeout = time.Minute * 2
-
 	// containerGCPeriod is the period of container garbage collect loop. It should be the same
 	// with ContainerGCPeriod in kubelet.go. However we don't want to include kubelet package
 	// directly which will introduce a lot more dependencies.
@@ -97,9 +94,9 @@ var _ = framework.KubeDescribe("Kubelet Eviction Manager [Serial] [Disruptive]",
 		if !isImageSupported() || !evictionOptionIsSet() { // Skip the after each
 			return
 		}
-		podClient.DeleteSync(busyPodName, &metav1.DeleteOptions{}, podDisappearTimeout)
-		podClient.DeleteSync(idlePodName, &metav1.DeleteOptions{}, podDisappearTimeout)
-		podClient.DeleteSync(verifyPodName, &metav1.DeleteOptions{}, podDisappearTimeout)
+		podClient.DeleteSync(busyPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+		podClient.DeleteSync(idlePodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
+		podClient.DeleteSync(verifyPodName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 
 		// Wait for 2 container gc loop to ensure that the containers are deleted. The containers
 		// created in this test consume a lot of disk, we don't want them to trigger disk eviction
@@ -231,7 +231,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
 	AfterEach(func() {
 		for _, pod := range test.testPods {
 			By(fmt.Sprintf("Deleting Pod %v", pod.podName))
-			f.PodClient().DeleteSync(pod.podName, &metav1.DeleteOptions{}, podDisappearTimeout)
+			f.PodClient().DeleteSync(pod.podName, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 		}
 
 		By("Making sure all containers get cleaned up")
@@ -18,7 +18,6 @@ package e2e_node
 
 import (
 	"fmt"
-	"time"
 
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -98,10 +97,10 @@ var _ = framework.KubeDescribe("GPU [Serial]", func() {
 		deleteOptions := metav1.DeleteOptions{
 			GracePeriodSeconds: &gp,
 		}
-		f.PodClient().DeleteSync(podSuccess.Name, &deleteOptions, 30*time.Second)
+		f.PodClient().DeleteSync(podSuccess.Name, &deleteOptions, framework.DefaultPodDeletionTimeout)
 
 		By("attempting to start the failed pod again")
-		f.PodClient().DeleteSync(podFailure.Name, &deleteOptions, 10*time.Second)
+		f.PodClient().DeleteSync(podFailure.Name, &deleteOptions, framework.DefaultPodDeletionTimeout)
 		podFailure = f.PodClient().CreateSync(podFailure)
 
 		By("Checking if the pod outputted Success to its logs")
@@ -289,7 +289,7 @@ func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs
 			By("deleting pods")
 			for _, spec := range podTestSpecs {
 				By(fmt.Sprintf("deleting pod: %s", spec.pod.Name))
-				f.PodClient().DeleteSync(spec.pod.Name, &metav1.DeleteOptions{}, podDisappearTimeout)
+				f.PodClient().DeleteSync(spec.pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
 			}
 
 			if CurrentGinkgoTestDescription().Failed {
@@ -60,7 +60,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 				podClient.WaitForSuccess(podCheckHook.Name, postStartWaitTimeout)
 			}
 			By("delete the pod with lifecycle hook")
-			podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), podWaitTimeout)
+			podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), framework.DefaultPodDeletionTimeout)
 			if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
 				By("create the hook check pod")
 				podClient.Create(podCheckHook)
@@ -144,7 +144,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
 				}, postStartWaitTimeout, podCheckInterval).Should(BeNil())
 			}
 			By("delete the pod with lifecycle hook")
-			podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), podWaitTimeout)
+			podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), framework.DefaultPodDeletionTimeout)
 			if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
 				By("check prestop hook")
 				Eventually(func() error {