e2e: replace WaitForPodToDisappear with WaitForPodNotFoundInNamespace

WaitForPodToDisappear was always called in a way that listed all pods in the
namespace, which made it less efficient than getting just the one pod it was
checking for.

Customizing the poll interval turned out not to be useful in practice, so the
function can be replaced with WaitForPodNotFoundInNamespace.
Patrick Ohly 2023-01-20 12:45:01 +01:00
parent 45d4631069
commit 9df3e2a47a
11 changed files with 12 additions and 59 deletions
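
Every call site in the diffs below follows the same mechanical rewrite. Schematically, with placeholder variables standing in for whatever each call site has in scope:

// Before: list-based wait; the caller supplies a label selector and a poll interval.
err := e2epod.WaitForPodToDisappear(ctx, c, ns, podName, labels.Everything(), framework.Poll, timeout)

// After: get-based wait; note the argument order, podName now precedes the namespace.
err := e2epod.WaitForPodNotFoundInNamespace(ctx, c, podName, ns, timeout)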

View File

@@ -32,7 +32,6 @@ import (
     v1 "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/util/intstr"
     utilnet "k8s.io/apimachinery/pkg/util/net"
     "k8s.io/apimachinery/pkg/util/sets"
@@ -889,7 +888,7 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod(ctx context.Context) {
     framework.ExpectNoError(config.getPodClient().Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)))
     config.EndpointPods = config.EndpointPods[1:]
     // wait for pod being deleted.
-    err := e2epod.WaitForPodToDisappear(ctx, config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
+    err := e2epod.WaitForPodNotFoundInNamespace(ctx, config.f.ClientSet, pod.Name, config.Namespace, wait.ForeverTestTimeout)
     if err != nil {
         framework.Failf("Failed to delete %s pod: %v", pod.Name, err)
     }

View File

@@ -27,7 +27,6 @@ import (
     v1 "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/strategicpatch"
@@ -182,8 +181,7 @@ func (c *PodClient) DeleteSync(ctx context.Context, name string, options metav1.
     if err != nil && !apierrors.IsNotFound(err) {
         framework.Failf("Failed to delete pod %q: %v", name, err)
     }
-    gomega.Expect(WaitForPodToDisappear(ctx, c.f.ClientSet, namespace, name, labels.Everything(),
-        2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name)
+    framework.ExpectNoError(WaitForPodNotFoundInNamespace(ctx, c.f.ClientSet, name, namespace, timeout), "wait for pod %q to disappear", name)
 }

 // mungeSpec apply test-suite specific transformations to the pod spec.
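
Callers of DeleteSync are unaffected by this swap: the helper still deletes the pod and blocks until it is gone, it just waits more efficiently now. A typical invocation (the pod name is illustrative; the full signature is truncated in the hunk header above):

e2epod.NewPodClient(f).DeleteSync(ctx, "test-pod", metav1.DeleteOptions{}, f.Timeouts.PodDelete)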

View File

@@ -591,42 +591,6 @@ func WaitForPodNotFoundInNamespace(ctx context.Context, c clientset.Interface, podName, ns string, timeout time.Duration) error {
     return maybeTimeoutError(err, "waiting for pod %s not found", podIdentifier(ns, podName))
 }

-// WaitForPodToDisappear waits the given timeout duration for the specified pod to disappear.
-func WaitForPodToDisappear(ctx context.Context, c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
-    var lastPod *v1.Pod
-    err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
-        framework.Logf("Waiting for pod %s to disappear", podName)
-        options := metav1.ListOptions{LabelSelector: label.String()}
-        pods, err := c.CoreV1().Pods(ns).List(ctx, options)
-        if err != nil {
-            return handleWaitingAPIError(err, true, "listing pods")
-        }
-        found := false
-        for i, pod := range pods.Items {
-            if pod.Name == podName {
-                framework.Logf("Pod %s still exists", podName)
-                found = true
-                lastPod = &(pods.Items[i])
-                break
-            }
-        }
-        if !found {
-            framework.Logf("Pod %s no longer exists", podName)
-            return true, nil
-        }
-        return false, nil
-    })
-    if err == nil {
-        return nil
-    }
-    if IsTimeout(err) {
-        return TimeoutError(fmt.Sprintf("timed out while waiting for pod %s to disappear", podIdentifier(ns, podName)),
-            lastPod,
-        )
-    }
-    return maybeTimeoutError(err, "waiting for pod %s to disappear", podIdentifier(ns, podName))
-}
-
 // PodsResponding waits for the pods to response.
 func PodsResponding(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error {
     ginkgo.By("trying to dial each unique pod")
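
The removed helper above polled List with a label selector and scanned the result for one name. Its replacement polls Get on the single pod instead. A minimal sketch of that pattern, assuming a fixed 2-second poll (the helper name is hypothetical; this is not the framework's verbatim WaitForPodNotFoundInNamespace):

package podwait

import (
    "context"
    "time"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    clientset "k8s.io/client-go/kubernetes"
)

// waitForPodGone polls Get on the single pod until the API server
// returns NotFound, instead of listing every pod in the namespace.
func waitForPodGone(ctx context.Context, c clientset.Interface, podName, ns string, timeout time.Duration) error {
    return wait.PollImmediateWithContext(ctx, 2*time.Second, timeout, func(ctx context.Context) (bool, error) {
        _, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
        if apierrors.IsNotFound(err) {
            return true, nil // pod is gone; done waiting
        }
        if err != nil {
            return false, err // unexpected API error; abort the poll
        }
        return false, nil // pod still exists; keep polling
    })
}

In this sketch a NotFound error is the success signal and any other error aborts the poll; the real framework helper additionally wraps timeout errors and may retry transient API errors.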

View File

@@ -51,7 +51,6 @@ import (
     v1 "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
     clientexec "k8s.io/client-go/util/exec"
@@ -464,7 +463,7 @@ func runVolumeTesterPod(ctx context.Context, client clientset.Interface, timeout
     }
     if err != nil {
         e2epod.DeletePodOrFail(ctx, client, clientPod.Namespace, clientPod.Name)
-        _ = e2epod.WaitForPodToDisappear(ctx, client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
+        _ = e2epod.WaitForPodNotFoundInNamespace(ctx, client, clientPod.Name, clientPod.Namespace, timeouts.PodDelete)
         return nil, err
     }
     return clientPod, nil
@@ -542,7 +541,7 @@ func testVolumeClient(ctx context.Context, f *framework.Framework, config TestCo
         // testVolumeClient might get used more than once per test, therefore
         // we have to clean up before returning.
         e2epod.DeletePodOrFail(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name)
-        framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
+        framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, clientPod.Name, clientPod.Namespace, timeouts.PodDelete))
     }()

     testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)
@@ -577,7 +576,7 @@ func InjectContent(ctx context.Context, f *framework.Framework, config TestConfi
         // This pod must get deleted before the function returns becaue the test relies on
         // the volume not being in use.
         e2epod.DeletePodOrFail(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name)
-        framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
+        framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, injectorPod.Name, injectorPod.Namespace, timeouts.PodDelete))
     }()

     ginkgo.By("Writing text file contents in the container.")

View File

@@ -734,7 +734,7 @@ metadata:
             if !strings.Contains(ee.String(), "timed out") {
                 framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
             }
-            framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, 2*v1.DefaultTerminationGracePeriodSeconds*time.Second))
+            framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, "failure-3", ns, 2*v1.DefaultTerminationGracePeriodSeconds*time.Second))
         })

         ginkgo.It("[Slow] running a failing command with --leave-stdin-open", func(ctx context.Context) {

View File

@@ -33,7 +33,6 @@ import (
     policyv1 "k8s.io/api/policy/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/apimachinery/pkg/util/wait"
@@ -196,7 +195,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
             ginkgo.By("deleting host0Pod") // delete this pod before creating next pod
             framework.ExpectNoError(podClient.Delete(ctx, host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
             framework.Logf("deleted host0Pod %q", host0Pod.Name)
-            e2epod.WaitForPodToDisappear(ctx, cs, host0Pod.Namespace, host0Pod.Name, labels.Everything(), framework.Poll, f.Timeouts.PodDelete)
+            e2epod.WaitForPodNotFoundInNamespace(ctx, cs, host0Pod.Name, host0Pod.Namespace, f.Timeouts.PodDelete)
             framework.Logf("deleted host0Pod %q disappeared", host0Pod.Name)
         }

View File

@@ -33,7 +33,6 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/client-go/dynamic"
     clientset "k8s.io/client-go/kubernetes"
@@ -846,7 +845,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co
     framework.ExpectNoError(err)
     ginkgo.DeferCleanup(func(ctx context.Context) error {
         e2epod.DeletePodOrFail(ctx, t.Client, pod.Namespace, pod.Name)
-        return e2epod.WaitForPodToDisappear(ctx, t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, t.Timeouts.PodDelete)
+        return e2epod.WaitForPodNotFoundInNamespace(ctx, t.Client, pod.Name, pod.Namespace, t.Timeouts.PodDelete)
     })
     if expectUnschedulable {
         // Verify that no claims are provisioned.

View File

@@ -24,7 +24,6 @@ import (
     "github.com/onsi/ginkgo/v2"
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/labels"
     utilerrors "k8s.io/apimachinery/pkg/util/errors"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -101,7 +100,7 @@ func PodsUseStaticPVsOrFail(ctx context.Context, f *framework.Framework, podCoun
         go func(config *staticPVTestConfig) {
             defer ginkgo.GinkgoRecover()
             defer wg.Done()
-            err := e2epod.WaitForPodToDisappear(ctx, c, ns, config.pod.Name, labels.Everything(), framework.Poll, f.Timeouts.PodDelete)
+            err := e2epod.WaitForPodNotFoundInNamespace(ctx, c, config.pod.Name, ns, f.Timeouts.PodDelete)
             framework.ExpectNoError(err, "while waiting for pod to disappear")
             errs := e2epv.PVPVCCleanup(ctx, c, ns, config.pv, config.pvc)
             framework.ExpectNoError(utilerrors.NewAggregate(errs), "while cleaning up PVs and PVCs")

View File

@@ -276,8 +276,7 @@ func deletePodsSync(ctx context.Context, f *framework.Framework, pods []*v1.Pod)

             err := e2epod.NewPodClient(f).Delete(ctx, pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30))
             framework.ExpectNoError(err)
-            err = e2epod.WaitForPodToDisappear(ctx, f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
-                30*time.Second, 10*time.Minute)
+            err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, pod.ObjectMeta.Name, f.Namespace.Name, 10*time.Minute)
             framework.ExpectNoError(err)
         }(pod)
     }

View File

@@ -31,7 +31,6 @@ import (
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/fields"
-    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/uuid"
     clientset "k8s.io/client-go/kubernetes"
@@ -434,7 +433,7 @@ current-context: local-context
         ginkgo.By("Delete the node problem detector")
         framework.ExpectNoError(e2epod.NewPodClient(f).Delete(ctx, name, *metav1.NewDeleteOptions(0)))
         ginkgo.By("Wait for the node problem detector to disappear")
-        gomega.Expect(e2epod.WaitForPodToDisappear(ctx, c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(gomega.Succeed())
+        gomega.Expect(e2epod.WaitForPodNotFoundInNamespace(ctx, c, name, ns, pollTimeout)).To(gomega.Succeed())
         ginkgo.By("Delete the config map")
         framework.ExpectNoError(c.CoreV1().ConfigMaps(ns).Delete(ctx, configName, metav1.DeleteOptions{}))
         ginkgo.By("Clean up the events")

View File

@@ -39,7 +39,6 @@ import (
     v1 "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/apimachinery/pkg/util/wait"
@@ -384,8 +383,7 @@ func deletePodsSync(ctx context.Context, f *framework.Framework, pods []*v1.Pod)
             if err != nil && !apierrors.IsNotFound(err) {
                 framework.Failf("Unexpected error trying to delete pod %s: %v", pod.Name, err)
             }
-            gomega.Expect(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
-                30*time.Second, 10*time.Minute)).NotTo(gomega.HaveOccurred())
+            framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, pod.ObjectMeta.Name, f.Namespace.Name, 10*time.Minute))
         }()
     }
     wg.Wait()