From 4d63e7d4d6d63109a3555c1011fe3b6c89bd2be6 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Thu, 19 Jan 2023 17:38:03 +0100
Subject: [PATCH] e2e: remove unused label filter from WaitForPodsRunningReady

None of the users of the function passed anything other than nil or an
empty map, and the implementation ignores the parameter - it seems like a
candidate for simplification.
---
 test/e2e/cloud/gcp/node_lease.go        | 2 +-
 test/e2e/cloud/gcp/resize_nodes.go      | 2 +-
 test/e2e/common/node/container_probe.go | 4 ++--
 test/e2e/common/node/pods.go            | 2 +-
 test/e2e/e2e.go                         | 2 +-
 test/e2e/framework/pod/wait.go          | 8 +-------
 test/e2e/scheduling/priorities.go       | 2 +-
 test/e2e/windows/host_process.go        | 4 ++--
 test/e2e/windows/kubelet_stats.go       | 4 ++--
 9 files changed, 12 insertions(+), 18 deletions(-)

diff --git a/test/e2e/cloud/gcp/node_lease.go b/test/e2e/cloud/gcp/node_lease.go
index cef9a0f86be..e3e875a68db 100644
--- a/test/e2e/cloud/gcp/node_lease.go
+++ b/test/e2e/cloud/gcp/node_lease.go
@@ -97,7 +97,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 		// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
 		// the cluster is restored to health.
 		ginkgo.By("waiting for system pods to successfully restart")
-		err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
+		err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout)
 		framework.ExpectNoError(err)
 	})
 
diff --git a/test/e2e/cloud/gcp/resize_nodes.go b/test/e2e/cloud/gcp/resize_nodes.go
index 60a02fbb41e..034d1876919 100644
--- a/test/e2e/cloud/gcp/resize_nodes.go
+++ b/test/e2e/cloud/gcp/resize_nodes.go
@@ -99,7 +99,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 		// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
 		// the cluster is restored to health.
 		ginkgo.By("waiting for system pods to successfully restart")
-		err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
+		err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout)
 		framework.ExpectNoError(err)
 	})
 })
diff --git a/test/e2e/common/node/container_probe.go b/test/e2e/common/node/container_probe.go
index 5f4e90d3e71..6a0f687ff07 100644
--- a/test/e2e/common/node/container_probe.go
+++ b/test/e2e/common/node/container_probe.go
@@ -608,7 +608,7 @@ done
 		})
 
 		// verify pods are running and ready
-		err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{})
+		err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart)
 		framework.ExpectNoError(err)
 
 		// Shutdown pod. Readiness should change to false
@@ -690,7 +690,7 @@ done
 		})
 
 		// verify pods are running and ready
-		err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{})
+		err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart)
 		framework.ExpectNoError(err)
 
 		// Shutdown pod. Readiness should change to false
diff --git a/test/e2e/common/node/pods.go b/test/e2e/common/node/pods.go
index 698e032d003..f8ac287da5c 100644
--- a/test/e2e/common/node/pods.go
+++ b/test/e2e/common/node/pods.go
@@ -873,7 +873,7 @@ var _ = SIGDescribe("Pods", func() {
 
 		// wait as required for all 3 pods to be running
 		ginkgo.By("waiting for all 3 pods to be running")
-		err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, 0, f.Timeouts.PodStart, nil)
+		err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, 0, f.Timeouts.PodStart)
 		framework.ExpectNoError(err, "3 pods not found running.")
 
 		// delete Collection of pods with a label in the current namespace
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
index 0d674e83b74..5ad7daebb21 100644
--- a/test/e2e/e2e.go
+++ b/test/e2e/e2e.go
@@ -245,7 +245,7 @@ func setupSuite(ctx context.Context) {
 	// #41007. To avoid those pods preventing the whole test runs (and just
 	// wasting the whole run), we allow for some not-ready pods (with the
 	// number equal to the number of allowed not-ready nodes).
-	if err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), timeouts.SystemPodsStartup, map[string]string{}); err != nil {
+	if err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), timeouts.SystemPodsStartup); err != nil {
 		e2edebug.DumpAllNamespaceInfo(ctx, c, metav1.NamespaceSystem)
 		e2ekubectl.LogFailedContainers(ctx, c, metav1.NamespaceSystem, framework.Logf)
 		framework.Failf("Error waiting for all pods to be running and ready: %v", err)
diff --git a/test/e2e/framework/pod/wait.go b/test/e2e/framework/pod/wait.go
index 7efc5b5d7b2..3cc1ca4715e 100644
--- a/test/e2e/framework/pod/wait.go
+++ b/test/e2e/framework/pod/wait.go
@@ -208,16 +208,13 @@ func BeInPhase(phase v1.PodPhase) types.GomegaMatcher {
 // example, in cluster startup, because the number of pods increases while
 // waiting. All pods that are in SUCCESS state are not counted.
 //
-// If ignoreLabels is not empty, pods matching this selector are ignored.
-//
 // If minPods or allowedNotReadyPods are -1, this method returns immediately
 // without waiting.
-func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
+func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration) error {
 	if minPods == -1 || allowedNotReadyPods == -1 {
 		return nil
 	}
 
-	ignoreSelector := labels.SelectorFromSet(map[string]string{})
 	start := time.Now()
 	framework.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
 		timeout, minPods, ns)
@@ -266,9 +263,6 @@ func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns stri
 		badPods = []v1.Pod{}
 		desiredPods = len(podList.Items)
 		for _, pod := range podList.Items {
-			if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
-				continue
-			}
 			res, err := testutils.PodRunningReady(&pod)
 			switch {
 			case res && err == nil:
diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go
index e0eafc70486..c5de0cef53c 100644
--- a/test/e2e/scheduling/priorities.go
+++ b/test/e2e/scheduling/priorities.go
@@ -108,7 +108,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 		err = framework.CheckTestingNSDeletedExcept(ctx, cs, ns)
 		framework.ExpectNoError(err)
 
-		err = e2epod.WaitForPodsRunningReady(ctx, cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{})
+		err = e2epod.WaitForPodsRunningReady(ctx, cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout)
 		framework.ExpectNoError(err)
 
 		// skip if the most utilized node has less than the cri-o minMemLimit available
diff --git a/test/e2e/windows/host_process.go b/test/e2e/windows/host_process.go
index dfde454460c..33fbb7224df 100644
--- a/test/e2e/windows/host_process.go
+++ b/test/e2e/windows/host_process.go
@@ -655,7 +655,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
 
 		ginkgo.By("Waiting for the pod to start running")
 		timeout := 3 * time.Minute
-		e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, timeout, make(map[string]string))
+		e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, timeout)
 
 		ginkgo.By("Getting container stats for pod")
 		nodeStats, err := e2ekubelet.GetStatsSummary(ctx, f.ClientSet, targetNode.Name)
@@ -751,7 +751,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
 		pc.Create(ctx, pod)
 
 		ginkgo.By("Waiting for pod to run")
-		e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, 3*time.Minute, make(map[string]string))
+		e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, 3*time.Minute)
 
 		ginkgo.By("Waiting for 60 seconds")
 		// We wait an additional 60 seconds after the pod is Running because the
diff --git a/test/e2e/windows/kubelet_stats.go b/test/e2e/windows/kubelet_stats.go
index f9c90940e60..7b1c766fd1a 100644
--- a/test/e2e/windows/kubelet_stats.go
+++ b/test/e2e/windows/kubelet_stats.go
@@ -58,7 +58,7 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats [Serial]", func() {
 
 			ginkgo.By("Waiting up to 3 minutes for pods to be running")
 			timeout := 3 * time.Minute
-			err = e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 10, 0, timeout, make(map[string]string))
+			err = e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 10, 0, timeout)
 			framework.ExpectNoError(err)
 
 			ginkgo.By("Getting kubelet stats 5 times and checking average duration")
@@ -149,7 +149,7 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() {
 
 			ginkgo.By("Waiting up to 3 minutes for pods to be running")
 			timeout := 3 * time.Minute
-			err = e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, 0, timeout, make(map[string]string))
+			err = e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, 0, timeout)
 			framework.ExpectNoError(err)
 
 			ginkgo.By("Getting kubelet stats 1 time")