e2e: remove unused label filter from WaitForPodsRunningReady
None of the callers passed anything other than nil or an empty map for ignoreLabels, and the implementation ignores the parameter anyway (its ignore selector is built from an empty map, never from ignoreLabels), so the parameter is a straightforward candidate for removal.
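For callers the migration is mechanical: drop the trailing ignoreLabels argument. A minimal before/after sketch, reusing the variables from the NodeLease hunk below:

	// Before: an ignoreLabels value had to be supplied, but it had no effect.
	err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})

	// After: the parameter is gone; behavior is unchanged.
	err = e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout)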
parent 1b5da1035a
commit 3ebab68c8a
@@ -97,7 +97,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 		// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
 		// the cluster is restored to health.
 		ginkgo.By("waiting for system pods to successfully restart")
-		err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
+		err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout)
 		framework.ExpectNoError(err)
 	})
 
@@ -99,7 +99,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 		// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
 		// the cluster is restored to health.
 		ginkgo.By("waiting for system pods to successfully restart")
-		err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
+		err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout)
 		framework.ExpectNoError(err)
 	})
 })
@@ -608,7 +608,7 @@ done
 		})
 
 		// verify pods are running and ready
-		err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{})
+		err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart)
 		framework.ExpectNoError(err)
 
 		// Shutdown pod. Readiness should change to false
@@ -690,7 +690,7 @@ done
 		})
 
 		// verify pods are running and ready
-		err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{})
+		err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart)
 		framework.ExpectNoError(err)
 
 		// Shutdown pod. Readiness should change to false
@@ -873,7 +873,7 @@ var _ = SIGDescribe("Pods", func() {
 
 		// wait as required for all 3 pods to be running
 		ginkgo.By("waiting for all 3 pods to be running")
-		err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, 0, f.Timeouts.PodStart, nil)
+		err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, 0, f.Timeouts.PodStart)
 		framework.ExpectNoError(err, "3 pods not found running.")
 
 		// delete Collection of pods with a label in the current namespace
@@ -245,7 +245,7 @@ func setupSuite(ctx context.Context) {
 	// #41007. To avoid those pods preventing the whole test runs (and just
 	// wasting the whole run), we allow for some not-ready pods (with the
 	// number equal to the number of allowed not-ready nodes).
-	if err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), timeouts.SystemPodsStartup, map[string]string{}); err != nil {
+	if err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), timeouts.SystemPodsStartup); err != nil {
 		e2edebug.DumpAllNamespaceInfo(ctx, c, metav1.NamespaceSystem)
 		e2ekubectl.LogFailedContainers(ctx, c, metav1.NamespaceSystem, framework.Logf)
 		framework.Failf("Error waiting for all pods to be running and ready: %v", err)
@@ -208,16 +208,13 @@ func BeInPhase(phase v1.PodPhase) types.GomegaMatcher {
 // example, in cluster startup, because the number of pods increases while
 // waiting. All pods that are in SUCCESS state are not counted.
 //
-// If ignoreLabels is not empty, pods matching this selector are ignored.
-//
 // If minPods or allowedNotReadyPods are -1, this method returns immediately
 // without waiting.
-func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
+func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration) error {
 	if minPods == -1 || allowedNotReadyPods == -1 {
 		return nil
 	}
 
-	ignoreSelector := labels.SelectorFromSet(map[string]string{})
 	start := time.Now()
 	framework.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
 		timeout, minPods, ns)
@@ -266,9 +263,6 @@ func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns stri
 		badPods = []v1.Pod{}
 		desiredPods = len(podList.Items)
 		for _, pod := range podList.Items {
-			if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
-				continue
-			}
 			res, err := testutils.PodRunningReady(&pod)
 			switch {
 			case res && err == nil:
@@ -108,7 +108,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 
 		err = framework.CheckTestingNSDeletedExcept(ctx, cs, ns)
 		framework.ExpectNoError(err)
-		err = e2epod.WaitForPodsRunningReady(ctx, cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{})
+		err = e2epod.WaitForPodsRunningReady(ctx, cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout)
 		framework.ExpectNoError(err)
 
 		// skip if the most utilized node has less than the cri-o minMemLimit available
@@ -655,7 +655,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
 
 		ginkgo.By("Waiting for the pod to start running")
 		timeout := 3 * time.Minute
-		e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, timeout, make(map[string]string))
+		e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, timeout)
 
 		ginkgo.By("Getting container stats for pod")
 		nodeStats, err := e2ekubelet.GetStatsSummary(ctx, f.ClientSet, targetNode.Name)
@@ -751,7 +751,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
 		pc.Create(ctx, pod)
 
 		ginkgo.By("Waiting for pod to run")
-		e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, 3*time.Minute, make(map[string]string))
+		e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, 3*time.Minute)
 
 		ginkgo.By("Waiting for 60 seconds")
 		// We wait an additional 60 seconds after the pod is Running because the
@@ -58,7 +58,7 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats [Serial]", func() {
 
 		ginkgo.By("Waiting up to 3 minutes for pods to be running")
 		timeout := 3 * time.Minute
-		err = e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 10, 0, timeout, make(map[string]string))
+		err = e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 10, 0, timeout)
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Getting kubelet stats 5 times and checking average duration")
@@ -149,7 +149,7 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() {
 
 		ginkgo.By("Waiting up to 3 minutes for pods to be running")
 		timeout := 3 * time.Minute
-		err = e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, 0, timeout, make(map[string]string))
+		err = e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, 0, timeout)
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Getting kubelet stats 1 time")
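Why the parameter was dead code: the removed ignoreSelector was always built from an empty map, never from ignoreLabels, and an empty label selector matches every label set; only the len(ignoreLabels) != 0 guard (false for every existing caller) kept the loop from skipping all pods. A small standalone sketch (hypothetical main package, assuming only k8s.io/apimachinery) demonstrating the empty-selector behavior:

	package main

	import (
		"fmt"

		"k8s.io/apimachinery/pkg/labels"
	)

	func main() {
		// Mirrors the removed line: the selector is built from an empty
		// map, so it carries no requirements at all.
		ignoreSelector := labels.SelectorFromSet(map[string]string{})

		// An empty selector matches any label set, so had a caller ever
		// passed non-empty ignoreLabels, every pod would have been skipped.
		fmt.Println(ignoreSelector.Matches(labels.Set{"app": "kube-dns"})) // true
		fmt.Println(ignoreSelector.Matches(labels.Set{}))                  // true
	}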