Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-19 01:40:13 +00:00)
e2e: consolidate pod response checking
This renames PodsResponding to WaitForPodsResponding for the sake of consistency and adds a timeout parameter. The parameter is necessary because some other users of NewProxyResponseChecker used a much lower timeout (2min vs. 15min). Besides simplifying some code, this also makes it easier to rewrite ProxyResponseChecker, because it is now used only in WaitForPodsResponding.
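For illustration only (not part of the commit): a minimal sketch of the call-site change, assuming the usual e2e framework imports; verifyPodsServing is a hypothetical helper name, and f, name, and pods stand in for a test's own variables.

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// verifyPodsServing is a hypothetical helper, not part of this commit.
// Before the change, callers polled NewProxyResponseChecker themselves with a
// hand-rolled retry loop; afterwards they call the renamed helper and pass the
// timeout explicitly (0 falls back to the framework default, podRespondingTimeout).
func verifyPodsServing(ctx context.Context, f *framework.Framework, name string, pods *v1.PodList) {
	// old: err = e2epod.PodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, true, pods)
	// new:
	framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, true, 2*time.Minute, pods))
}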
parent 4491c80074
commit 3b579fca91
@@ -27,7 +27,6 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	utilrand "k8s.io/apimachinery/pkg/util/rand"
@@ -528,13 +527,7 @@ func TestReplicationControllerServeImageOrFail(ctx context.Context, f *framework
 
 	// Verify that something is listening.
 	framework.Logf("Trying to dial the pod")
-	retryTimeout := 2 * time.Minute
-	retryInterval := 5 * time.Second
-	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-	err = wait.PollWithContext(ctx, retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
-	if err != nil {
-		framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
-	}
+	framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, true, 2*time.Minute, pods))
 }
 
 // 1. Create a quota restricting pods in the current namespace to 2.
@@ -225,13 +225,7 @@ func testReplicaSetServeImageOrFail(ctx context.Context, f *framework.Framework,
 
 	// Verify that something is listening.
 	framework.Logf("Trying to dial the pod")
-	retryTimeout := 2 * time.Minute
-	retryInterval := 5 * time.Second
-	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-	err = wait.PollWithContext(ctx, retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
-	if err != nil {
-		framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
-	}
+	framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, true, 2*time.Minute, pods))
 }
 
 // 1. Create a quota restricting pods in the current namespace to 2.
@@ -204,10 +204,7 @@ func podRunningMaybeResponding(ctx context.Context, c clientset.Interface, ns, n
 		return fmt.Errorf("failed to wait for pods running: %v", e)
 	}
 	if checkResponding {
-		err = PodsResponding(ctx, c, ns, name, wantName, pods)
-		if err != nil {
-			return fmt.Errorf("failed to wait for pods responding: %v", err)
-		}
+		return WaitForPodsResponding(ctx, c, ns, name, wantName, podRespondingTimeout, pods)
 	}
 	return nil
 }
@@ -582,10 +582,14 @@ func WaitForPodNotFoundInNamespace(ctx context.Context, c clientset.Interface, p
 }
 
 // PodsResponding waits for the pods to response.
-func PodsResponding(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error {
+func WaitForPodsResponding(ctx context.Context, c clientset.Interface, ns string, controllerName string, wantName bool, timeout time.Duration, pods *v1.PodList) error {
+	if timeout == 0 {
+		timeout = podRespondingTimeout
+	}
 	ginkgo.By("trying to dial each unique pod")
-	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-	err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
+	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": controllerName}))
+
+	err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, NewProxyResponseChecker(c, ns, label, controllerName, wantName, pods).CheckAllResponses)
 	return maybeTimeoutError(err, "waiting for pods to be responsive")
 }
 
@@ -116,7 +116,7 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() {
 			options := metav1.ListOptions{LabelSelector: label.String()}
 			pods, err := c.CoreV1().Pods(ns.Name).List(ctx, options)
 			framework.ExpectNoError(err, "failed to list pods in namespace: %s", ns.Name)
-			err = e2epod.PodsResponding(ctx, c, ns.Name, backendName, false, pods)
+			err = e2epod.WaitForPodsResponding(ctx, c, ns.Name, backendName, false, 0, pods)
 			framework.ExpectNoError(err, "waiting for all pods to respond")
 			framework.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
|
Loading…
Reference in New Issue
Block a user