Revert "e2e: wait for pods with gomega"
@@ -18,6 +18,7 @@ package pod

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"
@@ -30,6 +31,7 @@ import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"

@@ -38,6 +40,14 @@ import (
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// errPodCompleted is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached completed state.
var errPodCompleted = FinalError(errors.New("pod ran to completion successfully"))

// errPodFailed is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached a permanent failure state.
var errPodFailed = FinalError(errors.New("pod failed permanently"))

// LabelLogOnPodFailure can be used to mark which Pods will have their logs logged in the case of
// a test failure. By default, if there are no Pods with this label, only the first 5 Pods will
// have their logs fetched.
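
These sentinel errors follow the usual wait-condition pattern: a poll callback returns them so the wait ends as soon as the pod reaches a terminal phase instead of running out the timeout. A minimal sketch of that pattern, assuming it sits alongside the definitions above with time and k8s.io/apimachinery/pkg/util/wait imported; the helper name, intervals, and condition body are illustrative, not the framework's own waiter:

// waitForPodRunningSketch is illustrative only: it polls the pod and stops
// early with errPodCompleted or errPodFailed once the phase is terminal.
func waitForPodRunningSketch(ctx context.Context, c clientset.Interface, ns, name string) error {
	return wait.PollUntilContextTimeout(ctx, 2*time.Second, 5*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
			if err != nil {
				return false, nil // transient API error: keep polling
			}
			switch pod.Status.Phase {
			case v1.PodRunning:
				return true, nil // done waiting
			case v1.PodSucceeded:
				return false, errPodCompleted // terminal: stop the wait early
			case v1.PodFailed:
				return false, errPodFailed // terminal: stop the wait early
			default:
				return false, nil // still starting: keep polling
			}
		})
}
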
@@ -59,6 +69,95 @@ func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}

func isElementOf(podUID types.UID, pods *v1.PodList) bool {
	for _, pod := range pods.Items {
		if pod.UID == podUID {
			return true
		}
	}
	return false
}

// ProxyResponseChecker is a context for checking pod responses by issuing GETs to them (via the API
// proxy) and verifying that they answer with their own pod name.
type ProxyResponseChecker struct {
	c              clientset.Interface
	ns             string
	label          labels.Selector
	controllerName string
	respondName    bool // Whether the pod should respond with its own name.
	pods           *v1.PodList
}

// NewProxyResponseChecker returns a context for checking pod responses.
func NewProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) ProxyResponseChecker {
	return ProxyResponseChecker{c, ns, label, controllerName, respondName, pods}
}

// CheckAllResponses issues GETs to all pods in the context and verifies that they
// reply with their own pod name.
func (r ProxyResponseChecker) CheckAllResponses(ctx context.Context) (done bool, err error) {
	successes := 0
	options := metav1.ListOptions{LabelSelector: r.label.String()}
	currentPods, err := r.c.CoreV1().Pods(r.ns).List(ctx, options)
	expectNoError(err, "Failed to get list of currentPods in namespace: %s", r.ns)
	for i, pod := range r.pods.Items {
		// Check that the replica list remains unchanged, otherwise we have problems.
		if !isElementOf(pod.UID, currentPods) {
			return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
		}

		ctxUntil, cancel := context.WithTimeout(ctx, singleCallTimeout)
		defer cancel()

		body, err := r.c.CoreV1().RESTClient().Get().
			Namespace(r.ns).
			Resource("pods").
			SubResource("proxy").
			Name(string(pod.Name)).
			Do(ctxUntil).
			Raw()

		if err != nil {
			if ctxUntil.Err() != nil {
				// We may encounter errors here because of a race between the pod readiness and apiserver
				// proxy. So, we log the error and retry if this occurs.
				framework.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
				return false, nil
			}
			framework.Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
			continue
		}
		// The response checker expects the pod's name unless !respondName, in
		// which case it just checks for a non-empty response.
		got := string(body)
		what := ""
		if r.respondName {
			what = "expected"
			want := pod.Name
			if got != want {
				framework.Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
					r.controllerName, i+1, pod.Name, want, got)
				continue
			}
		} else {
			what = "non-empty"
			if len(got) == 0 {
				framework.Logf("Controller %s: Replica %d [%s] expected non-empty response",
					r.controllerName, i+1, pod.Name)
				continue
			}
		}
		successes++
		framework.Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
			r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
	}
	if successes < len(r.pods.Items) {
		return false, nil
	}
	return true, nil
}

// PodsCreated returns a pod list matched by the given name.
func PodsCreated(ctx context.Context, c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
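
CheckAllResponses already has the (done, err) shape of a poll condition, so a caller typically drives it in a wait loop until every replica behind the selector has answered through the API proxy. A rough sketch of that usage, assuming time and k8s.io/apimachinery/pkg/util/wait are imported; the helper name and timeouts are illustrative, not the framework's actual PodsResponding implementation:

// waitForPodsRespondingSketch is illustrative only: it polls the checker until
// all pods respond (with their own name when wantName is true).
func waitForPodsRespondingSketch(ctx context.Context, c clientset.Interface, ns string,
	label labels.Selector, controllerName string, wantName bool, pods *v1.PodList) error {
	checker := NewProxyResponseChecker(c, ns, label, controllerName, wantName, pods)
	return wait.PollUntilContextTimeout(ctx, 5*time.Second, 2*time.Minute, true, checker.CheckAllResponses)
}
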
@@ -114,7 +213,10 @@ func podRunningMaybeResponding(ctx context.Context, c clientset.Interface, ns, n
		return fmt.Errorf("failed to wait for pods running: %v", e)
	}
	if checkResponding {
-		return WaitForPodsResponding(ctx, c, ns, name, wantName, podRespondingTimeout, pods)
+		err = PodsResponding(ctx, c, ns, name, wantName, pods)
+		if err != nil {
+			return fmt.Errorf("failed to wait for pods responding: %v", err)
+		}
	}
	return nil
}
@@ -533,7 +635,7 @@ func VerifyPodHasConditionWithType(ctx context.Context, f *framework.Framework,
func getNodeTTLAnnotationValue(ctx context.Context, c clientset.Interface) (time.Duration, error) {
	nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil || len(nodes.Items) == 0 {
-		return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %w", err)
+		return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err)
	}
	// Since the TTL the kubelet uses is stored in the node object, for the timeout
	// purpose we take it from the first node (all of them should be the same).
@@ -572,3 +674,15 @@ func IsPodActive(p *v1.Pod) bool {
		v1.PodFailed != p.Status.Phase &&
		p.DeletionTimestamp == nil
}

func podIdentifier(namespace, name string) string {
	return fmt.Sprintf("%s/%s", namespace, name)
}

func identifier(pod *v1.Pod) string {
	id := podIdentifier(pod.Namespace, pod.Name)
	if pod.UID != "" {
		id += fmt.Sprintf("(%s)", pod.UID)
	}
	return id
}