Move function specific to container restart test inline
Signed-off-by: Laura Lorenz <lauralorenz@google.com>
This commit is contained in:
parent 529d5ba9d3
commit 59f9858086
@@ -37,7 +37,6 @@ import (
 	apitypes "k8s.io/apimachinery/pkg/types"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubectl/pkg/util/podutils"
-	podv1util "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
 	"k8s.io/kubernetes/test/utils/format"
@@ -869,15 +868,3 @@ func WaitForContainerTerminated(ctx context.Context, c clientset.Interface, name
 		return false, nil
 	})
 }
-
-// WaitForContainerRestartedNTimes waits for the given normal container in the Pod to have restarted N times
-func WaitForContainerRestartedNTimes(ctx context.Context, c clientset.Interface, namespace string, podName string, containerName string, timeout time.Duration, target int) error {
-	conditionDesc := fmt.Sprintf("A container in pod %s restarted at least %d times", podName, target)
-	return WaitForPodCondition(ctx, c, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) {
-		cs, found := podv1util.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
-		if !found {
-			return false, fmt.Errorf("could not find container %s in pod %s", containerName, podName)
-		}
-		return cs.RestartCount >= int32(target), nil
-	})
-}
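The helper removed above is a thin wrapper: it asks WaitForPodCondition to poll the pod and compares the named container's RestartCount against a target. For readers who want the same wait without the e2e framework, here is a minimal, hypothetical sketch using plain client-go and apimachinery's wait package; the kubeconfig handling, namespace, pod name, container name, and restart target are illustrative assumptions and are not part of this commit.

package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitForRestarts polls the pod until the named container has restarted at
// least target times, mirroring the condition the framework helper checks.
// Hypothetical, framework-free sketch; not the e2e framework's implementation.
func waitForRestarts(ctx context.Context, c kubernetes.Interface, ns, podName, containerName string, target int32, timeout time.Duration) error {
	return wait.PollUntilContextTimeout(ctx, 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
		pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, cs := range pod.Status.ContainerStatuses {
			if cs.Name == containerName {
				return cs.RestartCount >= target, nil
			}
		}
		// Status for the container not published yet; keep polling.
		return false, nil
	})
}

func main() {
	// Assumed kubeconfig in the default location; all names below are examples.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	if err := waitForRestarts(context.Background(), client, "default", "fail-always", "restart-target", 3, 5*time.Minute); err != nil {
		fmt.Println("wait failed:", err)
	}
}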
@@ -21,8 +21,10 @@ package e2enode
 
 import (
 	"context"
+	"fmt"
 	"time"
 
+	podv1util "k8s.io/kubernetes/pkg/api/v1/pod"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 
 	"github.com/onsi/ginkgo/v2"
@@ -97,7 +99,7 @@ func doTest(ctx context.Context, f *framework.Framework, targetRestarts int, con
 
 	// Hard wait 30 seconds for targetRestarts in the best case; longer timeout later will handle if infra was slow.
 	time.Sleep(30 * time.Second)
-	podErr = e2epod.WaitForContainerRestartedNTimes(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName, 5*time.Minute, targetRestarts)
+	podErr = waitForContainerRestartedNTimes(ctx, f, f.Namespace.Name, pod.Name, containerName, 5*time.Minute, targetRestarts)
 	gomega.Expect(podErr).ShouldNot(gomega.HaveOccurred(), "Expected container to repeatedly back off container failures")
 
 	r, err := extractObservedBackoff(ctx, f, pod.Name, containerName)
@@ -140,3 +142,14 @@ func newFailAlwaysPod() *v1.Pod {
 	}
 	return pod
 }
+
+func waitForContainerRestartedNTimes(ctx context.Context, f *framework.Framework, namespace string, podName string, containerName string, timeout time.Duration, target int) error {
+	conditionDesc := fmt.Sprintf("A container in pod %s restarted at least %d times", podName, target)
+	return e2epod.WaitForPodCondition(ctx, f.ClientSet, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) {
+		cs, found := podv1util.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
+		if !found {
+			return false, fmt.Errorf("could not find container %s in pod %s", containerName, podName)
+		}
+		return cs.RestartCount >= int32(target), nil
+	})
+}
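For orientation, a hypothetical sketch of how the now test-local helper could be driven from a Ginkgo spec in this package. It assumes the file's existing imports (context, time, framework, e2epod, ginkgo, gomega) plus the newFailAlwaysPod helper shown above; the spec text, container name, and restart target are made-up examples, and the real test reaches the helper through doTest instead.

// Hypothetical spec; the real test in this file goes through doTest.
var _ = ginkgo.Describe("Container restart wait (sketch)", func() {
	f := framework.NewDefaultFramework("restart-wait-sketch")

	ginkgo.It("sees at least 3 restarts of a crashing container", func(ctx context.Context) {
		// newFailAlwaysPod comes from this file; the container name below is an assumption.
		pod := e2epod.NewPodClient(f).Create(ctx, newFailAlwaysPod())
		err := waitForContainerRestartedNTimes(ctx, f, f.Namespace.Name, pod.Name, "restart-target-container", 5*time.Minute, 3)
		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "expected the container to restart at least 3 times")
	})
})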