mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-24 04:06:03 +00:00)
Use a better util
Signed-off-by: Laura Lorenz <lauralorenz@google.com>
This commit is contained in:
parent 285d433dea
commit 8e7b2af712
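
In short: kubectl's exported podutils.MaxContainerRestarts is unexported to maxContainerRestarts, and the e2e helper WaitForContainerRestartedNTimes switches to podv1util.GetContainerStatus so it waits on one named container's restart count instead of the maximum across all regular containers.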
@@ -191,10 +191,7 @@ func podReadyTime(pod *corev1.Pod) *metav1.Time {
 	return &metav1.Time{}
 }
 
-// MaxContainerRestarts iterates through all the normal containers and sidecar
-// containers in a Pod object and reports the highest restart count observed per
-// category.
-func MaxContainerRestarts(pod *corev1.Pod) (regularRestarts, sidecarRestarts int) {
+func maxContainerRestarts(pod *corev1.Pod) (regularRestarts, sidecarRestarts int) {
 	for _, c := range pod.Status.ContainerStatuses {
 		regularRestarts = max(regularRestarts, int(c.RestartCount))
 	}
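
The hunk ends at the regular-container loop; the sidecar half of the function sits below the cut. A sketch of how that half plausibly continues, assuming sidecars are identified as init containers with restartPolicy: Always and that their counts come from InitContainerStatuses (neither detail is shown in this diff):

	// Sketch only, not part of the diff: sidecars are restartable init containers.
	for i, c := range pod.Spec.InitContainers {
		if c.RestartPolicy != nil && *c.RestartPolicy == corev1.ContainerRestartPolicyAlways &&
			i < len(pod.Status.InitContainerStatuses) {
			sidecarRestarts = max(sidecarRestarts, int(pod.Status.InitContainerStatuses[i].RestartCount))
		}
	}
	return regularRestarts, sidecarRestarts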

@@ -217,8 +214,8 @@ func MaxContainerRestarts(pod *corev1.Pod) (regularRestarts, sidecarRestarts int
 // false: pj has a higher container restart count.
 // nil: Both have the same container restart count.
 func compareMaxContainerRestarts(pi *corev1.Pod, pj *corev1.Pod) *bool {
-	regularRestartsI, sidecarRestartsI := MaxContainerRestarts(pi)
-	regularRestartsJ, sidecarRestartsJ := MaxContainerRestarts(pj)
+	regularRestartsI, sidecarRestartsI := maxContainerRestarts(pi)
+	regularRestartsJ, sidecarRestartsJ := maxContainerRestarts(pj)
 	if regularRestartsI != regularRestartsJ {
 		res := regularRestartsI > regularRestartsJ
 		return &res
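
The hunk stops before the function's tail. Given the true/false/nil contract documented above, the remainder presumably applies the same comparison to the sidecar counts and returns nil on a full tie; a sketch under that assumption:

	// Sketch only: continuation implied by the documented contract.
	if sidecarRestartsI != sidecarRestartsJ {
		res := sidecarRestartsI > sidecarRestartsJ
		return &res
	}
	return nil // same restart counts in both categories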

@@ -37,6 +37,7 @@ import (
 	apitypes "k8s.io/apimachinery/pkg/types"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubectl/pkg/util/podutils"
+	podv1util "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
 	"k8s.io/kubernetes/test/utils/format"
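
The added alias import is what the rewritten wait helper below relies on: GetContainerStatus from k8s.io/kubernetes/pkg/api/v1/pod looks a single named container up in a status slice and reports whether it exists, rather than aggregating restart counts across containers. A usage sketch (the container name "app" is a placeholder):

	// GetContainerStatus returns (v1.ContainerStatus, bool); there is no error value.
	if cs, ok := podv1util.GetContainerStatus(pod.Status.ContainerStatuses, "app"); ok {
		fmt.Printf("container %q restarted %d times\n", cs.Name, cs.RestartCount)
	}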

@@ -869,11 +870,14 @@ func WaitForContainerTerminated(ctx context.Context, c clientset.Interface, name
 	})
 }
 
-// WaitForContainerRestartedNTimes waits for a container in the Pod to have restarted N times
-func WaitForContainerRestartedNTimes(ctx context.Context, c clientset.Interface, namespace string, podName string, timeout time.Duration, target int) error {
+// WaitForContainerRestartedNTimes waits for the given normal container in the Pod to have restarted N times
+func WaitForContainerRestartedNTimes(ctx context.Context, c clientset.Interface, namespace string, podName string, containerName string, timeout time.Duration, target int) error {
 	conditionDesc := fmt.Sprintf("A container in pod %s restarted at least %d times", podName, target)
 	return WaitForPodCondition(ctx, c, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) {
-		r, _ := podutils.MaxContainerRestarts(pod)
-		return r >= target, nil
+		cs, found := podv1util.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
+		if !found {
+			return false, fmt.Errorf("could not find container %s in pod %s", containerName, podName)
+		}
+		return cs.RestartCount >= int32(target), nil
 	})
 }
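
Note the behavioral tightening: the old closure passed once any regular container reached the target, while the new one requires the named container to be present in ContainerStatuses and compares only its RestartCount. Callers therefore pass one extra argument; a minimal call sketch with hypothetical namespace, pod, and container names:

	// All names below are placeholders; a real test supplies its own.
	err := e2epod.WaitForContainerRestartedNTimes(ctx, f.ClientSet, "e2e-ns", "restart-pod", "main", 5*time.Minute, 3)
	framework.ExpectNoError(err, "container should restart at least 3 times")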

@@ -97,7 +97,7 @@ func doTest(ctx context.Context, f *framework.Framework, targetRestarts int, con
 
 	// Hard wait 30 seconds for targetRestarts in the best case; longer timeout later will handle if infra was slow.
 	time.Sleep(30 * time.Second)
-	podErr = e2epod.WaitForContainerRestartedNTimes(ctx, f.ClientSet, f.Namespace.Name, pod.Name, 5*time.Minute, targetRestarts)
+	podErr = e2epod.WaitForContainerRestartedNTimes(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName, 5*time.Minute, targetRestarts)
 	gomega.Expect(podErr).ShouldNot(gomega.HaveOccurred(), "Expected container to repeatedly back off container failures")
 
 	r, err := extractObservedBackoff(ctx, f, pod.Name, containerName)