mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-09 03:57:41 +00:00)
Merge pull request #116091 from pacoxu/cleanup-terminationGracePeriodSeconds
cleanup: remove ProbeTerminationGracePeriod feature tag on test
commit 6fbf4824fd
@@ -459,7 +459,7 @@ var _ = SIGDescribe("Probing container", func() {
 		Testname: Set terminationGracePeriodSeconds for livenessProbe
 		Description: A pod with a long terminationGracePeriod is created with a shorter livenessProbe-level terminationGracePeriodSeconds. We confirm the shorter termination period is used.
 	*/
-	ginkgo.It("should override timeoutGracePeriodSeconds when LivenessProbe field is set [Feature:ProbeTerminationGracePeriod]", func(ctx context.Context) {
+	ginkgo.It("should override timeoutGracePeriodSeconds when LivenessProbe field is set [NodeConformance]", func(ctx context.Context) {
 		pod := e2epod.NewAgnhostPod(f.Namespace.Name, "liveness-override-"+string(uuid.NewUUID()), nil, nil, nil, "/bin/sh", "-c", "sleep 1000")
 		longGracePeriod := int64(500)
 		pod.Spec.TerminationGracePeriodSeconds = &longGracePeriod
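For context, the elided hunk body attaches a liveness probe whose probe-level grace period is much shorter than the pod-level 500s set above. A minimal sketch of that shape, with illustrative values assumed rather than copied from the commit:

```go
import v1 "k8s.io/api/core/v1"

// addShortLivenessGrace is an illustrative helper (not part of the commit):
// the probe-level TerminationGracePeriodSeconds (5s here) is expected to win
// over the pod-level 500s when the probe fails.
func addShortLivenessGrace(pod *v1.Pod) {
	shortGracePeriod := int64(5)
	pod.Spec.Containers[0].LivenessProbe = &v1.Probe{
		ProbeHandler: v1.ProbeHandler{
			Exec: &v1.ExecAction{Command: []string{"/bin/false"}}, // probe always fails
		},
		InitialDelaySeconds:           10, // the "10s delay" in the timing comment below
		FailureThreshold:              1,
		TerminationGracePeriodSeconds: &shortGracePeriod,
	}
}
```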
@@ -479,7 +479,8 @@ var _ = SIGDescribe("Probing container", func() {
 		}
 
 		// 10s delay + 10s period + 5s grace period = 25s < 30s << pod-level timeout 500
-		RunLivenessTest(ctx, f, pod, 1, time.Second*30)
+		// add 10s more for kubelet syncing information to apiserver
+		RunLivenessTest(ctx, f, pod, 1, time.Second*40)
 	})
 
 	/*
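The timing comment above is a small budget calculation; spelled out in Go, with the 10s sync headroom being the commit's own stated assumption:

```go
// Back-of-envelope for the new 40s bound (values from the test's comment):
const (
	initialDelaySec = 10 // probe initial delay
	periodSec       = 10 // worst-case wait for the next probe tick
	probeGraceSec   = 5  // probe-level terminationGracePeriodSeconds
	syncPadSec      = 10 // headroom for kubelet -> apiserver status sync
)
// 10 + 10 + 5 = 25s worst case until the restart is observable; the old
// 30s bound left only 5s of slack, while 25 + 10 = 35s fits the new 40s
// bound comfortably.
```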
@@ -487,7 +488,7 @@ var _ = SIGDescribe("Probing container", func() {
 		Testname: Set terminationGracePeriodSeconds for startupProbe
 		Description: A pod with a long terminationGracePeriod is created with a shorter startupProbe-level terminationGracePeriodSeconds. We confirm the shorter termination period is used.
 	*/
-	ginkgo.It("should override timeoutGracePeriodSeconds when StartupProbe field is set [Feature:ProbeTerminationGracePeriod]", func(ctx context.Context) {
+	ginkgo.It("should override timeoutGracePeriodSeconds when StartupProbe field is set [NodeConformance]", func(ctx context.Context) {
 		pod := e2epod.NewAgnhostPod(f.Namespace.Name, "startup-override-"+string(uuid.NewUUID()), nil, nil, nil, "/bin/sh", "-c", "sleep 1000")
 		longGracePeriod := int64(500)
 		pod.Spec.TerminationGracePeriodSeconds = &longGracePeriod
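The startup-probe test has the same shape; an illustrative sketch of its probe, again with assumed values mirroring the liveness case:

```go
// Illustrative (not from the commit): startup-probe variant of the override.
shortGracePeriod := int64(5)
pod.Spec.Containers[0].StartupProbe = &v1.Probe{
	ProbeHandler: v1.ProbeHandler{
		Exec: &v1.ExecAction{Command: []string{"/bin/false"}}, // probe always fails
	},
	InitialDelaySeconds:           10,
	FailureThreshold:              1,
	TerminationGracePeriodSeconds: &shortGracePeriod, // probe-level override, as in the liveness sketch
}
```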
@@ -512,7 +513,8 @@ var _ = SIGDescribe("Probing container", func() {
 		}
 
 		// 10s delay + 10s period + 5s grace period = 25s < 30s << pod-level timeout 500
-		RunLivenessTest(ctx, f, pod, 1, time.Second*30)
+		// add 10s more for kubelet syncing information to apiserver
+		RunLivenessTest(ctx, f, pod, 1, time.Second*40)
 	})
 
 	/*
@@ -970,11 +972,14 @@ func RunLivenessTest(ctx context.Context, f *framework.Framework, pod *v1.Pod, e
 	framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
 
 	// Wait for the restart state to be as desired.
-	deadline := time.Now().Add(timeout)
+	// If initialRestartCount is not zero, there is restarting back-off time.
+	deadline := time.Now().Add(timeout + time.Duration(initialRestartCount*10)*time.Second)
+
 	lastRestartCount := initialRestartCount
 	observedRestarts := int32(0)
 	for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
 		pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
+		framework.Logf("Get pod %s in namespace %s", pod.Name, ns)
 		framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
 		restartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
 		if restartCount != lastRestartCount {
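The deadline change accounts for crash-loop back-off: a container that has already restarted sits in kubelet back-off (delays of 10s, 20s, 40s, ... capped at five minutes) before its next start, so the wait budget grows with the restart count already on the books. A sketch of the padding as the new line computes it (helper name is ours):

```go
import "time"

// padDeadline mirrors the new deadline expression above: pad the wait
// budget by 10s per restart already observed, since each one implies
// extra kubelet back-off before the next container start.
func padDeadline(timeout time.Duration, initialRestartCount int32) time.Time {
	backoffPad := time.Duration(initialRestartCount*10) * time.Second
	return time.Now().Add(timeout + backoffPad)
}
```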
@@ -997,8 +1002,8 @@ func RunLivenessTest(ctx context.Context, f *framework.Framework, pod *v1.Pod, e
 	// If we expected n restarts (n > 0), fail if we observed < n restarts.
 	if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
 		int(observedRestarts) < expectNumRestarts) {
-		framework.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
-			ns, pod.Name, expectNumRestarts, observedRestarts)
+		framework.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d. Pod status: %s.",
+			ns, pod.Name, expectNumRestarts, observedRestarts, &pod.Status)
 	}
 }