Set startTimeout back to 3m, restore wait loop at end of test

Elana Hashman 2021-11-18 14:59:01 -08:00
parent b4a8861af3
commit 6ddf86d422
No known key found for this signature in database
GPG Key ID: D37F7B2A20B48FA0


@@ -73,7 +73,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
 		podCount = 100
 		podCreationInterval = 100 * time.Millisecond
 		recoverTimeout = 5 * time.Minute
-		startTimeout = 5 * time.Minute
+		startTimeout = 3 * time.Minute
 		// restartCount is chosen so even with minPods we exhaust the default
 		// allocation of a /24.
 		minPods = 50
@@ -165,7 +165,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
 			pod.Spec.RestartPolicy = "Never"
 			pod.Spec.Containers[0].Command = []string{"echo", "hi"}
 			pod.Spec.Containers[0].Resources.Limits = v1.ResourceList{
-				v1.ResourceCPU: resource.MustParse("950m"), // leave a little room for other workloads
+				v1.ResourceCPU: resource.MustParse("950m"), // leave a little room for other workloads
 			}
 		}
 		createBatchPodWithRateControl(f, restartNeverPods, podCreationInterval)
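For context, createBatchPodWithRateControl paces pod creation by podCreationInterval (100ms above) so a large batch does not hit the API server all at once. A minimal sketch of that pattern, assuming a hypothetical createPod helper — the real helper lives in the e2e_node test framework and is not reproduced here:

	package main

	import (
		"fmt"
		"time"
	)

	type Pod struct{ Name string }

	// createPod is an illustrative stand-in for the framework's
	// pod-creation call; it is not the actual test API.
	func createPod(p Pod) { fmt.Println("created", p.Name) }

	// createBatchWithRateControl creates pods one at a time, sleeping
	// between creations so the batch arrives at a bounded rate.
	func createBatchWithRateControl(pods []Pod, interval time.Duration) {
		for _, p := range pods {
			createPod(p)
			time.Sleep(interval) // e.g. 100 * time.Millisecond in the test
		}
	}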
@@ -203,9 +203,11 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
 		// restart may think these old pods are consuming CPU and we
 		// will get an OutOfCpu error.
 		ginkgo.By("verifying restartNever pods succeed and restartAlways pods stay running")
-		postRestartRunningPods := waitForPods(f, numAllPods, recoverTimeout)
-		if len(postRestartRunningPods) < numAllPods {
-			framework.Failf("less pods are running after node restart, got %d but expected %d", len(postRestartRunningPods), numAllPods)
+		for start := time.Now(); time.Since(start) < startTimeout; time.Sleep(10 * time.Second) {
+			postRestartRunningPods := waitForPods(f, numAllPods, recoverTimeout)
+			if len(postRestartRunningPods) < numAllPods {
+				framework.Failf("less pods are running after node restart, got %d but expected %d", len(postRestartRunningPods), numAllPods)
+			}
 		}
 	})
})
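The restored loop puts time.Sleep in the for statement's post clause, so the first verification runs immediately and each re-check is spaced 10 seconds apart until startTimeout elapses. A standalone sketch of the idiom, with countRunning as a hypothetical stand-in for the test's waitForPods and an illustrative expected count:

	package main

	import (
		"fmt"
		"time"
	)

	// countRunning is a stand-in; the test's waitForPods queries the
	// kubelet for running pods, which is not reproduced here.
	func countRunning() int { return 0 }

	func main() {
		const (
			startTimeout = 3 * time.Minute
			want         = 100 // illustrative; the test uses numAllPods
		)
		// The post statement runs between iterations, so the first
		// check happens immediately and later checks are 10s apart.
		for start := time.Now(); time.Since(start) < startTimeout; time.Sleep(10 * time.Second) {
			if got := countRunning(); got < want {
				fmt.Printf("fewer pods running than expected: got %d, want %d\n", got, want)
			}
		}
	}

Note that in the test, framework.Failf aborts the spec on the first shortfall, so the loop's net effect is to keep re-verifying that the pod count holds steady for the full startTimeout window rather than checking it only once.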