Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-25 12:43:23 +00:00)
Set startTimeout back to 3m, restore wait loop at end of test
commit 6ddf86d422
parent b4a8861af3
@@ -73,7 +73,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
 		podCount            = 100
 		podCreationInterval = 100 * time.Millisecond
 		recoverTimeout      = 5 * time.Minute
-		startTimeout        = 5 * time.Minute
+		startTimeout        = 3 * time.Minute
 		// restartCount is chosen so even with minPods we exhaust the default
 		// allocation of a /24.
 		minPods = 50
@@ -203,10 +203,12 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
 			// restart may think these old pods are consuming CPU and we
 			// will get an OutOfCpu error.
 			ginkgo.By("verifying restartNever pods succeed and restartAlways pods stay running")
+			for start := time.Now(); time.Since(start) < startTimeout; time.Sleep(10 * time.Second) {
 				postRestartRunningPods := waitForPods(f, numAllPods, recoverTimeout)
 				if len(postRestartRunningPods) < numAllPods {
 					framework.Failf("less pods are running after node restart, got %d but expected %d", len(postRestartRunningPods), numAllPods)
 				}
+			}
 		})
 	})
 })
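The restored loop is what makes the check meaningful: a single waitForPods call only proves the pods came back once, while re-checking every 10 seconds for the full startTimeout window catches pods that stop running shortly after the node restart. Below is a minimal, self-contained sketch (not part of the commit) of that poll-until-timeout idiom; checkRunningPods and the constants are hypothetical stand-ins for the test's waitForPods helper and framework plumbing.

// Sketch of the poll-until-timeout idiom used by the restored loop. The for
// statement's "post" clause sleeps between iterations, so the condition is
// re-evaluated every pollInterval until startTimeout elapses.
package main

import (
	"fmt"
	"time"
)

const (
	startTimeout = 3 * time.Minute
	pollInterval = 10 * time.Second
	numAllPods   = 10
)

// checkRunningPods is a hypothetical stand-in for
// waitForPods(f, numAllPods, recoverTimeout); the real helper queries the
// kubelet for the pods that are actually running.
func checkRunningPods() int {
	return numAllPods
}

func main() {
	for start := time.Now(); time.Since(start) < startTimeout; time.Sleep(pollInterval) {
		if running := checkRunningPods(); running < numAllPods {
			// The real test calls framework.Failf here, which aborts the spec.
			fmt.Printf("less pods are running after node restart, got %d but expected %d\n", running, numAllPods)
			return
		}
	}
	fmt.Println("pod count held for the full startTimeout window")
}

The loop exits successfully only after startTimeout has elapsed with every check passing, which is why the commit also pins startTimeout back to 3 minutes: it bounds how long the test keeps re-verifying the pods.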