mirror of https://github.com/k3s-io/kubernetes.git

Make e2e pod start timeouts uniform

commit 083f29158f
parent 2c286fea4c
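
The pattern is the same in every hunk below: the ad-hoc per-call timeouts (60, 180, or 300 seconds, plus kubectl's validateTimeout) disappear, the tests call plain waitForPodRunning / waitForPodNotPending / waitForPodSuccess, and a single shared podStartTimeout plus a generic waitForPodCondition helper take over the polling. The following standalone Go sketch only illustrates that shape; its names (startTimeout, condition, waitForCondition, the fake phase source) are placeholders, not the repository's identifiers — the real helpers appear in the last hunks of the diff.

// Illustrative sketch only: a condition-polling wait with one shared timeout,
// mirroring the shape of the helper this commit introduces.
package main

import (
	"fmt"
	"time"
)

const startTimeout = 5 * time.Minute // single budget shared by all start-up waits

type condition func() (done bool, err error)

// waitForCondition polls c every poll interval until it reports done or the
// shared startTimeout elapses, the same loop shape as the diff below.
func waitForCondition(desc string, poll time.Duration, c condition) error {
	for start := time.Now(); time.Since(start) < startTimeout; time.Sleep(poll) {
		done, err := c()
		if done {
			return err // nil on success, non-nil for a terminal failure
		}
		fmt.Printf("still waiting for %s (%v elapsed)\n", desc, time.Since(start).Round(time.Second))
	}
	return fmt.Errorf("gave up waiting for %s after %v", desc, startTimeout)
}

func main() {
	begin := time.Now()
	// Fake pod-phase source: pretend the pod reaches Running after ~3 seconds.
	err := waitForCondition("pod running", time.Second, func() (bool, error) {
		return time.Since(begin) > 3*time.Second, nil
	})
	fmt.Println("result:", err)
}
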
@@ -79,9 +79,7 @@ var _ = Describe("Events", func() {
 			Failf("Failed to create pod: %v", err)
 		}
 
-		By("waiting for the pod to start running")
-		err := waitForPodRunning(c, pod.Name, 300*time.Second)
-		Expect(err).NotTo(HaveOccurred())
+		expectNoError(waitForPodRunning(c, pod.Name))
 
 		By("verifying the pod is in kubernetes")
 		pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})))
@@ -35,7 +35,6 @@ const (
 	kittenImage = "kubernetes/update-demo:kitten"
 	updateDemoSelector = "name=update-demo"
 	updateDemoContainer = "update-demo"
-	validateTimeout = 10 * time.Minute // TODO: Make this 30 seconds once #4566 is resolved.
 	kubectlProxyPort = 8011
 )
 
@@ -120,7 +119,7 @@ func validateController(c *client.Client, image string, replicas int) {
 	getImageTemplate := fmt.Sprintf(`--template={{(index .currentState.info "%s").image}}`, updateDemoContainer)
 
 	By(fmt.Sprintf("waiting for all containers in %s pods to come up.", updateDemoSelector))
-	for start := time.Now(); time.Since(start) < validateTimeout; time.Sleep(5 * time.Second) {
+	for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) {
 		getPodsOutput := runKubectl("get", "pods", "-o", "template", getPodsTemplate, "-l", updateDemoSelector)
 		pods := strings.Fields(getPodsOutput)
 		if numPods := len(pods); numPods != replicas {
@@ -76,8 +76,7 @@ var _ = Describe("PD", func() {
 		_, err := podClient.Create(host0Pod)
 		expectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
 
-		By("waiting up to 180 seconds for host0Pod to start running")
-		expectNoError(waitForPodRunning(c, host0Pod.Name, 180*time.Second), "host0Pod not running after 180 seconds")
+		expectNoError(waitForPodRunning(c, host0Pod.Name))
 
 		By("deleting host0Pod")
 		expectNoError(podClient.Delete(host0Pod.Name), "Failed to delete host0Pod")
@@ -86,8 +85,7 @@ var _ = Describe("PD", func() {
 		_, err = podClient.Create(host1Pod)
 		expectNoError(err, "Failed to create host1Pod")
 
-		By("waiting up to 180 seconds for host1Pod to start running")
-		expectNoError(waitForPodRunning(c, host1Pod.Name, 180*time.Second), "host1Pod not running after 180 seconds")
+		expectNoError(waitForPodRunning(c, host1Pod.Name))
 
 		By("deleting host1Pod")
 		expectNoError(podClient.Delete(host1Pod.Name), "Failed to delete host1Pod")
@@ -128,7 +126,7 @@ var _ = Describe("PD", func() {
 		By("submitting rwPod to ensure PD is formatted")
 		_, err := podClient.Create(rwPod)
 		expectNoError(err, "Failed to create rwPod")
-		expectNoError(waitForPodRunning(c, rwPod.Name, 180*time.Second), "rwPod not running after 180 seconds")
+		expectNoError(waitForPodRunning(c, rwPod.Name))
 		expectNoError(podClient.Delete(rwPod.Name), "Failed to delete host0Pod")
 
 		By("submitting host0ROPod to kubernetes")
@@ -139,11 +137,9 @@ var _ = Describe("PD", func() {
 		_, err = podClient.Create(host1ROPod)
 		expectNoError(err, "Failed to create host1ROPod")
 
-		By("waiting up to 180 seconds for host0ROPod to start running")
-		expectNoError(waitForPodRunning(c, host0ROPod.Name, 180*time.Second), "host0ROPod not running after 180 seconds")
+		expectNoError(waitForPodRunning(c, host0ROPod.Name))
 
-		By("waiting up to 180 seconds for host1ROPod to start running")
-		expectNoError(waitForPodRunning(c, host1ROPod.Name, 180*time.Second), "host1ROPod not running after 180 seconds")
+		expectNoError(waitForPodRunning(c, host1ROPod.Name))
 
 		By("deleting host0ROPod")
 		expectNoError(podClient.Delete(host0ROPod.Name), "Failed to delete host0ROPod")
@@ -47,8 +47,7 @@ func runLivenessTest(c *client.Client, podDescr *api.Pod) {
 	// Wait until the pod is not pending. (Here we need to check for something other than
 	// 'Pending' other than checking for 'Running', since when failures occur, we go to
 	// 'Terminated' which can cause indefinite blocking.)
-	By("waiting for the pod to be something other than pending")
-	expectNoError(waitForPodNotPending(c, ns, podDescr.Name, 60*time.Second),
+	expectNoError(waitForPodNotPending(c, ns, podDescr.Name),
 		fmt.Sprintf("starting pod %s in namespace %s", podDescr.Name, ns))
 	By(fmt.Sprintf("Started pod %s in namespace %s", podDescr.Name, ns))
 
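
The comment in the hunk above carries the one subtle piece of reasoning in this diff: a pod whose container keeps failing can leave Pending and land in a terminal phase almost immediately, so a wait that only accepts Running can block for the whole timeout. Treating any departure from Pending as "done" avoids that. A standalone illustration of just that check follows; the phase type and leftPending function are placeholders, not the repository's api package.

// Illustrative only: the "no longer pending" check described in the comment
// above, with a local phase type standing in for the repository's api package.
package main

import "fmt"

type phase string

const (
	pending    phase = "Pending"
	running    phase = "Running"
	terminated phase = "Terminated"
)

// leftPending reports done for any phase other than Pending, so a pod that
// fails fast and ends up Terminated still unblocks the caller; a check for
// Running alone would wait out the full timeout in that case.
func leftPending(p phase) bool { return p != pending }

func main() {
	for _, p := range []phase{pending, running, terminated} {
		fmt.Printf("phase=%-10s done=%v\n", p, leftPending(p))
	}
}
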
@@ -190,8 +189,7 @@ var _ = Describe("Pods", func() {
 				Fail(fmt.Sprintf("Failed to create pod: %v", err))
 			}
 
-			By("waiting for the pod to start running")
-			expectNoError(waitForPodRunning(c, pod.Name, 300*time.Second))
+			expectNoError(waitForPodRunning(c, pod.Name))
 
 			By("verifying the pod is in kubernetes")
 			pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})))
@@ -213,8 +211,7 @@ var _ = Describe("Pods", func() {
 				Fail(fmt.Sprintf("Failed to update pod: %v", err))
 			}
 
-			By("waiting for the updated pod to start running")
-			expectNoError(waitForPodRunning(c, pod.Name, 300*time.Second))
+			expectNoError(waitForPodRunning(c, pod.Name))
 
 			By("verifying the updated pod is in kubernetes")
 			pods, err = podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})))
@@ -246,7 +243,7 @@ var _ = Describe("Pods", func() {
 			if err != nil {
 				Fail(fmt.Sprintf("Failed to create serverPod: %v", err))
 			}
-			expectNoError(waitForPodRunning(c, serverPod.Name, 300*time.Second))
+			expectNoError(waitForPodRunning(c, serverPod.Name))
 
 			// This service exposes port 8080 of the test pod as a service on port 8765
 			// TODO(filbranden): We would like to use a unique service name such as:
@@ -305,8 +302,7 @@ var _ = Describe("Pods", func() {
 				Fail(fmt.Sprintf("Failed to create pod: %v", err))
 			}
 
 			// Wait for client pod to complete.
-			expectNoError(waitForPodRunning(c, clientPod.Name, 60*time.Second))
+			expectNoError(waitForPodRunning(c, clientPod.Name))
 
 			// Grab its logs. Get host first.
 			clientPodStatus, err := c.Pods(api.NamespaceDefault).Get(clientPod.Name)
@@ -133,7 +133,7 @@ func ServeImageOrFail(c *client.Client, test string, image string) {
 	// Wait for the pods to enter the running state. Waiting loops until the pods
 	// are running so non-running pods cause a timeout for this test.
 	for _, pod := range pods.Items {
-		err = waitForPodRunning(c, pod.Name, 300*time.Second)
+		err = waitForPodRunning(c, pod.Name)
 		Expect(err).NotTo(HaveOccurred())
 	}
 
@@ -115,7 +115,7 @@ var _ = Describe("Secrets", func() {
 				Failf("Failed to create pod: %v", err)
 			}
 			// Wait for client pod to complete.
-			expectNoError(waitForPodRunning(c, clientPod.Name, 60*time.Second))
+			expectNoError(waitForPodRunning(c, clientPod.Name))
 
 			// Grab its logs. Get host first.
 			clientPodStatus, err := c.Pods(ns).Get(clientPod.Name)
@@ -116,12 +116,10 @@ var _ = Describe("Services", func() {
 			Failf("Failed to create %s pod: %v", pod.Name, err)
 		}
 
-		By("waiting for the pod to start running")
-		err := waitForPodRunning(c, pod.Name, 300*time.Second)
-		Expect(err).NotTo(HaveOccurred())
+		expectNoError(waitForPodRunning(c, pod.Name))
 
 		By("retrieving the pod")
-		pod, err = podClient.Get(pod.Name)
+		pod, err := podClient.Get(pod.Name)
 		if err != nil {
 			Failf("Failed to get pod %s: %v", pod.Name, err)
 		}
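
One change in the Services hunk above is purely mechanical rather than timeout-related: once the removed err := waitForPodRunning(...) line no longer declares err, the later podClient.Get call becomes the first use of that variable, so the plain assignment pod, err = ... has to become the short variable declaration pod, err := ... for the file to compile.
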
@@ -31,6 +31,12 @@ import (
 	. "github.com/onsi/gomega"
 )
 
+const (
+	// Initial pod start can be delayed O(minutes) by slow docker pulls
+	// TODO: Make this 30 seconds once #4566 is resolved.
+	podStartTimeout = 5 * time.Minute
+)
+
 type testContextType struct {
 	authConfig string
 	certDir string
@@ -50,55 +56,45 @@ func Failf(format string, a ...interface{}) {
 	Fail(fmt.Sprintf(format, a...), 1)
 }
 
-func waitForPodRunning(c *client.Client, id string, tryFor time.Duration) error {
-	trySecs := int(tryFor.Seconds())
-	for i := 0; i <= trySecs; i += 5 {
-		time.Sleep(5 * time.Second)
-		pod, err := c.Pods(api.NamespaceDefault).Get(id)
-		if err != nil {
-			return fmt.Errorf("Get pod %s failed: %v", id, err.Error())
-		}
-		if pod.Status.Phase == api.PodRunning {
-			return nil
-		}
-		Logf("Waiting for pod %s status to be %q (found %q) (%d secs)", id, api.PodRunning, pod.Status.Phase, i)
-	}
-	return fmt.Errorf("Gave up waiting for pod %s to be running after %d seconds", id, trySecs)
-}
+type podCondition func(pod *api.Pod) (bool, error)
 
-// waitForPodNotPending returns false if it took too long for the pod to go out of pending state.
-func waitForPodNotPending(c *client.Client, ns, podName string, tryFor time.Duration) error {
-	trySecs := int(tryFor.Seconds())
-	for i := 0; i <= trySecs; i += 5 {
-		if i > 0 {
-			time.Sleep(5 * time.Second)
-		}
+func waitForPodCondition(c *client.Client, ns, podName, desc string, condition podCondition) error {
+	By(fmt.Sprintf("waiting up to %v for pod %s status to be %s", podStartTimeout, podName, desc))
+	for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) {
 		pod, err := c.Pods(ns).Get(podName)
 		if err != nil {
 			Logf("Get pod %s in namespace %s failed, ignoring for 5s: %v", podName, ns, err)
 			continue
 		}
-		if pod.Status.Phase != api.PodPending {
-			Logf("Saw pod %s in namespace %s out of pending state (found %q)", podName, ns, pod.Status.Phase)
-			return nil
-		}
-		Logf("Waiting for status of pod %s in namespace %s to be !%q (found %q) (%v secs)", podName, ns, api.PodPending, pod.Status.Phase, i)
-	}
-	return fmt.Errorf("Gave up waiting for status of pod %s in namespace %s to go out of pending after %d seconds", podName, ns, trySecs)
-}
-
-// waitForPodSuccess returns true if the pod reached state success, or false if it reached failure or ran too long.
-func waitForPodSuccess(c *client.Client, podName string, contName string, tryFor time.Duration) error {
-	trySecs := int(tryFor.Seconds())
-	for i := 0; i <= trySecs; i += 5 {
-		if i > 0 {
-			time.Sleep(5 * time.Second)
-		}
-		pod, err := c.Pods(api.NamespaceDefault).Get(podName)
-		if err != nil {
-			Logf("Get pod failed, ignoring for 5s: %v", err)
-			continue
-		}
+		done, err := condition(pod)
+		if done {
+			return err
+		}
+		Logf("Waiting for pod %s status to be %q (found %q) (%d secs)", podName, api.PodRunning, pod.Status.Phase, time.Since(start).Seconds())
+	}
+	return fmt.Errorf("gave up waiting for pod %s to be %s after %v", podName, desc, podStartTimeout)
+}
+
+func waitForPodRunning(c *client.Client, podName string) error {
+	return waitForPodCondition(c, api.NamespaceDefault, podName, "running", func(pod *api.Pod) (bool, error) {
+		return (pod.Status.Phase == api.PodRunning), nil
+	})
+}
+
+// waitForPodNotPending returns an error if it took too long for the pod to go out of pending state.
+func waitForPodNotPending(c *client.Client, ns, podName string) error {
+	return waitForPodCondition(c, ns, podName, "!pending", func(pod *api.Pod) (bool, error) {
+		if pod.Status.Phase != api.PodPending {
+			Logf("Saw pod %s in namespace %s out of pending state (found %q)", podName, ns, pod.Status.Phase)
+			return true, nil
+		}
+		return false, nil
+	})
+}
+
+// waitForPodSuccess returns nil if the pod reached state success, or an error if it reached failure or ran too long.
+func waitForPodSuccess(c *client.Client, podName string, contName string) error {
+	return waitForPodCondition(c, api.NamespaceDefault, podName, "success or failure", func(pod *api.Pod) (bool, error) {
 		// Cannot use pod.Status.Phase == api.PodSucceeded/api.PodFailed due to #2632
 		ci, ok := pod.Status.Info[contName]
 		if !ok {
@@ -107,17 +103,17 @@ func waitForPodSuccess(c *client.Client, podName string, contName string, tryFor
 			if ci.State.Termination != nil {
 				if ci.State.Termination.ExitCode == 0 {
 					By("Saw pod success")
-					return nil
+					return true, nil
 				} else {
-					Logf("Saw pod failure: %+v", ci.State.Termination)
+					return true, fmt.Errorf("pod %s terminated with failure: %+v", podName, ci.State.Termination)
 				}
-				Logf("Waiting for pod %q status to be success or failure", podName)
 			} else {
 				Logf("Nil State.Termination for container %s in pod %s so far", contName, podName)
 			}
 		}
-	}
-	return fmt.Errorf("Gave up waiting for pod %q status to be success or failure after %d seconds", podName, trySecs)
+		return false, nil
+	})
 }
 
 func loadConfig() (*client.Config, error) {
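
Worth spelling out is the contract the new podCondition closures follow, visible in the waitForPodSuccess body above: return (true, nil) to stop the wait successfully, (true, err) to stop it immediately with a terminal failure, and (false, nil) to keep polling. A self-contained sketch of that three-way contract follows; the containerState type and evaluate function are illustrative stand-ins, not the repository's code.

// Illustrative only: the three-way (done, err) contract used by the new
// condition closures. A terminal failure stops the wait with an error,
// success stops it with nil, and anything else keeps the poll loop going.
package main

import (
	"errors"
	"fmt"
)

type containerState struct {
	terminated bool
	exitCode   int
}

// evaluate mirrors the success-or-failure condition: done with nil error on
// exit code 0, done with an error on a non-zero exit, not done otherwise.
func evaluate(cs containerState) (done bool, err error) {
	if !cs.terminated {
		return false, nil // still running (or not started): keep polling
	}
	if cs.exitCode == 0 {
		return true, nil // success: stop waiting, no error
	}
	return true, errors.New("container terminated with failure") // terminal failure: stop waiting
}

func main() {
	for _, cs := range []containerState{
		{terminated: false},
		{terminated: true, exitCode: 0},
		{terminated: true, exitCode: 2},
	} {
		done, err := evaluate(cs)
		fmt.Printf("state=%+v done=%v err=%v\n", cs, done, err)
	}
}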