From 3f04421d7bf5b1bd7a83ee02e322673ded4dd207 Mon Sep 17 00:00:00 2001
From: Clayton Coleman
Date: Fri, 24 Feb 2017 14:12:47 -0500
Subject: [PATCH] Pod deletion observation is flaking, increase timeout and
 debug more

We can afford to wait longer than 30 seconds, and we should be printing
more error and output information about the cause of the failure.
---
 test/e2e/common/pods.go | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go
index a90f369ceac..1a3ef6b5257 100644
--- a/test/e2e/common/pods.go
+++ b/test/e2e/common/pods.go
@@ -233,18 +233,21 @@ var _ = framework.KubeDescribe("Pods", func() {
 
 		By("verifying pod deletion was observed")
 		deleted := false
-		timeout := false
 		var lastPod *v1.Pod
-		timer := time.After(30 * time.Second)
-		for !deleted && !timeout {
+		timer := time.After(2 * time.Minute)
+		for !deleted {
 			select {
 			case event, _ := <-w.ResultChan():
-				if event.Type == watch.Deleted {
+				switch event.Type {
+				case watch.Deleted:
 					lastPod = event.Object.(*v1.Pod)
 					deleted = true
+				case watch.Error:
+					framework.Logf("received a watch error: %v", event.Object)
+					Fail("watch closed with error")
 				}
 			case <-timer:
-				timeout = true
+				Fail("timed out waiting for pod deletion")
 			}
 		}
 		if !deleted {
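
Note (not part of the patch): the hunk above boils down to a watch-with-deadline loop over a Kubernetes watch channel. Below is a minimal standalone sketch of that same pattern outside the Ginkgo e2e framework; the package and function names are illustrative, and it assumes only the k8s.io/apimachinery watch package and k8s.io/api/core/v1 for the Pod type.

package podwatch

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/watch"
)

// WaitForPodDeletion blocks until the watch delivers a Deleted event and
// returns the deleted pod, or returns an error on a watch error, a closed
// result channel, or the deadline expiring.
func WaitForPodDeletion(w watch.Interface, timeout time.Duration) (*v1.Pod, error) {
	timer := time.After(timeout)
	for {
		select {
		case event, ok := <-w.ResultChan():
			if !ok {
				return nil, fmt.Errorf("watch channel closed before deletion was observed")
			}
			switch event.Type {
			case watch.Deleted:
				// The deleted object is delivered with the event itself.
				pod, ok := event.Object.(*v1.Pod)
				if !ok {
					return nil, fmt.Errorf("unexpected object type in Deleted event: %T", event.Object)
				}
				return pod, nil
			case watch.Error:
				return nil, fmt.Errorf("watch closed with error: %v", event.Object)
			}
		case <-timer:
			return nil, fmt.Errorf("timed out after %s waiting for pod deletion", timeout)
		}
	}
}

Returning an error instead of calling Fail keeps the helper reusable; in the e2e test the patch lets the framework abort directly, which is the right call inside a Ginkgo spec.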