From 9aef074e598f7f6fef137f4c3e0e5f58c3c15a49 Mon Sep 17 00:00:00 2001
From: Bowei Du
Date: Mon, 21 Nov 2016 11:54:13 -0800
Subject: [PATCH] Update the timeout in CLOSE_WAIT e2e test

I see some test flakes due to the timeout being too strict, so update it
to a larger value.

Also add tail -n 1: it looks like there may be leftover conntrack state
from other runs, and we really only care about one of the CLOSE_WAIT
entries.
---
 test/e2e/kube_proxy.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/test/e2e/kube_proxy.go b/test/e2e/kube_proxy.go
index 52f04ca5ae0..ec9aa64a650 100644
--- a/test/e2e/kube_proxy.go
+++ b/test/e2e/kube_proxy.go
@@ -180,7 +180,8 @@ var _ = framework.KubeDescribe("Network", func() {
 			fmt.Sprintf(
 				"sudo cat /proc/net/ip_conntrack "+
 					"| grep 'CLOSE_WAIT.*dst=%v.*dport=%v' "+
-					"| awk '{print $3}'",
+					"| tail -n 1"+
+					"| awk '{print $3}' ",
 				serverNodeInfo.nodeIp,
 				testDaemonTcpPort),
 			framework.TestContext.Provider,
@@ -193,7 +194,7 @@ var _ = framework.KubeDescribe("Network", func() {
 		// These must be synchronized from the default values set in
 		// pkg/apis/../defaults.go ConntrackTCPCloseWaitTimeout. The
 		// current defaults are hidden in the initialization code.
-		const epsilonSeconds = 10
+		const epsilonSeconds = 60
 		const expectedTimeoutSeconds = 60 * 60

 		framework.Logf("conntrack entry timeout was: %v, expected: %v",
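
For context, here is a minimal sketch (not from kube_proxy.go; the helper name and parsing details are assumptions) of the check this patch loosens: the shell pipeline prints the remaining conntrack timeout in seconds (third field of the CLOSE_WAIT line), and the test accepts any value within epsilonSeconds of the expected ConntrackTCPCloseWaitTimeout.

// Illustrative sketch only; checkCloseWaitTimeout is a hypothetical helper,
// not the actual test code.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func checkCloseWaitTimeout(timeoutOutput string) error {
	// Values mirror the constants in the patched test.
	const epsilonSeconds = 60
	const expectedTimeoutSeconds = 60 * 60

	// The pipeline output is assumed to be the timeout in seconds,
	// e.g. "3599\n".
	timeoutSeconds, err := strconv.Atoi(strings.TrimSpace(timeoutOutput))
	if err != nil {
		return fmt.Errorf("parsing conntrack timeout %q: %v", timeoutOutput, err)
	}

	// Accept any value within epsilonSeconds of the expected timeout; the
	// patch widens epsilon from 10s to 60s to reduce flakes.
	if timeoutSeconds < expectedTimeoutSeconds-epsilonSeconds ||
		timeoutSeconds > expectedTimeoutSeconds+epsilonSeconds {
		return fmt.Errorf("conntrack timeout %ds outside [%d, %d]s",
			timeoutSeconds,
			expectedTimeoutSeconds-epsilonSeconds,
			expectedTimeoutSeconds+epsilonSeconds)
	}
	return nil
}

func main() {
	fmt.Println(checkCloseWaitTimeout("3599\n")) // <nil>
	fmt.Println(checkCloseWaitTimeout("120\n"))  // error: outside range
}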