Merge pull request #35919 from bowei/issue-32551

Automatic merge from submit-queue

Adds TCPCloseWaitTimeout option to kube-proxy for sysctl nf_conntrack_tcp_timeout_close_wait

Fixes issue #32551
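
For context (not part of the diff below), the kernel exposes this timeout through procfs. A minimal Go sketch, assuming the nf_conntrack module is loaded, that reads the value this option tunes:

package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

func main() {
	// The sysctl file behind --conntrack-tcp-timeout-close-wait; the
	// stored value is a number of seconds.
	const path = "/proc/sys/net/netfilter/nf_conntrack_tcp_timeout_close_wait"
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		panic(err)
	}
	fmt.Printf("CLOSE_WAIT conntrack timeout: %s seconds\n",
		strings.TrimSpace(string(raw)))
}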
Kubernetes Submit Queue 2016-11-04 00:01:32 -07:00 committed by GitHub
commit 3dec88d920
18 changed files with 350 additions and 12634 deletions


@@ -27,24 +27,33 @@ import (
"k8s.io/kubernetes/pkg/util/sysctl"
)
// Conntracker is an interface to the global sysctl. Descriptions of the various
// sysctl fields can be found here:
//
// https://www.kernel.org/doc/Documentation/networking/nf_conntrack-sysctl.txt
type Conntracker interface {
// SetMax adjusts nf_conntrack_max.
SetMax(max int) error
// SetTCPEstablishedTimeout adjusts nf_conntrack_tcp_timeout_established.
SetTCPEstablishedTimeout(seconds int) error
// SetTCPCloseWaitTimeout adjusts nf_conntrack_tcp_timeout_close_wait.
SetTCPCloseWaitTimeout(seconds int) error
}
type realConntracker struct{}
var readOnlySysFSError = errors.New("readOnlySysFS")
func (realConntracker) SetMax(max int) error {
glog.Infof("Setting nf_conntrack_max to %d", max)
if err := sysctl.New().SetSysctl("net/netfilter/nf_conntrack_max", max); err != nil {
func (rct realConntracker) SetMax(max int) error {
if err := rct.setIntSysCtl("nf_conntrack_max", max); err != nil {
return err
}
// sysfs is expected to be mounted as 'rw'. However, it may be unexpectedly mounted as
// 'ro' by docker because of a known docker issue (https://github.com/docker/docker/issues/24000).
// Setting conntrack will fail when sysfs is readonly. When that happens, we don't set conntrack
// hashsize and return a special error readOnlySysFSError here. The caller should deal with
// sysfs is expected to be mounted as 'rw'. However, it may be
// unexpectedly mounted as 'ro' by docker because of a known docker
// issue (https://github.com/docker/docker/issues/24000). Setting
// conntrack will fail when sysfs is readonly. When that happens, we
// don't set conntrack hashsize and return a special error
// readOnlySysFSError here. The caller should deal with
// readOnlySysFSError differently.
writable, err := isSysFSWritable()
if err != nil {
@@ -58,9 +67,22 @@ func (realConntracker) SetMax(max int) error {
return writeIntStringFile("/sys/module/nf_conntrack/parameters/hashsize", max/4)
}
func (realConntracker) SetTCPEstablishedTimeout(seconds int) error {
glog.Infof("Setting nf_conntrack_tcp_timeout_established to %d", seconds)
return sysctl.New().SetSysctl("net/netfilter/nf_conntrack_tcp_timeout_established", seconds)
func (rct realConntracker) SetTCPEstablishedTimeout(seconds int) error {
return rct.setIntSysCtl("nf_conntrack_tcp_timeout_established", seconds)
}
func (rct realConntracker) SetTCPCloseWaitTimeout(seconds int) error {
return rct.setIntSysCtl("nf_conntrack_tcp_timeout_close_wait", seconds)
}
func (realConntracker) setIntSysCtl(name string, value int) error {
entry := "net/netfilter/" + name
glog.Infof("Set sysctl '%v' to %v", entry, value)
if err := sysctl.New().SetSysctl(entry, value); err != nil {
return err
}
return nil
}
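
Because the sysctl writes sit behind the Conntracker interface, callers can be exercised without touching the kernel. A minimal sketch of a fake (the names here are illustrative, not from this PR):

// fakeConntracker records the values kube-proxy would have written.
type fakeConntracker struct {
	max, establishedSeconds, closeWaitSeconds int
}

var _ Conntracker = &fakeConntracker{}

func (f *fakeConntracker) SetMax(max int) error { f.max = max; return nil }

func (f *fakeConntracker) SetTCPEstablishedTimeout(seconds int) error {
	f.establishedSeconds = seconds
	return nil
}

func (f *fakeConntracker) SetTCPCloseWaitTimeout(seconds int) error {
	f.closeWaitSeconds = seconds
	return nil
}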
// isSysFSWritable checks /proc/mounts to see whether sysfs is 'rw' or not.
@@ -73,18 +95,23 @@ func isSysFSWritable() (bool, error) {
glog.Errorf("failed to list mount points: %v", err)
return false, err
}
for _, mountPoint := range mountPoints {
const sysfsDevice = "sysfs"
if mountPoint.Device != sysfsDevice {
continue
}
// Check whether sysfs is 'rw'
const permWritable = "rw"
if len(mountPoint.Opts) > 0 && mountPoint.Opts[0] == permWritable {
return true, nil
}
glog.Errorf("sysfs is not writable: %+v", mountPoint)
break
glog.Errorf("sysfs is not writable: %+v (mount options are %v)",
mountPoint, mountPoint.Opts)
return false, readOnlySysFSError
}
return false, nil
return false, errors.New("No sysfs mounted")
}
func writeIntStringFile(filename string, value int) error {


@@ -94,5 +94,10 @@ func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {
fs.Int32Var(&s.ConntrackMin, "conntrack-min", s.ConntrackMin,
"Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is).")
fs.DurationVar(&s.ConntrackTCPEstablishedTimeout.Duration, "conntrack-tcp-timeout-established", s.ConntrackTCPEstablishedTimeout.Duration, "Idle timeout for established TCP connections (0 to leave as-is)")
fs.DurationVar(
&s.ConntrackTCPCloseWaitTimeout.Duration, "conntrack-tcp-timeout-close-wait",
s.ConntrackTCPCloseWaitTimeout.Duration,
"NAT timeout for TCP connections in the CLOSE_WAIT state")
config.DefaultFeatureGate.AddFlag(fs)
}
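
The new flag is a standard pflag duration, so values such as '1h' or '90m' parse directly into a time.Duration. A standalone sketch of that behavior (the flag name matches the diff; the default value and the wiring here are assumptions for illustration):

package main

import (
	"fmt"
	"time"

	"github.com/spf13/pflag"
)

func main() {
	var closeWait time.Duration
	fs := pflag.NewFlagSet("kube-proxy", pflag.ExitOnError)
	// Mirrors the flag registered above; the 1h default is illustrative.
	fs.DurationVar(&closeWait, "conntrack-tcp-timeout-close-wait",
		1*time.Hour, "NAT timeout for TCP connections in the CLOSE_WAIT state")
	_ = fs.Parse([]string{"--conntrack-tcp-timeout-close-wait=90m"})
	fmt.Println(closeWait) // 1h30m0s
}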


@@ -316,12 +316,22 @@ func (s *ProxyServer) Run() error {
// administrator should decide whether and how to handle this issue,
// whether to drain the node and restart docker.
// TODO(random-liu): Remove this when the docker bug is fixed.
const message = "DOCKER RESTART NEEDED (docker issue #24000): /sys is read-only: can't raise conntrack limits, problems may arise later."
const message = "DOCKER RESTART NEEDED (docker issue #24000): /sys is read-only: " +
"cannot modify conntrack limits, problems may arise later."
s.Recorder.Eventf(s.Config.NodeRef, api.EventTypeWarning, err.Error(), message)
}
}
if s.Config.ConntrackTCPEstablishedTimeout.Duration > 0 {
if err := s.Conntracker.SetTCPEstablishedTimeout(int(s.Config.ConntrackTCPEstablishedTimeout.Duration / time.Second)); err != nil {
timeout := int(s.Config.ConntrackTCPEstablishedTimeout.Duration / time.Second)
if err := s.Conntracker.SetTCPEstablishedTimeout(timeout); err != nil {
return err
}
}
if s.Config.ConntrackTCPCloseWaitTimeout.Duration > 0 {
timeout := int(s.Config.ConntrackTCPCloseWaitTimeout.Duration / time.Second)
if err := s.Conntracker.SetTCPCloseWaitTimeout(timeout); err != nil {
return err
}
}


@@ -90,21 +90,22 @@ concurrent-gc-syncs
concurrent-namespace-syncs
concurrent-replicaset-syncs
concurrent-resource-quota-syncs
concurrent-service-syncs
concurrent-serviceaccount-token-syncs
concurrent-service-syncs
config-sync-period
configure-cloud-routes
conntrack-max
conntrack-max-per-core
conntrack-min
conntrack-tcp-timeout-close-wait
conntrack-tcp-timeout-established
consumer-port
consumer-service-name
consumer-service-namespace
contain-pod-resources
container-port
container-runtime
container-runtime-endpoint
contain-pod-resources
controller-start-interval
cors-allowed-origins
cpu-cfs-quota


@@ -16,7 +16,6 @@ go_library(
"doc.go",
"helpers.go",
"register.go",
"types.generated.go",
"types.go",
"zz_generated.deepcopy.go",
],
@@ -27,7 +26,6 @@
"//pkg/runtime:go_default_library",
"//pkg/util/config:go_default_library",
"//pkg/util/net:go_default_library",
"//vendor:github.com/ugorji/go/codec",
],
)

File diff suppressed because it is too large


@@ -75,8 +75,12 @@ type KubeProxyConfiguration struct {
// regardless of conntrackMaxPerCore (set conntrackMaxPerCore=0 to leave the limit as-is).
ConntrackMin int32 `json:"conntrackMin"`
// conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open
// (e.g. '250ms', '2s'). Must be greater than 0.
// (e.g. '2s'). Must be greater than 0.
ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"`
// conntrackTCPCloseWaitTimeout is how long an idle conntrack entry
// in CLOSE_WAIT state will remain in the conntrack
// table. (e.g. '60s'). Must be greater than 0 to set.
ConntrackTCPCloseWaitTimeout unversioned.Duration `json:"conntrackTCPCloseWaitTimeout"`
}
// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables'
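
Given the json tags above, a component config file can set the new field as a duration string. A self-contained sketch of that round-trip, using a local stand-in for unversioned.Duration (an assumption consistent with the '2s'/'60s' examples in the field docs):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Duration mimics unversioned.Duration: a time.Duration that unmarshals
// from a time.ParseDuration string.
type Duration struct{ time.Duration }

func (d *Duration) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	parsed, err := time.ParseDuration(s)
	if err != nil {
		return err
	}
	d.Duration = parsed
	return nil
}

func main() {
	var cfg struct {
		ConntrackTCPCloseWaitTimeout Duration `json:"conntrackTCPCloseWaitTimeout"`
	}
	raw := `{"conntrackTCPCloseWaitTimeout": "1h"}`
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.ConntrackTCPCloseWaitTimeout.Duration) // 1h0m0s
}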


@@ -101,6 +101,29 @@ func SetDefaults_KubeProxyConfiguration(obj *KubeProxyConfiguration) {
if obj.ConntrackTCPEstablishedTimeout == zero {
obj.ConntrackTCPEstablishedTimeout = unversioned.Duration{Duration: 24 * time.Hour} // 1 day (1/5 of the kernel's 5-day default)
}
if obj.ConntrackTCPCloseWaitTimeout == zero {
// See https://github.com/kubernetes/kubernetes/issues/32551.
//
// CLOSE_WAIT conntrack state occurs when the Linux kernel
// sees a FIN from the remote server. Note: this is a half-close
// condition that persists as long as the local side keeps the
// socket open. The condition is rare as it is typical in most
// protocols for both sides to issue a close; this typically
// occurs when the local socket is lazily garbage collected.
//
// If the CLOSE_WAIT conntrack entry expires, then FINs from the
// local socket will not be properly SNAT'd and will not reach the
// remote server (if the connection was subject to SNAT). If the
// remote timeouts for FIN_WAIT* states exceed the CLOSE_WAIT
// timeout, then there will be an inconsistency in the state of
// the connection and a new connection reusing the SNAT (src,
// port) pair may be rejected by the remote side with RST. This
// can cause new calls to connect(2) to return with ECONNREFUSED.
//
// We set CLOSE_WAIT to one hour by default to better match
// typical server timeouts.
obj.ConntrackTCPCloseWaitTimeout = unversioned.Duration{Duration: 1 * time.Hour}
}
}
func SetDefaults_KubeSchedulerConfiguration(obj *KubeSchedulerConfiguration) {
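
To make the scenario in the comment concrete, here is a minimal sketch (not from this PR) that parks a client socket in CLOSE_WAIT: the server closes its side, sending a FIN, while the client deliberately holds the connection open.

package main

import (
	"fmt"
	"io"
	"net"
	"time"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	go func() {
		conn, err := ln.Accept()
		if err != nil {
			return
		}
		conn.Close() // the server's close sends a FIN: a half-close
	}()

	conn, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	// Read until EOF so the client TCP stack has processed the FIN.
	buf := make([]byte, 1)
	if _, err := conn.Read(buf); err != nil && err != io.EOF {
		panic(err)
	}

	// The socket now sits in CLOSE_WAIT for as long as we hold it open;
	// `ss -tan` shows the state, and if the path is NATed, the conntrack
	// entry remains until the socket closes or the timeout expires.
	fmt.Println("holding half-closed connection:", conn.LocalAddr())
	time.Sleep(30 * time.Second)
	conn.Close()
}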


@@ -71,9 +71,13 @@ type KubeProxyConfiguration struct {
// conntrackMin is the minimum value of connect-tracking records to allocate,
// regardless of conntrackMaxPerCore (set conntrackMaxPerCore=0 to leave the limit as-is).
ConntrackMin int32 `json:"conntrackMin"`
// conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open
// (e.g. '250ms', '2s'). Must be greater than 0.
// conntrackTCPEstablishedTimeout is how long an idle TCP connection
// will be kept open (e.g. '2s'). Must be greater than 0.
ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"`
// conntrackTCPCloseWaitTimeout is how long an idle conntrack entry
// in CLOSE_WAIT state will remain in the conntrack
// table. (e.g. '60s'). Must be greater than 0 to set.
ConntrackTCPCloseWaitTimeout unversioned.Duration `json:"conntrackTCPCloseWaitTimeout"`
}
// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables'


@@ -80,6 +80,7 @@ func autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyCon
out.ConntrackMaxPerCore = in.ConntrackMaxPerCore
out.ConntrackMin = in.ConntrackMin
out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout
out.ConntrackTCPCloseWaitTimeout = in.ConntrackTCPCloseWaitTimeout
return nil
}
@@ -107,6 +108,7 @@ func autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyCon
out.ConntrackMaxPerCore = in.ConntrackMaxPerCore
out.ConntrackMin = in.ConntrackMin
out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout
out.ConntrackTCPCloseWaitTimeout = in.ConntrackTCPCloseWaitTimeout
return nil
}


@@ -83,6 +83,7 @@ func DeepCopy_v1alpha1_KubeProxyConfiguration(in interface{}, out interface{}, c
out.ConntrackMaxPerCore = in.ConntrackMaxPerCore
out.ConntrackMin = in.ConntrackMin
out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout
out.ConntrackTCPCloseWaitTimeout = in.ConntrackTCPCloseWaitTimeout
return nil
}
}


@@ -171,6 +171,7 @@ func DeepCopy_componentconfig_KubeProxyConfiguration(in interface{}, out interfa
out.ConntrackMaxPerCore = in.ConntrackMaxPerCore
out.ConntrackMin = in.ConntrackMin
out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout
out.ConntrackTCPCloseWaitTimeout = in.ConntrackTCPCloseWaitTimeout
return nil
}
}


@@ -1989,12 +1989,18 @@ var OpenAPIDefinitions *common.OpenAPIDefinitions = &common.OpenAPIDefinitions{
},
"conntrackTCPEstablishedTimeout": {
SchemaProps: spec.SchemaProps{
Description: "conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0.",
Description: "conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open (e.g. '2s'). Must be greater than 0.",
Ref: spec.MustCreateRef("#/definitions/unversioned.Duration"),
},
},
"conntrackTCPCloseWaitTimeout": {
SchemaProps: spec.SchemaProps{
Description: "conntrackTCPCloseWaitTimeout is how long an idle conntrack entry in CLOSE_WAIT state will remain in the conntrack table. (e.g. '60s'). Must be greater than 0 to set.",
Ref: spec.MustCreateRef("#/definitions/unversioned.Duration"),
},
},
},
Required: []string{"TypeMeta", "bindAddress", "clusterCIDR", "healthzBindAddress", "healthzPort", "hostnameOverride", "iptablesMasqueradeBit", "iptablesSyncPeriodSeconds", "kubeconfigPath", "masqueradeAll", "master", "oomScoreAdj", "mode", "portRange", "resourceContainer", "udpTimeoutMilliseconds", "conntrackMax", "conntrackMaxPerCore", "conntrackMin", "conntrackTCPEstablishedTimeout"},
Required: []string{"TypeMeta", "bindAddress", "clusterCIDR", "healthzBindAddress", "healthzPort", "hostnameOverride", "iptablesMasqueradeBit", "iptablesSyncPeriodSeconds", "kubeconfigPath", "masqueradeAll", "master", "oomScoreAdj", "mode", "portRange", "resourceContainer", "udpTimeoutMilliseconds", "conntrackMax", "conntrackMaxPerCore", "conntrackMin", "conntrackTCPEstablishedTimeout", "conntrackTCPCloseWaitTimeout"},
},
},
Dependencies: []string{
@@ -13717,12 +13723,18 @@ var OpenAPIDefinitions *common.OpenAPIDefinitions = &common.OpenAPIDefinitions{
},
"conntrackTCPEstablishedTimeout": {
SchemaProps: spec.SchemaProps{
Description: "conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0.",
Description: "conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open (e.g. '2s'). Must be greater than 0.",
Ref: spec.MustCreateRef("#/definitions/unversioned.Duration"),
},
},
"conntrackTCPCloseWaitTimeout": {
SchemaProps: spec.SchemaProps{
Description: "conntrackTCPCloseWaitTimeout is how long an idle conntrack entry in CLOSE_WAIT state will remain in the conntrack table. (e.g. '60s'). Must be greater than 0 to set.",
Ref: spec.MustCreateRef("#/definitions/unversioned.Duration"),
},
},
},
Required: []string{"TypeMeta", "bindAddress", "clusterCIDR", "healthzBindAddress", "healthzPort", "hostnameOverride", "iptablesMasqueradeBit", "iptablesSyncPeriodSeconds", "kubeconfigPath", "masqueradeAll", "master", "oomScoreAdj", "mode", "portRange", "resourceContainer", "udpTimeoutMilliseconds", "conntrackMax", "conntrackMaxPerCore", "conntrackMin", "conntrackTCPEstablishedTimeout"},
Required: []string{"TypeMeta", "bindAddress", "clusterCIDR", "healthzBindAddress", "healthzPort", "hostnameOverride", "iptablesMasqueradeBit", "iptablesSyncPeriodSeconds", "kubeconfigPath", "masqueradeAll", "master", "oomScoreAdj", "mode", "portRange", "resourceContainer", "udpTimeoutMilliseconds", "conntrackMax", "conntrackMaxPerCore", "conntrackMin", "conntrackTCPEstablishedTimeout", "conntrackTCPCloseWaitTimeout"},
},
},
Dependencies: []string{


@@ -58,6 +58,7 @@ go_library(
"initial_resources.go",
"job.go",
"kibana_logging.go",
"kube_proxy.go",
"kubectl.go",
"kubelet.go",
"kubelet_perf.go",
@@ -171,6 +172,7 @@ go_library(
"//test/e2e/chaosmonkey:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/images/net/nat:go_default_library",
"//test/utils:go_default_library",
"//vendor:github.com/aws/aws-sdk-go/aws",
"//vendor:github.com/aws/aws-sdk-go/aws/awserr",


@@ -3332,7 +3332,7 @@ func LogSSHResult(result SSHResult) {
Logf("ssh %s: exit code: %d", remote, result.Code)
}
func IssueSSHCommand(cmd, provider string, node *api.Node) error {
func IssueSSHCommandWithResult(cmd, provider string, node *api.Node) (*SSHResult, error) {
Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
@@ -3341,15 +3341,34 @@ func IssueSSHCommand(cmd, provider string, node *api.Node) error {
break
}
}
if host == "" {
return fmt.Errorf("couldn't find external IP address for node %s", node.Name)
return nil, fmt.Errorf("couldn't find external IP address for node %s", node.Name)
}
Logf("Calling %s on %s(%s)", cmd, node.Name, host)
Logf("SSH %q on %s(%s)", cmd, node.Name, host)
result, err := SSH(cmd, host, provider)
LogSSHResult(result)
if result.Code != 0 || err != nil {
return fmt.Errorf("failed running %q: %v (exit code %d)", cmd, err, result.Code)
return nil, fmt.Errorf("failed running %q: %v (exit code %d)",
cmd, err, result.Code)
}
return &result, nil
}
func IssueSSHCommand(cmd, provider string, node *api.Node) error {
// IssueSSHCommandWithResult already logs the SSH result and returns an
// error on nonzero exit, so the error can be returned without touching
// the (possibly nil) result.
_, err := IssueSSHCommandWithResult(cmd, provider, node)
if err != nil {
return err
}
return nil
}
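
A hypothetical call site for the new helper (the command and log line are illustrative only; node is an *api.Node, and strings is imported):

result, err := framework.IssueSSHCommandWithResult(
	"uname -r", framework.TestContext.Provider, node)
framework.ExpectNoError(err)
framework.Logf("kernel on %s: %s", node.Name, strings.TrimSpace(result.Stdout))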

test/e2e/kube_proxy.go (new file, 205 lines)

@@ -0,0 +1,205 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"encoding/json"
"fmt"
"math"
"strconv"
"strings"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/images/net/nat"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const kubeProxyE2eImage = "gcr.io/google_containers/e2e-net-amd64:1.0"
var _ = framework.KubeDescribe("Network", func() {
const (
testDaemonHttpPort = 11301
testDaemonTcpPort = 11302
timeoutSeconds = 10
postFinTimeoutSeconds = 5
)
fr := framework.NewDefaultFramework("network")
It("should set TCP CLOSE_WAIT timeout", func() {
nodes := framework.GetReadySchedulableNodesOrDie(fr.ClientSet)
ips := collectAddresses(nodes, api.NodeInternalIP)
if len(nodes.Items) < 2 {
framework.Skipf(
"Test requires >= 2 Ready nodes, but there are only %v nodes",
len(nodes.Items))
}
type NodeInfo struct {
node *api.Node
name string
nodeIp string
}
clientNodeInfo := NodeInfo{
node: &nodes.Items[0],
name: nodes.Items[0].Name,
nodeIp: ips[0],
}
serverNodeInfo := NodeInfo{
node: &nodes.Items[1],
name: nodes.Items[1].Name,
nodeIp: ips[1],
}
zero := int64(0)
clientPodSpec := &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "e2e-net-client",
Namespace: fr.Namespace.Name,
Labels: map[string]string{"app": "e2e-net-client"},
},
Spec: api.PodSpec{
NodeName: clientNodeInfo.name,
Containers: []api.Container{
{
Name: "e2e-net-client",
Image: kubeProxyE2eImage,
ImagePullPolicy: "Always",
Command: []string{
"/net", "-serve", fmt.Sprintf("0.0.0.0:%d", testDaemonHttpPort),
},
},
},
TerminationGracePeriodSeconds: &zero,
},
}
serverPodSpec := &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "e2e-net-server",
Namespace: fr.Namespace.Name,
Labels: map[string]string{"app": "e2e-net-server"},
},
Spec: api.PodSpec{
NodeName: serverNodeInfo.name,
Containers: []api.Container{
{
Name: "e2e-net-server",
Image: kubeProxyE2eImage,
ImagePullPolicy: "Always",
Command: []string{
"/net",
"-runner", "nat-closewait-server",
"-options",
fmt.Sprintf(`{"LocalAddr":"0.0.0.0:%v", "PostFindTimeoutSeconds":%v}`,
testDaemonTcpPort,
postFinTimeoutSeconds),
},
Ports: []api.ContainerPort{
{
Name: "tcp",
ContainerPort: testDaemonTcpPort,
HostPort: testDaemonTcpPort,
},
},
},
},
TerminationGracePeriodSeconds: &zero,
},
}
By(fmt.Sprintf(
"Launching a server daemon on node %v (node ip: %v, image: %v)",
serverNodeInfo.name,
serverNodeInfo.nodeIp,
kubeProxyE2eImage))
fr.PodClient().CreateSync(serverPodSpec)
By(fmt.Sprintf(
"Launching a client daemon on node %v (node ip: %v, image: %v)",
clientNodeInfo.name,
clientNodeInfo.nodeIp,
kubeProxyE2eImage))
fr.PodClient().CreateSync(clientPodSpec)
By("Make client connect")
options := nat.CloseWaitClientOptions{
RemoteAddr: fmt.Sprintf("%v:%v",
serverNodeInfo.nodeIp, testDaemonTcpPort),
TimeoutSeconds: timeoutSeconds,
PostFinTimeoutSeconds: 0,
LeakConnection: true,
}
jsonBytes, err := json.Marshal(options)
framework.ExpectNoError(err)
cmd := fmt.Sprintf(
`curl -X POST http://localhost:%v/run/nat-closewait-client -d `+
`'%v' 2>/dev/null`,
testDaemonHttpPort,
string(jsonBytes))
framework.RunHostCmdOrDie(fr.Namespace.Name, "e2e-net-client", cmd)
time.Sleep(1 * time.Second)
By("Checking /proc/net/nf_conntrack for the timeout")
// If test flakes occur here, then this check should be performed
// in a loop as there may be a race with the client connecting.
framework.IssueSSHCommandWithResult(
fmt.Sprintf("sudo cat /proc/net/ip_conntrack | grep 'dport=%v'",
testDaemonTcpPort),
framework.TestContext.Provider,
clientNodeInfo.node)
// Timeout in seconds is available as the third column from
// /proc/net/ip_conntrack.
result, err := framework.IssueSSHCommandWithResult(
fmt.Sprintf(
"sudo cat /proc/net/ip_conntrack "+
"| grep 'CLOSE_WAIT.*dst=%v.*dport=%v' "+
"| awk '{print $3}'",
serverNodeInfo.nodeIp,
testDaemonTcpPort),
framework.TestContext.Provider,
clientNodeInfo.node)
framework.ExpectNoError(err)
timeoutSeconds, err := strconv.Atoi(strings.TrimSpace(result.Stdout))
framework.ExpectNoError(err)
// These must be synchronized from the default values set in
// pkg/apis/../defaults.go ConntrackTCPCloseWaitTimeout. The
// current defaults are hidden in the initialization code.
const epsilonSeconds = 10
const expectedTimeoutSeconds = 60 * 60
framework.Logf("conntrack entry timeout was: %v, expected: %v",
timeoutSeconds, expectedTimeoutSeconds)
Expect(math.Abs(float64(timeoutSeconds - expectedTimeoutSeconds))).Should(
BeNumerically("<", (epsilonSeconds)))
})
})
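
For reference, a small sketch of the parsing that the awk pipeline above performs. The sample entry is hypothetical but follows the /proc/net/ip_conntrack field layout (protocol, protocol number, timeout in seconds, TCP state, then address tuples):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical conntrack entry; the third column is the remaining
	// timeout, which is what awk '{print $3}' extracts in the test.
	line := "tcp      6 3600 CLOSE_WAIT src=10.240.0.4 dst=10.240.0.5 " +
		"sport=54321 dport=11302 ..."
	fields := strings.Fields(line)
	timeout, err := strconv.Atoi(fields[2])
	if err != nil {
		panic(err)
	}
	fmt.Println("remaining timeout (s):", timeout) // 3600
}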


@@ -36,7 +36,8 @@ import (
"k8s.io/kubernetes/test/images/net/common"
)
// leakedConnection is a
// leakedConnection is a global variable that should leak the active
// connection assigned here.
var leakedConnection *net.TCPConn
// Server JSON options.
@@ -156,7 +157,9 @@ func (client *closeWaitClient) Run(logger *log.Logger, rawOptions interface{}) e
if err != nil {
return err
}
defer conn.Close()
if !client.options.LeakConnection {
defer conn.Close()
}
logger.Printf("Connected to server")


@@ -261,6 +261,7 @@ Network Partition should come back up if node goes down,foxish,0
Network Partition should create new pods when node is partitioned,foxish,0
Network Partition should eagerly create replacement pod during network partition when termination grace is non-zero,foxish,0
Network Partition should not reschedule pets if there is a network partition,foxish,0
Network should set TCP CLOSE_WAIT timeout,bowei,0
Networking Granular Checks: Pods should function for intra-pod communication: http,stts,0
Networking Granular Checks: Pods should function for intra-pod communication: udp,freehan,0
Networking Granular Checks: Pods should function for node-pod communication: http,spxtr,1
