diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go
index 3e7948f6c6a..523f0f9c9ec 100644
--- a/cmd/kube-proxy/app/server.go
+++ b/cmd/kube-proxy/app/server.go
@@ -90,7 +90,7 @@ func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {
 	fs.Var(&s.PortRange, "proxy-port-range", "Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.")
 	fs.StringVar(&s.HostnameOverride, "hostname-override", s.HostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname.")
 	fs.StringVar(&s.ProxyMode, "proxy-mode", "", "Which proxy mode to use: 'userspace' (older, stable) or 'iptables' (experimental). If blank, look at the Node object on the Kubernetes API and respect the '"+experimentalProxyModeAnnotation+"' annotation if provided. Otherwise use the best-available proxy (currently userspace, but may change in future versions). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.")
-	fs.DurationVar(&s.SyncPeriod, "iptables-sync-period", 5*time.Second, "How often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.")
+	fs.DurationVar(&s.SyncPeriod, "iptables-sync-period", 30*time.Second, "How often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.")
 	fs.BoolVar(&s.MasqueradeAll, "masquerade-all", false, "If using the pure iptables proxy, SNAT everything")
 	fs.BoolVar(&s.CleanupAndExit, "cleanup-iptables", false, "If true cleanup iptables rules and exit.")
 }
@@ -116,7 +116,7 @@ func NewProxyConfig() *ProxyServerConfig {
 		HealthzBindAddress: net.ParseIP("127.0.0.1"),
 		OOMScoreAdj:        qos.KubeProxyOomScoreAdj,
 		ResourceContainer:  "/kube-proxy",
-		SyncPeriod:         5 * time.Second,
+		SyncPeriod:         30 * time.Second,
 	}
 }
 
diff --git a/pkg/proxy/userspace/proxier.go b/pkg/proxy/userspace/proxier.go
index 930f9634ede..c724af9eb30 100644
--- a/pkg/proxy/userspace/proxier.go
+++ b/pkg/proxy/userspace/proxier.go
@@ -346,7 +346,7 @@ func (proxier *Proxier) addServiceOnPort(service proxy.ServicePortName, protocol
 }
 
 // How long we leave idle UDP connections open.
-const udpIdleTimeout = 1 * time.Second
+const udpIdleTimeout = 250 * time.Millisecond
 
 // OnUpdate manages the active set of service proxies.
 // Active service proxies are reinitialized if found in the update set or
diff --git a/pkg/proxy/userspace/proxysocket.go b/pkg/proxy/userspace/proxysocket.go
index ca7f59ec3d4..19aaf9d9bfe 100644
--- a/pkg/proxy/userspace/proxysocket.go
+++ b/pkg/proxy/userspace/proxysocket.go
@@ -132,7 +132,7 @@ func (tcp *tcpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *serv
 			glog.Errorf("Accept failed: %v", err)
 			continue
 		}
-		glog.V(2).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
+		glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
 		outConn, err := tryConnect(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", proxier)
 		if err != nil {
 			glog.Errorf("Failed to connect to balancer: %v", err)
@@ -247,7 +247,7 @@ func (udp *udpProxySocket) getBackendConn(activeClients *clientCache, cliAddr ne
 	if !found {
 		// TODO: This could spin up a new goroutine to make the outbound connection,
 		// and keep accepting inbound traffic.
-		glog.V(2).Infof("New UDP connection from %s", cliAddr)
+		glog.V(3).Infof("New UDP connection from %s", cliAddr)
 		var err error
 		svrConn, err = tryConnect(service, cliAddr, "udp", proxier)
 		if err != nil {