Merge pull request #81886 from praseodym/fix-staticcheck-pkg/proxy
Fix staticcheck failures for pkg/proxy/...
Commit: d758fc3edb
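The changes below clear a batch of staticcheck findings across the proxy packages: health check handlers that wrapped fmt.Sprintf inside printf-style calls, an iptables comment helper whose appended result was discarded, assignments and struct fields that are never read, and dead or duplicated helper code in the Windows proxiers.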
@@ -17,11 +17,6 @@ pkg/kubelet/pluginmanager/operationexecutor
 pkg/kubelet/pluginmanager/pluginwatcher
 pkg/kubelet/remote
 pkg/probe/http
-pkg/proxy/healthcheck
-pkg/proxy/iptables
-pkg/proxy/userspace
-pkg/proxy/winkernel
-pkg/proxy/winuserspace
 pkg/registry/autoscaling/horizontalpodautoscaler/storage
 pkg/registry/core/namespace/storage
 pkg/registry/core/persistentvolumeclaim/storage
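With the fixes below in place, the five pkg/proxy packages come off the staticcheck known-failures whitelist (presumably hack/.staticcheck_failures), so new findings in them are reported again instead of being ignored.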
@@ -52,7 +52,6 @@ type ProxierHealthServer struct {
 	clock clock.Clock

 	addr          string
-	port          int32
 	healthTimeout time.Duration
 	recorder      record.EventRecorder
 	nodeRef       *v1.ObjectReference
@@ -159,5 +158,5 @@ func (h healthzHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
 		lastUpdated = currentTime

 	}
-	fmt.Fprintf(resp, fmt.Sprintf(`{"lastUpdated": %q,"currentTime": %q}`, lastUpdated, currentTime))
+	fmt.Fprintf(resp, `{"lastUpdated": %q,"currentTime": %q}`, lastUpdated, currentTime)
 }
@@ -163,7 +163,7 @@ func (h hcHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
 	} else {
 		resp.WriteHeader(http.StatusOK)
 	}
-	fmt.Fprintf(resp, strings.Trim(dedent.Dedent(fmt.Sprintf(`
+	fmt.Fprint(resp, strings.Trim(dedent.Dedent(fmt.Sprintf(`
 {
 	"service": {
 		"namespace": %q,
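Both handler fixes address the same staticcheck complaint: a printf-style call whose format string is built dynamically, so the already-formatted data would be parsed a second time as a format string. A standalone sketch, not taken from the PR, of why that is fragile and what the two fixed shapes look like:

package main

import "fmt"

func main() {
	status := "100% healthy" // data that happens to contain a literal '%'

	// Pre-fix shape: the Sprintf result is re-parsed as a format string, so
	// the '%' in the data becomes a bogus verb (prints something like
	// "status: 100%!h(NOVERB)ealthy").
	fmt.Printf(fmt.Sprintf("status: %s\n", status))

	// Fixed shape used in the healthz handler: one call, format plus arguments.
	fmt.Printf("status: %s\n", status)

	// Fixed shape used in the healthcheck handler: when the string is already
	// fully formatted, print it as-is instead of treating it as a format.
	fmt.Print("status: " + status + "\n")
}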
@@ -727,14 +727,14 @@ func (proxier *Proxier) deleteEndpointConnections(connectionMap []proxy.ServiceE
 const endpointChainsNumberThreshold = 1000

 // Assumes proxier.mu is held.
-func (proxier *Proxier) appendServiceCommentLocked(args []string, svcName string) {
+func (proxier *Proxier) appendServiceCommentLocked(args []string, svcName string) []string {
 	// Not printing these comments, can reduce size of iptables (in case of large
 	// number of endpoints) even by 40%+. So if total number of endpoint chains
 	// is large enough, we simply drop those comments.
 	if proxier.endpointChainsNumber > endpointChainsNumberThreshold {
-		return
+		return args
 	}
-	args = append(args, "-m", "comment", "--comment", svcName)
+	return append(args, "-m", "comment", "--comment", svcName)
 }

 // This is where all of the iptables-save/restore calls happen.
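The signature change is the heart of the iptables fix: append may allocate a new backing array, and even when it does not, the caller's slice header keeps its old length, so the comment appended inside the helper was invisible to the caller and staticcheck reported the assignment to args as never used. A minimal, self-contained sketch of the before/after pattern (chain and comment values are made up for illustration):

package main

import "fmt"

// appendCommentBroken mirrors the pre-fix helper: the appended elements never
// reach the caller, because the caller's slice header is left untouched.
func appendCommentBroken(args []string, comment string) {
	args = append(args, "-m", "comment", "--comment", comment)
}

// appendComment mirrors the fixed helper: returning the (possibly reallocated)
// slice is the only way the caller can observe the appended elements.
func appendComment(args []string, comment string) []string {
	return append(args, "-m", "comment", "--comment", comment)
}

func main() {
	args := []string{"-A", "KUBE-SVC-EXAMPLE"}

	appendCommentBroken(args, "ns1/svc1:")
	fmt.Println(args) // [-A KUBE-SVC-EXAMPLE] (the comment was silently dropped)

	args = appendComment(args, "ns1/svc1:")
	fmt.Println(args) // [-A KUBE-SVC-EXAMPLE -m comment --comment ns1/svc1:]
}

The three call sites in syncProxyRules below are updated accordingly to reassign args from the helper's return value.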
@@ -1266,7 +1266,7 @@ func (proxier *Proxier) syncProxyRules() {
 			args = append(args[:0],
 				"-A", string(svcChain),
 			)
-			proxier.appendServiceCommentLocked(args, svcNameString)
+			args = proxier.appendServiceCommentLocked(args, svcNameString)
 			args = append(args,
 				"-m", "recent", "--name", string(endpointChain),
 				"--rcheck", "--seconds", strconv.Itoa(svcInfo.StickyMaxAgeSeconds()), "--reap",
@@ -1278,13 +1278,10 @@ func (proxier *Proxier) syncProxyRules() {

 		// Now write loadbalancing & DNAT rules.
 		n := len(endpointChains)
-		localEndpoints := make([]*endpointsInfo, 0)
 		localEndpointChains := make([]utiliptables.Chain, 0)
 		for i, endpointChain := range endpointChains {
 			// Write ingress loadbalancing & DNAT rules only for services that request OnlyLocal traffic.
 			if svcInfo.OnlyNodeLocalEndpoints() && endpoints[i].IsLocal {
-				// These slices parallel each other; must be kept in sync
-				localEndpoints = append(localEndpoints, endpoints[i])
 				localEndpointChains = append(localEndpointChains, endpointChains[i])
 			}

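localEndpoints was only ever appended to and never read again, which is the "result of append is never used" class of staticcheck finding; only localEndpointChains is consumed later, so the unused slice and its bookkeeping comment are removed.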
@@ -1296,7 +1293,7 @@ func (proxier *Proxier) syncProxyRules() {

 			// Balancing rules in the per-service chain.
 			args = append(args[:0], "-A", string(svcChain))
-			proxier.appendServiceCommentLocked(args, svcNameString)
+			args = proxier.appendServiceCommentLocked(args, svcNameString)
 			if i < (n - 1) {
 				// Each rule is a probabilistic match.
 				args = append(args,
@@ -1310,7 +1307,7 @@ func (proxier *Proxier) syncProxyRules() {

 			// Rules in the per-endpoint chain.
 			args = append(args[:0], "-A", string(endpointChain))
-			proxier.appendServiceCommentLocked(args, svcNameString)
+			args = proxier.appendServiceCommentLocked(args, svcNameString)
 			// Handle traffic that loops back to the originator with SNAT.
 			writeLine(proxier.natRules, append(args,
 				"-s", utilproxy.ToCIDR(net.ParseIP(epIP)),
@@ -2358,15 +2358,15 @@ COMMIT
 -A KUBE-MARK-MASQ -j MARK --set-xmark
 -A KUBE-SERVICES -m comment --comment "ns1/svc1: cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
 -A KUBE-SERVICES -m comment --comment "ns1/svc1: cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 -j KUBE-SVC-AHZNAGK3SCETOS2T
--A KUBE-SVC-AHZNAGK3SCETOS2T -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-PXD6POUVGD2I37UY
--A KUBE-SEP-PXD6POUVGD2I37UY -s 10.0.1.1/32 -j KUBE-MARK-MASQ
--A KUBE-SEP-PXD6POUVGD2I37UY -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
--A KUBE-SVC-AHZNAGK3SCETOS2T -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-SOKZUIT7SCEVIP33
--A KUBE-SEP-SOKZUIT7SCEVIP33 -s 10.0.1.2/32 -j KUBE-MARK-MASQ
--A KUBE-SEP-SOKZUIT7SCEVIP33 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
--A KUBE-SVC-AHZNAGK3SCETOS2T -j KUBE-SEP-WVE3FAB34S7NZGDJ
--A KUBE-SEP-WVE3FAB34S7NZGDJ -s 10.0.1.3/32 -j KUBE-MARK-MASQ
--A KUBE-SEP-WVE3FAB34S7NZGDJ -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
+-A KUBE-SVC-AHZNAGK3SCETOS2T -m comment --comment ns1/svc1: -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-PXD6POUVGD2I37UY
+-A KUBE-SEP-PXD6POUVGD2I37UY -m comment --comment ns1/svc1: -s 10.0.1.1/32 -j KUBE-MARK-MASQ
+-A KUBE-SEP-PXD6POUVGD2I37UY -m comment --comment ns1/svc1: -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
+-A KUBE-SVC-AHZNAGK3SCETOS2T -m comment --comment ns1/svc1: -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-SOKZUIT7SCEVIP33
+-A KUBE-SEP-SOKZUIT7SCEVIP33 -m comment --comment ns1/svc1: -s 10.0.1.2/32 -j KUBE-MARK-MASQ
+-A KUBE-SEP-SOKZUIT7SCEVIP33 -m comment --comment ns1/svc1: -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
+-A KUBE-SVC-AHZNAGK3SCETOS2T -m comment --comment ns1/svc1: -j KUBE-SEP-WVE3FAB34S7NZGDJ
+-A KUBE-SEP-WVE3FAB34S7NZGDJ -m comment --comment ns1/svc1: -s 10.0.1.3/32 -j KUBE-MARK-MASQ
+-A KUBE-SEP-WVE3FAB34S7NZGDJ -m comment --comment ns1/svc1: -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
 -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
 COMMIT
 `
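The golden iptables output in the proxier test changes as a consequence of the helper fix: now that the return value of appendServiceCommentLocked is actually used, the "-m comment --comment ns1/svc1:" fragment shows up on the per-service and per-endpoint rules as originally intended.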
@@ -290,7 +290,7 @@ func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activ
 			klog.Errorf("SetDeadline failed: %v", err)
 			break
 		}
-		n, err = udp.WriteTo(buffer[0:n], cliAddr)
+		_, err = udp.WriteTo(buffer[0:n], cliAddr)
 		if err != nil {
 			if !logTimeout(err) {
 				klog.Errorf("WriteTo failed: %v", err)
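Here staticcheck flags an assignment whose value is never read: the byte count from WriteTo was stored back into n even though only the error is checked afterwards. A small self-contained sketch of the idiom (the UDP echo-to-self setup is just scaffolding for the example):

package main

import (
	"fmt"
	"net"
)

func main() {
	conn, err := net.ListenPacket("udp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	buf := []byte("ping")

	// Pre-fix shape (commented out): n would be overwritten but never read
	// again, which staticcheck reports as an unused assignment.
	//   n, err := conn.WriteTo(buf, conn.LocalAddr())

	// Fixed shape: discard the byte count explicitly when only the error matters.
	_, err = conn.WriteTo(buf, conn.LocalAddr())
	if err != nil {
		fmt.Println("WriteTo failed:", err)
	}
	fmt.Println("sent", len(buf), "bytes")
}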
@@ -21,6 +21,7 @@ go_library(
         "//pkg/proxy/apis/config:go_default_library",
         "//pkg/proxy/config:go_default_library",
         "//pkg/proxy/healthcheck:go_default_library",
+        "//pkg/proxy/metrics:go_default_library",
         "//pkg/util/async:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
@@ -18,7 +18,6 @@ package winkernel

 import (
 	"sync"
-	"time"

 	"k8s.io/component-base/metrics"
 	"k8s.io/component-base/metrics/legacyregistry"
@@ -69,13 +68,3 @@ func RegisterMetrics() {
 		legacyregistry.MustRegister(SyncProxyRulesLastTimestamp)
 	})
 }
-
-// Gets the time since the specified start in microseconds.
-func sinceInMicroseconds(start time.Time) float64 {
-	return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
-}
-
-// Gets the time since the specified start in seconds.
-func sinceInSeconds(start time.Time) float64 {
-	return time.Since(start).Seconds()
-}
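The helpers removed here duplicated time-conversion code; after this change the winkernel proxier calls the shared versions from pkg/proxy/metrics instead (see the import and syncProxyRules hunks below). A sketch of what those conversions amount to, reconstructed from the deleted bodies rather than from the shared package itself:

package main

import (
	"fmt"
	"time"
)

// sinceInSeconds returns the elapsed time since start in seconds,
// matching the body deleted from the winkernel metrics file.
func sinceInSeconds(start time.Time) float64 {
	return time.Since(start).Seconds()
}

// sinceInMicroseconds returns the elapsed time since start in microseconds.
func sinceInMicroseconds(start time.Time) float64 {
	return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
}

func main() {
	start := time.Now()
	time.Sleep(10 * time.Millisecond)
	fmt.Printf("%.4fs elapsed (%.0f microseconds)\n", sinceInSeconds(start), sinceInMicroseconds(start))
}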
@@ -47,6 +47,7 @@ import (
 	"k8s.io/kubernetes/pkg/proxy/apis/config"
 	proxyconfig "k8s.io/kubernetes/pkg/proxy/config"
 	"k8s.io/kubernetes/pkg/proxy/healthcheck"
+	"k8s.io/kubernetes/pkg/proxy/metrics"
 	"k8s.io/kubernetes/pkg/util/async"
 )

@@ -1000,8 +1001,8 @@ func (proxier *Proxier) syncProxyRules() {

 	start := time.Now()
 	defer func() {
-		SyncProxyRulesLatency.Observe(sinceInSeconds(start))
-		DeprecatedSyncProxyRulesLatency.Observe(sinceInMicroseconds(start))
+		SyncProxyRulesLatency.Observe(metrics.SinceInSeconds(start))
+		DeprecatedSyncProxyRulesLatency.Observe(metrics.SinceInMicroseconds(start))
 		klog.V(4).Infof("syncProxyRules took %v", time.Since(start))
 	}()
 	// don't sync rules till we've received services and endpoints
@@ -91,8 +91,6 @@ type Proxier struct {
 	serviceMap     map[ServicePortPortalName]*serviceInfo
 	syncPeriod     time.Duration
 	udpIdleTimeout time.Duration
-	portMapMutex   sync.Mutex
-	portMap        map[portMapKey]*portMapValue
 	numProxyLoops  int32 // use atomic ops to access this; mostly for testing
 	netsh          netsh.Interface
 	hostIP         net.IP
@@ -101,26 +99,6 @@ type Proxier struct {
 // assert Proxier is a proxy.Provider
 var _ proxy.Provider = &Proxier{}

-// A key for the portMap. The ip has to be a string because slices can't be map
-// keys.
-type portMapKey struct {
-	ip       string
-	port     int
-	protocol v1.Protocol
-}
-
-func (k *portMapKey) String() string {
-	return fmt.Sprintf("%s/%s", net.JoinHostPort(k.ip, strconv.Itoa(k.port)), k.protocol)
-}
-
-// A value for the portMap
-type portMapValue struct {
-	owner  ServicePortPortalName
-	socket interface {
-		Close() error
-	}
-}
-
 var (
 	// ErrProxyOnLocalhost is returned by NewProxier if the user requests a proxier on
 	// the loopback address. May be checked for by callers of NewProxier to know whether
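The portMap bookkeeping in the winuserspace proxier (the struct fields above, the portMapKey/portMapValue types, and the map initialization in createProxier below) is unreferenced elsewhere in the package, so all of it is removed as dead code.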
@@ -154,7 +132,6 @@ func createProxier(loadBalancer LoadBalancer, listenIP net.IP, netsh netsh.Inter
 	return &Proxier{
 		loadBalancer:   loadBalancer,
 		serviceMap:     make(map[ServicePortPortalName]*serviceInfo),
-		portMap:        make(map[portMapKey]*portMapValue),
 		syncPeriod:     syncPeriod,
 		udpIdleTimeout: udpIdleTimeout,
 		netsh:          netsh,
@@ -617,7 +617,7 @@ func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activ
 			klog.Errorf("SetDeadline failed: %v", err)
 			break
 		}
-		n, err = udp.WriteTo(buffer[0:n], cliAddr)
+		_, err = udp.WriteTo(buffer[0:n], cliAddr)
 		if err != nil {
 			if !logTimeout(err) {
 				klog.Errorf("WriteTo failed: %v", err)