Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-08 11:38:15 +00:00)

Merge pull request #118334 from danwinship/proxyutil
Consistently use proxyutil as the name for pkg/proxy/util
commit 7d24586663
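
The change below is mechanical: every file that imports k8s.io/kubernetes/pkg/proxy/util under the alias utilproxy switches to the alias proxyutil, and call sites are updated to match; behavior is unchanged. A minimal sketch of the pattern in a hypothetical file (the package and helper names here are for illustration only):

package example

import (
	proxyutil "k8s.io/kubernetes/pkg/proxy/util" // was: utilproxy "k8s.io/kubernetes/pkg/proxy/util"
)

// endpointIP extracts the IP portion of an "ip:port" endpoint string,
// the same call the updated sites below make.
func endpointIP(endpoint string) string {
	return proxyutil.IPPart(endpoint) // was: utilproxy.IPPart(endpoint)
}
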
@@ -21,7 +21,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/proxy"
-	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
+	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
 	utilexec "k8s.io/utils/exec"
 )
@@ -81,7 +81,7 @@ func deleteStaleServiceConntrackEntries(isIPv6 bool, exec utilexec.Interface, sv
 func deleteStaleEndpointConntrackEntries(exec utilexec.Interface, svcPortMap proxy.ServicePortMap, endpointUpdateResult proxy.UpdateEndpointMapResult) {
 	for _, epSvcPair := range endpointUpdateResult.DeletedUDPEndpoints {
 		if svcInfo, ok := svcPortMap[epSvcPair.ServicePortName]; ok {
-			endpointIP := utilproxy.IPPart(epSvcPair.Endpoint)
+			endpointIP := proxyutil.IPPart(epSvcPair.Endpoint)
 			nodePort := svcInfo.NodePort()
 			var err error
 			if nodePort != 0 {

@@ -30,7 +30,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/proxy/metrics"
-	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
+	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
 )

 var supportedEndpointSliceAddressTypes = sets.New[string](
@@ -109,12 +109,12 @@ func (info *BaseEndpointInfo) GetZoneHints() sets.Set[string] {

 // IP returns just the IP part of the endpoint, it's a part of proxy.Endpoint interface.
 func (info *BaseEndpointInfo) IP() string {
-	return utilproxy.IPPart(info.Endpoint)
+	return proxyutil.IPPart(info.Endpoint)
 }

 // Port returns just the Port part of the endpoint.
 func (info *BaseEndpointInfo) Port() (int, error) {
-	return utilproxy.PortPart(info.Endpoint)
+	return proxyutil.PortPart(info.Endpoint)
 }

 // Equal is part of proxy.Endpoint interface.

@@ -31,7 +31,7 @@ import (
 	"k8s.io/client-go/tools/events"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/features"
-	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
+	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
 	utilnet "k8s.io/utils/net"
 )
@@ -290,7 +290,7 @@ func (cache *EndpointSliceCache) addEndpoints(svcPortName *ServicePortName, port
 		if (cache.ipFamily == v1.IPv6Protocol) != utilnet.IsIPv6String(endpoint.Addresses[0]) {
 			// Emit event on the corresponding service which had a different IP
 			// version than the endpoint.
-			utilproxy.LogAndEmitIncorrectIPVersionEvent(cache.recorder, "endpointslice", endpoint.Addresses[0], svcPortName.NamespacedName.Namespace, svcPortName.NamespacedName.Name, "")
+			proxyutil.LogAndEmitIncorrectIPVersionEvent(cache.recorder, "endpointslice", endpoint.Addresses[0], svcPortName.NamespacedName.Namespace, svcPortName.NamespacedName.Name, "")
 			continue
 		}

@@ -28,7 +28,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/dump"
 	"k8s.io/apimachinery/pkg/util/sets"
-	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
+	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
 	testingclock "k8s.io/utils/clock/testing"
 )
@@ -141,7 +141,7 @@ func (fake fakeProxierHealthChecker) IsHealthy() bool {
 func TestServer(t *testing.T) {
 	listener := newFakeListener()
 	httpFactory := newFakeHTTPServerFactory()
-	nodePortAddresses := utilproxy.NewNodePortAddresses(v1.IPv4Protocol, []string{})
+	nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{})
 	proxyChecker := &fakeProxierHealthChecker{true}

 	hcsi := newServiceHealthServer("hostname", nil, listener, httpFactory, nodePortAddresses, proxyChecker)
@@ -471,7 +471,7 @@ func TestServerWithSelectiveListeningAddress(t *testing.T) {

 	// limiting addresses to loop back. We don't want any cleverness here around getting IP for
 	// machine nor testing ipv6 || ipv4. using loop back guarantees the test will work on any machine
-	nodePortAddresses := utilproxy.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"})
+	nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"})

 	hcsi := newServiceHealthServer("hostname", nil, listener, httpFactory, nodePortAddresses, proxyChecker)
 	hcs := hcsi.(*server)

@@ -24,15 +24,14 @@ import (
 	"sync"

 	"github.com/lithammer/dedent"

 	v1 "k8s.io/api/core/v1"
-	"k8s.io/klog/v2"
-
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/client-go/tools/events"
-	api "k8s.io/kubernetes/pkg/apis/core"

 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
+	"k8s.io/client-go/tools/events"
+	"k8s.io/klog/v2"
+	api "k8s.io/kubernetes/pkg/apis/core"
+	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
 )

 // ServiceHealthServer serves HTTP endpoints for each service name, with results
@@ -57,13 +56,13 @@ type proxierHealthChecker interface {
 	IsHealthy() bool
 }

-func newServiceHealthServer(hostname string, recorder events.EventRecorder, listener listener, factory httpServerFactory, nodePortAddresses *utilproxy.NodePortAddresses, healthzServer proxierHealthChecker) ServiceHealthServer {
+func newServiceHealthServer(hostname string, recorder events.EventRecorder, listener listener, factory httpServerFactory, nodePortAddresses *proxyutil.NodePortAddresses, healthzServer proxierHealthChecker) ServiceHealthServer {
 	// It doesn't matter whether we listen on "0.0.0.0", "::", or ""; go
 	// treats them all the same.
 	nodeIPs := []net.IP{net.IPv4zero}

 	if !nodePortAddresses.MatchAll() {
-		ips, err := nodePortAddresses.GetNodeIPs(utilproxy.RealNetwork{})
+		ips, err := nodePortAddresses.GetNodeIPs(proxyutil.RealNetwork{})
 		if err == nil {
 			nodeIPs = ips
 		} else {
@@ -83,7 +82,7 @@ func newServiceHealthServer(hostname string, recorder events.EventRecorder, list
 }

 // NewServiceHealthServer allocates a new service healthcheck server manager
-func NewServiceHealthServer(hostname string, recorder events.EventRecorder, nodePortAddresses *utilproxy.NodePortAddresses, healthzServer proxierHealthChecker) ServiceHealthServer {
+func NewServiceHealthServer(hostname string, recorder events.EventRecorder, nodePortAddresses *proxyutil.NodePortAddresses, healthzServer proxierHealthChecker) ServiceHealthServer {
 	return newServiceHealthServer(hostname, recorder, stdNetListener{}, stdHTTPServerFactory{}, nodePortAddresses, healthzServer)
 }

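Aside from the alias, the node-IP resolution in newServiceHealthServer above is untouched; the sketch below isolates that pattern under the new name. The helper resolveNodeIPs is hypothetical, and error handling is reduced to the same wildcard fallback the function already uses:

package example

import (
	"net"

	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
)

// resolveNodeIPs mirrors newServiceHealthServer: listen on the wildcard
// address unless nodePortAddresses narrows the set of node IPs.
func resolveNodeIPs(npa *proxyutil.NodePortAddresses) []net.IP {
	nodeIPs := []net.IP{net.IPv4zero}
	if !npa.MatchAll() {
		if ips, err := npa.GetNodeIPs(proxyutil.RealNetwork{}); err == nil {
			nodeIPs = ips
		}
	}
	return nodeIPs
}
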
@@ -46,7 +46,7 @@ import (
 	"k8s.io/kubernetes/pkg/proxy/healthcheck"
 	"k8s.io/kubernetes/pkg/proxy/metaproxier"
 	"k8s.io/kubernetes/pkg/proxy/metrics"
-	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
+	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
 	proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
 	"k8s.io/kubernetes/pkg/util/async"
 	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
@@ -196,10 +196,10 @@ type Proxier struct {
 	// that are significantly impacting performance.
 	iptablesData *bytes.Buffer
 	existingFilterChainsData *bytes.Buffer
-	filterChains utilproxy.LineBuffer
-	filterRules utilproxy.LineBuffer
-	natChains utilproxy.LineBuffer
-	natRules utilproxy.LineBuffer
+	filterChains proxyutil.LineBuffer
+	filterRules proxyutil.LineBuffer
+	natChains proxyutil.LineBuffer
+	natRules proxyutil.LineBuffer

 	// largeClusterMode is set at the beginning of syncProxyRules if we are
 	// going to end up outputting "lots" of iptables rules and so we need to
@@ -210,10 +210,10 @@ type Proxier struct {
 	// via localhost.
 	localhostNodePorts bool
 	// nodePortAddresses selects the interfaces where nodePort works.
-	nodePortAddresses *utilproxy.NodePortAddresses
+	nodePortAddresses *proxyutil.NodePortAddresses
 	// networkInterfacer defines an interface for several net library functions.
 	// Inject for test purpose.
-	networkInterfacer utilproxy.NetworkInterfacer
+	networkInterfacer proxyutil.NetworkInterfacer
 }

 // Proxier implements proxy.Provider
@@ -240,7 +240,7 @@ func NewProxier(ipFamily v1.IPFamily,
 	healthzServer healthcheck.ProxierHealthUpdater,
 	nodePortAddressStrings []string,
 ) (*Proxier, error) {
-	nodePortAddresses := utilproxy.NewNodePortAddresses(ipFamily, nodePortAddressStrings)
+	nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings)

 	if !nodePortAddresses.ContainsIPv4Loopback() {
 		localhostNodePorts = false
@@ -249,7 +249,7 @@ func NewProxier(ipFamily v1.IPFamily,
 		// Set the route_localnet sysctl we need for exposing NodePorts on loopback addresses
 		// Refer to https://issues.k8s.io/90259
 		klog.InfoS("Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses")
-		if err := utilproxy.EnsureSysctl(sysctl, sysctlRouteLocalnet, 1); err != nil {
+		if err := proxyutil.EnsureSysctl(sysctl, sysctlRouteLocalnet, 1); err != nil {
 			return nil, err
 		}
 	}
@@ -288,13 +288,13 @@ func NewProxier(ipFamily v1.IPFamily,
 		precomputedProbabilities: make([]string, 0, 1001),
 		iptablesData: bytes.NewBuffer(nil),
 		existingFilterChainsData: bytes.NewBuffer(nil),
-		filterChains: utilproxy.LineBuffer{},
-		filterRules: utilproxy.LineBuffer{},
-		natChains: utilproxy.LineBuffer{},
-		natRules: utilproxy.LineBuffer{},
+		filterChains: proxyutil.LineBuffer{},
+		filterRules: proxyutil.LineBuffer{},
+		natChains: proxyutil.LineBuffer{},
+		natRules: proxyutil.LineBuffer{},
 		localhostNodePorts: localhostNodePorts,
 		nodePortAddresses: nodePortAddresses,
-		networkInterfacer: utilproxy.RealNetwork{},
+		networkInterfacer: proxyutil.RealNetwork{},
 	}

 	burstSyncs := 2
@@ -411,8 +411,8 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
 		encounteredError = true
 	} else {
 		existingNATChains := utiliptables.GetChainsFromTable(iptablesData.Bytes())
-		natChains := &utilproxy.LineBuffer{}
-		natRules := &utilproxy.LineBuffer{}
+		natChains := &proxyutil.LineBuffer{}
+		natRules := &proxyutil.LineBuffer{}
 		natChains.Write("*nat")
 		// Start with chains we know we need to remove.
 		for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain} {
@@ -448,8 +448,8 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
 		encounteredError = true
 	} else {
 		existingFilterChains := utiliptables.GetChainsFromTable(iptablesData.Bytes())
-		filterChains := &utilproxy.LineBuffer{}
-		filterRules := &utilproxy.LineBuffer{}
+		filterChains := &proxyutil.LineBuffer{}
+		filterRules := &proxyutil.LineBuffer{}
 		filterChains.Write("*filter")
 		for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeExternalServicesChain, kubeForwardChain, kubeNodePortsChain} {
 			if _, found := existingFilterChains[chain]; found {

@@ -45,9 +45,9 @@ import (
 	"k8s.io/kubernetes/pkg/proxy/metrics"

 	"k8s.io/kubernetes/pkg/proxy/healthcheck"
-	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
+	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
 	proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
-	utilproxytest "k8s.io/kubernetes/pkg/proxy/util/testing"
+	proxyutiltest "k8s.io/kubernetes/pkg/proxy/util/testing"
 	"k8s.io/kubernetes/pkg/util/async"
 	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
 	iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing"
@@ -175,7 +175,7 @@ func TestDeleteEndpointConnections(t *testing.T) {
 			fexec.CommandScript = append(fexec.CommandScript, execFunc)
 		}

-		endpointIP := utilproxy.IPPart(tc.endpoint)
+		endpointIP := proxyutil.IPPart(tc.endpoint)
 		isIPv6 := netutils.IsIPv6String(endpointIP)

 		var ipt utiliptables.Interface
@@ -293,7 +293,7 @@ func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
 	}
 	detectLocal, _ := proxyutiliptables.NewDetectLocalByCIDR(podCIDR, ipt)

-	networkInterfacer := utilproxytest.NewFakeNetwork()
+	networkInterfacer := proxyutiltest.NewFakeNetwork()
 	itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
 	addrs := []net.Addr{
 		&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)},
@@ -324,13 +324,13 @@ func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
 		precomputedProbabilities: make([]string, 0, 1001),
 		iptablesData: bytes.NewBuffer(nil),
 		existingFilterChainsData: bytes.NewBuffer(nil),
-		filterChains: utilproxy.LineBuffer{},
-		filterRules: utilproxy.LineBuffer{},
-		natChains: utilproxy.LineBuffer{},
-		natRules: utilproxy.LineBuffer{},
+		filterChains: proxyutil.LineBuffer{},
+		filterRules: proxyutil.LineBuffer{},
+		natChains: proxyutil.LineBuffer{},
+		natRules: proxyutil.LineBuffer{},
 		nodeIP: netutils.ParseIPSloppy(testNodeIP),
 		localhostNodePorts: true,
-		nodePortAddresses: utilproxy.NewNodePortAddresses(ipfamily, nil),
+		nodePortAddresses: proxyutil.NewNodePortAddresses(ipfamily, nil),
 		networkInterfacer: networkInterfacer,
 	}
 	p.setInitialized(true)
@@ -2464,7 +2464,7 @@ func TestNodePort(t *testing.T) {
 func TestHealthCheckNodePort(t *testing.T) {
 	ipt := iptablestest.NewFake()
 	fp := NewFakeProxier(ipt)
-	fp.nodePortAddresses = utilproxy.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"})
+	fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"})

 	svcIP := "172.30.0.42"
 	svcPort := 80
@@ -3393,7 +3393,7 @@ func TestDisableLocalhostNodePortsIPv4WithNodeAddress(t *testing.T) {
 	fp.localDetector = proxyutiliptables.NewNoOpLocalDetector()
 	fp.localhostNodePorts = false
 	fp.networkInterfacer.InterfaceAddrs()
-	fp.nodePortAddresses = utilproxy.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"})
+	fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"})

 	expected := dedent.Dedent(`
 		*filter
@@ -3674,7 +3674,7 @@ func TestOnlyLocalNodePortsNoClusterCIDR(t *testing.T) {
 	ipt := iptablestest.NewFake()
 	fp := NewFakeProxier(ipt)
 	fp.localDetector = proxyutiliptables.NewNoOpLocalDetector()
-	fp.nodePortAddresses = utilproxy.NewNodePortAddresses(v1.IPv4Protocol, []string{"192.168.0.0/24", "2001:db8::/64"})
+	fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"192.168.0.0/24", "2001:db8::/64"})
 	fp.localhostNodePorts = false

 	expected := dedent.Dedent(`
@@ -3723,7 +3723,7 @@ func TestOnlyLocalNodePortsNoClusterCIDR(t *testing.T) {
 func TestOnlyLocalNodePorts(t *testing.T) {
 	ipt := iptablestest.NewFake()
 	fp := NewFakeProxier(ipt)
-	fp.nodePortAddresses = utilproxy.NewNodePortAddresses(v1.IPv4Protocol, []string{"192.168.0.0/24", "2001:db8::/64"})
+	fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"192.168.0.0/24", "2001:db8::/64"})
 	fp.localhostNodePorts = false

 	expected := dedent.Dedent(`

@@ -25,7 +25,7 @@ import (

 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/klog/v2"
-	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
+	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
 	netutils "k8s.io/utils/net"

 	"github.com/vishvananda/netlink"
@@ -135,7 +135,7 @@ func (h *netlinkHandle) GetAllLocalAddresses() (sets.Set[string], error) {
 	if err != nil {
 		return nil, fmt.Errorf("Could not get addresses: %v", err)
 	}
-	return utilproxy.AddressSet(h.isValidForSet, addr), nil
+	return proxyutil.AddressSet(h.isValidForSet, addr), nil
 }

 // GetLocalAddresses return all local addresses for an interface.
@@ -150,7 +150,7 @@ func (h *netlinkHandle) GetLocalAddresses(dev string) (sets.Set[string], error)
 	if err != nil {
 		return nil, fmt.Errorf("Can't get addresses from %s: %v", ifi.Name, err)
 	}
-	return utilproxy.AddressSet(h.isValidForSet, addr), nil
+	return proxyutil.AddressSet(h.isValidForSet, addr), nil
 }

 func (h *netlinkHandle) isValidForSet(ip net.IP) bool {
@@ -190,5 +190,5 @@ func (h *netlinkHandle) GetAllLocalAddressesExcept(dev string) (sets.Set[string]
 		}
 		addr = append(addr, ifadr...)
 	}
-	return utilproxy.AddressSet(h.isValidForSet, addr), nil
+	return proxyutil.AddressSet(h.isValidForSet, addr), nil
 }

@@ -48,7 +48,7 @@ import (
 	utilipvs "k8s.io/kubernetes/pkg/proxy/ipvs/util"
 	"k8s.io/kubernetes/pkg/proxy/metaproxier"
 	"k8s.io/kubernetes/pkg/proxy/metrics"
-	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
+	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
 	proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
 	"k8s.io/kubernetes/pkg/util/async"
 	utilipset "k8s.io/kubernetes/pkg/util/ipset"
@@ -268,19 +268,19 @@ type Proxier struct {
 	// that are significantly impacting performance.
 	iptablesData *bytes.Buffer
 	filterChainsData *bytes.Buffer
-	natChains utilproxy.LineBuffer
-	filterChains utilproxy.LineBuffer
-	natRules utilproxy.LineBuffer
-	filterRules utilproxy.LineBuffer
+	natChains proxyutil.LineBuffer
+	filterChains proxyutil.LineBuffer
+	natRules proxyutil.LineBuffer
+	filterRules proxyutil.LineBuffer
 	// Added as a member to the struct to allow injection for testing.
 	netlinkHandle NetLinkHandle
 	// ipsetList is the list of ipsets that ipvs proxier used.
 	ipsetList map[string]*IPSet
 	// nodePortAddresses selects the interfaces where nodePort works.
-	nodePortAddresses *utilproxy.NodePortAddresses
+	nodePortAddresses *proxyutil.NodePortAddresses
 	// networkInterfacer defines an interface for several net library functions.
 	// Inject for test purpose.
-	networkInterfacer utilproxy.NetworkInterfacer
+	networkInterfacer proxyutil.NetworkInterfacer
 	gracefuldeleteManager *GracefulTerminationManager
 	// serviceNoLocalEndpointsInternal represents the set of services that couldn't be applied
 	// due to the absence of local endpoints when the internal traffic policy is "Local".
@@ -338,7 +338,7 @@ func NewProxier(ipFamily v1.IPFamily,
 	}

 	// Set the conntrack sysctl we need for
-	if err := utilproxy.EnsureSysctl(sysctl, sysctlVSConnTrack, 1); err != nil {
+	if err := proxyutil.EnsureSysctl(sysctl, sysctlVSConnTrack, 1); err != nil {
 		return nil, err
 	}

@@ -357,34 +357,34 @@ func NewProxier(ipFamily v1.IPFamily,
 		klog.V(2).InfoS("Left as-is", "sysctl", sysctlConnReuse)
 	} else {
 		// Set the connection reuse mode
-		if err := utilproxy.EnsureSysctl(sysctl, sysctlConnReuse, 0); err != nil {
+		if err := proxyutil.EnsureSysctl(sysctl, sysctlConnReuse, 0); err != nil {
 			return nil, err
 		}
 	}

 	// Set the expire_nodest_conn sysctl we need for
-	if err := utilproxy.EnsureSysctl(sysctl, sysctlExpireNoDestConn, 1); err != nil {
+	if err := proxyutil.EnsureSysctl(sysctl, sysctlExpireNoDestConn, 1); err != nil {
 		return nil, err
 	}

 	// Set the expire_quiescent_template sysctl we need for
-	if err := utilproxy.EnsureSysctl(sysctl, sysctlExpireQuiescentTemplate, 1); err != nil {
+	if err := proxyutil.EnsureSysctl(sysctl, sysctlExpireQuiescentTemplate, 1); err != nil {
 		return nil, err
 	}

 	// Set the ip_forward sysctl we need for
-	if err := utilproxy.EnsureSysctl(sysctl, sysctlForward, 1); err != nil {
+	if err := proxyutil.EnsureSysctl(sysctl, sysctlForward, 1); err != nil {
 		return nil, err
 	}

 	if strictARP {
 		// Set the arp_ignore sysctl we need for
-		if err := utilproxy.EnsureSysctl(sysctl, sysctlArpIgnore, 1); err != nil {
+		if err := proxyutil.EnsureSysctl(sysctl, sysctlArpIgnore, 1); err != nil {
 			return nil, err
 		}

 		// Set the arp_announce sysctl we need for
-		if err := utilproxy.EnsureSysctl(sysctl, sysctlArpAnnounce, 2); err != nil {
+		if err := proxyutil.EnsureSysctl(sysctl, sysctlArpAnnounce, 2); err != nil {
 			return nil, err
 		}
 	}
@@ -409,7 +409,7 @@ func NewProxier(ipFamily v1.IPFamily,
 		scheduler = defaultScheduler
 	}

-	nodePortAddresses := utilproxy.NewNodePortAddresses(ipFamily, nodePortAddressStrings)
+	nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings)

 	serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer)

@@ -440,14 +440,14 @@ func NewProxier(ipFamily v1.IPFamily,
 		ipvsScheduler: scheduler,
 		iptablesData: bytes.NewBuffer(nil),
 		filterChainsData: bytes.NewBuffer(nil),
-		natChains: utilproxy.LineBuffer{},
-		natRules: utilproxy.LineBuffer{},
-		filterChains: utilproxy.LineBuffer{},
-		filterRules: utilproxy.LineBuffer{},
+		natChains: proxyutil.LineBuffer{},
+		natRules: proxyutil.LineBuffer{},
+		filterChains: proxyutil.LineBuffer{},
+		filterRules: proxyutil.LineBuffer{},
 		netlinkHandle: NewNetLinkHandle(ipFamily == v1.IPv6Protocol),
 		ipset: ipset,
 		nodePortAddresses: nodePortAddresses,
-		networkInterfacer: utilproxy.RealNetwork{},
+		networkInterfacer: proxyutil.RealNetwork{},
 		gracefuldeleteManager: NewGracefulTerminationManager(ipvs),
 	}
 	// initialize ipsetList with all sets we needed

@@ -43,7 +43,7 @@ import (
 	utilipvs "k8s.io/kubernetes/pkg/proxy/ipvs/util"
 	ipvstest "k8s.io/kubernetes/pkg/proxy/ipvs/util/testing"
 	"k8s.io/kubernetes/pkg/proxy/metrics"
-	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
+	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
 	proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
 	proxyutiltest "k8s.io/kubernetes/pkg/proxy/util/testing"
 	"k8s.io/kubernetes/pkg/util/async"
@@ -163,13 +163,13 @@ func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset u
 		ipvsScheduler: defaultScheduler,
 		iptablesData: bytes.NewBuffer(nil),
 		filterChainsData: bytes.NewBuffer(nil),
-		natChains: utilproxy.LineBuffer{},
-		natRules: utilproxy.LineBuffer{},
-		filterChains: utilproxy.LineBuffer{},
-		filterRules: utilproxy.LineBuffer{},
+		natChains: proxyutil.LineBuffer{},
+		natRules: proxyutil.LineBuffer{},
+		filterChains: proxyutil.LineBuffer{},
+		filterRules: proxyutil.LineBuffer{},
 		netlinkHandle: netlinkHandle,
 		ipsetList: ipsetList,
-		nodePortAddresses: utilproxy.NewNodePortAddresses(ipFamily, nil),
+		nodePortAddresses: proxyutil.NewNodePortAddresses(ipFamily, nil),
 		networkInterfacer: proxyutiltest.NewFakeNetwork(),
 		gracefuldeleteManager: NewGracefulTerminationManager(ipvs),
 		ipFamily: ipFamily,
@@ -960,7 +960,7 @@ func TestNodePortIPv4(t *testing.T) {
 			ipvs := ipvstest.NewFake()
 			ipset := ipsettest.NewFake(testIPSetVersion)
 			fp := NewFakeProxier(ipt, ipvs, ipset, test.nodeIPs, nil, v1.IPv4Protocol)
-			fp.nodePortAddresses = utilproxy.NewNodePortAddresses(v1.IPv4Protocol, test.nodePortAddresses)
+			fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, test.nodePortAddresses)

 			makeServiceMap(fp, test.services...)
 			populateEndpointSlices(fp, test.endpoints...)
@@ -1305,7 +1305,7 @@ func TestNodePortIPv6(t *testing.T) {
 			ipvs := ipvstest.NewFake()
 			ipset := ipsettest.NewFake(testIPSetVersion)
 			fp := NewFakeProxier(ipt, ipvs, ipset, test.nodeIPs, nil, v1.IPv6Protocol)
-			fp.nodePortAddresses = utilproxy.NewNodePortAddresses(v1.IPv6Protocol, test.nodePortAddresses)
+			fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv6Protocol, test.nodePortAddresses)

 			makeServiceMap(fp, test.services...)
 			populateEndpointSlices(fp, test.endpoints...)
@@ -2068,7 +2068,7 @@ func TestOnlyLocalNodePorts(t *testing.T) {
 	addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::"), Mask: net.CIDRMask(64, 128)}}
 	fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
 	fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
-	fp.nodePortAddresses = utilproxy.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"})
+	fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"})

 	fp.syncProxyRules()

@@ -2156,7 +2156,7 @@ func TestHealthCheckNodePort(t *testing.T) {
 	addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::"), Mask: net.CIDRMask(64, 128)}}
 	fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
 	fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
-	fp.nodePortAddresses = utilproxy.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"})
+	fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"})

 	fp.syncProxyRules()

@@ -32,7 +32,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	apiservice "k8s.io/kubernetes/pkg/api/v1/service"
 	"k8s.io/kubernetes/pkg/proxy/metrics"
-	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
+	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
 )

 // BaseServicePortInfo contains base information that defines a service.
@@ -165,7 +165,7 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic
 		stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
 	}

-	clusterIP := utilproxy.GetClusterIPByFamily(sct.ipFamily, service)
+	clusterIP := proxyutil.GetClusterIPByFamily(sct.ipFamily, service)
 	info := &BaseServicePortInfo{
 		clusterIP: netutils.ParseIPSloppy(clusterIP),
 		port: int(port.Port),
@@ -194,19 +194,19 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic
 	// services, this is actually expected. Hence we downgraded from reporting by events
 	// to just log lines with high verbosity

-	ipFamilyMap := utilproxy.MapIPsByIPFamily(service.Spec.ExternalIPs)
+	ipFamilyMap := proxyutil.MapIPsByIPFamily(service.Spec.ExternalIPs)
 	info.externalIPs = ipFamilyMap[sct.ipFamily]

 	// Log the IPs not matching the ipFamily
-	if ips, ok := ipFamilyMap[utilproxy.OtherIPFamily(sct.ipFamily)]; ok && len(ips) > 0 {
+	if ips, ok := ipFamilyMap[proxyutil.OtherIPFamily(sct.ipFamily)]; ok && len(ips) > 0 {
 		klog.V(4).InfoS("Service change tracker ignored the following external IPs for given service as they don't match IP Family",
 			"ipFamily", sct.ipFamily, "externalIPs", strings.Join(ips, ", "), "service", klog.KObj(service))
 	}

-	ipFamilyMap = utilproxy.MapCIDRsByIPFamily(loadBalancerSourceRanges)
+	ipFamilyMap = proxyutil.MapCIDRsByIPFamily(loadBalancerSourceRanges)
 	info.loadBalancerSourceRanges = ipFamilyMap[sct.ipFamily]
 	// Log the CIDRs not matching the ipFamily
-	if cidrs, ok := ipFamilyMap[utilproxy.OtherIPFamily(sct.ipFamily)]; ok && len(cidrs) > 0 {
+	if cidrs, ok := ipFamilyMap[proxyutil.OtherIPFamily(sct.ipFamily)]; ok && len(cidrs) > 0 {
 		klog.V(4).InfoS("Service change tracker ignored the following load balancer source ranges for given Service as they don't match IP Family",
 			"ipFamily", sct.ipFamily, "loadBalancerSourceRanges", strings.Join(cidrs, ", "), "service", klog.KObj(service))
 	}
@@ -220,9 +220,9 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic
 	}

 	if len(ips) > 0 {
-		ipFamilyMap = utilproxy.MapIPsByIPFamily(ips)
+		ipFamilyMap = proxyutil.MapIPsByIPFamily(ips)

-		if ipList, ok := ipFamilyMap[utilproxy.OtherIPFamily(sct.ipFamily)]; ok && len(ipList) > 0 {
+		if ipList, ok := ipFamilyMap[proxyutil.OtherIPFamily(sct.ipFamily)]; ok && len(ipList) > 0 {
 			klog.V(4).InfoS("Service change tracker ignored the following load balancer ingress IPs for given Service as they don't match the IP Family",
 				"ipFamily", sct.ipFamily, "loadBalancerIngressIps", strings.Join(ipList, ", "), "service", klog.KObj(service))
 		}
@@ -381,11 +381,11 @@ func (sct *ServiceChangeTracker) serviceToServiceMap(service *v1.Service) Servic
 		return nil
 	}

-	if utilproxy.ShouldSkipService(service) {
+	if proxyutil.ShouldSkipService(service) {
 		return nil
 	}

-	clusterIP := utilproxy.GetClusterIPByFamily(sct.ipFamily, service)
+	clusterIP := proxyutil.GetClusterIPByFamily(sct.ipFamily, service)
 	if clusterIP == "" {
 		return nil
 	}

@@ -47,7 +47,7 @@ import (
 	"k8s.io/kubernetes/pkg/proxy/healthcheck"
 	"k8s.io/kubernetes/pkg/proxy/metaproxier"
 	"k8s.io/kubernetes/pkg/proxy/metrics"
-	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
+	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
 	"k8s.io/kubernetes/pkg/util/async"
 	netutils "k8s.io/utils/net"
 )
@@ -692,7 +692,7 @@ func NewProxier(
 	}

 	// windows listens to all node addresses
-	nodePortAddresses := utilproxy.NewNodePortAddresses(ipFamily, nil)
+	nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nil)
 	serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer)

 	hns, supportedFeatures := newHostNetworkService()
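
As a usage note, proxyutil.NewNodePortAddresses, which appears throughout the diff, is called the same way under either alias; a minimal sketch, where the loopback CIDR mirrors the healthcheck test above and the helper name is hypothetical:

package example

import (
	v1 "k8s.io/api/core/v1"
	proxyutil "k8s.io/kubernetes/pkg/proxy/util"
)

// loopbackNodePortAddresses restricts node ports to loopback addresses,
// the same configuration several tests in this diff set up.
func loopbackNodePortAddresses() *proxyutil.NodePortAddresses {
	return proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"})
}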