Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-21 10:51:29 +00:00)

Merge pull request #109845 from danwinship/proxy-chain-variables

unexport mistakenly-exported kube-proxy constants

Commit: 9d0bb09fae
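Side note (not part of the commit): the rename works because of Go's export rule. An identifier whose name begins with an upper-case letter is exported and becomes public API for every importer of the package; a lower-case first letter makes it package-private, so it can later be renamed or removed without breaking external consumers. A minimal sketch, with a hypothetical package name:

package proxier // hypothetical name, for illustration only

import "fmt"

// Exported: importers can reference proxier.KubeMarkMasqChain, so any later
// rename or removal is a breaking API change.
const KubeMarkMasqChain = "KUBE-MARK-MASQ"

// Unexported: visible only inside this package, free to change at will.
const kubeMarkMasqChain = "KUBE-MARK-MASQ"

func printChains() {
    fmt.Println(KubeMarkMasqChain, kubeMarkMasqChain)
}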
@@ -69,11 +69,11 @@ const (
 	// the kubernetes postrouting chain
 	kubePostroutingChain utiliptables.Chain = "KUBE-POSTROUTING"
 
-	// KubeMarkMasqChain is the mark-for-masquerade chain
-	KubeMarkMasqChain utiliptables.Chain = "KUBE-MARK-MASQ"
+	// kubeMarkMasqChain is the mark-for-masquerade chain
+	kubeMarkMasqChain utiliptables.Chain = "KUBE-MARK-MASQ"
 
-	// KubeMarkDropChain is the mark-for-drop chain
-	KubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP"
+	// kubeMarkDropChain is the mark-for-drop chain
+	kubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP"
 
 	// the kubernetes forward chain
 	kubeForwardChain utiliptables.Chain = "KUBE-FORWARD"
@@ -392,7 +392,7 @@ var iptablesEnsureChains = []struct {
 	table utiliptables.Table
 	chain utiliptables.Chain
 }{
-	{utiliptables.TableNAT, KubeMarkDropChain},
+	{utiliptables.TableNAT, kubeMarkDropChain},
 }
 
 var iptablesCleanupOnlyChains = []iptablesJumpChain{
@@ -925,7 +925,7 @@ func (proxier *Proxier) syncProxyRules() {
 			proxier.filterChains.Write(utiliptables.MakeChainLine(chainName))
 		}
 	}
-	for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, KubeMarkMasqChain} {
+	for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, kubeMarkMasqChain} {
 		if chain, ok := existingNATChains[chainName]; ok {
 			proxier.natChains.WriteBytes(chain)
 		} else {
@@ -961,7 +961,7 @@ func (proxier *Proxier) syncProxyRules() {
 	// this so that it is easier to flush and change, for example if the mark
 	// value should ever change.
 	proxier.natRules.Write(
-		"-A", string(KubeMarkMasqChain),
+		"-A", string(kubeMarkMasqChain),
 		"-j", "MARK", "--or-mark", proxier.masqueradeMark,
 	)
 
@@ -1042,7 +1042,7 @@ func (proxier *Proxier) syncProxyRules() {
 			proxier.natRules.Write(
 				args,
 				"-s", epInfo.IP(),
-				"-j", string(KubeMarkMasqChain))
+				"-j", string(kubeMarkMasqChain))
 			// Update client-affinity lists.
 			if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
 				args = append(args, "-m", "recent", "--name", string(endpointChain), "--set")
@@ -1114,7 +1114,7 @@ func (proxier *Proxier) syncProxyRules() {
 			proxier.natRules.Write(
 				"-A", string(externalTrafficChain),
 				"-m", "comment", "--comment", fmt.Sprintf(`"masquerade traffic for %s external destinations"`, svcNameString),
-				"-j", string(KubeMarkMasqChain))
+				"-j", string(kubeMarkMasqChain))
 		} else {
 			// If we are only using same-node endpoints, we can retain the
 			// source IP in most cases.
@@ -1138,7 +1138,7 @@ func (proxier *Proxier) syncProxyRules() {
 				"-A", string(externalTrafficChain),
 				"-m", "comment", "--comment", fmt.Sprintf(`"masquerade LOCAL traffic for %s external destinations"`, svcNameString),
 				"-m", "addrtype", "--src-type", "LOCAL",
-				"-j", string(KubeMarkMasqChain))
+				"-j", string(kubeMarkMasqChain))
 
 			// Redirect all src-type=LOCAL -> external destination to the
 			// policy=cluster chain. This allows traffic originating
@@ -1168,7 +1168,7 @@ func (proxier *Proxier) syncProxyRules() {
 			proxier.natRules.Write(
 				"-A", string(internalTrafficChain),
 				args,
-				"-j", string(KubeMarkMasqChain))
+				"-j", string(kubeMarkMasqChain))
 		} else if proxier.localDetector.IsImplemented() {
 			// This masquerades off-cluster traffic to a service VIP. The idea
 			// is that you can establish a static route for your Service range,
@@ -1178,7 +1178,7 @@ func (proxier *Proxier) syncProxyRules() {
 				"-A", string(internalTrafficChain),
 				args,
 				proxier.localDetector.IfNotLocal(),
-				"-j", string(KubeMarkMasqChain))
+				"-j", string(kubeMarkMasqChain))
 		}
 		proxier.natRules.Write(
 			"-A", string(kubeServicesChain),
@@ -1282,7 +1282,7 @@ func (proxier *Proxier) syncProxyRules() {
 			// If the packet was able to reach the end of firewall chain,
 			// then it did not get DNATed. It means the packet cannot go
 			// thru the firewall, then mark it for DROP.
-			proxier.natRules.Write(args, "-j", string(KubeMarkDropChain))
+			proxier.natRules.Write(args, "-j", string(kubeMarkDropChain))
 		}
 
 		for _, lbip := range svcInfo.LoadBalancerIPStrings() {
@@ -1368,7 +1368,7 @@ func (proxier *Proxier) syncProxyRules() {
 				"-A", string(localPolicyChain),
 				"-m", "comment", "--comment",
 				fmt.Sprintf(`"%s has no local endpoints"`, svcNameString),
-				"-j", string(KubeMarkDropChain))
+				"-j", string(kubeMarkDropChain))
 		}
 	}
 }
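Side note (not part of the diff): the natRules.Write calls above mix literal flag strings with a pre-built args slice in one call. A rough, hypothetical sketch of how such a rule-buffer helper can accept both kinds of argument (the real kube-proxy utility is more elaborate; this is only illustrative):

package main

import (
    "bytes"
    "fmt"
)

// lineBuffer accumulates iptables-restore style rule lines.
type lineBuffer struct{ buf bytes.Buffer }

// Write joins strings and string slices with spaces and appends a newline.
func (b *lineBuffer) Write(args ...interface{}) {
    for i, arg := range args {
        if i > 0 {
            b.buf.WriteByte(' ')
        }
        switch x := arg.(type) {
        case string:
            b.buf.WriteString(x)
        case []string:
            for j, s := range x {
                if j > 0 {
                    b.buf.WriteByte(' ')
                }
                b.buf.WriteString(s)
            }
        default:
            panic(fmt.Sprintf("unsupported argument type %T", x))
        }
    }
    b.buf.WriteByte('\n')
}

func main() {
    natRules := &lineBuffer{}
    args := []string{"-A", "KUBE-SVC-EXAMPLE"} // hypothetical chain name
    natRules.Write(args, "-s", "10.0.0.1", "-j", "KUBE-MARK-MASQ")
    fmt.Print(natRules.buf.String()) // -A KUBE-SVC-EXAMPLE -s 10.0.0.1 -j KUBE-MARK-MASQ
}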
@@ -746,7 +746,7 @@ func checkIPTablesRuleJumps(ruleData string) error {
 		// Ignore jumps to chains that we expect to exist even if kube-proxy
 		// didn't create them itself.
 		jumpedChains.Delete("ACCEPT", "REJECT", "DROP", "MARK", "RETURN", "DNAT", "SNAT", "MASQUERADE")
-		jumpedChains.Delete(string(KubeMarkDropChain))
+		jumpedChains.Delete(string(kubeMarkDropChain))
 
 		// Find cases where we have "-A FOO ... -j BAR" but no ":BAR", meaning
 		// that we are jumping to a chain that was not created.
@@ -769,7 +769,7 @@ func checkIPTablesRuleJumps(ruleData string) error {
 		// Find cases where we have ":BAR" but no "-A FOO ... -j BAR", meaning
 		// that we are creating an empty chain but not using it for anything.
 		extraChains := createdChains.Difference(jumpedChains)
-		extraChains.Delete(string(kubeServicesChain), string(kubeExternalServicesChain), string(kubeNodePortsChain), string(kubePostroutingChain), string(kubeForwardChain), string(KubeMarkMasqChain))
+		extraChains.Delete(string(kubeServicesChain), string(kubeExternalServicesChain), string(kubeNodePortsChain), string(kubePostroutingChain), string(kubeForwardChain), string(kubeMarkMasqChain))
 		if len(extraChains) > 0 {
 			return fmt.Errorf("some chains in %s are created but not used: %v", tableName, extraChains.List())
 		}
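Side note (not part of the diff): the two hunks above touch a consistency check that compares the chains the rules create (":NAME" lines) against the chains the rules jump to ("-j NAME"). A simplified, self-contained sketch of that idea; the real checkIPTablesRuleJumps also works per table and, as the Delete calls show, whitelists built-in targets and chains created elsewhere:

package main

import (
    "fmt"
    "strings"
)

// checkJumps reports jumps to chains never declared, and chains declared but
// never jumped to, given iptables-save-style rule data.
func checkJumps(ruleData string) []string {
    created := map[string]bool{}
    jumped := map[string]bool{}
    for _, line := range strings.Split(ruleData, "\n") {
        fields := strings.Fields(line)
        if len(fields) > 0 && strings.HasPrefix(fields[0], ":") {
            created[strings.TrimPrefix(fields[0], ":")] = true
        }
        for i := 0; i < len(fields)-1; i++ {
            if fields[i] == "-j" {
                jumped[fields[i+1]] = true
            }
        }
    }
    var problems []string
    for c := range jumped {
        if !created[c] {
            problems = append(problems, "jump to missing chain "+c)
        }
    }
    for c := range created {
        if !jumped[c] {
            problems = append(problems, "chain created but not used "+c)
        }
    }
    return problems
}

func main() {
    data := ":KUBE-SERVICES - [0:0]\n-A PREROUTING -j KUBE-SERVICES\n-A KUBE-SERVICES -j KUBE-MISSING"
    fmt.Println(checkJumps(data)) // [jump to missing chain KUBE-MISSING]
}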
@@ -64,31 +64,31 @@ const (
 	kubeServicesChain utiliptables.Chain = "KUBE-SERVICES"
 
-	// KubeFireWallChain is the kubernetes firewall chain.
-	KubeFireWallChain utiliptables.Chain = "KUBE-FIREWALL"
+	// kubeFirewallChain is the kubernetes firewall chain.
+	kubeFirewallChain utiliptables.Chain = "KUBE-FIREWALL"
 
 	// kubePostroutingChain is the kubernetes postrouting chain
 	kubePostroutingChain utiliptables.Chain = "KUBE-POSTROUTING"
 
-	// KubeMarkMasqChain is the mark-for-masquerade chain
-	KubeMarkMasqChain utiliptables.Chain = "KUBE-MARK-MASQ"
+	// kubeMarkMasqChain is the mark-for-masquerade chain
+	kubeMarkMasqChain utiliptables.Chain = "KUBE-MARK-MASQ"
 
-	// KubeNodePortChain is the kubernetes node port chain
-	KubeNodePortChain utiliptables.Chain = "KUBE-NODE-PORT"
+	// kubeNodePortChain is the kubernetes node port chain
+	kubeNodePortChain utiliptables.Chain = "KUBE-NODE-PORT"
 
-	// KubeMarkDropChain is the mark-for-drop chain
-	KubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP"
+	// kubeMarkDropChain is the mark-for-drop chain
+	kubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP"
 
-	// KubeForwardChain is the kubernetes forward chain
-	KubeForwardChain utiliptables.Chain = "KUBE-FORWARD"
+	// kubeForwardChain is the kubernetes forward chain
+	kubeForwardChain utiliptables.Chain = "KUBE-FORWARD"
 
-	// KubeLoadBalancerChain is the kubernetes chain for loadbalancer type service
-	KubeLoadBalancerChain utiliptables.Chain = "KUBE-LOAD-BALANCER"
+	// kubeLoadBalancerChain is the kubernetes chain for loadbalancer type service
+	kubeLoadBalancerChain utiliptables.Chain = "KUBE-LOAD-BALANCER"
 
-	// DefaultScheduler is the default ipvs scheduler algorithm - round robin.
-	DefaultScheduler = "rr"
+	// defaultScheduler is the default ipvs scheduler algorithm - round robin.
+	defaultScheduler = "rr"
 
-	// DefaultDummyDevice is the default dummy interface which ipvs service address will bind to it.
-	DefaultDummyDevice = "kube-ipvs0"
+	// defaultDummyDevice is the default dummy interface which ipvs service address will bind to it.
+	defaultDummyDevice = "kube-ipvs0"
 
 	connReuseMinSupportedKernelVersion = "4.1"
 
@@ -108,8 +108,8 @@ var iptablesJumpChain = []struct {
 	{utiliptables.TableNAT, utiliptables.ChainOutput, kubeServicesChain, "kubernetes service portals"},
 	{utiliptables.TableNAT, utiliptables.ChainPrerouting, kubeServicesChain, "kubernetes service portals"},
 	{utiliptables.TableNAT, utiliptables.ChainPostrouting, kubePostroutingChain, "kubernetes postrouting rules"},
-	{utiliptables.TableFilter, utiliptables.ChainForward, KubeForwardChain, "kubernetes forwarding rules"},
-	{utiliptables.TableFilter, utiliptables.ChainInput, KubeNodePortChain, "kubernetes health check rules"},
+	{utiliptables.TableFilter, utiliptables.ChainForward, kubeForwardChain, "kubernetes forwarding rules"},
+	{utiliptables.TableFilter, utiliptables.ChainInput, kubeNodePortChain, "kubernetes health check rules"},
 }
 
 var iptablesChains = []struct {
@@ -118,19 +118,19 @@ var iptablesChains = []struct {
 }{
 	{utiliptables.TableNAT, kubeServicesChain},
 	{utiliptables.TableNAT, kubePostroutingChain},
-	{utiliptables.TableNAT, KubeFireWallChain},
-	{utiliptables.TableNAT, KubeNodePortChain},
-	{utiliptables.TableNAT, KubeLoadBalancerChain},
-	{utiliptables.TableNAT, KubeMarkMasqChain},
-	{utiliptables.TableFilter, KubeForwardChain},
-	{utiliptables.TableFilter, KubeNodePortChain},
+	{utiliptables.TableNAT, kubeFirewallChain},
+	{utiliptables.TableNAT, kubeNodePortChain},
+	{utiliptables.TableNAT, kubeLoadBalancerChain},
+	{utiliptables.TableNAT, kubeMarkMasqChain},
+	{utiliptables.TableFilter, kubeForwardChain},
+	{utiliptables.TableFilter, kubeNodePortChain},
 }
 
 var iptablesEnsureChains = []struct {
 	table utiliptables.Table
 	chain utiliptables.Chain
 }{
-	{utiliptables.TableNAT, KubeMarkDropChain},
+	{utiliptables.TableNAT, kubeMarkDropChain},
 }
 
 var iptablesCleanupChains = []struct {
@@ -139,11 +139,11 @@ var iptablesCleanupChains = []struct {
 }{
 	{utiliptables.TableNAT, kubeServicesChain},
 	{utiliptables.TableNAT, kubePostroutingChain},
-	{utiliptables.TableNAT, KubeFireWallChain},
-	{utiliptables.TableNAT, KubeNodePortChain},
-	{utiliptables.TableNAT, KubeLoadBalancerChain},
-	{utiliptables.TableFilter, KubeForwardChain},
-	{utiliptables.TableFilter, KubeNodePortChain},
+	{utiliptables.TableNAT, kubeFirewallChain},
+	{utiliptables.TableNAT, kubeNodePortChain},
+	{utiliptables.TableNAT, kubeLoadBalancerChain},
+	{utiliptables.TableFilter, kubeForwardChain},
+	{utiliptables.TableFilter, kubeNodePortChain},
 }
 
 // ipsetInfo is all ipset we needed in ipvs proxier
@@ -183,17 +183,17 @@ var ipsetWithIptablesChain = []struct {
 	protocolMatch string
 }{
 	{kubeLoopBackIPSet, string(kubePostroutingChain), "MASQUERADE", "dst,dst,src", ""},
-	{kubeLoadBalancerSet, string(kubeServicesChain), string(KubeLoadBalancerChain), "dst,dst", ""},
-	{kubeLoadbalancerFWSet, string(KubeLoadBalancerChain), string(KubeFireWallChain), "dst,dst", ""},
-	{kubeLoadBalancerSourceCIDRSet, string(KubeFireWallChain), "RETURN", "dst,dst,src", ""},
-	{kubeLoadBalancerSourceIPSet, string(KubeFireWallChain), "RETURN", "dst,dst,src", ""},
-	{kubeLoadBalancerLocalSet, string(KubeLoadBalancerChain), "RETURN", "dst,dst", ""},
-	{kubeNodePortLocalSetTCP, string(KubeNodePortChain), "RETURN", "dst", utilipset.ProtocolTCP},
-	{kubeNodePortSetTCP, string(KubeNodePortChain), string(KubeMarkMasqChain), "dst", utilipset.ProtocolTCP},
-	{kubeNodePortLocalSetUDP, string(KubeNodePortChain), "RETURN", "dst", utilipset.ProtocolUDP},
-	{kubeNodePortSetUDP, string(KubeNodePortChain), string(KubeMarkMasqChain), "dst", utilipset.ProtocolUDP},
-	{kubeNodePortLocalSetSCTP, string(KubeNodePortChain), "RETURN", "dst,dst", utilipset.ProtocolSCTP},
-	{kubeNodePortSetSCTP, string(KubeNodePortChain), string(KubeMarkMasqChain), "dst,dst", utilipset.ProtocolSCTP},
+	{kubeLoadBalancerSet, string(kubeServicesChain), string(kubeLoadBalancerChain), "dst,dst", ""},
+	{kubeLoadbalancerFWSet, string(kubeLoadBalancerChain), string(kubeFirewallChain), "dst,dst", ""},
+	{kubeLoadBalancerSourceCIDRSet, string(kubeFirewallChain), "RETURN", "dst,dst,src", ""},
+	{kubeLoadBalancerSourceIPSet, string(kubeFirewallChain), "RETURN", "dst,dst,src", ""},
+	{kubeLoadBalancerLocalSet, string(kubeLoadBalancerChain), "RETURN", "dst,dst", ""},
+	{kubeNodePortLocalSetTCP, string(kubeNodePortChain), "RETURN", "dst", utilipset.ProtocolTCP},
+	{kubeNodePortSetTCP, string(kubeNodePortChain), string(kubeMarkMasqChain), "dst", utilipset.ProtocolTCP},
+	{kubeNodePortLocalSetUDP, string(kubeNodePortChain), "RETURN", "dst", utilipset.ProtocolUDP},
+	{kubeNodePortSetUDP, string(kubeNodePortChain), string(kubeMarkMasqChain), "dst", utilipset.ProtocolUDP},
+	{kubeNodePortLocalSetSCTP, string(kubeNodePortChain), "RETURN", "dst,dst", utilipset.ProtocolSCTP},
+	{kubeNodePortSetSCTP, string(kubeNodePortChain), string(kubeMarkMasqChain), "dst,dst", utilipset.ProtocolSCTP},
 }
 
 // In IPVS proxy mode, the following flags need to be set
@@ -334,7 +334,7 @@ func (r *realIPGetter) NodeIPs() (ips []net.IP, err error) {
 
 // BindedIPs returns all addresses that are binded to the IPVS dummy interface kube-ipvs0
 func (r *realIPGetter) BindedIPs() (sets.String, error) {
-	return r.nl.GetLocalAddresses(DefaultDummyDevice)
+	return r.nl.GetLocalAddresses(defaultDummyDevice)
 }
 
 // Proxier implements proxy.Provider
@@ -449,7 +449,7 @@ func NewProxier(ipt utiliptables.Interface,
 
 	if len(scheduler) == 0 {
 		klog.InfoS("IPVS scheduler not specified, use rr by default")
-		scheduler = DefaultScheduler
+		scheduler = defaultScheduler
 	}
 
 	serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses)
@@ -732,7 +732,7 @@ func CanUseIPVSProxier(handle KernelHandler, ipsetver IPSetVersioner, scheduler
 	wantModules := sets.NewString()
 	// We check for the existence of the scheduler mod and will trigger a missingMods error if not found
 	if scheduler == "" {
-		scheduler = DefaultScheduler
+		scheduler = defaultScheduler
 	}
 	schedulerMod := "ip_vs_" + scheduler
 	mods = append(mods, schedulerMod)
@@ -819,9 +819,9 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset
 	}
 	// Delete dummy interface created by ipvs Proxier.
 	nl := NewNetLinkHandle(false)
-	err := nl.DeleteDummyDevice(DefaultDummyDevice)
+	err := nl.DeleteDummyDevice(defaultDummyDevice)
 	if err != nil {
-		klog.ErrorS(err, "Error deleting dummy device created by ipvs proxier", "device", DefaultDummyDevice)
+		klog.ErrorS(err, "Error deleting dummy device created by ipvs proxier", "device", defaultDummyDevice)
 		encounteredError = true
 	}
 	// Clear iptables created by ipvs Proxier.
@@ -1060,9 +1060,9 @@ func (proxier *Proxier) syncProxyRules() {
 	proxier.createAndLinkKubeChain()
 
 	// make sure dummy interface exists in the system where ipvs Proxier will bind service address on it
-	_, err := proxier.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)
+	_, err := proxier.netlinkHandle.EnsureDummyDevice(defaultDummyDevice)
 	if err != nil {
-		klog.ErrorS(err, "Failed to create dummy interface", "interface", DefaultDummyDevice)
+		klog.ErrorS(err, "Failed to create dummy interface", "interface", defaultDummyDevice)
 		return
 	}
 
@@ -1078,7 +1078,7 @@ func (proxier *Proxier) syncProxyRules() {
 	activeIPVSServices := map[string]bool{}
 	// currentIPVSServices represent IPVS services listed from the system
 	currentIPVSServices := make(map[string]*utilipvs.VirtualServer)
-	// activeBindAddrs represents ip address successfully bind to DefaultDummyDevice in this round of sync
+	// activeBindAddrs represents ip address successfully bind to defaultDummyDevice in this round of sync
 	activeBindAddrs := map[string]bool{}
 
 	bindedAddresses, err := proxier.ipGetter.BindedIPs()
@@ -1575,8 +1575,8 @@ func (proxier *Proxier) syncProxyRules() {
 	}
 
 	// Get legacy bind address
-	// currentBindAddrs represents ip addresses bind to DefaultDummyDevice from the system
-	currentBindAddrs, err := proxier.netlinkHandle.ListBindAddress(DefaultDummyDevice)
+	// currentBindAddrs represents ip addresses bind to defaultDummyDevice from the system
+	currentBindAddrs, err := proxier.netlinkHandle.ListBindAddress(defaultDummyDevice)
 	if err != nil {
 		klog.ErrorS(err, "Failed to get bind address")
 	}
@@ -1661,7 +1661,7 @@ func (proxier *Proxier) writeIptablesRules() {
 	if proxier.masqueradeAll {
 		proxier.natRules.Write(
 			args, "dst,dst",
-			"-j", string(KubeMarkMasqChain))
+			"-j", string(kubeMarkMasqChain))
 	} else if proxier.localDetector.IsImplemented() {
 		// This masquerades off-cluster traffic to a service VIP. The idea
 		// is that you can establish a static route for your Service range,
@@ -1671,7 +1671,7 @@ func (proxier *Proxier) writeIptablesRules() {
 		proxier.natRules.Write(
 			args, "dst,dst",
 			proxier.localDetector.IfNotLocal(),
-			"-j", string(KubeMarkMasqChain))
+			"-j", string(kubeMarkMasqChain))
 	} else {
 		// Masquerade all OUTPUT traffic coming from a service ip.
 		// The kube dummy interface has all service VIPs assigned which
@@ -1682,7 +1682,7 @@ func (proxier *Proxier) writeIptablesRules() {
 		// source ip and service port destination fixes the outgoing connections.
 		proxier.natRules.Write(
 			args, "src,dst",
-			"-j", string(KubeMarkMasqChain))
+			"-j", string(kubeMarkMasqChain))
 	}
 }
 
@@ -1710,7 +1710,7 @@ func (proxier *Proxier) writeIptablesRules() {
 			"-m", "set", "--match-set", proxier.ipsetList[kubeExternalIPSet].Name,
 			"dst,dst",
 		)
-		proxier.natRules.Write(args, "-j", string(KubeMarkMasqChain))
+		proxier.natRules.Write(args, "-j", string(kubeMarkMasqChain))
 		externalIPRules(args)
 	}
 
@@ -1729,18 +1729,18 @@ func (proxier *Proxier) writeIptablesRules() {
 		"-A", string(kubeServicesChain),
 		"-m", "addrtype", "--dst-type", "LOCAL",
 	)
-	proxier.natRules.Write(args, "-j", string(KubeNodePortChain))
+	proxier.natRules.Write(args, "-j", string(kubeNodePortChain))
 
 	// mark drop for KUBE-LOAD-BALANCER
 	proxier.natRules.Write(
-		"-A", string(KubeLoadBalancerChain),
-		"-j", string(KubeMarkMasqChain),
+		"-A", string(kubeLoadBalancerChain),
+		"-j", string(kubeMarkMasqChain),
 	)
 
 	// mark drop for KUBE-FIRE-WALL
 	proxier.natRules.Write(
-		"-A", string(KubeFireWallChain),
-		"-j", string(KubeMarkDropChain),
+		"-A", string(kubeFirewallChain),
+		"-j", string(kubeMarkDropChain),
 	)
 
 	// Accept all traffic with destination of ipvs virtual service, in case other iptables rules
@@ -1752,7 +1752,7 @@ func (proxier *Proxier) writeIptablesRules() {
 	// traffic, this allows NodePort traffic to be forwarded even if the default
 	// FORWARD policy is not accept.
 	proxier.filterRules.Write(
-		"-A", string(KubeForwardChain),
+		"-A", string(kubeForwardChain),
 		"-m", "comment", "--comment", `"kubernetes forwarding rules"`,
 		"-m", "mark", "--mark", fmt.Sprintf("%s/%s", proxier.masqueradeMark, proxier.masqueradeMark),
 		"-j", "ACCEPT",
@@ -1761,7 +1761,7 @@ func (proxier *Proxier) writeIptablesRules() {
 	// The following rule ensures the traffic after the initial packet accepted
 	// by the "kubernetes forwarding rules" rule above will be accepted.
 	proxier.filterRules.Write(
-		"-A", string(KubeForwardChain),
+		"-A", string(kubeForwardChain),
 		"-m", "comment", "--comment", `"kubernetes forwarding conntrack rule"`,
 		"-m", "conntrack",
 		"--ctstate", "RELATED,ESTABLISHED",
@@ -1770,7 +1770,7 @@ func (proxier *Proxier) writeIptablesRules() {
 
 	// Add rule to accept traffic towards health check node port
 	proxier.filterRules.Write(
-		"-A", string(KubeNodePortChain),
+		"-A", string(kubeNodePortChain),
 		"-m", "comment", "--comment", proxier.ipsetList[kubeHealthCheckNodePortSet].getComment(),
 		"-m", "set", "--match-set", proxier.ipsetList[kubeHealthCheckNodePortSet].Name, "dst",
 		"-j", "ACCEPT",
@@ -1805,7 +1805,7 @@ func (proxier *Proxier) writeIptablesRules() {
 	// this so that it is easier to flush and change, for example if the mark
 	// value should ever change.
 	proxier.natRules.Write(
-		"-A", string(KubeMarkMasqChain),
+		"-A", string(kubeMarkMasqChain),
 		"-j", "MARK", "--or-mark", proxier.masqueradeMark,
 	)
 
@@ -1949,7 +1949,7 @@ func (proxier *Proxier) syncService(svcName string, vs *utilipvs.VirtualServer,
 	}
 
 	klog.V(4).InfoS("Bind address", "address", vs.Address)
-	_, err := proxier.netlinkHandle.EnsureAddressBind(vs.Address.String(), DefaultDummyDevice)
+	_, err := proxier.netlinkHandle.EnsureAddressBind(vs.Address.String(), defaultDummyDevice)
 	if err != nil {
 		klog.ErrorS(err, "Failed to bind service address to dummy device", "serviceName", svcName)
 		return err
@@ -2109,8 +2109,8 @@ func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, curre
 		addr := svc.Address.String()
 		if _, ok := legacyBindAddrs[addr]; ok {
 			klog.V(4).InfoS("Unbinding address", "address", addr)
-			if err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice); err != nil {
-				klog.ErrorS(err, "Failed to unbind service from dummy interface", "interface", DefaultDummyDevice, "address", addr)
+			if err := proxier.netlinkHandle.UnbindAddress(addr, defaultDummyDevice); err != nil {
+				klog.ErrorS(err, "Failed to unbind service from dummy interface", "interface", defaultDummyDevice, "address", addr)
 			} else {
 				// In case we delete a multi-port service, avoid trying to unbind multiple times
 				delete(legacyBindAddrs, addr)
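Side note (not part of the diff): the defaultDummyDevice calls above go through a netlink wrapper. The interface below is inferred purely from the call sites visible in this diff (the actual upstream interface has more methods and may differ in detail). IPVS mode binds every service VIP to the dummy device kube-ipvs0 so the kernel accepts packets addressed to those VIPs, and cleanup later lists and unbinds them:

package proxier // illustrative only

import "k8s.io/apimachinery/pkg/util/sets"

// NetLinkHandle, as suggested by the calls in the hunks above.
type NetLinkHandle interface {
    EnsureDummyDevice(devName string) (exists bool, err error)
    DeleteDummyDevice(devName string) error
    EnsureAddressBind(address, devName string) (exists bool, err error)
    UnbindAddress(address, devName string) error
    ListBindAddress(devName string) ([]string, error)
    GetLocalAddresses(dev string) (sets.String, error)
}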
@@ -144,7 +144,7 @@ func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset u
 		localDetector:       proxyutiliptables.NewNoOpLocalDetector(),
 		hostname:            testHostname,
 		serviceHealthServer: healthcheck.NewFakeServiceHealthServer(),
-		ipvsScheduler:       DefaultScheduler,
+		ipvsScheduler:       defaultScheduler,
 		ipGetter:            &fakeIPGetter{nodeIPs: nodeIPs},
 		iptablesData:        bytes.NewBuffer(nil),
 		filterChainsData:    bytes.NewBuffer(nil),
@@ -655,15 +655,15 @@ func TestNodePortIPv4(t *testing.T) {
 			}},
 		},
 		expectedIptablesChains: netlinktest.ExpectedIptablesChain{
-			string(KubeNodePortChain): {{
-				JumpChain: string(KubeMarkMasqChain), MatchSet: kubeNodePortSetUDP,
+			string(kubeNodePortChain): {{
+				JumpChain: string(kubeMarkMasqChain), MatchSet: kubeNodePortSetUDP,
 			}, {
 				JumpChain: "ACCEPT", MatchSet: kubeHealthCheckNodePortSet,
 			}},
 			string(kubeServicesChain): {{
-				JumpChain: string(KubeMarkMasqChain), MatchSet: kubeClusterIPSet,
+				JumpChain: string(kubeMarkMasqChain), MatchSet: kubeClusterIPSet,
 			}, {
-				JumpChain: string(KubeNodePortChain), MatchSet: "",
+				JumpChain: string(kubeNodePortChain), MatchSet: "",
 			}, {
 				JumpChain: "ACCEPT", MatchSet: kubeClusterIPSet,
 			}},
@@ -982,10 +982,10 @@ func TestNodePortIPv4(t *testing.T) {
 			},
 		},
 		expectedIptablesChains: netlinktest.ExpectedIptablesChain{
-			string(KubeNodePortChain): {{
+			string(kubeNodePortChain): {{
 				JumpChain: "RETURN", MatchSet: kubeNodePortLocalSetSCTP,
 			}, {
-				JumpChain: string(KubeMarkMasqChain), MatchSet: kubeNodePortSetSCTP,
+				JumpChain: string(kubeMarkMasqChain), MatchSet: kubeNodePortSetSCTP,
 			}, {
 				JumpChain: "ACCEPT", MatchSet: kubeHealthCheckNodePortSet,
 			}},
@@ -1947,18 +1947,18 @@ func TestLoadBalancer(t *testing.T) {
 	// Check iptables chain and rules
 	epIpt := netlinktest.ExpectedIptablesChain{
 		string(kubeServicesChain): {{
-			JumpChain: string(KubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet,
+			JumpChain: string(kubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet,
 		}, {
-			JumpChain: string(KubeMarkMasqChain), MatchSet: kubeClusterIPSet,
+			JumpChain: string(kubeMarkMasqChain), MatchSet: kubeClusterIPSet,
 		}, {
-			JumpChain: string(KubeNodePortChain), MatchSet: "",
+			JumpChain: string(kubeNodePortChain), MatchSet: "",
 		}, {
 			JumpChain: "ACCEPT", MatchSet: kubeClusterIPSet,
 		}, {
 			JumpChain: "ACCEPT", MatchSet: kubeLoadBalancerSet,
 		}},
 		string(kubeLoadBalancerSet): {{
-			JumpChain: string(KubeMarkMasqChain), MatchSet: "",
+			JumpChain: string(kubeMarkMasqChain), MatchSet: "",
 		}},
 	}
 	checkIptables(t, ipt, epIpt)
@@ -2047,16 +2047,16 @@ func TestOnlyLocalNodePorts(t *testing.T) {
 	// Check iptables chain and rules
 	epIpt := netlinktest.ExpectedIptablesChain{
 		string(kubeServicesChain): {{
-			JumpChain: string(KubeMarkMasqChain), MatchSet: kubeClusterIPSet,
+			JumpChain: string(kubeMarkMasqChain), MatchSet: kubeClusterIPSet,
 		}, {
-			JumpChain: string(KubeNodePortChain), MatchSet: "",
+			JumpChain: string(kubeNodePortChain), MatchSet: "",
 		}, {
 			JumpChain: "ACCEPT", MatchSet: kubeClusterIPSet,
 		}},
-		string(KubeNodePortChain): {{
+		string(kubeNodePortChain): {{
 			JumpChain: "RETURN", MatchSet: kubeNodePortLocalSetTCP,
 		}, {
-			JumpChain: string(KubeMarkMasqChain), MatchSet: kubeNodePortSetTCP,
+			JumpChain: string(kubeMarkMasqChain), MatchSet: kubeNodePortSetTCP,
 		}, {
 			JumpChain: "ACCEPT", MatchSet: kubeHealthCheckNodePortSet,
 		}},
@@ -2126,10 +2126,10 @@ func TestHealthCheckNodePort(t *testing.T) {
 
 	// Check iptables chain and rules
 	epIpt := netlinktest.ExpectedIptablesChain{
-		string(KubeNodePortChain): {{
+		string(kubeNodePortChain): {{
 			JumpChain: "RETURN", MatchSet: kubeNodePortLocalSetTCP,
 		}, {
-			JumpChain: string(KubeMarkMasqChain), MatchSet: kubeNodePortSetTCP,
+			JumpChain: string(kubeMarkMasqChain), MatchSet: kubeNodePortSetTCP,
 		}, {
 			JumpChain: "ACCEPT", MatchSet: kubeHealthCheckNodePortSet,
 		}},
@@ -2219,25 +2219,25 @@ func TestLoadBalanceSourceRanges(t *testing.T) {
 	// Check iptables chain and rules
 	epIpt := netlinktest.ExpectedIptablesChain{
 		string(kubeServicesChain): {{
-			JumpChain: string(KubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet,
+			JumpChain: string(kubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet,
 		}, {
-			JumpChain: string(KubeMarkMasqChain), MatchSet: kubeClusterIPSet,
+			JumpChain: string(kubeMarkMasqChain), MatchSet: kubeClusterIPSet,
 		}, {
-			JumpChain: string(KubeNodePortChain), MatchSet: "",
+			JumpChain: string(kubeNodePortChain), MatchSet: "",
 		}, {
 			JumpChain: "ACCEPT", MatchSet: kubeClusterIPSet,
 		}, {
 			JumpChain: "ACCEPT", MatchSet: kubeLoadBalancerSet,
 		}},
-		string(KubeLoadBalancerChain): {{
-			JumpChain: string(KubeFireWallChain), MatchSet: kubeLoadbalancerFWSet,
+		string(kubeLoadBalancerChain): {{
+			JumpChain: string(kubeFirewallChain), MatchSet: kubeLoadbalancerFWSet,
 		}, {
-			JumpChain: string(KubeMarkMasqChain), MatchSet: "",
+			JumpChain: string(kubeMarkMasqChain), MatchSet: "",
 		}},
-		string(KubeFireWallChain): {{
+		string(kubeFirewallChain): {{
 			JumpChain: "RETURN", MatchSet: kubeLoadBalancerSourceCIDRSet,
 		}, {
-			JumpChain: string(KubeMarkDropChain), MatchSet: "",
+			JumpChain: string(kubeMarkDropChain), MatchSet: "",
 		}},
 	}
 	checkIptables(t, ipt, epIpt)
@@ -2300,12 +2300,12 @@ func TestAcceptIPVSTraffic(t *testing.T) {
 	// Check iptables chain and rules
 	epIpt := netlinktest.ExpectedIptablesChain{
 		string(kubeServicesChain): {
-			{JumpChain: string(KubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet},
-			{JumpChain: string(KubeMarkMasqChain), MatchSet: kubeClusterIPSet},
-			{JumpChain: string(KubeMarkMasqChain), MatchSet: kubeExternalIPSet},
+			{JumpChain: string(kubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet},
+			{JumpChain: string(kubeMarkMasqChain), MatchSet: kubeClusterIPSet},
+			{JumpChain: string(kubeMarkMasqChain), MatchSet: kubeExternalIPSet},
 			{JumpChain: "ACCEPT", MatchSet: kubeExternalIPSet}, // With externalTrafficOnlyArgs
 			{JumpChain: "ACCEPT", MatchSet: kubeExternalIPSet}, // With dstLocalOnlyArgs
-			{JumpChain: string(KubeNodePortChain), MatchSet: ""},
+			{JumpChain: string(kubeNodePortChain), MatchSet: ""},
 			{JumpChain: "ACCEPT", MatchSet: kubeClusterIPSet},
 			{JumpChain: "ACCEPT", MatchSet: kubeLoadBalancerSet},
 		},
@@ -2398,20 +2398,20 @@ func TestOnlyLocalLoadBalancing(t *testing.T) {
 	// Check iptables chain and rules
 	epIpt := netlinktest.ExpectedIptablesChain{
 		string(kubeServicesChain): {{
-			JumpChain: string(KubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet,
+			JumpChain: string(kubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet,
 		}, {
-			JumpChain: string(KubeMarkMasqChain), MatchSet: kubeClusterIPSet,
+			JumpChain: string(kubeMarkMasqChain), MatchSet: kubeClusterIPSet,
 		}, {
-			JumpChain: string(KubeNodePortChain), MatchSet: "",
+			JumpChain: string(kubeNodePortChain), MatchSet: "",
 		}, {
 			JumpChain: "ACCEPT", MatchSet: kubeClusterIPSet,
 		}, {
 			JumpChain: "ACCEPT", MatchSet: kubeLoadBalancerSet,
 		}},
-		string(KubeLoadBalancerChain): {{
+		string(kubeLoadBalancerChain): {{
 			JumpChain: "RETURN", MatchSet: kubeLoadBalancerLocalSet,
 		}, {
-			JumpChain: string(KubeMarkMasqChain), MatchSet: "",
+			JumpChain: string(kubeMarkMasqChain), MatchSet: "",
 		}},
 	}
 	checkIptables(t, ipt, epIpt)
@@ -3786,7 +3786,7 @@ func Test_syncService(t *testing.T) {
 		ipset := ipsettest.NewFake(testIPSetVersion)
 		proxier := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
 
-		proxier.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)
+		proxier.netlinkHandle.EnsureDummyDevice(defaultDummyDevice)
 		if testCases[i].oldVirtualServer != nil {
 			if err := proxier.ipvs.AddVirtualServer(testCases[i].oldVirtualServer); err != nil {
 				t.Errorf("Case [%d], unexpected add IPVS virtual server error: %v", i, err)
@@ -3967,12 +3967,12 @@ func TestCleanLegacyService(t *testing.T) {
 		fp.ipvs.AddVirtualServer(currentServices[v])
 	}
 
-	fp.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)
+	fp.netlinkHandle.EnsureDummyDevice(defaultDummyDevice)
 	activeBindAddrs := map[string]bool{"1.1.1.1": true, "2.2.2.2": true, "3.3.3.3": true, "4.4.4.4": true}
 	// This is ipv4-only so ipv6 addresses should be ignored
 	currentBindAddrs := []string{"1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4", "5.5.5.5", "6.6.6.6", "fd80::1:2:3", "fd80::1:2:4"}
 	for i := range currentBindAddrs {
-		fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], DefaultDummyDevice)
+		fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], defaultDummyDevice)
 	}
 
 	fp.cleanLegacyService(activeServices, currentServices, map[string]bool{"5.5.5.5": true, "6.6.6.6": true})
@@ -3992,7 +3992,7 @@ func TestCleanLegacyService(t *testing.T) {
 	}
 
 	// Addresses 5.5.5.5 and 6.6.6.6 should not be bound any more, but the ipv6 addresses should remain
-	remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
+	remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(defaultDummyDevice)
 	if len(remainingAddrs) != 6 {
 		t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 6, len(remainingAddrs))
 	}
@@ -4066,11 +4066,11 @@ func TestCleanLegacyServiceWithRealServers(t *testing.T) {
 		fp.ipvs.AddRealServer(v, r)
 	}
 
-	fp.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)
+	fp.netlinkHandle.EnsureDummyDevice(defaultDummyDevice)
 	activeBindAddrs := map[string]bool{"3.3.3.3": true}
 	currentBindAddrs := []string{"1.1.1.1", "2.2.2.2", "3.3.3.3"}
 	for i := range currentBindAddrs {
-		fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], DefaultDummyDevice)
+		fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], defaultDummyDevice)
 	}
 
 	fp.cleanLegacyService(activeServices, currentServices, map[string]bool{"1.1.1.1": true, "2.2.2.2": true})
@@ -4085,7 +4085,7 @@ func TestCleanLegacyServiceWithRealServers(t *testing.T) {
 		t.Errorf("unexpected IPVS service")
 	}
 
-	remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
+	remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(defaultDummyDevice)
 	if len(remainingAddrs) != 1 {
 		t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 1, len(remainingAddrs))
 	}
@@ -4140,9 +4140,9 @@ func TestCleanLegacyRealServersExcludeCIDRs(t *testing.T) {
 		fp.ipvs.AddRealServer(vs, rs)
 	}
 
-	fp.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)
+	fp.netlinkHandle.EnsureDummyDevice(defaultDummyDevice)
 
-	fp.netlinkHandle.EnsureAddressBind("4.4.4.4", DefaultDummyDevice)
+	fp.netlinkHandle.EnsureAddressBind("4.4.4.4", defaultDummyDevice)
 
 	fp.cleanLegacyService(
 		map[string]bool{},
@@ -4224,12 +4224,12 @@ func TestCleanLegacyService6(t *testing.T) {
 		fp.ipvs.AddVirtualServer(currentServices[v])
 	}
 
-	fp.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)
+	fp.netlinkHandle.EnsureDummyDevice(defaultDummyDevice)
 	activeBindAddrs := map[string]bool{"1000::1": true, "1000::2": true, "3000::1": true, "4000::1": true}
 	// This is ipv6-only so ipv4 addresses should be ignored
 	currentBindAddrs := []string{"1000::1", "1000::2", "3000::1", "4000::1", "5000::1", "1000::6", "1.1.1.1", "2.2.2.2"}
 	for i := range currentBindAddrs {
-		fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], DefaultDummyDevice)
+		fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], defaultDummyDevice)
 	}
 
 	fp.cleanLegacyService(activeServices, currentServices, map[string]bool{"5000::1": true, "1000::6": true})
@@ -4249,7 +4249,7 @@ func TestCleanLegacyService6(t *testing.T) {
 	}
 
 	// Addresses 5000::1 and 1000::6 should not be bound any more, but the ipv4 addresses should remain
-	remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
+	remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(defaultDummyDevice)
 	if len(remainingAddrs) != 6 {
 		t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 6, len(remainingAddrs))
 	}
@@ -4297,7 +4297,7 @@ func TestMultiPortServiceBindAddr(t *testing.T) {
 	// first, add multi-port service1
 	fp.OnServiceAdd(service1)
 	fp.syncProxyRules()
-	remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
+	remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(defaultDummyDevice)
 	// should only remain address "172.16.55.4"
 	if len(remainingAddrs) != 1 {
 		t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 1, len(remainingAddrs))
@@ -4309,7 +4309,7 @@ func TestMultiPortServiceBindAddr(t *testing.T) {
 	// update multi-port service1 to single-port service2
 	fp.OnServiceUpdate(service1, service2)
 	fp.syncProxyRules()
-	remainingAddrs, _ = fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
+	remainingAddrs, _ = fp.netlinkHandle.ListBindAddress(defaultDummyDevice)
 	// should still only remain address "172.16.55.4"
 	if len(remainingAddrs) != 1 {
 		t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 1, len(remainingAddrs))
@@ -4320,7 +4320,7 @@ func TestMultiPortServiceBindAddr(t *testing.T) {
 	// update single-port service2 to multi-port service3
 	fp.OnServiceUpdate(service2, service3)
 	fp.syncProxyRules()
-	remainingAddrs, _ = fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
+	remainingAddrs, _ = fp.netlinkHandle.ListBindAddress(defaultDummyDevice)
 	// should still only remain address "172.16.55.4"
 	if len(remainingAddrs) != 1 {
 		t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 1, len(remainingAddrs))
@@ -4331,7 +4331,7 @@ func TestMultiPortServiceBindAddr(t *testing.T) {
 	// delete multi-port service3
 	fp.OnServiceDelete(service3)
 	fp.syncProxyRules()
-	remainingAddrs, _ = fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
+	remainingAddrs, _ = fp.netlinkHandle.ListBindAddress(defaultDummyDevice)
 	// all addresses should be unbound
 	if len(remainingAddrs) != 0 {
 		t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 0, len(remainingAddrs))
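Side note (not part of the diff): the tests above keep compiling after the rename without any other change because Go test files that declare the same package as the code under test (rather than a separate foo_test package) share its unexported scope. A hypothetical two-file illustration:

// chains.go
package proxier

const kubeMarkMasqChain = "KUBE-MARK-MASQ" // unexported, package-scoped

// chains_test.go — declares package proxier, not package proxier_test,
// so the unexported constant is directly visible.
package proxier

import "testing"

func TestChainName(t *testing.T) {
    if kubeMarkMasqChain != "KUBE-MARK-MASQ" {
        t.Fatalf("unexpected chain name: %q", kubeMarkMasqChain)
    }
}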