Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-22 19:31:44 +00:00)
Merge pull request #109845 from danwinship/proxy-chain-variables
unexport mistakenly-exported kube-proxy constants
This commit is contained in: commit 9d0bb09fae
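This change renames kube-proxy's internal chain-name constants (KubeMarkMasqChain, KubeMarkDropChain, KubeForwardChain, KubeNodePortChain, KubeLoadBalancerChain, KubeFireWallChain) and the IPVS defaults DefaultScheduler and DefaultDummyDevice to unexported, lower-case identifiers; the iptables chain names themselves ("KUBE-MARK-MASQ" and friends) are unchanged. The sketch below only illustrates the Go visibility rule the rename relies on; it is not code from this commit, and the type and helper names are stand-ins.

package ipvs

// Chain stands in for utiliptables.Chain from the real proxier code; this
// file is an illustrative sketch of Go identifier visibility only.
type Chain string

// Lower-case first letter: the constant is now private to package ipvs.
const kubeMarkMasqChain Chain = "KUBE-MARK-MASQ"

// markMasqArgs shows that code inside the same package keeps compiling
// unchanged after the rename.
func markMasqArgs() []string {
	return []string{"-j", string(kubeMarkMasqChain)}
}

From any other package, a reference such as ipvs.kubeMarkMasqChain no longer resolves, which is the point of the PR: these chain names were exported by mistake and were never meant to be public API.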
@@ -69,11 +69,11 @@ const (
 // the kubernetes postrouting chain
 kubePostroutingChain utiliptables.Chain = "KUBE-POSTROUTING"
 
-// KubeMarkMasqChain is the mark-for-masquerade chain
-KubeMarkMasqChain utiliptables.Chain = "KUBE-MARK-MASQ"
+// kubeMarkMasqChain is the mark-for-masquerade chain
+kubeMarkMasqChain utiliptables.Chain = "KUBE-MARK-MASQ"
 
-// KubeMarkDropChain is the mark-for-drop chain
-KubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP"
+// kubeMarkDropChain is the mark-for-drop chain
+kubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP"
 
 // the kubernetes forward chain
 kubeForwardChain utiliptables.Chain = "KUBE-FORWARD"
@@ -392,7 +392,7 @@ var iptablesEnsureChains = []struct {
 table utiliptables.Table
 chain utiliptables.Chain
 }{
-{utiliptables.TableNAT, KubeMarkDropChain},
+{utiliptables.TableNAT, kubeMarkDropChain},
 }
 
 var iptablesCleanupOnlyChains = []iptablesJumpChain{
@@ -925,7 +925,7 @@ func (proxier *Proxier) syncProxyRules() {
 proxier.filterChains.Write(utiliptables.MakeChainLine(chainName))
 }
 }
-for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, KubeMarkMasqChain} {
+for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, kubeMarkMasqChain} {
 if chain, ok := existingNATChains[chainName]; ok {
 proxier.natChains.WriteBytes(chain)
 } else {
@@ -961,7 +961,7 @@ func (proxier *Proxier) syncProxyRules() {
 // this so that it is easier to flush and change, for example if the mark
 // value should ever change.
 proxier.natRules.Write(
-"-A", string(KubeMarkMasqChain),
+"-A", string(kubeMarkMasqChain),
 "-j", "MARK", "--or-mark", proxier.masqueradeMark,
 )
 
@@ -1042,7 +1042,7 @@ func (proxier *Proxier) syncProxyRules() {
 proxier.natRules.Write(
 args,
 "-s", epInfo.IP(),
-"-j", string(KubeMarkMasqChain))
+"-j", string(kubeMarkMasqChain))
 // Update client-affinity lists.
 if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
 args = append(args, "-m", "recent", "--name", string(endpointChain), "--set")
@@ -1114,7 +1114,7 @@ func (proxier *Proxier) syncProxyRules() {
 proxier.natRules.Write(
 "-A", string(externalTrafficChain),
 "-m", "comment", "--comment", fmt.Sprintf(`"masquerade traffic for %s external destinations"`, svcNameString),
-"-j", string(KubeMarkMasqChain))
+"-j", string(kubeMarkMasqChain))
 } else {
 // If we are only using same-node endpoints, we can retain the
 // source IP in most cases.
@@ -1138,7 +1138,7 @@ func (proxier *Proxier) syncProxyRules() {
 "-A", string(externalTrafficChain),
 "-m", "comment", "--comment", fmt.Sprintf(`"masquerade LOCAL traffic for %s external destinations"`, svcNameString),
 "-m", "addrtype", "--src-type", "LOCAL",
-"-j", string(KubeMarkMasqChain))
+"-j", string(kubeMarkMasqChain))
 
 // Redirect all src-type=LOCAL -> external destination to the
 // policy=cluster chain. This allows traffic originating
@@ -1168,7 +1168,7 @@ func (proxier *Proxier) syncProxyRules() {
 proxier.natRules.Write(
 "-A", string(internalTrafficChain),
 args,
-"-j", string(KubeMarkMasqChain))
+"-j", string(kubeMarkMasqChain))
 } else if proxier.localDetector.IsImplemented() {
 // This masquerades off-cluster traffic to a service VIP. The idea
 // is that you can establish a static route for your Service range,
@@ -1178,7 +1178,7 @@ func (proxier *Proxier) syncProxyRules() {
 "-A", string(internalTrafficChain),
 args,
 proxier.localDetector.IfNotLocal(),
-"-j", string(KubeMarkMasqChain))
+"-j", string(kubeMarkMasqChain))
 }
 proxier.natRules.Write(
 "-A", string(kubeServicesChain),
@@ -1282,7 +1282,7 @@ func (proxier *Proxier) syncProxyRules() {
 // If the packet was able to reach the end of firewall chain,
 // then it did not get DNATed. It means the packet cannot go
 // thru the firewall, then mark it for DROP.
-proxier.natRules.Write(args, "-j", string(KubeMarkDropChain))
+proxier.natRules.Write(args, "-j", string(kubeMarkDropChain))
 }
 
 for _, lbip := range svcInfo.LoadBalancerIPStrings() {
@@ -1368,7 +1368,7 @@ func (proxier *Proxier) syncProxyRules() {
 "-A", string(localPolicyChain),
 "-m", "comment", "--comment",
 fmt.Sprintf(`"%s has no local endpoints"`, svcNameString),
-"-j", string(KubeMarkDropChain))
+"-j", string(kubeMarkDropChain))
 }
 }
 }
@@ -746,7 +746,7 @@ func checkIPTablesRuleJumps(ruleData string) error {
 // Ignore jumps to chains that we expect to exist even if kube-proxy
 // didn't create them itself.
 jumpedChains.Delete("ACCEPT", "REJECT", "DROP", "MARK", "RETURN", "DNAT", "SNAT", "MASQUERADE")
-jumpedChains.Delete(string(KubeMarkDropChain))
+jumpedChains.Delete(string(kubeMarkDropChain))
 
 // Find cases where we have "-A FOO ... -j BAR" but no ":BAR", meaning
 // that we are jumping to a chain that was not created.
@@ -769,7 +769,7 @@ func checkIPTablesRuleJumps(ruleData string) error {
 // Find cases where we have ":BAR" but no "-A FOO ... -j BAR", meaning
 // that we are creating an empty chain but not using it for anything.
 extraChains := createdChains.Difference(jumpedChains)
-extraChains.Delete(string(kubeServicesChain), string(kubeExternalServicesChain), string(kubeNodePortsChain), string(kubePostroutingChain), string(kubeForwardChain), string(KubeMarkMasqChain))
+extraChains.Delete(string(kubeServicesChain), string(kubeExternalServicesChain), string(kubeNodePortsChain), string(kubePostroutingChain), string(kubeForwardChain), string(kubeMarkMasqChain))
 if len(extraChains) > 0 {
 return fmt.Errorf("some chains in %s are created but not used: %v", tableName, extraChains.List())
 }
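The two hunks above touch checkIPTablesRuleJumps, a test helper that cross-checks the generated rule data: every "-j CHAIN" jump must target a chain declared with ":CHAIN", and every declared chain must be jumped to from somewhere. Below is a minimal, self-contained sketch of that kind of check; the function name and details are assumptions for illustration, not the actual helper.

package main

import (
	"fmt"
	"strings"
)

// checkRuleJumps scans iptables-save formatted rule data and reports jumps to
// chains that were never declared, and chains declared but never used.
func checkRuleJumps(ruleData string) []string {
	created := map[string]bool{} // chains declared via ":CHAIN"
	jumped := map[string]bool{}  // chains targeted via "-j CHAIN"

	for _, line := range strings.Split(ruleData, "\n") {
		fields := strings.Fields(line)
		if len(fields) == 0 {
			continue
		}
		if strings.HasPrefix(fields[0], ":") {
			created[strings.TrimPrefix(fields[0], ":")] = true
		}
		for i, f := range fields {
			if f == "-j" && i+1 < len(fields) {
				jumped[fields[i+1]] = true
			}
		}
	}

	// Built-in targets are always valid jump destinations, so ignore them
	// (the real helper also ignores chains created outside this rule data).
	for _, t := range []string{"ACCEPT", "REJECT", "DROP", "MARK", "RETURN", "DNAT", "SNAT", "MASQUERADE"} {
		delete(jumped, t)
	}

	var problems []string
	for c := range jumped {
		if !created[c] {
			problems = append(problems, fmt.Sprintf("jump to chain %s, which is never created", c))
		}
	}
	for c := range created {
		if !jumped[c] {
			problems = append(problems, fmt.Sprintf("chain %s is created but never used", c))
		}
	}
	return problems
}

func main() {
	rules := ":KUBE-MARK-MASQ - [0:0]\n" +
		"-A KUBE-SERVICES -j KUBE-MARK-MASQ\n" +
		"-A KUBE-SERVICES -j KUBE-NODEPORTS\n"
	for _, p := range checkRuleJumps(rules) {
		fmt.Println(p) // reports the jump to the undeclared KUBE-NODEPORTS chain
	}
}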
@@ -64,31 +64,31 @@ const (
 kubeServicesChain utiliptables.Chain = "KUBE-SERVICES"
 
 // KubeFireWallChain is the kubernetes firewall chain.
-KubeFireWallChain utiliptables.Chain = "KUBE-FIREWALL"
+kubeFirewallChain utiliptables.Chain = "KUBE-FIREWALL"
 
 // kubePostroutingChain is the kubernetes postrouting chain
 kubePostroutingChain utiliptables.Chain = "KUBE-POSTROUTING"
 
-// KubeMarkMasqChain is the mark-for-masquerade chain
-KubeMarkMasqChain utiliptables.Chain = "KUBE-MARK-MASQ"
+// kubeMarkMasqChain is the mark-for-masquerade chain
+kubeMarkMasqChain utiliptables.Chain = "KUBE-MARK-MASQ"
 
-// KubeNodePortChain is the kubernetes node port chain
-KubeNodePortChain utiliptables.Chain = "KUBE-NODE-PORT"
+// kubeNodePortChain is the kubernetes node port chain
+kubeNodePortChain utiliptables.Chain = "KUBE-NODE-PORT"
 
 // KubeMarkDropChain is the mark-for-drop chain
-KubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP"
+kubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP"
 
-// KubeForwardChain is the kubernetes forward chain
-KubeForwardChain utiliptables.Chain = "KUBE-FORWARD"
+// kubeForwardChain is the kubernetes forward chain
+kubeForwardChain utiliptables.Chain = "KUBE-FORWARD"
 
-// KubeLoadBalancerChain is the kubernetes chain for loadbalancer type service
-KubeLoadBalancerChain utiliptables.Chain = "KUBE-LOAD-BALANCER"
+// kubeLoadBalancerChain is the kubernetes chain for loadbalancer type service
+kubeLoadBalancerChain utiliptables.Chain = "KUBE-LOAD-BALANCER"
 
-// DefaultScheduler is the default ipvs scheduler algorithm - round robin.
-DefaultScheduler = "rr"
+// defaultScheduler is the default ipvs scheduler algorithm - round robin.
+defaultScheduler = "rr"
 
-// DefaultDummyDevice is the default dummy interface which ipvs service address will bind to it.
-DefaultDummyDevice = "kube-ipvs0"
+// defaultDummyDevice is the default dummy interface which ipvs service address will bind to it.
+defaultDummyDevice = "kube-ipvs0"
 
 connReuseMinSupportedKernelVersion = "4.1"
 
@@ -108,8 +108,8 @@ var iptablesJumpChain = []struct {
 {utiliptables.TableNAT, utiliptables.ChainOutput, kubeServicesChain, "kubernetes service portals"},
 {utiliptables.TableNAT, utiliptables.ChainPrerouting, kubeServicesChain, "kubernetes service portals"},
 {utiliptables.TableNAT, utiliptables.ChainPostrouting, kubePostroutingChain, "kubernetes postrouting rules"},
-{utiliptables.TableFilter, utiliptables.ChainForward, KubeForwardChain, "kubernetes forwarding rules"},
-{utiliptables.TableFilter, utiliptables.ChainInput, KubeNodePortChain, "kubernetes health check rules"},
+{utiliptables.TableFilter, utiliptables.ChainForward, kubeForwardChain, "kubernetes forwarding rules"},
+{utiliptables.TableFilter, utiliptables.ChainInput, kubeNodePortChain, "kubernetes health check rules"},
 }
 
 var iptablesChains = []struct {
@@ -118,19 +118,19 @@ var iptablesChains = []struct {
 }{
 {utiliptables.TableNAT, kubeServicesChain},
 {utiliptables.TableNAT, kubePostroutingChain},
-{utiliptables.TableNAT, KubeFireWallChain},
-{utiliptables.TableNAT, KubeNodePortChain},
-{utiliptables.TableNAT, KubeLoadBalancerChain},
-{utiliptables.TableNAT, KubeMarkMasqChain},
-{utiliptables.TableFilter, KubeForwardChain},
-{utiliptables.TableFilter, KubeNodePortChain},
+{utiliptables.TableNAT, kubeFirewallChain},
+{utiliptables.TableNAT, kubeNodePortChain},
+{utiliptables.TableNAT, kubeLoadBalancerChain},
+{utiliptables.TableNAT, kubeMarkMasqChain},
+{utiliptables.TableFilter, kubeForwardChain},
+{utiliptables.TableFilter, kubeNodePortChain},
 }
 
 var iptablesEnsureChains = []struct {
 table utiliptables.Table
 chain utiliptables.Chain
 }{
-{utiliptables.TableNAT, KubeMarkDropChain},
+{utiliptables.TableNAT, kubeMarkDropChain},
 }
 
 var iptablesCleanupChains = []struct {
@@ -139,11 +139,11 @@ var iptablesCleanupChains = []struct {
 }{
 {utiliptables.TableNAT, kubeServicesChain},
 {utiliptables.TableNAT, kubePostroutingChain},
-{utiliptables.TableNAT, KubeFireWallChain},
-{utiliptables.TableNAT, KubeNodePortChain},
-{utiliptables.TableNAT, KubeLoadBalancerChain},
-{utiliptables.TableFilter, KubeForwardChain},
-{utiliptables.TableFilter, KubeNodePortChain},
+{utiliptables.TableNAT, kubeFirewallChain},
+{utiliptables.TableNAT, kubeNodePortChain},
+{utiliptables.TableNAT, kubeLoadBalancerChain},
+{utiliptables.TableFilter, kubeForwardChain},
+{utiliptables.TableFilter, kubeNodePortChain},
 }
 
 // ipsetInfo is all ipset we needed in ipvs proxier
@@ -183,17 +183,17 @@ var ipsetWithIptablesChain = []struct {
 protocolMatch string
 }{
 {kubeLoopBackIPSet, string(kubePostroutingChain), "MASQUERADE", "dst,dst,src", ""},
-{kubeLoadBalancerSet, string(kubeServicesChain), string(KubeLoadBalancerChain), "dst,dst", ""},
-{kubeLoadbalancerFWSet, string(KubeLoadBalancerChain), string(KubeFireWallChain), "dst,dst", ""},
-{kubeLoadBalancerSourceCIDRSet, string(KubeFireWallChain), "RETURN", "dst,dst,src", ""},
-{kubeLoadBalancerSourceIPSet, string(KubeFireWallChain), "RETURN", "dst,dst,src", ""},
-{kubeLoadBalancerLocalSet, string(KubeLoadBalancerChain), "RETURN", "dst,dst", ""},
-{kubeNodePortLocalSetTCP, string(KubeNodePortChain), "RETURN", "dst", utilipset.ProtocolTCP},
-{kubeNodePortSetTCP, string(KubeNodePortChain), string(KubeMarkMasqChain), "dst", utilipset.ProtocolTCP},
-{kubeNodePortLocalSetUDP, string(KubeNodePortChain), "RETURN", "dst", utilipset.ProtocolUDP},
-{kubeNodePortSetUDP, string(KubeNodePortChain), string(KubeMarkMasqChain), "dst", utilipset.ProtocolUDP},
-{kubeNodePortLocalSetSCTP, string(KubeNodePortChain), "RETURN", "dst,dst", utilipset.ProtocolSCTP},
-{kubeNodePortSetSCTP, string(KubeNodePortChain), string(KubeMarkMasqChain), "dst,dst", utilipset.ProtocolSCTP},
+{kubeLoadBalancerSet, string(kubeServicesChain), string(kubeLoadBalancerChain), "dst,dst", ""},
+{kubeLoadbalancerFWSet, string(kubeLoadBalancerChain), string(kubeFirewallChain), "dst,dst", ""},
+{kubeLoadBalancerSourceCIDRSet, string(kubeFirewallChain), "RETURN", "dst,dst,src", ""},
+{kubeLoadBalancerSourceIPSet, string(kubeFirewallChain), "RETURN", "dst,dst,src", ""},
+{kubeLoadBalancerLocalSet, string(kubeLoadBalancerChain), "RETURN", "dst,dst", ""},
+{kubeNodePortLocalSetTCP, string(kubeNodePortChain), "RETURN", "dst", utilipset.ProtocolTCP},
+{kubeNodePortSetTCP, string(kubeNodePortChain), string(kubeMarkMasqChain), "dst", utilipset.ProtocolTCP},
+{kubeNodePortLocalSetUDP, string(kubeNodePortChain), "RETURN", "dst", utilipset.ProtocolUDP},
+{kubeNodePortSetUDP, string(kubeNodePortChain), string(kubeMarkMasqChain), "dst", utilipset.ProtocolUDP},
+{kubeNodePortLocalSetSCTP, string(kubeNodePortChain), "RETURN", "dst,dst", utilipset.ProtocolSCTP},
+{kubeNodePortSetSCTP, string(kubeNodePortChain), string(kubeMarkMasqChain), "dst,dst", utilipset.ProtocolSCTP},
 }
 
 // In IPVS proxy mode, the following flags need to be set
@@ -334,7 +334,7 @@ func (r *realIPGetter) NodeIPs() (ips []net.IP, err error) {
 
 // BindedIPs returns all addresses that are binded to the IPVS dummy interface kube-ipvs0
 func (r *realIPGetter) BindedIPs() (sets.String, error) {
-return r.nl.GetLocalAddresses(DefaultDummyDevice)
+return r.nl.GetLocalAddresses(defaultDummyDevice)
 }
 
 // Proxier implements proxy.Provider
@@ -449,7 +449,7 @@ func NewProxier(ipt utiliptables.Interface,
 
 if len(scheduler) == 0 {
 klog.InfoS("IPVS scheduler not specified, use rr by default")
-scheduler = DefaultScheduler
+scheduler = defaultScheduler
 }
 
 serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses)
@@ -732,7 +732,7 @@ func CanUseIPVSProxier(handle KernelHandler, ipsetver IPSetVersioner, scheduler
 wantModules := sets.NewString()
 // We check for the existence of the scheduler mod and will trigger a missingMods error if not found
 if scheduler == "" {
-scheduler = DefaultScheduler
+scheduler = defaultScheduler
 }
 schedulerMod := "ip_vs_" + scheduler
 mods = append(mods, schedulerMod)
@@ -819,9 +819,9 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset
 }
 // Delete dummy interface created by ipvs Proxier.
 nl := NewNetLinkHandle(false)
-err := nl.DeleteDummyDevice(DefaultDummyDevice)
+err := nl.DeleteDummyDevice(defaultDummyDevice)
 if err != nil {
-klog.ErrorS(err, "Error deleting dummy device created by ipvs proxier", "device", DefaultDummyDevice)
+klog.ErrorS(err, "Error deleting dummy device created by ipvs proxier", "device", defaultDummyDevice)
 encounteredError = true
 }
 // Clear iptables created by ipvs Proxier.
@@ -1060,9 +1060,9 @@ func (proxier *Proxier) syncProxyRules() {
 proxier.createAndLinkKubeChain()
 
 // make sure dummy interface exists in the system where ipvs Proxier will bind service address on it
-_, err := proxier.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)
+_, err := proxier.netlinkHandle.EnsureDummyDevice(defaultDummyDevice)
 if err != nil {
-klog.ErrorS(err, "Failed to create dummy interface", "interface", DefaultDummyDevice)
+klog.ErrorS(err, "Failed to create dummy interface", "interface", defaultDummyDevice)
 return
 }
 
@@ -1078,7 +1078,7 @@ func (proxier *Proxier) syncProxyRules() {
 activeIPVSServices := map[string]bool{}
 // currentIPVSServices represent IPVS services listed from the system
 currentIPVSServices := make(map[string]*utilipvs.VirtualServer)
-// activeBindAddrs represents ip address successfully bind to DefaultDummyDevice in this round of sync
+// activeBindAddrs represents ip address successfully bind to defaultDummyDevice in this round of sync
 activeBindAddrs := map[string]bool{}
 
 bindedAddresses, err := proxier.ipGetter.BindedIPs()
@@ -1575,8 +1575,8 @@ func (proxier *Proxier) syncProxyRules() {
 }
 
 // Get legacy bind address
-// currentBindAddrs represents ip addresses bind to DefaultDummyDevice from the system
-currentBindAddrs, err := proxier.netlinkHandle.ListBindAddress(DefaultDummyDevice)
+// currentBindAddrs represents ip addresses bind to defaultDummyDevice from the system
+currentBindAddrs, err := proxier.netlinkHandle.ListBindAddress(defaultDummyDevice)
 if err != nil {
 klog.ErrorS(err, "Failed to get bind address")
 }
@@ -1661,7 +1661,7 @@ func (proxier *Proxier) writeIptablesRules() {
 if proxier.masqueradeAll {
 proxier.natRules.Write(
 args, "dst,dst",
-"-j", string(KubeMarkMasqChain))
+"-j", string(kubeMarkMasqChain))
 } else if proxier.localDetector.IsImplemented() {
 // This masquerades off-cluster traffic to a service VIP. The idea
 // is that you can establish a static route for your Service range,
@@ -1671,7 +1671,7 @@ func (proxier *Proxier) writeIptablesRules() {
 proxier.natRules.Write(
 args, "dst,dst",
 proxier.localDetector.IfNotLocal(),
-"-j", string(KubeMarkMasqChain))
+"-j", string(kubeMarkMasqChain))
 } else {
 // Masquerade all OUTPUT traffic coming from a service ip.
 // The kube dummy interface has all service VIPs assigned which
@@ -1682,7 +1682,7 @@ func (proxier *Proxier) writeIptablesRules() {
 // source ip and service port destination fixes the outgoing connections.
 proxier.natRules.Write(
 args, "src,dst",
-"-j", string(KubeMarkMasqChain))
+"-j", string(kubeMarkMasqChain))
 }
 }
 
@@ -1710,7 +1710,7 @@ func (proxier *Proxier) writeIptablesRules() {
 "-m", "set", "--match-set", proxier.ipsetList[kubeExternalIPSet].Name,
 "dst,dst",
 )
-proxier.natRules.Write(args, "-j", string(KubeMarkMasqChain))
+proxier.natRules.Write(args, "-j", string(kubeMarkMasqChain))
 externalIPRules(args)
 }
 
@@ -1729,18 +1729,18 @@ func (proxier *Proxier) writeIptablesRules() {
 "-A", string(kubeServicesChain),
 "-m", "addrtype", "--dst-type", "LOCAL",
 )
-proxier.natRules.Write(args, "-j", string(KubeNodePortChain))
+proxier.natRules.Write(args, "-j", string(kubeNodePortChain))
 
 // mark drop for KUBE-LOAD-BALANCER
 proxier.natRules.Write(
-"-A", string(KubeLoadBalancerChain),
-"-j", string(KubeMarkMasqChain),
+"-A", string(kubeLoadBalancerChain),
+"-j", string(kubeMarkMasqChain),
 )
 
 // mark drop for KUBE-FIRE-WALL
 proxier.natRules.Write(
-"-A", string(KubeFireWallChain),
-"-j", string(KubeMarkDropChain),
+"-A", string(kubeFirewallChain),
+"-j", string(kubeMarkDropChain),
 )
 
 // Accept all traffic with destination of ipvs virtual service, in case other iptables rules
@@ -1752,7 +1752,7 @@ func (proxier *Proxier) writeIptablesRules() {
 // traffic, this allows NodePort traffic to be forwarded even if the default
 // FORWARD policy is not accept.
 proxier.filterRules.Write(
-"-A", string(KubeForwardChain),
+"-A", string(kubeForwardChain),
 "-m", "comment", "--comment", `"kubernetes forwarding rules"`,
 "-m", "mark", "--mark", fmt.Sprintf("%s/%s", proxier.masqueradeMark, proxier.masqueradeMark),
 "-j", "ACCEPT",
@@ -1761,7 +1761,7 @@ func (proxier *Proxier) writeIptablesRules() {
 // The following rule ensures the traffic after the initial packet accepted
 // by the "kubernetes forwarding rules" rule above will be accepted.
 proxier.filterRules.Write(
-"-A", string(KubeForwardChain),
+"-A", string(kubeForwardChain),
 "-m", "comment", "--comment", `"kubernetes forwarding conntrack rule"`,
 "-m", "conntrack",
 "--ctstate", "RELATED,ESTABLISHED",
@@ -1770,7 +1770,7 @@ func (proxier *Proxier) writeIptablesRules() {
 
 // Add rule to accept traffic towards health check node port
 proxier.filterRules.Write(
-"-A", string(KubeNodePortChain),
+"-A", string(kubeNodePortChain),
 "-m", "comment", "--comment", proxier.ipsetList[kubeHealthCheckNodePortSet].getComment(),
 "-m", "set", "--match-set", proxier.ipsetList[kubeHealthCheckNodePortSet].Name, "dst",
 "-j", "ACCEPT",
@@ -1805,7 +1805,7 @@ func (proxier *Proxier) writeIptablesRules() {
 // this so that it is easier to flush and change, for example if the mark
 // value should ever change.
 proxier.natRules.Write(
-"-A", string(KubeMarkMasqChain),
+"-A", string(kubeMarkMasqChain),
 "-j", "MARK", "--or-mark", proxier.masqueradeMark,
 )
 
@@ -1949,7 +1949,7 @@ func (proxier *Proxier) syncService(svcName string, vs *utilipvs.VirtualServer,
 }
 
 klog.V(4).InfoS("Bind address", "address", vs.Address)
-_, err := proxier.netlinkHandle.EnsureAddressBind(vs.Address.String(), DefaultDummyDevice)
+_, err := proxier.netlinkHandle.EnsureAddressBind(vs.Address.String(), defaultDummyDevice)
 if err != nil {
 klog.ErrorS(err, "Failed to bind service address to dummy device", "serviceName", svcName)
 return err
@@ -2109,8 +2109,8 @@ func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, curre
 addr := svc.Address.String()
 if _, ok := legacyBindAddrs[addr]; ok {
 klog.V(4).InfoS("Unbinding address", "address", addr)
-if err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice); err != nil {
-klog.ErrorS(err, "Failed to unbind service from dummy interface", "interface", DefaultDummyDevice, "address", addr)
+if err := proxier.netlinkHandle.UnbindAddress(addr, defaultDummyDevice); err != nil {
+klog.ErrorS(err, "Failed to unbind service from dummy interface", "interface", defaultDummyDevice, "address", addr)
 } else {
 // In case we delete a multi-port service, avoid trying to unbind multiple times
 delete(legacyBindAddrs, addr)
@@ -144,7 +144,7 @@ func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset u
 localDetector: proxyutiliptables.NewNoOpLocalDetector(),
 hostname: testHostname,
 serviceHealthServer: healthcheck.NewFakeServiceHealthServer(),
-ipvsScheduler: DefaultScheduler,
+ipvsScheduler: defaultScheduler,
 ipGetter: &fakeIPGetter{nodeIPs: nodeIPs},
 iptablesData: bytes.NewBuffer(nil),
 filterChainsData: bytes.NewBuffer(nil),
@@ -655,15 +655,15 @@ func TestNodePortIPv4(t *testing.T) {
 }},
 },
 expectedIptablesChains: netlinktest.ExpectedIptablesChain{
-string(KubeNodePortChain): {{
-JumpChain: string(KubeMarkMasqChain), MatchSet: kubeNodePortSetUDP,
+string(kubeNodePortChain): {{
+JumpChain: string(kubeMarkMasqChain), MatchSet: kubeNodePortSetUDP,
 }, {
 JumpChain: "ACCEPT", MatchSet: kubeHealthCheckNodePortSet,
 }},
 string(kubeServicesChain): {{
-JumpChain: string(KubeMarkMasqChain), MatchSet: kubeClusterIPSet,
+JumpChain: string(kubeMarkMasqChain), MatchSet: kubeClusterIPSet,
 }, {
-JumpChain: string(KubeNodePortChain), MatchSet: "",
+JumpChain: string(kubeNodePortChain), MatchSet: "",
 }, {
 JumpChain: "ACCEPT", MatchSet: kubeClusterIPSet,
 }},
@@ -982,10 +982,10 @@ func TestNodePortIPv4(t *testing.T) {
 },
 },
 expectedIptablesChains: netlinktest.ExpectedIptablesChain{
-string(KubeNodePortChain): {{
+string(kubeNodePortChain): {{
 JumpChain: "RETURN", MatchSet: kubeNodePortLocalSetSCTP,
 }, {
-JumpChain: string(KubeMarkMasqChain), MatchSet: kubeNodePortSetSCTP,
+JumpChain: string(kubeMarkMasqChain), MatchSet: kubeNodePortSetSCTP,
 }, {
 JumpChain: "ACCEPT", MatchSet: kubeHealthCheckNodePortSet,
 }},
@@ -1947,18 +1947,18 @@ func TestLoadBalancer(t *testing.T) {
 // Check iptables chain and rules
 epIpt := netlinktest.ExpectedIptablesChain{
 string(kubeServicesChain): {{
-JumpChain: string(KubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet,
+JumpChain: string(kubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet,
 }, {
-JumpChain: string(KubeMarkMasqChain), MatchSet: kubeClusterIPSet,
+JumpChain: string(kubeMarkMasqChain), MatchSet: kubeClusterIPSet,
 }, {
-JumpChain: string(KubeNodePortChain), MatchSet: "",
+JumpChain: string(kubeNodePortChain), MatchSet: "",
 }, {
 JumpChain: "ACCEPT", MatchSet: kubeClusterIPSet,
 }, {
 JumpChain: "ACCEPT", MatchSet: kubeLoadBalancerSet,
 }},
 string(kubeLoadBalancerSet): {{
-JumpChain: string(KubeMarkMasqChain), MatchSet: "",
+JumpChain: string(kubeMarkMasqChain), MatchSet: "",
 }},
 }
 checkIptables(t, ipt, epIpt)
@@ -2047,16 +2047,16 @@ func TestOnlyLocalNodePorts(t *testing.T) {
 // Check iptables chain and rules
 epIpt := netlinktest.ExpectedIptablesChain{
 string(kubeServicesChain): {{
-JumpChain: string(KubeMarkMasqChain), MatchSet: kubeClusterIPSet,
+JumpChain: string(kubeMarkMasqChain), MatchSet: kubeClusterIPSet,
 }, {
-JumpChain: string(KubeNodePortChain), MatchSet: "",
+JumpChain: string(kubeNodePortChain), MatchSet: "",
 }, {
 JumpChain: "ACCEPT", MatchSet: kubeClusterIPSet,
 }},
-string(KubeNodePortChain): {{
+string(kubeNodePortChain): {{
 JumpChain: "RETURN", MatchSet: kubeNodePortLocalSetTCP,
 }, {
-JumpChain: string(KubeMarkMasqChain), MatchSet: kubeNodePortSetTCP,
+JumpChain: string(kubeMarkMasqChain), MatchSet: kubeNodePortSetTCP,
 }, {
 JumpChain: "ACCEPT", MatchSet: kubeHealthCheckNodePortSet,
 }},
@@ -2126,10 +2126,10 @@ func TestHealthCheckNodePort(t *testing.T) {
 
 // Check iptables chain and rules
 epIpt := netlinktest.ExpectedIptablesChain{
-string(KubeNodePortChain): {{
+string(kubeNodePortChain): {{
 JumpChain: "RETURN", MatchSet: kubeNodePortLocalSetTCP,
 }, {
-JumpChain: string(KubeMarkMasqChain), MatchSet: kubeNodePortSetTCP,
+JumpChain: string(kubeMarkMasqChain), MatchSet: kubeNodePortSetTCP,
 }, {
 JumpChain: "ACCEPT", MatchSet: kubeHealthCheckNodePortSet,
 }},
@@ -2219,25 +2219,25 @@ func TestLoadBalanceSourceRanges(t *testing.T) {
 // Check iptables chain and rules
 epIpt := netlinktest.ExpectedIptablesChain{
 string(kubeServicesChain): {{
-JumpChain: string(KubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet,
+JumpChain: string(kubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet,
 }, {
-JumpChain: string(KubeMarkMasqChain), MatchSet: kubeClusterIPSet,
+JumpChain: string(kubeMarkMasqChain), MatchSet: kubeClusterIPSet,
 }, {
-JumpChain: string(KubeNodePortChain), MatchSet: "",
+JumpChain: string(kubeNodePortChain), MatchSet: "",
 }, {
 JumpChain: "ACCEPT", MatchSet: kubeClusterIPSet,
 }, {
 JumpChain: "ACCEPT", MatchSet: kubeLoadBalancerSet,
 }},
-string(KubeLoadBalancerChain): {{
-JumpChain: string(KubeFireWallChain), MatchSet: kubeLoadbalancerFWSet,
+string(kubeLoadBalancerChain): {{
+JumpChain: string(kubeFirewallChain), MatchSet: kubeLoadbalancerFWSet,
 }, {
-JumpChain: string(KubeMarkMasqChain), MatchSet: "",
+JumpChain: string(kubeMarkMasqChain), MatchSet: "",
 }},
-string(KubeFireWallChain): {{
+string(kubeFirewallChain): {{
 JumpChain: "RETURN", MatchSet: kubeLoadBalancerSourceCIDRSet,
 }, {
-JumpChain: string(KubeMarkDropChain), MatchSet: "",
+JumpChain: string(kubeMarkDropChain), MatchSet: "",
 }},
 }
 checkIptables(t, ipt, epIpt)
@@ -2300,12 +2300,12 @@ func TestAcceptIPVSTraffic(t *testing.T) {
 // Check iptables chain and rules
 epIpt := netlinktest.ExpectedIptablesChain{
 string(kubeServicesChain): {
-{JumpChain: string(KubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet},
-{JumpChain: string(KubeMarkMasqChain), MatchSet: kubeClusterIPSet},
-{JumpChain: string(KubeMarkMasqChain), MatchSet: kubeExternalIPSet},
+{JumpChain: string(kubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet},
+{JumpChain: string(kubeMarkMasqChain), MatchSet: kubeClusterIPSet},
+{JumpChain: string(kubeMarkMasqChain), MatchSet: kubeExternalIPSet},
 {JumpChain: "ACCEPT", MatchSet: kubeExternalIPSet}, // With externalTrafficOnlyArgs
 {JumpChain: "ACCEPT", MatchSet: kubeExternalIPSet}, // With dstLocalOnlyArgs
-{JumpChain: string(KubeNodePortChain), MatchSet: ""},
+{JumpChain: string(kubeNodePortChain), MatchSet: ""},
 {JumpChain: "ACCEPT", MatchSet: kubeClusterIPSet},
 {JumpChain: "ACCEPT", MatchSet: kubeLoadBalancerSet},
 },
@@ -2398,20 +2398,20 @@ func TestOnlyLocalLoadBalancing(t *testing.T) {
 // Check iptables chain and rules
 epIpt := netlinktest.ExpectedIptablesChain{
 string(kubeServicesChain): {{
-JumpChain: string(KubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet,
+JumpChain: string(kubeLoadBalancerChain), MatchSet: kubeLoadBalancerSet,
 }, {
-JumpChain: string(KubeMarkMasqChain), MatchSet: kubeClusterIPSet,
+JumpChain: string(kubeMarkMasqChain), MatchSet: kubeClusterIPSet,
 }, {
-JumpChain: string(KubeNodePortChain), MatchSet: "",
+JumpChain: string(kubeNodePortChain), MatchSet: "",
 }, {
 JumpChain: "ACCEPT", MatchSet: kubeClusterIPSet,
 }, {
 JumpChain: "ACCEPT", MatchSet: kubeLoadBalancerSet,
 }},
-string(KubeLoadBalancerChain): {{
+string(kubeLoadBalancerChain): {{
 JumpChain: "RETURN", MatchSet: kubeLoadBalancerLocalSet,
 }, {
-JumpChain: string(KubeMarkMasqChain), MatchSet: "",
+JumpChain: string(kubeMarkMasqChain), MatchSet: "",
 }},
 }
 checkIptables(t, ipt, epIpt)
@@ -3786,7 +3786,7 @@ func Test_syncService(t *testing.T) {
 ipset := ipsettest.NewFake(testIPSetVersion)
 proxier := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol)
 
-proxier.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)
+proxier.netlinkHandle.EnsureDummyDevice(defaultDummyDevice)
 if testCases[i].oldVirtualServer != nil {
 if err := proxier.ipvs.AddVirtualServer(testCases[i].oldVirtualServer); err != nil {
 t.Errorf("Case [%d], unexpected add IPVS virtual server error: %v", i, err)
@@ -3967,12 +3967,12 @@ func TestCleanLegacyService(t *testing.T) {
 fp.ipvs.AddVirtualServer(currentServices[v])
 }
 
-fp.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)
+fp.netlinkHandle.EnsureDummyDevice(defaultDummyDevice)
 activeBindAddrs := map[string]bool{"1.1.1.1": true, "2.2.2.2": true, "3.3.3.3": true, "4.4.4.4": true}
 // This is ipv4-only so ipv6 addresses should be ignored
 currentBindAddrs := []string{"1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4", "5.5.5.5", "6.6.6.6", "fd80::1:2:3", "fd80::1:2:4"}
 for i := range currentBindAddrs {
-fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], DefaultDummyDevice)
+fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], defaultDummyDevice)
 }
 
 fp.cleanLegacyService(activeServices, currentServices, map[string]bool{"5.5.5.5": true, "6.6.6.6": true})
@@ -3992,7 +3992,7 @@ func TestCleanLegacyService(t *testing.T) {
 }
 
 // Addresses 5.5.5.5 and 6.6.6.6 should not be bound any more, but the ipv6 addresses should remain
-remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
+remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(defaultDummyDevice)
 if len(remainingAddrs) != 6 {
 t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 6, len(remainingAddrs))
 }
@@ -4066,11 +4066,11 @@ func TestCleanLegacyServiceWithRealServers(t *testing.T) {
 fp.ipvs.AddRealServer(v, r)
 }
 
-fp.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)
+fp.netlinkHandle.EnsureDummyDevice(defaultDummyDevice)
 activeBindAddrs := map[string]bool{"3.3.3.3": true}
 currentBindAddrs := []string{"1.1.1.1", "2.2.2.2", "3.3.3.3"}
 for i := range currentBindAddrs {
-fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], DefaultDummyDevice)
+fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], defaultDummyDevice)
 }
 
 fp.cleanLegacyService(activeServices, currentServices, map[string]bool{"1.1.1.1": true, "2.2.2.2": true})
@@ -4085,7 +4085,7 @@ func TestCleanLegacyServiceWithRealServers(t *testing.T) {
 t.Errorf("unexpected IPVS service")
 }
 
-remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
+remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(defaultDummyDevice)
 if len(remainingAddrs) != 1 {
 t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 1, len(remainingAddrs))
 }
@@ -4140,9 +4140,9 @@ func TestCleanLegacyRealServersExcludeCIDRs(t *testing.T) {
 fp.ipvs.AddRealServer(vs, rs)
 }
 
-fp.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)
+fp.netlinkHandle.EnsureDummyDevice(defaultDummyDevice)
 
-fp.netlinkHandle.EnsureAddressBind("4.4.4.4", DefaultDummyDevice)
+fp.netlinkHandle.EnsureAddressBind("4.4.4.4", defaultDummyDevice)
 
 fp.cleanLegacyService(
 map[string]bool{},
@@ -4224,12 +4224,12 @@ func TestCleanLegacyService6(t *testing.T) {
 fp.ipvs.AddVirtualServer(currentServices[v])
 }
 
-fp.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice)
+fp.netlinkHandle.EnsureDummyDevice(defaultDummyDevice)
 activeBindAddrs := map[string]bool{"1000::1": true, "1000::2": true, "3000::1": true, "4000::1": true}
 // This is ipv6-only so ipv4 addresses should be ignored
 currentBindAddrs := []string{"1000::1", "1000::2", "3000::1", "4000::1", "5000::1", "1000::6", "1.1.1.1", "2.2.2.2"}
 for i := range currentBindAddrs {
-fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], DefaultDummyDevice)
+fp.netlinkHandle.EnsureAddressBind(currentBindAddrs[i], defaultDummyDevice)
 }
 
 fp.cleanLegacyService(activeServices, currentServices, map[string]bool{"5000::1": true, "1000::6": true})
@@ -4249,7 +4249,7 @@ func TestCleanLegacyService6(t *testing.T) {
 }
 
 // Addresses 5000::1 and 1000::6 should not be bound any more, but the ipv4 addresses should remain
-remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
+remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(defaultDummyDevice)
 if len(remainingAddrs) != 6 {
 t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 6, len(remainingAddrs))
 }
@@ -4297,7 +4297,7 @@ func TestMultiPortServiceBindAddr(t *testing.T) {
 // first, add multi-port service1
 fp.OnServiceAdd(service1)
 fp.syncProxyRules()
-remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
+remainingAddrs, _ := fp.netlinkHandle.ListBindAddress(defaultDummyDevice)
 // should only remain address "172.16.55.4"
 if len(remainingAddrs) != 1 {
 t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 1, len(remainingAddrs))
@@ -4309,7 +4309,7 @@ func TestMultiPortServiceBindAddr(t *testing.T) {
 // update multi-port service1 to single-port service2
 fp.OnServiceUpdate(service1, service2)
 fp.syncProxyRules()
-remainingAddrs, _ = fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
+remainingAddrs, _ = fp.netlinkHandle.ListBindAddress(defaultDummyDevice)
 // should still only remain address "172.16.55.4"
 if len(remainingAddrs) != 1 {
 t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 1, len(remainingAddrs))
@@ -4320,7 +4320,7 @@ func TestMultiPortServiceBindAddr(t *testing.T) {
 // update single-port service2 to multi-port service3
 fp.OnServiceUpdate(service2, service3)
 fp.syncProxyRules()
-remainingAddrs, _ = fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
+remainingAddrs, _ = fp.netlinkHandle.ListBindAddress(defaultDummyDevice)
 // should still only remain address "172.16.55.4"
 if len(remainingAddrs) != 1 {
 t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 1, len(remainingAddrs))
@@ -4331,7 +4331,7 @@ func TestMultiPortServiceBindAddr(t *testing.T) {
 // delete multi-port service3
 fp.OnServiceDelete(service3)
 fp.syncProxyRules()
-remainingAddrs, _ = fp.netlinkHandle.ListBindAddress(DefaultDummyDevice)
+remainingAddrs, _ = fp.netlinkHandle.ListBindAddress(defaultDummyDevice)
 // all addresses should be unbound
 if len(remainingAddrs) != 0 {
 t.Errorf("Expected number of remaining bound addrs after cleanup to be %v. Got %v", 0, len(remainingAddrs))