proxy/iptables: fix firewall rules with multiple LB IPs

The various loops in the LoadBalancer rule section were mis-nested
such that if a service had multiple LoadBalancer IPs, we would write
out the firewall rules multiple times (and the allowFromNode rule for
the second and later IPs would end up being written after the "else
DROP" rule from the first IP).
Dan Winship 2022-04-06 11:08:28 -04:00
parent df589b46a1
commit 813aca47af
2 changed files with 48 additions and 50 deletions
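A minimal, self-contained Go sketch of the ordering problem described in the commit message (illustration only, not the real proxier code: the chain names KUBE-FW-XXX and KUBE-EXT-XXX are placeholders, the helper names are made up, and allowFromNode is assumed to be true):

// Sketch contrasting the old mis-nested rule generation with the fixed
// per-service ordering. Hypothetical names; allowFromNode assumed true.
package main

import (
	"fmt"
	"strings"
)

// writeRule appends one pseudo-iptables rule per line to sb.
func writeRule(sb *strings.Builder, args ...string) {
	sb.WriteString(strings.Join(args, " ") + "\n")
}

// buggyRules mimics the old nesting: the whole firewall block was emitted
// inside the per-LB-IP loop, so the source-range and DROP rules were
// duplicated, and the allow-from-node rule for the second IP ended up
// after the first DROP.
func buggyRules(lbIPs, sourceRanges []string) string {
	var sb strings.Builder
	for _, lbip := range lbIPs {
		for _, src := range sourceRanges {
			writeRule(&sb, "-A", "KUBE-FW-XXX", "-s", src, "-j", "KUBE-EXT-XXX")
		}
		// allow-from-node rule, but only for the current LB IP
		writeRule(&sb, "-A", "KUBE-FW-XXX", "-s", lbip, "-j", "KUBE-EXT-XXX")
		writeRule(&sb, "-A", "KUBE-FW-XXX", "-j", "KUBE-MARK-DROP")
	}
	return sb.String()
}

// fixedRules mimics the new nesting: the firewall block is emitted once per
// service, with one allow-from-node rule per LB IP and a single trailing DROP.
func fixedRules(lbIPs, sourceRanges []string) string {
	var sb strings.Builder
	for _, src := range sourceRanges {
		writeRule(&sb, "-A", "KUBE-FW-XXX", "-s", src, "-j", "KUBE-EXT-XXX")
	}
	for _, lbip := range lbIPs {
		writeRule(&sb, "-A", "KUBE-FW-XXX", "-s", lbip, "-j", "KUBE-EXT-XXX")
	}
	writeRule(&sb, "-A", "KUBE-FW-XXX", "-j", "KUBE-MARK-DROP")
	return sb.String()
}

func main() {
	lbIPs := []string{"1.2.3.4", "5.6.7.8"}
	srcs := []string{"192.168.0.0/24", "203.0.113.0/25"}
	fmt.Println("before:")
	fmt.Print(buggyRules(lbIPs, srcs))
	fmt.Println("after:")
	fmt.Print(fixedRules(lbIPs, srcs))
}

With two LoadBalancer IPs and two source ranges, buggyRules emits the whole firewall block twice, so the allow-from-node rule for the second IP lands after the first KUBE-MARK-DROP rule; fixedRules emits each source-range rule once, one allow-from-node rule per LB IP, and a single trailing DROP, matching the updated test expectations below.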


@@ -1243,6 +1243,46 @@ func (proxier *Proxier) syncProxyRules() {
			// The firewall chain will jump to the "external destination"
			// chain.
			nextChain = svcInfo.firewallChainName

			// The service firewall rules are created based on the
			// loadBalancerSourceRanges field. This only works for
			// VIP-like loadbalancers that preserve source IPs. For
			// loadbalancers which direct traffic to service NodePort, the
			// firewall rules will not apply.
			args = append(args[:0],
				"-A", string(nextChain),
				"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
			)

			// firewall filter based on each source range
			allowFromNode := false
			for _, src := range svcInfo.LoadBalancerSourceRanges() {
				proxier.natRules.Write(args, "-s", src, "-j", string(externalTrafficChain))
				_, cidr, err := netutils.ParseCIDRSloppy(src)
				if err != nil {
					klog.ErrorS(err, "Error parsing CIDR in LoadBalancerSourceRanges, dropping it", "cidr", cidr)
				} else if cidr.Contains(proxier.nodeIP) {
					allowFromNode = true
				}
			}
			// For VIP-like LBs, the VIP is often added as a local
			// address (via an IP route rule). In that case, a request
			// from a node to the VIP will not hit the loadbalancer but
			// will loop back with the source IP set to the VIP. We
			// need the following rules to allow requests from this node.
			if allowFromNode {
				for _, lbip := range svcInfo.LoadBalancerIPStrings() {
					proxier.natRules.Write(
						args,
						"-s", lbip,
						"-j", string(externalTrafficChain))
				}
			}
			// If the packet was able to reach the end of firewall chain,
			// then it did not get DNATed. It means the packet cannot go
			// thru the firewall, then mark it for DROP.
			proxier.natRules.Write(args, "-j", string(KubeMarkDropChain))
		}

		for _, lbip := range svcInfo.LoadBalancerIPStrings() {
@@ -1254,44 +1294,6 @@ func (proxier *Proxier) syncProxyRules() {
"--dport", strconv.Itoa(svcInfo.Port()),
"-j", string(nextChain))
// The service firewall rules are created based on the
// loadBalancerSourceRanges field. This only works for
// VIP-like loadbalancers that preserve source IPs. For
// loadbalancers which direct traffic to service NodePort, the
// firewall rules will not apply.
if len(svcInfo.LoadBalancerSourceRanges()) > 0 {
args = append(args[:0],
"-A", string(nextChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
)
// firewall filter based on each source range
allowFromNode := false
for _, src := range svcInfo.LoadBalancerSourceRanges() {
proxier.natRules.Write(args, "-s", src, "-j", string(externalTrafficChain))
_, cidr, err := netutils.ParseCIDRSloppy(src)
if err != nil {
klog.ErrorS(err, "Error parsing CIDR in LoadBalancerSourceRanges, dropping it", "cidr", cidr)
} else if cidr.Contains(proxier.nodeIP) {
allowFromNode = true
}
}
// For VIP-like LBs, the VIP is often added as a local
// address (via an IP route rule). In that case, a request
// from a node to the VIP will not hit the loadbalancer but
// will loop back with the source IP set to the VIP. We
// need the following rule to allow requests from this node.
if allowFromNode {
proxier.natRules.Write(
args,
"-s", lbip,
"-j", string(externalTrafficChain))
}
// If the packet was able to reach the end of firewall chain,
// then it did not get DNATed. It means the packet cannot go
// thru the firewall, then mark it for DROP.
proxier.natRules.Write(args, "-j", string(KubeMarkDropChain))
}
}
} else {
// No endpoints.


@@ -2448,9 +2448,6 @@ func TestLoadBalancer(t *testing.T) {
-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 192.168.0.0/24 -j KUBE-EXT-XPGD46QRK7WJZT7O
-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-XPGD46QRK7WJZT7O
-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 1.2.3.4 -j KUBE-EXT-XPGD46QRK7WJZT7O
-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -j KUBE-MARK-DROP
-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 192.168.0.0/24 -j KUBE-EXT-XPGD46QRK7WJZT7O
-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-XPGD46QRK7WJZT7O
-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 5.6.7.8 -j KUBE-EXT-XPGD46QRK7WJZT7O
-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -j KUBE-MARK-DROP
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
@@ -2564,15 +2561,14 @@ func TestLoadBalancer(t *testing.T) {
			output: fmt.Sprintf("%s:%d", epIP, svcPort),
			masq: true,
		},
		// FIXME: this fails
		// {
		// 	name: "same node to LB2, SNATted to LB2 (implicitly allowed)",
		// 	sourceIP: svcLBIP2,
		// 	destIP: svcLBIP2,
		// 	destPort: svcPort,
		// 	output: fmt.Sprintf("%s:%d", epIP, svcPort),
		// 	masq: true,
		// },
		{
			name: "same node to LB2, SNATted to LB2 (implicitly allowed)",
			sourceIP: svcLBIP2,
			destIP: svcLBIP2,
			destPort: svcPort,
			output: fmt.Sprintf("%s:%d", epIP, svcPort),
			masq: true,
		},
	})
}