proxy/iptables, proxy/ipvs: Remove an unnecessary check

The iptables and ipvs proxiers both had a check that none of the
elements of svcInfo.LoadBalancerIPStrings() were "", but that was
already guaranteed by the svcInfo code. Drop the unnecessary checks
and remove a level of indentation.
Dan Winship 2021-10-29 08:14:02 -04:00
parent a9ad15c421
commit ab67a942ca
2 changed files with 154 additions and 158 deletions
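The guarantee the commit message refers to can be illustrated with a minimal, self-contained Go sketch. The helper below is hypothetical — it is not the actual svcInfo code — but it shows the invariant being relied on: empty or unparseable ingress IPs are dropped once, when the service info is built, so callers such as syncProxyRules never need to re-check for "".

package main

import (
	"fmt"
	"net"
)

// loadBalancerIPStrings is a hypothetical stand-in for the svcInfo
// accessor: ingress entries whose IP does not parse (including "",
// e.g. a hostname-only ingress) are filtered out at construction
// time instead of being skipped by every caller.
func loadBalancerIPStrings(ingressIPs []string) []string {
	ips := make([]string, 0, len(ingressIPs))
	for _, ip := range ingressIPs {
		if net.ParseIP(ip) == nil {
			continue // empty or invalid: never exposed to the proxiers
		}
		ips = append(ips, ip)
	}
	return ips
}

func main() {
	// Prints [1.2.3.4 2001:db8::1]; the empty string never reaches callers.
	fmt.Println(loadBalancerIPStrings([]string{"1.2.3.4", "", "2001:db8::1"}))
}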

diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go

@@ -1224,80 +1224,78 @@ func (proxier *Proxier) syncProxyRules() {
 		// Capture load-balancer ingress.
 		fwChain := svcInfo.serviceFirewallChainName
 		for _, ingress := range svcInfo.LoadBalancerIPStrings() {
-			if ingress != "" {
-				if hasEndpoints {
-					// create service firewall chain
-					if chain, ok := existingNATChains[fwChain]; ok {
-						proxier.natChains.WriteBytes(chain)
-					} else {
-						proxier.natChains.Write(utiliptables.MakeChainLine(fwChain))
-					}
-					activeNATChains[fwChain] = true
-					// The service firewall rules are created based on ServiceSpec.loadBalancerSourceRanges field.
-					// This currently works for loadbalancers that preserves source ips.
-					// For loadbalancers which direct traffic to service NodePort, the firewall rules will not apply.
-					args = append(args[:0],
-						"-A", string(kubeServicesChain),
-						"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
-						"-m", protocol, "-p", protocol,
-						"-d", utilproxy.ToCIDR(netutils.ParseIPSloppy(ingress)),
-						"--dport", strconv.Itoa(svcInfo.Port()),
-					)
-					// jump to service firewall chain
-					proxier.natRules.Write(args, "-j", string(fwChain))
-					args = append(args[:0],
-						"-A", string(fwChain),
-						"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
-					)
-					// Each source match rule in the FW chain may jump to either the SVC or the XLB chain
-					chosenChain := svcXlbChain
-					// If we are proxying globally, we need to masquerade in case we cross nodes.
-					// If we are proxying only locally, we can retain the source IP.
-					if !svcInfo.NodeLocalExternal() {
-						proxier.natRules.Write(args, "-j", string(KubeMarkMasqChain))
-						chosenChain = svcChain
-					}
-					if len(svcInfo.LoadBalancerSourceRanges()) == 0 {
-						// allow all sources, so jump directly to the KUBE-SVC or KUBE-XLB chain
-						proxier.natRules.Write(args, "-j", string(chosenChain))
-					} else {
-						// firewall filter based on each source range
-						allowFromNode := false
-						for _, src := range svcInfo.LoadBalancerSourceRanges() {
-							proxier.natRules.Write(args, "-s", src, "-j", string(chosenChain))
-							_, cidr, err := netutils.ParseCIDRSloppy(src)
-							if err != nil {
-								klog.ErrorS(err, "Error parsing CIDR in LoadBalancerSourceRanges, dropping it", "cidr", cidr)
-							} else if cidr.Contains(proxier.nodeIP) {
-								allowFromNode = true
-							}
-						}
-						// generally, ip route rule was added to intercept request to loadbalancer vip from the
-						// loadbalancer's backend hosts. In this case, request will not hit the loadbalancer but loop back directly.
-						// Need to add the following rule to allow request on host.
-						if allowFromNode {
-							proxier.natRules.Write(args, "-s", utilproxy.ToCIDR(netutils.ParseIPSloppy(ingress)), "-j", string(chosenChain))
-						}
-					}
-					// If the packet was able to reach the end of firewall chain, then it did not get DNATed.
-					// It means the packet cannot go thru the firewall, then mark it for DROP
-					proxier.natRules.Write(args, "-j", string(KubeMarkDropChain))
-				} else {
-					// No endpoints.
-					proxier.filterRules.Write(
-						"-A", string(kubeExternalServicesChain),
-						"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
-						"-m", protocol, "-p", protocol,
-						"-d", utilproxy.ToCIDR(netutils.ParseIPSloppy(ingress)),
-						"--dport", strconv.Itoa(svcInfo.Port()),
-						"-j", "REJECT",
-					)
-				}
-			}
+			if hasEndpoints {
+				// create service firewall chain
+				if chain, ok := existingNATChains[fwChain]; ok {
+					proxier.natChains.WriteBytes(chain)
+				} else {
+					proxier.natChains.Write(utiliptables.MakeChainLine(fwChain))
+				}
+				activeNATChains[fwChain] = true
+				// The service firewall rules are created based on ServiceSpec.loadBalancerSourceRanges field.
+				// This currently works for loadbalancers that preserves source ips.
+				// For loadbalancers which direct traffic to service NodePort, the firewall rules will not apply.
+				args = append(args[:0],
+					"-A", string(kubeServicesChain),
+					"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
+					"-m", protocol, "-p", protocol,
+					"-d", utilproxy.ToCIDR(netutils.ParseIPSloppy(ingress)),
+					"--dport", strconv.Itoa(svcInfo.Port()),
+				)
+				// jump to service firewall chain
+				proxier.natRules.Write(args, "-j", string(fwChain))
+				args = append(args[:0],
+					"-A", string(fwChain),
+					"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
+				)
+				// Each source match rule in the FW chain may jump to either the SVC or the XLB chain
+				chosenChain := svcXlbChain
+				// If we are proxying globally, we need to masquerade in case we cross nodes.
+				// If we are proxying only locally, we can retain the source IP.
+				if !svcInfo.NodeLocalExternal() {
+					proxier.natRules.Write(args, "-j", string(KubeMarkMasqChain))
+					chosenChain = svcChain
+				}
+				if len(svcInfo.LoadBalancerSourceRanges()) == 0 {
+					// allow all sources, so jump directly to the KUBE-SVC or KUBE-XLB chain
+					proxier.natRules.Write(args, "-j", string(chosenChain))
+				} else {
+					// firewall filter based on each source range
+					allowFromNode := false
+					for _, src := range svcInfo.LoadBalancerSourceRanges() {
+						proxier.natRules.Write(args, "-s", src, "-j", string(chosenChain))
+						_, cidr, err := netutils.ParseCIDRSloppy(src)
+						if err != nil {
+							klog.ErrorS(err, "Error parsing CIDR in LoadBalancerSourceRanges, dropping it", "cidr", cidr)
+						} else if cidr.Contains(proxier.nodeIP) {
+							allowFromNode = true
+						}
+					}
+					// generally, ip route rule was added to intercept request to loadbalancer vip from the
+					// loadbalancer's backend hosts. In this case, request will not hit the loadbalancer but loop back directly.
+					// Need to add the following rule to allow request on host.
+					if allowFromNode {
+						proxier.natRules.Write(args, "-s", utilproxy.ToCIDR(netutils.ParseIPSloppy(ingress)), "-j", string(chosenChain))
+					}
+				}
+				// If the packet was able to reach the end of firewall chain, then it did not get DNATed.
+				// It means the packet cannot go thru the firewall, then mark it for DROP
+				proxier.natRules.Write(args, "-j", string(KubeMarkDropChain))
+			} else {
+				// No endpoints.
+				proxier.filterRules.Write(
+					"-A", string(kubeExternalServicesChain),
+					"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
+					"-m", protocol, "-p", protocol,
+					"-d", utilproxy.ToCIDR(netutils.ParseIPSloppy(ingress)),
+					"--dport", strconv.Itoa(svcInfo.Port()),
+					"-j", "REJECT",
+				)
+			}
 		}
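For readers following the iptables side, the unchanged loop body renders NAT rules along these lines for a hypothetical service ns/svc with VIP 203.0.113.10, port 80/TCP, externalTrafficPolicy: Cluster, and one loadBalancerSourceRanges entry. This is a sketch only: the chain hash suffix and all addresses are invented for illustration.

package main

import "fmt"

func main() {
	// Hypothetical rendering of the rules written by the loop above;
	// "EXAMPLE" stands in for the real per-service chain hash.
	fmt.Print(`-A KUBE-SERVICES -m comment --comment "ns/svc loadbalancer IP" -m tcp -p tcp -d 203.0.113.10/32 --dport 80 -j KUBE-FW-EXAMPLE
-A KUBE-FW-EXAMPLE -m comment --comment "ns/svc loadbalancer IP" -j KUBE-MARK-MASQ
-A KUBE-FW-EXAMPLE -m comment --comment "ns/svc loadbalancer IP" -s 198.51.100.0/24 -j KUBE-SVC-EXAMPLE
-A KUBE-FW-EXAMPLE -m comment --comment "ns/svc loadbalancer IP" -j KUBE-MARK-DROP
`)
}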

diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go

@@ -1307,103 +1307,101 @@ func (proxier *Proxier) syncProxyRules() {
 		// Capture load-balancer ingress.
 		for _, ingress := range svcInfo.LoadBalancerIPStrings() {
-			if ingress != "" {
-				// ipset call
-				entry = &utilipset.Entry{
-					IP:       ingress,
-					Port:     svcInfo.Port(),
-					Protocol: protocol,
-					SetType:  utilipset.HashIPPort,
-				}
-				// add service load balancer ingressIP:Port to kubeServiceAccess ip set for the purpose of solving hairpin.
-				// proxier.kubeServiceAccessSet.activeEntries.Insert(entry.String())
-				// If we are proxying globally, we need to masquerade in case we cross nodes.
-				// If we are proxying only locally, we can retain the source IP.
-				if valid := proxier.ipsetList[kubeLoadBalancerSet].validateEntry(entry); !valid {
-					klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerSet].Name)
-					continue
-				}
-				proxier.ipsetList[kubeLoadBalancerSet].activeEntries.Insert(entry.String())
-				// insert loadbalancer entry to lbIngressLocalSet if service externaltrafficpolicy=local
-				if svcInfo.NodeLocalExternal() {
-					if valid := proxier.ipsetList[kubeLoadBalancerLocalSet].validateEntry(entry); !valid {
-						klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerLocalSet].Name)
-						continue
-					}
-					proxier.ipsetList[kubeLoadBalancerLocalSet].activeEntries.Insert(entry.String())
-				}
-				if len(svcInfo.LoadBalancerSourceRanges()) != 0 {
-					// The service firewall rules are created based on ServiceSpec.loadBalancerSourceRanges field.
-					// This currently works for loadbalancers that preserves source ips.
-					// For loadbalancers which direct traffic to service NodePort, the firewall rules will not apply.
-					if valid := proxier.ipsetList[kubeLoadbalancerFWSet].validateEntry(entry); !valid {
-						klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadbalancerFWSet].Name)
-						continue
-					}
-					proxier.ipsetList[kubeLoadbalancerFWSet].activeEntries.Insert(entry.String())
-					allowFromNode := false
-					for _, src := range svcInfo.LoadBalancerSourceRanges() {
-						// ipset call
-						entry = &utilipset.Entry{
-							IP:       ingress,
-							Port:     svcInfo.Port(),
-							Protocol: protocol,
-							Net:      src,
-							SetType:  utilipset.HashIPPortNet,
-						}
-						// enumerate all white list source cidr
-						if valid := proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].validateEntry(entry); !valid {
-							klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].Name)
-							continue
-						}
-						proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].activeEntries.Insert(entry.String())
-						// ignore error because it has been validated
-						_, cidr, _ := netutils.ParseCIDRSloppy(src)
-						if cidr.Contains(proxier.nodeIP) {
-							allowFromNode = true
-						}
-					}
-					// generally, ip route rule was added to intercept request to loadbalancer vip from the
-					// loadbalancer's backend hosts. In this case, request will not hit the loadbalancer but loop back directly.
-					// Need to add the following rule to allow request on host.
-					if allowFromNode {
-						entry = &utilipset.Entry{
-							IP:       ingress,
-							Port:     svcInfo.Port(),
-							Protocol: protocol,
-							IP2:      ingress,
-							SetType:  utilipset.HashIPPortIP,
-						}
-						// enumerate all white list source ip
-						if valid := proxier.ipsetList[kubeLoadBalancerSourceIPSet].validateEntry(entry); !valid {
-							klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerSourceIPSet].Name)
-							continue
-						}
-						proxier.ipsetList[kubeLoadBalancerSourceIPSet].activeEntries.Insert(entry.String())
-					}
-				}
-				// ipvs call
-				serv := &utilipvs.VirtualServer{
-					Address:   netutils.ParseIPSloppy(ingress),
-					Port:      uint16(svcInfo.Port()),
-					Protocol:  string(svcInfo.Protocol()),
-					Scheduler: proxier.ipvsScheduler,
-				}
-				if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
-					serv.Flags |= utilipvs.FlagPersistent
-					serv.Timeout = uint32(svcInfo.StickyMaxAgeSeconds())
-				}
-				if err := proxier.syncService(svcNameString, serv, true, bindedAddresses); err == nil {
-					activeIPVSServices[serv.String()] = true
-					activeBindAddrs[serv.Address.String()] = true
-					if err := proxier.syncEndpoint(svcName, svcInfo.NodeLocalExternal(), serv); err != nil {
-						klog.ErrorS(err, "Failed to sync endpoint for service", "serviceName", svcName, "virtualServer", serv)
-					}
-				} else {
-					klog.ErrorS(err, "Failed to sync service", "serviceName", svcName, "virtualServer", serv)
-				}
-			}
+			// ipset call
+			entry = &utilipset.Entry{
+				IP:       ingress,
+				Port:     svcInfo.Port(),
+				Protocol: protocol,
+				SetType:  utilipset.HashIPPort,
+			}
+			// add service load balancer ingressIP:Port to kubeServiceAccess ip set for the purpose of solving hairpin.
+			// proxier.kubeServiceAccessSet.activeEntries.Insert(entry.String())
+			// If we are proxying globally, we need to masquerade in case we cross nodes.
+			// If we are proxying only locally, we can retain the source IP.
+			if valid := proxier.ipsetList[kubeLoadBalancerSet].validateEntry(entry); !valid {
+				klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerSet].Name)
+				continue
+			}
+			proxier.ipsetList[kubeLoadBalancerSet].activeEntries.Insert(entry.String())
+			// insert loadbalancer entry to lbIngressLocalSet if service externaltrafficpolicy=local
+			if svcInfo.NodeLocalExternal() {
+				if valid := proxier.ipsetList[kubeLoadBalancerLocalSet].validateEntry(entry); !valid {
+					klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerLocalSet].Name)
+					continue
+				}
+				proxier.ipsetList[kubeLoadBalancerLocalSet].activeEntries.Insert(entry.String())
+			}
+			if len(svcInfo.LoadBalancerSourceRanges()) != 0 {
+				// The service firewall rules are created based on ServiceSpec.loadBalancerSourceRanges field.
+				// This currently works for loadbalancers that preserves source ips.
+				// For loadbalancers which direct traffic to service NodePort, the firewall rules will not apply.
+				if valid := proxier.ipsetList[kubeLoadbalancerFWSet].validateEntry(entry); !valid {
+					klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadbalancerFWSet].Name)
+					continue
+				}
+				proxier.ipsetList[kubeLoadbalancerFWSet].activeEntries.Insert(entry.String())
+				allowFromNode := false
+				for _, src := range svcInfo.LoadBalancerSourceRanges() {
+					// ipset call
+					entry = &utilipset.Entry{
+						IP:       ingress,
+						Port:     svcInfo.Port(),
+						Protocol: protocol,
+						Net:      src,
+						SetType:  utilipset.HashIPPortNet,
+					}
+					// enumerate all white list source cidr
+					if valid := proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].validateEntry(entry); !valid {
+						klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].Name)
+						continue
+					}
+					proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].activeEntries.Insert(entry.String())
+					// ignore error because it has been validated
+					_, cidr, _ := netutils.ParseCIDRSloppy(src)
+					if cidr.Contains(proxier.nodeIP) {
+						allowFromNode = true
+					}
+				}
+				// generally, ip route rule was added to intercept request to loadbalancer vip from the
+				// loadbalancer's backend hosts. In this case, request will not hit the loadbalancer but loop back directly.
+				// Need to add the following rule to allow request on host.
+				if allowFromNode {
+					entry = &utilipset.Entry{
+						IP:       ingress,
+						Port:     svcInfo.Port(),
+						Protocol: protocol,
+						IP2:      ingress,
+						SetType:  utilipset.HashIPPortIP,
+					}
+					// enumerate all white list source ip
+					if valid := proxier.ipsetList[kubeLoadBalancerSourceIPSet].validateEntry(entry); !valid {
+						klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerSourceIPSet].Name)
+						continue
+					}
+					proxier.ipsetList[kubeLoadBalancerSourceIPSet].activeEntries.Insert(entry.String())
+				}
+			}
+			// ipvs call
+			serv := &utilipvs.VirtualServer{
+				Address:   netutils.ParseIPSloppy(ingress),
+				Port:      uint16(svcInfo.Port()),
+				Protocol:  string(svcInfo.Protocol()),
+				Scheduler: proxier.ipvsScheduler,
+			}
+			if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
+				serv.Flags |= utilipvs.FlagPersistent
+				serv.Timeout = uint32(svcInfo.StickyMaxAgeSeconds())
+			}
+			if err := proxier.syncService(svcNameString, serv, true, bindedAddresses); err == nil {
+				activeIPVSServices[serv.String()] = true
+				activeBindAddrs[serv.Address.String()] = true
+				if err := proxier.syncEndpoint(svcName, svcInfo.NodeLocalExternal(), serv); err != nil {
+					klog.ErrorS(err, "Failed to sync endpoint for service", "serviceName", svcName, "virtualServer", serv)
+				}
+			} else {
+				klog.ErrorS(err, "Failed to sync service", "serviceName", svcName, "virtualServer", serv)
+			}
 		}
 		if svcInfo.NodePort() != 0 {
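On the ipvs side, the loop populates ipsets rather than writing iptables rules directly. A rough sketch of the entry strings inserted for the same hypothetical VIP and source range follows; the serialization format is assumed from the hash:ip,port-style set types used above, and all values are invented for illustration.

package main

import "fmt"

func main() {
	// Assumed entry formats for the ipset types used above
	// (hash:ip,port and hash:ip,port,net); values are illustrative only.
	fmt.Println("KUBE-LOAD-BALANCER             203.0.113.10,tcp:80")                 // HashIPPort
	fmt.Println("KUBE-LOAD-BALANCER-FW          203.0.113.10,tcp:80")                 // HashIPPort
	fmt.Println("KUBE-LOAD-BALANCER-SOURCE-CIDR 203.0.113.10,tcp:80,198.51.100.0/24") // HashIPPortNet
}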