proxy/iptables, proxy/ipvs: Remove an unnecessary check
The iptables and ipvs proxiers both had a check that none of the elements of svcInfo.LoadBalancerIPStrings() were "", but that was already guaranteed by the svcInfo code. Drop the unnecessary checks and remove a level of indentation.
This commit is contained in:
parent a9ad15c421, commit ab67a942ca
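For context, the guarantee the commit message relies on comes from the shared service-info code, which only records non-empty ingress IPs when it builds the per-service snapshot. Below is a minimal sketch of that filtering, using a hypothetical standalone helper rather than the actual constructor (which also filters by IP family):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// loadBalancerIPStrings collects the ingress IPs of a Service's load-balancer
// status, skipping empty entries (hostname-only ingress points have IP == "").
// Because of this filtering, callers ranging over the result never see "".
// Illustrative helper only; not the proxier's real constructor.
func loadBalancerIPStrings(svc *v1.Service) []string {
	var ips []string
	for _, ing := range svc.Status.LoadBalancer.Ingress {
		if ing.IP != "" {
			ips = append(ips, ing.IP)
		}
	}
	return ips
}

func main() {
	svc := &v1.Service{}
	svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{
		{IP: "192.0.2.10"},
		{Hostname: "lb.example.com"}, // no IP; dropped
	}
	fmt.Println(loadBalancerIPStrings(svc)) // [192.0.2.10]
}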
@@ -1224,80 +1224,78 @@ func (proxier *Proxier) syncProxyRules() {
 		// Capture load-balancer ingress.
 		fwChain := svcInfo.serviceFirewallChainName
 		for _, ingress := range svcInfo.LoadBalancerIPStrings() {
-			if ingress != "" {
 			if hasEndpoints {
 				// create service firewall chain
 				if chain, ok := existingNATChains[fwChain]; ok {
 					proxier.natChains.WriteBytes(chain)
 				} else {
 					proxier.natChains.Write(utiliptables.MakeChainLine(fwChain))
 				}
 				activeNATChains[fwChain] = true
 				// The service firewall rules are created based on ServiceSpec.loadBalancerSourceRanges field.
 				// This currently works for loadbalancers that preserves source ips.
 				// For loadbalancers which direct traffic to service NodePort, the firewall rules will not apply.

 				args = append(args[:0],
 					"-A", string(kubeServicesChain),
 					"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
 					"-m", protocol, "-p", protocol,
 					"-d", utilproxy.ToCIDR(netutils.ParseIPSloppy(ingress)),
 					"--dport", strconv.Itoa(svcInfo.Port()),
 				)
 				// jump to service firewall chain
 				proxier.natRules.Write(args, "-j", string(fwChain))

 				args = append(args[:0],
 					"-A", string(fwChain),
 					"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
 				)

 				// Each source match rule in the FW chain may jump to either the SVC or the XLB chain
 				chosenChain := svcXlbChain
 				// If we are proxying globally, we need to masquerade in case we cross nodes.
 				// If we are proxying only locally, we can retain the source IP.
 				if !svcInfo.NodeLocalExternal() {
 					proxier.natRules.Write(args, "-j", string(KubeMarkMasqChain))
 					chosenChain = svcChain
 				}

 				if len(svcInfo.LoadBalancerSourceRanges()) == 0 {
 					// allow all sources, so jump directly to the KUBE-SVC or KUBE-XLB chain
 					proxier.natRules.Write(args, "-j", string(chosenChain))
 				} else {
 					// firewall filter based on each source range
 					allowFromNode := false
 					for _, src := range svcInfo.LoadBalancerSourceRanges() {
 						proxier.natRules.Write(args, "-s", src, "-j", string(chosenChain))
 						_, cidr, err := netutils.ParseCIDRSloppy(src)
 						if err != nil {
 							klog.ErrorS(err, "Error parsing CIDR in LoadBalancerSourceRanges, dropping it", "cidr", cidr)
 						} else if cidr.Contains(proxier.nodeIP) {
 							allowFromNode = true
 						}
 					}
 					// generally, ip route rule was added to intercept request to loadbalancer vip from the
 					// loadbalancer's backend hosts. In this case, request will not hit the loadbalancer but loop back directly.
 					// Need to add the following rule to allow request on host.
 					if allowFromNode {
 						proxier.natRules.Write(args, "-s", utilproxy.ToCIDR(netutils.ParseIPSloppy(ingress)), "-j", string(chosenChain))
 					}
 				}

 				// If the packet was able to reach the end of firewall chain, then it did not get DNATed.
 				// It means the packet cannot go thru the firewall, then mark it for DROP
 				proxier.natRules.Write(args, "-j", string(KubeMarkDropChain))
 			} else {
 				// No endpoints.
 				proxier.filterRules.Write(
 					"-A", string(kubeExternalServicesChain),
 					"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
 					"-m", protocol, "-p", protocol,
 					"-d", utilproxy.ToCIDR(netutils.ParseIPSloppy(ingress)),
 					"--dport", strconv.Itoa(svcInfo.Port()),
 					"-j", "REJECT",
 				)
 			}
-			}
 		}
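The hunk above writes these decisions as iptables rules in the per-service KUBE-FW chain. The following minimal Go sketch restates the same branch logic in isolation: pick the target chain based on externalTrafficPolicy, and whitelist the node itself when the node IP falls inside one of the LoadBalancerSourceRanges. The helper and chain names are illustrative only, not part of the proxier API, and it uses net.ParseCIDR instead of netutils.ParseCIDRSloppy to stay self-contained:

package main

import (
	"fmt"
	"net"
)

// chooseLBFirewallTargets mirrors the decision encoded in the KUBE-FW-* chain:
// traffic goes to the "local" (XLB) chain when externalTrafficPolicy is Local,
// otherwise it is masqueraded and sent to the regular SVC chain, and the node
// is additionally allowed when its IP is covered by a configured source range.
func chooseLBFirewallTargets(nodeLocalExternal bool, sourceRanges []string, nodeIP net.IP) (chain string, allowFromNode bool) {
	chain = "KUBE-XLB" // Local traffic policy: preserve the client source IP
	if !nodeLocalExternal {
		chain = "KUBE-SVC" // Cluster policy: masquerade, traffic may cross nodes
	}
	for _, src := range sourceRanges {
		_, cidr, err := net.ParseCIDR(src)
		if err != nil {
			continue // the proxier logs and drops unparseable ranges
		}
		if cidr.Contains(nodeIP) {
			allowFromNode = true
		}
	}
	return chain, allowFromNode
}

func main() {
	chain, allowFromNode := chooseLBFirewallTargets(false, []string{"10.0.0.0/8"}, net.ParseIP("10.1.2.3"))
	fmt.Println(chain, allowFromNode) // KUBE-SVC true
}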
@@ -1307,103 +1307,101 @@ func (proxier *Proxier) syncProxyRules() {
 		// Capture load-balancer ingress.
 		for _, ingress := range svcInfo.LoadBalancerIPStrings() {
-			if ingress != "" {
 			// ipset call
 			entry = &utilipset.Entry{
 				IP:       ingress,
 				Port:     svcInfo.Port(),
 				Protocol: protocol,
 				SetType:  utilipset.HashIPPort,
 			}
 			// add service load balancer ingressIP:Port to kubeServiceAccess ip set for the purpose of solving hairpin.
 			// proxier.kubeServiceAccessSet.activeEntries.Insert(entry.String())
 			// If we are proxying globally, we need to masquerade in case we cross nodes.
 			// If we are proxying only locally, we can retain the source IP.
 			if valid := proxier.ipsetList[kubeLoadBalancerSet].validateEntry(entry); !valid {
 				klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerSet].Name)
 				continue
 			}
 			proxier.ipsetList[kubeLoadBalancerSet].activeEntries.Insert(entry.String())
 			// insert loadbalancer entry to lbIngressLocalSet if service externaltrafficpolicy=local
 			if svcInfo.NodeLocalExternal() {
 				if valid := proxier.ipsetList[kubeLoadBalancerLocalSet].validateEntry(entry); !valid {
 					klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerLocalSet].Name)
 					continue
 				}
 				proxier.ipsetList[kubeLoadBalancerLocalSet].activeEntries.Insert(entry.String())
 			}
 			if len(svcInfo.LoadBalancerSourceRanges()) != 0 {
 				// The service firewall rules are created based on ServiceSpec.loadBalancerSourceRanges field.
 				// This currently works for loadbalancers that preserves source ips.
 				// For loadbalancers which direct traffic to service NodePort, the firewall rules will not apply.
 				if valid := proxier.ipsetList[kubeLoadbalancerFWSet].validateEntry(entry); !valid {
 					klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadbalancerFWSet].Name)
 					continue
 				}
 				proxier.ipsetList[kubeLoadbalancerFWSet].activeEntries.Insert(entry.String())
 				allowFromNode := false
 				for _, src := range svcInfo.LoadBalancerSourceRanges() {
 					// ipset call
 					entry = &utilipset.Entry{
 						IP:       ingress,
 						Port:     svcInfo.Port(),
 						Protocol: protocol,
 						Net:      src,
 						SetType:  utilipset.HashIPPortNet,
 					}
 					// enumerate all white list source cidr
 					if valid := proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].validateEntry(entry); !valid {
 						klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].Name)
 						continue
 					}
 					proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].activeEntries.Insert(entry.String())

 					// ignore error because it has been validated
 					_, cidr, _ := netutils.ParseCIDRSloppy(src)
 					if cidr.Contains(proxier.nodeIP) {
 						allowFromNode = true
 					}
 				}
 				// generally, ip route rule was added to intercept request to loadbalancer vip from the
 				// loadbalancer's backend hosts. In this case, request will not hit the loadbalancer but loop back directly.
 				// Need to add the following rule to allow request on host.
 				if allowFromNode {
 					entry = &utilipset.Entry{
 						IP:       ingress,
 						Port:     svcInfo.Port(),
 						Protocol: protocol,
 						IP2:      ingress,
 						SetType:  utilipset.HashIPPortIP,
 					}
 					// enumerate all white list source ip
 					if valid := proxier.ipsetList[kubeLoadBalancerSourceIPSet].validateEntry(entry); !valid {
 						klog.ErrorS(nil, "Error adding entry to ipset", "entry", entry, "ipset", proxier.ipsetList[kubeLoadBalancerSourceIPSet].Name)
 						continue
 					}
 					proxier.ipsetList[kubeLoadBalancerSourceIPSet].activeEntries.Insert(entry.String())
 				}
 			}
 			// ipvs call
 			serv := &utilipvs.VirtualServer{
 				Address:   netutils.ParseIPSloppy(ingress),
 				Port:      uint16(svcInfo.Port()),
 				Protocol:  string(svcInfo.Protocol()),
 				Scheduler: proxier.ipvsScheduler,
 			}
 			if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
 				serv.Flags |= utilipvs.FlagPersistent
 				serv.Timeout = uint32(svcInfo.StickyMaxAgeSeconds())
 			}
 			if err := proxier.syncService(svcNameString, serv, true, bindedAddresses); err == nil {
 				activeIPVSServices[serv.String()] = true
 				activeBindAddrs[serv.Address.String()] = true
 				if err := proxier.syncEndpoint(svcName, svcInfo.NodeLocalExternal(), serv); err != nil {
 					klog.ErrorS(err, "Failed to sync endpoint for service", "serviceName", svcName, "virtualServer", serv)
 				}
 			} else {
 				klog.ErrorS(err, "Failed to sync service", "serviceName", svcName, "virtualServer", serv)
 			}
-			}
 		}

 		if svcInfo.NodePort() != 0 {
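The "ipvs call" in this hunk creates one IPVS virtual server per load-balancer ingress IP, with persistence enabled when the Service uses ClientIP session affinity. Below is a small self-contained sketch of that shape; the struct and flag value are stand-ins for utilipvs.VirtualServer and utilipvs.FlagPersistent (assumptions, since the real types live in the internal k8s.io/kubernetes/pkg/util/ipvs package):

package main

import (
	"fmt"
	"net"
)

// virtualServer is a stand-in for the proxier's utilipvs.VirtualServer;
// the real type carries more fields.
type virtualServer struct {
	Address   net.IP
	Port      uint16
	Protocol  string
	Scheduler string
	Flags     uint32
	Timeout   uint32
}

const flagPersistent uint32 = 0x1 // placeholder for utilipvs.FlagPersistent

// lbVirtualServer builds one IPVS service for a load-balancer ingress IP,
// turning on persistence (sticky sessions) for ClientIP session affinity.
func lbVirtualServer(ingress string, port uint16, protocol, scheduler string, clientIPAffinity bool, stickySeconds uint32) *virtualServer {
	serv := &virtualServer{
		Address:   net.ParseIP(ingress),
		Port:      port,
		Protocol:  protocol,
		Scheduler: scheduler,
	}
	if clientIPAffinity {
		serv.Flags |= flagPersistent
		serv.Timeout = stickySeconds
	}
	return serv
}

func main() {
	serv := lbVirtualServer("192.0.2.10", 443, "TCP", "rr", true, 10800)
	fmt.Printf("%s:%d/%s scheduler=%s persistent=%v timeout=%ds\n",
		serv.Address, serv.Port, serv.Protocol, serv.Scheduler, serv.Flags&flagPersistent != 0, serv.Timeout)
}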