Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-25 20:53:33 +00:00)
Merge pull request #62003 from m1093782566/fix-nodeport
Automatic merge from submit-queue (batch tested with PRs 63787, 62003). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Fix localport open with --nodeport-addresses specified

**What this PR does / why we need it**: Fixes the local NodePort socket being opened on all interfaces even when `--nodeport-addresses` restricts NodePorts to specific address ranges; the port is now held only on the matching node IPs.

**Which issue(s) this PR fixes**: Fixes #61953

**Special notes for your reviewer**: @ephur

**Release note**:
```release-note
Services can listen on same host ports on different interfaces with --nodeport-addresses specified
```
This commit is contained in: commit fc28745535
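The release note is easiest to see in isolation: once kube-proxy binds NodePorts per address instead of on the wildcard, the same port can be held on several interfaces at once. A minimal Go sketch of that bind behavior, using hypothetical example addresses (substitute IPs actually assigned to your host):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Hypothetical node IPs; replace with addresses assigned to local interfaces.
	// Binding per-IP (as kube-proxy now does with --nodeport-addresses) leaves
	// the same port free on every interface not in the list; a wildcard bind
	// ("" / 0.0.0.0) would have claimed it everywhere.
	for _, ip := range []string{"192.0.2.10", "198.51.100.10"} {
		ln, err := net.Listen("tcp", net.JoinHostPort(ip, "30080"))
		if err != nil {
			fmt.Printf("bind %s:30080 failed: %v\n", ip, err)
			continue
		}
		defer ln.Close()
		fmt.Printf("holding %s\n", ln.Addr())
	}
}
```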
iptables proxier:

```diff
@@ -960,32 +960,53 @@ func (proxier *Proxier) syncProxyRules() {
 		if svcInfo.NodePort != 0 {
 			// Hold the local port open so no other process can open it
 			// (because the socket might open but it would never work).
-			lp := utilproxy.LocalPort{
-				Description: "nodePort for " + svcNameString,
-				IP:          "",
-				Port:        svcInfo.NodePort,
-				Protocol:    protocol,
-			}
-			if proxier.portsMap[lp] != nil {
-				glog.V(4).Infof("Port %s was open before and is still needed", lp.String())
-				replacementPortsMap[lp] = proxier.portsMap[lp]
-			} else {
-				socket, err := proxier.portMapper.OpenLocalPort(&lp)
-				if err != nil {
-					glog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err)
-					continue
-				}
-				if lp.Protocol == "udp" {
-					// TODO: We might have multiple services using the same port, and this will clear conntrack for all of them.
-					// This is very low impact. The NodePort range is intentionally obscure, and unlikely to actually collide with real Services.
-					// This only affects UDP connections, which are not common.
-					// See issue: https://github.com/kubernetes/kubernetes/issues/49881
-					err := conntrack.ClearEntriesForPort(proxier.exec, lp.Port, isIPv6, v1.ProtocolUDP)
-					if err != nil {
-						glog.Errorf("Failed to clear udp conntrack for port %d, error: %v", lp.Port, err)
-					}
-				}
-				replacementPortsMap[lp] = socket
-			}
+			addresses, err := utilproxy.GetNodeAddresses(proxier.nodePortAddresses, proxier.networkInterfacer)
+			if err != nil {
+				glog.Errorf("Failed to get node ip address matching nodeport cidr: %v", err)
+				continue
+			}
+
+			lps := make([]utilproxy.LocalPort, 0)
+			for address := range addresses {
+				lp := utilproxy.LocalPort{
+					Description: "nodePort for " + svcNameString,
+					IP:          address,
+					Port:        svcInfo.NodePort,
+					Protocol:    protocol,
+				}
+				if utilproxy.IsZeroCIDR(address) {
+					// Empty IP address means all
+					lp.IP = ""
+					lps = append(lps, lp)
+					// If we encounter a zero CIDR, then there is no point in processing the rest of the addresses.
+					break
+				}
+				lps = append(lps, lp)
+			}
+
+			// For ports on node IPs, open the actual port and hold it.
+			for _, lp := range lps {
+				if proxier.portsMap[lp] != nil {
+					glog.V(4).Infof("Port %s was open before and is still needed", lp.String())
+					replacementPortsMap[lp] = proxier.portsMap[lp]
+				} else {
+					socket, err := proxier.portMapper.OpenLocalPort(&lp)
+					if err != nil {
+						glog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err)
+						continue
+					}
+					if lp.Protocol == "udp" {
+						// TODO: We might have multiple services using the same port, and this will clear conntrack for all of them.
+						// This is very low impact. The NodePort range is intentionally obscure, and unlikely to actually collide with real Services.
+						// This only affects UDP connections, which are not common.
+						// See issue: https://github.com/kubernetes/kubernetes/issues/49881
+						err := conntrack.ClearEntriesForPort(proxier.exec, lp.Port, isIPv6, v1.ProtocolUDP)
+						if err != nil {
+							glog.Errorf("Failed to clear udp conntrack for port %d, error: %v", lp.Port, err)
+						}
+					}
+					replacementPortsMap[lp] = socket
+				}
+			}
 		}
 
 		if hasEndpoints {
```
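The core of the change is the expansion step above: every resolved node address becomes its own LocalPort to hold, except that a zero CIDR collapses the remainder of the list into a single wildcard bind. A self-contained sketch of that logic, with `localPort` and `isZeroCIDR` as simplified stand-ins for `utilproxy.LocalPort` and `utilproxy.IsZeroCIDR`:

```go
package main

import "fmt"

// localPort is a simplified stand-in for utilproxy.LocalPort.
type localPort struct {
	IP   string
	Port int
}

// isZeroCIDR is a stand-in for utilproxy.IsZeroCIDR.
func isZeroCIDR(addr string) bool {
	return addr == "0.0.0.0/0" || addr == "::/0"
}

// expandNodePorts mirrors the loop added by this PR: one port per node
// address, unless a zero CIDR appears, in which case a wildcard bind
// (empty IP means all interfaces) is appended and the loop stops.
func expandNodePorts(addresses []string, port int) []localPort {
	lps := make([]localPort, 0, len(addresses))
	for _, address := range addresses {
		lp := localPort{IP: address, Port: port}
		if isZeroCIDR(address) {
			lp.IP = "" // empty IP address means all
			return append(lps, lp)
		}
		lps = append(lps, lp)
	}
	return lps
}

func main() {
	fmt.Println(expandNodePorts([]string{"10.0.0.1", "10.0.0.2"}, 30080))
	// Second case: 10.0.0.1 plus one wildcard entry; the rest is skipped.
	fmt.Println(expandNodePorts([]string{"10.0.0.1", "0.0.0.0/0", "10.0.0.2"}, 30080))
}
```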
IPVS proxier:

```diff
@@ -1066,27 +1066,48 @@ func (proxier *Proxier) syncProxyRules() {
 		}
 
 		if svcInfo.NodePort != 0 {
-			lp := utilproxy.LocalPort{
-				Description: "nodePort for " + svcNameString,
-				IP:          "",
-				Port:        svcInfo.NodePort,
-				Protocol:    protocol,
-			}
-			if proxier.portsMap[lp] != nil {
-				glog.V(4).Infof("Port %s was open before and is still needed", lp.String())
-				replacementPortsMap[lp] = proxier.portsMap[lp]
-			} else {
-				socket, err := proxier.portMapper.OpenLocalPort(&lp)
-				if err != nil {
-					glog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err)
-					continue
-				}
-				if lp.Protocol == "udp" {
-					isIPv6 := utilnet.IsIPv6(svcInfo.ClusterIP)
-					conntrack.ClearEntriesForPort(proxier.exec, lp.Port, isIPv6, clientv1.ProtocolUDP)
-				}
-				replacementPortsMap[lp] = socket
-			} // We're holding the port, so it's OK to install ipvs rules.
+			addresses, err := utilproxy.GetNodeAddresses(proxier.nodePortAddresses, proxier.networkInterfacer)
+			if err != nil {
+				glog.Errorf("Failed to get node ip address matching nodeport cidr: %v", err)
+				continue
+			}
+
+			lps := make([]utilproxy.LocalPort, 0)
+			for address := range addresses {
+				lp := utilproxy.LocalPort{
+					Description: "nodePort for " + svcNameString,
+					IP:          address,
+					Port:        svcInfo.NodePort,
+					Protocol:    protocol,
+				}
+				if utilproxy.IsZeroCIDR(address) {
+					// Empty IP address means all
+					lp.IP = ""
+					lps = append(lps, lp)
+					// If we encounter a zero CIDR, then there is no point in processing the rest of the addresses.
+					break
+				}
+				lps = append(lps, lp)
+			}
+
+			// For ports on node IPs, open the actual port and hold it.
+			for _, lp := range lps {
+				if proxier.portsMap[lp] != nil {
+					glog.V(4).Infof("Port %s was open before and is still needed", lp.String())
+					replacementPortsMap[lp] = proxier.portsMap[lp]
+				} else {
+					socket, err := proxier.portMapper.OpenLocalPort(&lp)
+					if err != nil {
+						glog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err)
+						continue
+					}
+					if lp.Protocol == "udp" {
+						isIPv6 := utilnet.IsIPv6(svcInfo.ClusterIP)
+						conntrack.ClearEntriesForPort(proxier.exec, lp.Port, isIPv6, clientv1.ProtocolUDP)
+					}
+					replacementPortsMap[lp] = socket
+				} // We're holding the port, so it's OK to install ipvs rules.
+			}
 
 			// Nodeports need SNAT, unless they're local.
 			// ipset call
```
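Both proxiers now resolve `--nodeport-addresses` up front via `utilproxy.GetNodeAddresses`. A rough, simplified stand-in for what that resolution does (zero CIDRs pass through unchanged; other CIDRs select matching local interface IPs; an empty flag behaves like "match all"):

```go
package main

import (
	"fmt"
	"net"
)

// getNodeAddresses is a simplified stand-in for utilproxy.GetNodeAddresses.
func getNodeAddresses(cidrs []string, ifaceIPs []net.IP) (map[string]bool, error) {
	out := map[string]bool{}
	if len(cidrs) == 0 {
		// No --nodeport-addresses given: everything matches.
		out["0.0.0.0/0"] = true
		out["::/0"] = true
		return out, nil
	}
	for _, cidr := range cidrs {
		if cidr == "0.0.0.0/0" || cidr == "::/0" {
			// Zero CIDRs are kept verbatim; the caller turns them
			// into a wildcard bind.
			out[cidr] = true
			continue
		}
		_, ipnet, err := net.ParseCIDR(cidr)
		if err != nil {
			return nil, err
		}
		for _, ip := range ifaceIPs {
			if ipnet.Contains(ip) {
				out[ip.String()] = true
			}
		}
	}
	return out, nil
}

func main() {
	ips := []net.IP{net.ParseIP("10.0.0.5"), net.ParseIP("172.16.0.5")}
	addrs, _ := getNodeAddresses([]string{"10.0.0.0/8"}, ips)
	fmt.Println(addrs) // map[10.0.0.5:true]: only 10.0.0.5 holds the NodePort
}
```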
```diff
@@ -1137,11 +1158,6 @@ func (proxier *Proxier) syncProxyRules() {
 
 	// Build ipvs kernel routes for each node ip address
 	nodeIPs := make([]net.IP, 0)
-	addresses, err := utilproxy.GetNodeAddresses(proxier.nodePortAddresses, proxier.networkInterfacer)
-	if err != nil {
-		glog.Errorf("Failed to get node ip address matching nodeport cidr")
-		continue
-	}
 	for address := range addresses {
 		if !utilproxy.IsZeroCIDR(address) {
 			nodeIPs = append(nodeIPs, net.ParseIP(address))
```
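With the hunk above, the IPVS proxier stops re-resolving the node addresses and reuses the set computed during NodePort handling; zero-CIDR entries are filtered out because they name every interface rather than a single routable IP. A sketch of the surviving filter (`isZeroCIDR` again standing in for `utilproxy.IsZeroCIDR`):

```go
package main

import (
	"fmt"
	"net"
)

func isZeroCIDR(addr string) bool { return addr == "0.0.0.0/0" || addr == "::/0" }

func main() {
	// addresses as resolved earlier; a zero CIDR can coexist with concrete IPs.
	addresses := map[string]bool{"10.0.0.5": true, "0.0.0.0/0": true}

	// Build ipvs kernel route targets: only concrete node IPs qualify.
	nodeIPs := make([]net.IP, 0)
	for address := range addresses {
		if !isZeroCIDR(address) {
			nodeIPs = append(nodeIPs, net.ParseIP(address))
		}
	}
	fmt.Println(nodeIPs) // [10.0.0.5]
}
```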