Stop trying to "preserve" iptables counters that are always 0
The iptables and ipvs proxies have code that tries to preserve certain iptables counters when modifying chains via iptables-restore. However, the counters in question only actually exist for the built-in chains (e.g. INPUT, FORWARD, PREROUTING, etc.), which we never modify via iptables-restore (and in fact *can't* safely modify via iptables-restore). So we were really just doing a lot of unnecessary work to copy the constant string "[0:0]" from iptables-save output into iptables-restore input. Stop doing that.
Also fix a confused error message when iptables-save fails.
parent 7a9268d83a
commit a3556edba1
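For context, a minimal, standalone sketch of the point made in the commit message above: the declaration line that iptables-restore needs for a user-defined chain is always the constant ":NAME - [0:0]", so the old "look it up in iptables-save output to preserve counters" branch and the new unconditional write produce identical restore input. Here makeChainLine is a local stand-in for utiliptables.MakeChainLine, and the map is a hypothetical stand-in for the parsed iptables-save output (existingNATChains); this is not the proxier code itself.

package main

import (
	"bytes"
	"fmt"
)

// makeChainLine is a stand-in for utiliptables.MakeChainLine: it builds the
// iptables-restore declaration for a chain. For user-defined chains the
// packet/byte counters are always the constant "[0:0]"; only built-in chains
// (INPUT, FORWARD, PREROUTING, ...) ever report non-zero counters, and those
// are never rewritten via iptables-restore.
func makeChainLine(chain string) string {
	return fmt.Sprintf(":%s - [0:0]", chain)
}

func main() {
	// Hypothetical parsed iptables-save output, standing in for existingNATChains.
	existing := map[string]string{
		"KUBE-SERVICES": ":KUBE-SERVICES - [0:0]", // a user chain always saves as [0:0]
	}

	var oldInput, newInput bytes.Buffer

	// Old pattern: copy the saved chain line if present, to "preserve" its counters.
	if line, ok := existing["KUBE-SERVICES"]; ok {
		oldInput.WriteString(line + "\n")
	} else {
		oldInput.WriteString(makeChainLine("KUBE-SERVICES") + "\n")
	}

	// New pattern: always write a fresh chain line.
	newInput.WriteString(makeChainLine("KUBE-SERVICES") + "\n")

	// Both buffers contain exactly ":KUBE-SERVICES - [0:0]\n".
	fmt.Printf("old: %q\nnew: %q\nequal: %v\n", oldInput.String(), newInput.String(), oldInput.String() == newInput.String())
}

In other words, the lookup only ever copies the same constant string that MakeChainLine would have produced, which is why the hunks below replace each lookup with an unconditional Write(MakeChainLine(...)).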
@@ -431,16 +431,16 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
 	for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain} {
 		if _, found := existingNATChains[chain]; found {
 			chainString := string(chain)
-			natChains.WriteBytes(existingNATChains[chain]) // flush
-			natRules.Write("-X", chainString)              // delete
+			natChains.Write(utiliptables.MakeChainLine(chain)) // flush
+			natRules.Write("-X", chainString)                  // delete
 		}
 	}
 	// Hunt for service and endpoint chains.
 	for chain := range existingNATChains {
 		chainString := string(chain)
 		if isServiceChainName(chainString) {
-			natChains.WriteBytes(existingNATChains[chain]) // flush
-			natRules.Write("-X", chainString)              // delete
+			natChains.Write(utiliptables.MakeChainLine(chain)) // flush
+			natRules.Write("-X", chainString)                  // delete
 		}
 	}
 	natRules.Write("COMMIT")
@@ -467,7 +467,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
 	for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeExternalServicesChain, kubeForwardChain, kubeNodePortsChain} {
 		if _, found := existingFilterChains[chain]; found {
 			chainString := string(chain)
-			filterChains.WriteBytes(existingFilterChains[chain])
+			filterChains.Write(utiliptables.MakeChainLine(chain))
 			filterRules.Write("-X", chainString)
 		}
 	}
@@ -890,22 +890,13 @@ func (proxier *Proxier) syncProxyRules() {
 
 	// Get iptables-save output so we can check for existing chains and rules.
 	// This will be a map of chain name to chain with rules as stored in iptables-save/iptables-restore
-	existingFilterChains := make(map[utiliptables.Chain][]byte)
-	proxier.existingFilterChainsData.Reset()
-	err := proxier.iptables.SaveInto(utiliptables.TableFilter, proxier.existingFilterChainsData)
-	if err != nil { // if we failed to get any rules
-		klog.ErrorS(err, "Failed to execute iptables-save, syncing all rules")
-	} else { // otherwise parse the output
-		existingFilterChains = utiliptables.GetChainLines(utiliptables.TableFilter, proxier.existingFilterChainsData.Bytes())
-	}
-
 	// IMPORTANT: existingNATChains may share memory with proxier.iptablesData.
 	existingNATChains := make(map[utiliptables.Chain][]byte)
 	proxier.iptablesData.Reset()
-	err = proxier.iptables.SaveInto(utiliptables.TableNAT, proxier.iptablesData)
-	if err != nil { // if we failed to get any rules
-		klog.ErrorS(err, "Failed to execute iptables-save, syncing all rules")
-	} else { // otherwise parse the output
+	err := proxier.iptables.SaveInto(utiliptables.TableNAT, proxier.iptablesData)
+	if err != nil {
+		klog.ErrorS(err, "Failed to execute iptables-save: stale chains will not be deleted")
+	} else {
 		existingNATChains = utiliptables.GetChainLines(utiliptables.TableNAT, proxier.iptablesData.Bytes())
 	}
 
@@ -919,18 +910,10 @@ func (proxier *Proxier) syncProxyRules() {
 	// Make sure we keep stats for the top-level chains, if they existed
 	// (which most should have because we created them above).
 	for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeExternalServicesChain, kubeForwardChain, kubeNodePortsChain} {
-		if chain, ok := existingFilterChains[chainName]; ok {
-			proxier.filterChains.WriteBytes(chain)
-		} else {
-			proxier.filterChains.Write(utiliptables.MakeChainLine(chainName))
-		}
+		proxier.filterChains.Write(utiliptables.MakeChainLine(chainName))
 	}
 	for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, kubeMarkMasqChain} {
-		if chain, ok := existingNATChains[chainName]; ok {
-			proxier.natChains.WriteBytes(chain)
-		} else {
-			proxier.natChains.Write(utiliptables.MakeChainLine(chainName))
-		}
+		proxier.natChains.Write(utiliptables.MakeChainLine(chainName))
 	}
 
 	// Install the kubernetes-specific postrouting rules. We use a whole chain for
@@ -1028,12 +1011,8 @@ func (proxier *Proxier) syncProxyRules() {
 
 			endpointChain := epInfo.ChainName
 
-			// Create the endpoint chain, retaining counters if possible.
-			if chain, ok := existingNATChains[endpointChain]; ok {
-				proxier.natChains.WriteBytes(chain)
-			} else {
-				proxier.natChains.Write(utiliptables.MakeChainLine(endpointChain))
-			}
+			// Create the endpoint chain
+			proxier.natChains.Write(utiliptables.MakeChainLine(endpointChain))
 			activeNATChains[endpointChain] = true
 
 			args = append(args[:0], "-A", string(endpointChain))
@@ -1077,22 +1056,14 @@ func (proxier *Proxier) syncProxyRules() {
 
 		// Declare the clusterPolicyChain if needed.
 		if hasEndpoints && svcInfo.UsesClusterEndpoints() {
-			// Create the Cluster traffic policy chain, retaining counters if possible.
-			if chain, ok := existingNATChains[clusterPolicyChain]; ok {
-				proxier.natChains.WriteBytes(chain)
-			} else {
-				proxier.natChains.Write(utiliptables.MakeChainLine(clusterPolicyChain))
-			}
+			// Create the Cluster traffic policy chain
+			proxier.natChains.Write(utiliptables.MakeChainLine(clusterPolicyChain))
 			activeNATChains[clusterPolicyChain] = true
 		}
 
 		// Declare the localPolicyChain if needed.
 		if hasEndpoints && svcInfo.UsesLocalEndpoints() {
-			if chain, ok := existingNATChains[localPolicyChain]; ok {
-				proxier.natChains.WriteBytes(chain)
-			} else {
-				proxier.natChains.Write(utiliptables.MakeChainLine(localPolicyChain))
-			}
+			proxier.natChains.Write(utiliptables.MakeChainLine(localPolicyChain))
 			activeNATChains[localPolicyChain] = true
 		}
 
@@ -1101,11 +1072,7 @@ func (proxier *Proxier) syncProxyRules() {
 		// jump to externalTrafficChain, which will handle some special-cases
 		// and then jump to externalPolicyChain.
 		if hasEndpoints && svcInfo.ExternallyAccessible() {
-			if chain, ok := existingNATChains[externalTrafficChain]; ok {
-				proxier.natChains.WriteBytes(chain)
-			} else {
-				proxier.natChains.Write(utiliptables.MakeChainLine(externalTrafficChain))
-			}
+			proxier.natChains.Write(utiliptables.MakeChainLine(externalTrafficChain))
 			activeNATChains[externalTrafficChain] = true
 
 			if !svcInfo.ExternalPolicyLocal() {
@@ -1233,11 +1200,7 @@ func (proxier *Proxier) syncProxyRules() {
 			fwChain := svcInfo.firewallChainName
 
 			// Declare the service firewall chain.
-			if chain, ok := existingNATChains[fwChain]; ok {
-				proxier.natChains.WriteBytes(chain)
-			} else {
-				proxier.natChains.Write(utiliptables.MakeChainLine(fwChain))
-			}
+			proxier.natChains.Write(utiliptables.MakeChainLine(fwChain))
 			activeNATChains[fwChain] = true
 
 			// The firewall chain will jump to the "external destination"
@@ -1384,7 +1347,7 @@ func (proxier *Proxier) syncProxyRules() {
 			// We must (as per iptables) write a chain-line for it, which has
 			// the nice effect of flushing the chain. Then we can remove the
 			// chain.
-			proxier.natChains.WriteBytes(existingNATChains[chain])
+			proxier.natChains.Write(utiliptables.MakeChainLine(chain))
 			proxier.natRules.Write("-X", chainString)
 		}
 	}
@@ -1839,27 +1839,15 @@ func (proxier *Proxier) acceptIPVSTraffic() {
 
 // createAndLinkKubeChain create all kube chains that ipvs proxier need and write basic link.
 func (proxier *Proxier) createAndLinkKubeChain() {
-	existingFilterChains := proxier.getExistingChains(proxier.filterChainsData, utiliptables.TableFilter)
-	existingNATChains := proxier.getExistingChains(proxier.iptablesData, utiliptables.TableNAT)
-
-	// Make sure we keep stats for the top-level chains
 	for _, ch := range iptablesChains {
 		if _, err := proxier.iptables.EnsureChain(ch.table, ch.chain); err != nil {
 			klog.ErrorS(err, "Failed to ensure chain exists", "table", ch.table, "chain", ch.chain)
 			return
 		}
 		if ch.table == utiliptables.TableNAT {
-			if chain, ok := existingNATChains[ch.chain]; ok {
-				proxier.natChains.WriteBytes(chain)
-			} else {
-				proxier.natChains.Write(utiliptables.MakeChainLine(ch.chain))
-			}
+			proxier.natChains.Write(utiliptables.MakeChainLine(ch.chain))
 		} else {
-			if chain, ok := existingFilterChains[ch.chain]; ok {
-				proxier.filterChains.WriteBytes(chain)
-			} else {
-				proxier.filterChains.Write(utiliptables.MakeChainLine(ch.chain))
-			}
+			proxier.filterChains.Write(utiliptables.MakeChainLine(ch.chain))
 		}
 	}
 
@@ -1872,20 +1860,6 @@ func (proxier *Proxier) createAndLinkKubeChain() {
 
 }
 
-// getExistingChains get iptables-save output so we can check for existing chains and rules.
-// This will be a map of chain name to chain with rules as stored in iptables-save/iptables-restore
-// Result may SHARE memory with contents of buffer.
-func (proxier *Proxier) getExistingChains(buffer *bytes.Buffer, table utiliptables.Table) map[utiliptables.Chain][]byte {
-	buffer.Reset()
-	err := proxier.iptables.SaveInto(table, buffer)
-	if err != nil { // if we failed to get any rules
-		klog.ErrorS(err, "Failed to execute iptables-save, syncing all rules")
-	} else { // otherwise parse the output
-		return utiliptables.GetChainLines(table, buffer.Bytes())
-	}
-	return nil
-}
-
 // After a UDP or SCTP endpoint has been removed, we must flush any pending conntrack entries to it, or else we
 // risk sending more traffic to it, all of which will be lost (because UDP).
 // This assumes the proxier mutex is held