Simplify nftables/proxier.go by removing partial syncing

Since optimization will be done differently in nftables.
Dan Winship 2023-06-01 10:30:40 -04:00
parent 39a5af1d0a
commit e7c35d27f7
2 changed files with 141 additions and 281 deletions
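For orientation: the mechanism being deleted gated each sync on whether a full resync was needed, and only services with pending Service/EndpointSlice changes had their chains rewritten; everything else was omitted from the iptables-restore input. A minimal self-contained sketch of that gating pattern (hypothetical names, not the actual kube-proxy types):

package main

import "fmt"

// Sketch of the partial-sync gating that this commit removes (hypothetical
// reimplementation; the real logic lived in Proxier.syncProxyRules).
type sketchProxier struct {
	needFullSync bool            // forced at startup, on node label changes, on iptables flush
	pending      map[string]bool // services whose objects changed since the last sync
}

func (p *sketchProxier) syncProxyRules(services []string) {
	tryPartialSync := !p.needFullSync
	for _, svc := range services {
		if tryPartialSync && !p.pending[svc] {
			continue // unchanged service: omit its SVC/SEP chains from the restore input
		}
		fmt.Println("rewriting chains for", svc)
	}
	p.pending = map[string]bool{}
	p.needFullSync = false
}

func main() {
	p := &sketchProxier{needFullSync: true, pending: map[string]bool{}}
	svcs := []string{"ns1/svc1", "ns2/svc2"}
	p.syncProxyRules(svcs) // full sync: both services' chains are written
	p.pending["ns2/svc2"] = true
	p.syncProxyRules(svcs) // partial sync: only ns2/svc2 is rewritten
}

After this commit, syncProxyRules always regenerates every chain, which is why needFullSync, forceSyncProxyRules, the partial-sync latency/failure metrics, and the skippedNatChains/skippedNatRules buffers all disappear in the diff below.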


@@ -36,7 +36,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	discovery "k8s.io/api/discovery/v1"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/events"
 	utilsysctl "k8s.io/component-helpers/node/util/sysctl"
@@ -153,7 +152,6 @@ type Proxier struct {
 	// updating iptables with some partial data after kube-proxy restart.
 	endpointSlicesSynced bool
 	servicesSynced       bool
-	needFullSync         bool
 	initialized          int32
 	syncRunner           *async.BoundedFrequencyRunner // governs calls to syncProxyRules
 	syncPeriod           time.Duration
@@ -265,7 +263,6 @@ func NewProxier(ipFamily v1.IPFamily,
 		serviceChanges:   proxy.NewServiceChangeTracker(newServiceInfo, ipFamily, recorder, nil),
 		endpointsMap:     make(proxy.EndpointsMap),
 		endpointsChanges: proxy.NewEndpointsChangeTracker(hostname, newEndpointInfo, ipFamily, recorder, nil),
-		needFullSync:     true,
 		syncPeriod:       syncPeriod,
 		iptables:         ipt,
 		masqueradeAll:    masqueradeAll,
@@ -298,7 +295,7 @@ func NewProxier(ipFamily v1.IPFamily,
 	proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, time.Hour, burstSyncs)

 	go ipt.Monitor(kubeProxyCanaryChain, []utiliptables.Table{utiliptables.TableMangle, utiliptables.TableNAT, utiliptables.TableFilter},
-		proxier.forceSyncProxyRules, syncPeriod, wait.NeverStop)
+		proxier.syncProxyRules, syncPeriod, wait.NeverStop)

 	if ipt.HasRandomFully() {
 		klog.V(2).InfoS("Iptables supports --random-fully", "ipFamily", ipt.Protocol())
@@ -606,7 +603,6 @@ func (proxier *Proxier) OnNodeAdd(node *v1.Node) {
 	for k, v := range node.Labels {
 		proxier.nodeLabels[k] = v
 	}
-	proxier.needFullSync = true
 	proxier.mu.Unlock()
 	klog.V(4).InfoS("Updated proxier node labels", "labels", node.Labels)
@@ -631,7 +627,6 @@ func (proxier *Proxier) OnNodeUpdate(oldNode, node *v1.Node) {
 	for k, v := range node.Labels {
 		proxier.nodeLabels[k] = v
 	}
-	proxier.needFullSync = true
 	proxier.mu.Unlock()
 	klog.V(4).InfoS("Updated proxier node labels", "labels", node.Labels)
@@ -649,7 +644,6 @@ func (proxier *Proxier) OnNodeDelete(node *v1.Node) {
 	proxier.mu.Lock()
 	proxier.nodeLabels = nil
-	proxier.needFullSync = true
 	proxier.mu.Unlock()

 	proxier.Sync()
@@ -731,17 +725,6 @@ func isServiceChainName(chainString string) bool {
 	return false
 }

-// Called by the iptables.Monitor, and in response to topology changes; this calls
-// syncProxyRules() and tells it to resync all services, regardless of whether the
-// Service or Endpoints/EndpointSlice objects themselves have changed
-func (proxier *Proxier) forceSyncProxyRules() {
-	proxier.mu.Lock()
-	proxier.needFullSync = true
-	proxier.mu.Unlock()
-
-	proxier.syncProxyRules()
-}
-
 // This is where all of the iptables-save/restore calls happen.
 // The only other iptables rules are those that are setup in iptablesInit()
 // This assumes proxier.mu is NOT held
@@ -755,27 +738,13 @@ func (proxier *Proxier) syncProxyRules() {
 		return
 	}

-	// The value of proxier.needFullSync may change before the defer funcs run, so
-	// we need to keep track of whether it was set at the *start* of the sync.
-	tryPartialSync := !proxier.needFullSync
-
 	// Keep track of how long syncs take.
 	start := time.Now()
 	defer func() {
 		metrics.SyncProxyRulesLatency.Observe(metrics.SinceInSeconds(start))
-		if tryPartialSync {
-			metrics.SyncPartialProxyRulesLatency.Observe(metrics.SinceInSeconds(start))
-		} else {
-			metrics.SyncFullProxyRulesLatency.Observe(metrics.SinceInSeconds(start))
-		}
 		klog.V(2).InfoS("SyncProxyRules complete", "elapsed", time.Since(start))
 	}()

-	var serviceChanged, endpointsChanged sets.Set[string]
-	if tryPartialSync {
-		serviceChanged = proxier.serviceChanges.PendingChanges()
-		endpointsChanged = proxier.endpointsChanges.PendingChanges()
-	}
 	serviceUpdateResult := proxier.svcPortMap.Update(proxier.serviceChanges)
 	endpointUpdateResult := proxier.endpointsMap.Update(proxier.endpointsChanges)
@@ -786,28 +755,12 @@ func (proxier *Proxier) syncProxyRules() {
 		if !success {
 			klog.InfoS("Sync failed", "retryingTime", proxier.syncPeriod)
 			proxier.syncRunner.RetryAfter(proxier.syncPeriod)
-			if tryPartialSync {
-				metrics.IptablesPartialRestoreFailuresTotal.Inc()
-			}
-			// proxier.serviceChanges and proxier.endpointChanges have already
-			// been flushed, so we've lost the state needed to be able to do
-			// a partial sync.
-			proxier.needFullSync = true
 		}
 	}()

-	if !tryPartialSync {
 	// Ensure that our jump rules (eg from PREROUTING to KUBE-SERVICES) exist.
 	// We can't do this as part of the iptables-restore because we don't want
 	// to specify/replace *all* of the rules in PREROUTING, etc.
-	//
-	// We need to create these rules when kube-proxy first starts, and we need
-	// to recreate them if the utiliptables Monitor detects that iptables has
-	// been flushed. In both of those cases, the code will force a full sync.
-	// In all other cases, it ought to be safe to assume that the rules
-	// already exist, so we'll skip this step when doing a partial sync, to
-	// save us from having to invoke /sbin/iptables 20 times on each sync
-	// (which will be very slow on hosts with lots of iptables rules).
 	for _, jump := range append(iptablesJumpChains, iptablesKubeletJumpChains...) {
 		if _, err := proxier.iptables.EnsureChain(jump.table, jump.dstChain); err != nil {
 			klog.ErrorS(err, "Failed to ensure chain exists", "table", jump.table, "chain", jump.dstChain)
@@ -823,7 +776,6 @@ func (proxier *Proxier) syncProxyRules() {
 			return
 		}
 	}
-	}

 	//
 	// Below this point we will not return until we try to write the iptables rules.
@@ -836,9 +788,6 @@ func (proxier *Proxier) syncProxyRules() {
 	proxier.natChains.Reset()
 	proxier.natRules.Reset()

-	skippedNatChains := proxyutil.NewDiscardLineBuffer()
-	skippedNatRules := proxyutil.NewDiscardLineBuffer()
-
 	// Write chain lines for all the "top-level" chains we'll be filling in
 	for _, chainName := range []utiliptables.Chain{kubeServicesChain, kubeExternalServicesChain, kubeForwardChain, kubeNodePortsChain, kubeProxyFirewallChain} {
 		proxier.filterChains.Write(utiliptables.MakeChainLine(chainName))
@@ -1047,13 +996,9 @@ func (proxier *Proxier) syncProxyRules() {
 			}
 		}

-		filterRules := proxier.filterRules
-		natChains := proxier.natChains
-		natRules := proxier.natRules
-
 		// Capture the clusterIP.
 		if hasInternalEndpoints {
-			natRules.Write(
+			proxier.natRules.Write(
 				"-A", string(kubeServicesChain),
 				"-m", "comment", "--comment", fmt.Sprintf(`"%s cluster IP"`, svcPortNameString),
 				"-m", protocol, "-p", protocol,
@@ -1062,7 +1007,7 @@ func (proxier *Proxier) syncProxyRules() {
 				"-j", string(internalTrafficChain))
 		} else {
 			// No endpoints.
-			filterRules.Write(
+			proxier.filterRules.Write(
 				"-A", string(kubeServicesChain),
 				"-m", "comment", "--comment", internalTrafficFilterComment,
 				"-m", protocol, "-p", protocol,
@@ -1077,7 +1022,7 @@ func (proxier *Proxier) syncProxyRules() {
 			if hasEndpoints {
 				// Send traffic bound for external IPs to the "external
 				// destinations" chain.
-				natRules.Write(
+				proxier.natRules.Write(
 					"-A", string(kubeServicesChain),
 					"-m", "comment", "--comment", fmt.Sprintf(`"%s external IP"`, svcPortNameString),
 					"-m", protocol, "-p", protocol,
@@ -1089,7 +1034,7 @@ func (proxier *Proxier) syncProxyRules() {
 				// Either no endpoints at all (REJECT) or no endpoints for
 				// external traffic (DROP anything that didn't get
 				// short-circuited by the EXT chain.)
-				filterRules.Write(
+				proxier.filterRules.Write(
 					"-A", string(kubeExternalServicesChain),
 					"-m", "comment", "--comment", externalTrafficFilterComment,
 					"-m", protocol, "-p", protocol,
@@ -1103,7 +1048,7 @@ func (proxier *Proxier) syncProxyRules() {
 		// Capture load-balancer ingress.
 		for _, lbip := range svcInfo.LoadBalancerVIPStrings() {
 			if hasEndpoints {
-				natRules.Write(
+				proxier.natRules.Write(
 					"-A", string(kubeServicesChain),
 					"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcPortNameString),
 					"-m", protocol, "-p", protocol,
@@ -1113,7 +1058,7 @@ func (proxier *Proxier) syncProxyRules() {
 			}
 			if usesFWChain {
-				filterRules.Write(
+				proxier.filterRules.Write(
 					"-A", string(kubeProxyFirewallChain),
 					"-m", "comment", "--comment", fmt.Sprintf(`"%s traffic not accepted by %s"`, svcPortNameString, svcInfo.firewallChainName),
 					"-m", protocol, "-p", protocol,
@@ -1127,7 +1072,7 @@ func (proxier *Proxier) syncProxyRules() {
 			// external traffic (DROP anything that didn't get short-circuited
 			// by the EXT chain.)
 			for _, lbip := range svcInfo.LoadBalancerVIPStrings() {
-				filterRules.Write(
+				proxier.filterRules.Write(
 					"-A", string(kubeExternalServicesChain),
 					"-m", "comment", "--comment", externalTrafficFilterComment,
 					"-m", protocol, "-p", protocol,
@@ -1144,7 +1089,7 @@ func (proxier *Proxier) syncProxyRules() {
 				// Jump to the external destination chain. For better or for
 				// worse, nodeports are not subect to loadBalancerSourceRanges,
 				// and we can't change that.
-				natRules.Write(
+				proxier.natRules.Write(
 					"-A", string(kubeNodePortsChain),
 					"-m", "comment", "--comment", svcPortNameString,
 					"-m", protocol, "-p", protocol,
@@ -1155,7 +1100,7 @@ func (proxier *Proxier) syncProxyRules() {
 				// Either no endpoints at all (REJECT) or no endpoints for
 				// external traffic (DROP anything that didn't get
 				// short-circuited by the EXT chain.)
-				filterRules.Write(
+				proxier.filterRules.Write(
 					"-A", string(kubeExternalServicesChain),
 					"-m", "comment", "--comment", externalTrafficFilterComment,
 					"-m", "addrtype", "--dst-type", "LOCAL",
@@ -1170,7 +1115,7 @@ func (proxier *Proxier) syncProxyRules() {
 		if svcInfo.HealthCheckNodePort() != 0 {
 			// no matter if node has local endpoints, healthCheckNodePorts
 			// need to add a rule to accept the incoming connection
-			filterRules.Write(
+			proxier.filterRules.Write(
 				"-A", string(kubeNodePortsChain),
 				"-m", "comment", "--comment", fmt.Sprintf(`"%s health check node port"`, svcPortNameString),
 				"-m", "tcp", "-p", "tcp",
@@ -1179,16 +1124,6 @@ func (proxier *Proxier) syncProxyRules() {
 			)
 		}

-		// If the SVC/SVL/EXT/FW/SEP chains have not changed since the last sync
-		// then we can omit them from the restore input. (We have already marked
-		// them in activeNATChains, so they won't get deleted.) However, we have
-		// to still figure out how many chains we _would_ have written to make the
-		// metrics come out right, so we just compute them and throw them away.
-		if tryPartialSync && !serviceChanged.Has(svcName.NamespacedName.String()) && !endpointsChanged.Has(svcName.NamespacedName.String()) {
-			natChains = skippedNatChains
-			natRules = skippedNatRules
-		}
-
 		// Set up internal traffic handling.
 		if hasInternalEndpoints {
 			args = append(args[:0],
@@ -1198,7 +1133,7 @@ func (proxier *Proxier) syncProxyRules() {
 				"--dport", strconv.Itoa(svcInfo.Port()),
 			)
 			if proxier.masqueradeAll {
-				natRules.Write(
+				proxier.natRules.Write(
 					"-A", string(internalTrafficChain),
 					args,
 					"-j", string(kubeMarkMasqChain))
@@ -1208,7 +1143,7 @@ func (proxier *Proxier) syncProxyRules() {
 				// Service range, routing to any node, and that node will
 				// bridge into the Service for you. Since that might bounce
 				// off-node, we masquerade here.
-				natRules.Write(
+				proxier.natRules.Write(
 					"-A", string(internalTrafficChain),
 					args,
 					proxier.localDetector.IfNotLocal(),
@@ -1221,12 +1156,12 @@ func (proxier *Proxier) syncProxyRules() {
 		// jump to externalTrafficChain, which will handle some special cases and
 		// then jump to externalPolicyChain.
 		if usesExternalTrafficChain {
-			natChains.Write(utiliptables.MakeChainLine(externalTrafficChain))
+			proxier.natChains.Write(utiliptables.MakeChainLine(externalTrafficChain))

 			if !svcInfo.ExternalPolicyLocal() {
 				// If we are using non-local endpoints we need to masquerade,
 				// in case we cross nodes.
-				natRules.Write(
+				proxier.natRules.Write(
 					"-A", string(externalTrafficChain),
 					"-m", "comment", "--comment", fmt.Sprintf(`"masquerade traffic for %s external destinations"`, svcPortNameString),
 					"-j", string(kubeMarkMasqChain))
@@ -1239,7 +1174,7 @@ func (proxier *Proxier) syncProxyRules() {
 				// traffic as a special-case. It is subject to neither
 				// form of traffic policy, which simulates going up-and-out
 				// to an external load-balancer and coming back in.
-				natRules.Write(
+				proxier.natRules.Write(
 					"-A", string(externalTrafficChain),
 					"-m", "comment", "--comment", fmt.Sprintf(`"pod traffic for %s external destinations"`, svcPortNameString),
 					proxier.localDetector.IfLocal(),
@@ -1249,7 +1184,7 @@ func (proxier *Proxier) syncProxyRules() {
 				// Locally originated traffic (not a pod, but the host node)
 				// still needs masquerade because the LBIP itself is a local
 				// address, so that will be the chosen source IP.
-				natRules.Write(
+				proxier.natRules.Write(
 					"-A", string(externalTrafficChain),
 					"-m", "comment", "--comment", fmt.Sprintf(`"masquerade LOCAL traffic for %s external destinations"`, svcPortNameString),
 					"-m", "addrtype", "--src-type", "LOCAL",
@@ -1258,7 +1193,7 @@ func (proxier *Proxier) syncProxyRules() {
 				// Redirect all src-type=LOCAL -> external destination to the
 				// policy=cluster chain. This allows traffic originating
 				// from the host to be redirected to the service correctly.
-				natRules.Write(
+				proxier.natRules.Write(
 					"-A", string(externalTrafficChain),
 					"-m", "comment", "--comment", fmt.Sprintf(`"route LOCAL traffic for %s external destinations"`, svcPortNameString),
 					"-m", "addrtype", "--src-type", "LOCAL",
@@ -1267,7 +1202,7 @@ func (proxier *Proxier) syncProxyRules() {
 			// Anything else falls thru to the appropriate policy chain.
 			if hasExternalEndpoints {
-				natRules.Write(
+				proxier.natRules.Write(
 					"-A", string(externalTrafficChain),
 					"-j", string(externalPolicyChain))
 			}
@@ -1275,7 +1210,7 @@ func (proxier *Proxier) syncProxyRules() {
 		// Set up firewall chain, if needed
 		if usesFWChain {
-			natChains.Write(utiliptables.MakeChainLine(fwChain))
+			proxier.natChains.Write(utiliptables.MakeChainLine(fwChain))

 			// The service firewall rules are created based on the
 			// loadBalancerSourceRanges field. This only works for VIP-like
@@ -1290,7 +1225,7 @@ func (proxier *Proxier) syncProxyRules() {
 			// firewall filter based on each source range
 			allowFromNode := false
 			for _, src := range svcInfo.LoadBalancerSourceRanges() {
-				natRules.Write(args, "-s", src, "-j", string(externalTrafficChain))
+				proxier.natRules.Write(args, "-s", src, "-j", string(externalTrafficChain))
 				_, cidr, err := netutils.ParseCIDRSloppy(src)
 				if err != nil {
 					klog.ErrorS(err, "Error parsing CIDR in LoadBalancerSourceRanges, dropping it", "cidr", cidr)
@@ -1305,7 +1240,7 @@ func (proxier *Proxier) syncProxyRules() {
 			// need the following rules to allow requests from this node.
 			if allowFromNode {
 				for _, lbip := range svcInfo.LoadBalancerVIPStrings() {
-					natRules.Write(
+					proxier.natRules.Write(
 						args,
 						"-s", lbip,
 						"-j", string(externalTrafficChain))
@@ -1314,7 +1249,7 @@ func (proxier *Proxier) syncProxyRules() {
 			// If the packet was able to reach the end of firewall chain,
 			// then it did not get DNATed, so it will match the
 			// corresponding KUBE-PROXY-FIREWALL rule.
-			natRules.Write(
+			proxier.natRules.Write(
 				"-A", string(fwChain),
 				"-m", "comment", "--comment", fmt.Sprintf(`"other traffic to %s will be dropped by KUBE-PROXY-FIREWALL"`, svcPortNameString),
 			)
@@ -1323,15 +1258,15 @@ func (proxier *Proxier) syncProxyRules() {
 		// If Cluster policy is in use, create the chain and create rules jumping
 		// from clusterPolicyChain to the clusterEndpoints
 		if usesClusterPolicyChain {
-			natChains.Write(utiliptables.MakeChainLine(clusterPolicyChain))
-			proxier.writeServiceToEndpointRules(natRules, svcPortNameString, svcInfo, clusterPolicyChain, clusterEndpoints, args)
+			proxier.natChains.Write(utiliptables.MakeChainLine(clusterPolicyChain))
+			proxier.writeServiceToEndpointRules(svcPortNameString, svcInfo, clusterPolicyChain, clusterEndpoints, args)
 		}

 		// If Local policy is in use, create the chain and create rules jumping
 		// from localPolicyChain to the localEndpoints
 		if usesLocalPolicyChain {
-			natChains.Write(utiliptables.MakeChainLine(localPolicyChain))
-			proxier.writeServiceToEndpointRules(natRules, svcPortNameString, svcInfo, localPolicyChain, localEndpoints, args)
+			proxier.natChains.Write(utiliptables.MakeChainLine(localPolicyChain))
+			proxier.writeServiceToEndpointRules(svcPortNameString, svcInfo, localPolicyChain, localEndpoints, args)
 		}

 		// Generate the per-endpoint chains.
@@ -1345,13 +1280,13 @@ func (proxier *Proxier) syncProxyRules() {
 			endpointChain := epInfo.ChainName

 			// Create the endpoint chain
-			natChains.Write(utiliptables.MakeChainLine(endpointChain))
+			proxier.natChains.Write(utiliptables.MakeChainLine(endpointChain))
 			activeNATChains[endpointChain] = true

 			args = append(args[:0], "-A", string(endpointChain))
 			args = append(args, "-m", "comment", "--comment", svcPortNameString)
 			// Handle traffic that loops back to the originator with SNAT.
-			natRules.Write(
+			proxier.natRules.Write(
 				args,
 				"-s", epInfo.IP(),
 				"-j", string(kubeMarkMasqChain))
@@ -1361,7 +1296,7 @@ func (proxier *Proxier) syncProxyRules() {
 			}
 			// DNAT to final destination.
 			args = append(args, "-m", protocol, "-p", protocol, "-j", "DNAT", "--to-destination", epInfo.String())
-			natRules.Write(args)
+			proxier.natRules.Write(args)
 		}
 	}
@@ -1468,9 +1403,7 @@ func (proxier *Proxier) syncProxyRules() {
 	)
 	metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableFilter)).Set(float64(proxier.filterRules.Lines()))
-	metrics.IptablesRulesLastSync.WithLabelValues(string(utiliptables.TableFilter)).Set(float64(proxier.filterRules.Lines()))
-	metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)).Set(float64(proxier.natRules.Lines() + skippedNatRules.Lines() - deletedChains))
-	metrics.IptablesRulesLastSync.WithLabelValues(string(utiliptables.TableNAT)).Set(float64(proxier.natRules.Lines() - deletedChains))
+	metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)).Set(float64(proxier.natRules.Lines() - deletedChains))

 	// Sync rules.
 	proxier.iptablesData.Reset()
@@ -1506,7 +1439,6 @@ func (proxier *Proxier) syncProxyRules() {
 		return
 	}
 	success = true
-	proxier.needFullSync = false

 	for name, lastChangeTriggerTimes := range endpointUpdateResult.LastChangeTriggerTimes {
 		for _, lastChangeTriggerTime := range lastChangeTriggerTimes {
@@ -1537,7 +1469,7 @@ func (proxier *Proxier) syncProxyRules() {
 	conntrack.CleanStaleEntries(proxier.iptables.IsIPv6(), proxier.exec, proxier.svcPortMap, serviceUpdateResult, endpointUpdateResult)
 }

-func (proxier *Proxier) writeServiceToEndpointRules(natRules proxyutil.LineBuffer, svcPortNameString string, svcInfo proxy.ServicePort, svcChain utiliptables.Chain, endpoints []proxy.Endpoint, args []string) {
+func (proxier *Proxier) writeServiceToEndpointRules(svcPortNameString string, svcInfo proxy.ServicePort, svcChain utiliptables.Chain, endpoints []proxy.Endpoint, args []string) {
 	// First write session affinity rules, if applicable.
 	if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
 		for _, ep := range endpoints {
@@ -1556,7 +1488,7 @@ func (proxier *Proxier) writeServiceToEndpointRules(natRules proxyutil.LineBuffe
 				"--rcheck", "--seconds", strconv.Itoa(svcInfo.StickyMaxAgeSeconds()), "--reap",
 				"-j", string(epInfo.ChainName),
 			)
-			natRules.Write(args)
+			proxier.natRules.Write(args)
 		}
 	}
@@ -1579,6 +1511,6 @@ func (proxier *Proxier) writeServiceToEndpointRules(natRules proxyutil.LineBuffe
 				"--probability", proxier.probability(numEndpoints-i))
 		}
 		// The final (or only if n == 1) rule is a guaranteed match.
-		natRules.Write(args, "-j", string(epInfo.ChainName))
+		proxier.natRules.Write(args, "-j", string(epInfo.ChainName))
 	}
 }

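The second changed file is the proxier unit test. Its assertions count NAT rules two ways: countRules parses the expected iptables-save output, while countRulesFromMetric reads back the rules-total gauge. With partial syncing gone, the separate "rules synced last sync" checks (countRulesFromLastSyncMetric) disappear, and every expected dump now lists every chain on every sync. As a rough illustration of the counting side, a hedged sketch of what a helper like countRules plausibly does (an assumed reimplementation for orientation, not the actual test helper):

package main

import (
	"fmt"
	"strings"
)

// countRulesSketch counts the "-A" rule lines belonging to one table in an
// iptables-save style dump (hypothetical reimplementation for illustration).
func countRulesSketch(table, dump string) int {
	n := 0
	inTable := false
	for _, line := range strings.Split(dump, "\n") {
		line = strings.TrimSpace(line)
		switch {
		case strings.HasPrefix(line, "*"):
			inTable = line == "*"+table // table header, e.g. "*nat"
		case line == "COMMIT":
			inTable = false
		case inTable && strings.HasPrefix(line, "-A "):
			n++
		}
	}
	return n
}

func main() {
	dump := `*nat
:KUBE-SERVICES - [0:0]
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -j KUBE-SVC-XPGD46QRK7WJZT7O
COMMIT
`
	fmt.Println(countRulesSketch("nat", dump)) // prints 1
}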

@@ -325,7 +325,6 @@ func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
 		serviceChanges:   proxy.NewServiceChangeTracker(newServiceInfo, ipfamily, nil, nil),
 		endpointsMap:     make(proxy.EndpointsMap),
 		endpointsChanges: proxy.NewEndpointsChangeTracker(testHostname, newEndpointInfo, ipfamily, nil, nil),
-		needFullSync:     true,
 		iptables:         ipt,
 		masqueradeMark:   "0x4000",
 		localDetector:    detectLocal,
@@ -6042,20 +6041,13 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		`)
 	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())

-	rulesSynced := countRules(utiliptables.TableNAT, expected)
-	rulesSyncedMetric := countRulesFromLastSyncMetric(utiliptables.TableNAT)
-	if rulesSyncedMetric != rulesSynced {
-		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
-	}
-
-	rulesTotal := rulesSynced
+	rulesTotal := countRules(utiliptables.TableNAT, expected)
 	rulesTotalMetric := countRulesFromMetric(utiliptables.TableNAT)
 	if rulesTotalMetric != rulesTotal {
 		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
 	}

-	// Add a new service and its endpoints. (This will only sync the SVC and SEP rules
-	// for the new service, not the existing ones.)
+	// Add a new service and its endpoints
 	makeServiceMap(fp,
 		makeTestService("ns3", "svc3", func(svc *v1.Service) {
 			svc.Spec.Type = v1.ServiceTypeClusterIP
@@ -6103,7 +6095,11 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		:KUBE-MARK-MASQ - [0:0]
 		:KUBE-POSTROUTING - [0:0]
 		:KUBE-SEP-BSWRHOQ77KEXZLNL - [0:0]
+		:KUBE-SEP-SNQ3ZNILQDEJNDQO - [0:0]
+		:KUBE-SEP-UHEGFW77JX3KXTOV - [0:0]
+		:KUBE-SVC-2VJB64SDSIJUP5T6 - [0:0]
 		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
+		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
 		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
 		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p8080 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 8080 -j KUBE-SVC-2VJB64SDSIJUP5T6
 		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
@@ -6114,17 +6110,19 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
 		-A KUBE-SEP-BSWRHOQ77KEXZLNL -m comment --comment ns3/svc3:p80 -s 10.0.3.1 -j KUBE-MARK-MASQ
 		-A KUBE-SEP-BSWRHOQ77KEXZLNL -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.1:80
+		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -s 10.0.1.1 -j KUBE-MARK-MASQ
+		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
+		-A KUBE-SEP-UHEGFW77JX3KXTOV -m comment --comment ns2/svc2:p8080 -s 10.0.2.1 -j KUBE-MARK-MASQ
+		-A KUBE-SEP-UHEGFW77JX3KXTOV -m comment --comment ns2/svc2:p8080 -m tcp -p tcp -j DNAT --to-destination 10.0.2.1:8080
+		-A KUBE-SVC-2VJB64SDSIJUP5T6 -m comment --comment "ns2/svc2:p8080 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 8080 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+		-A KUBE-SVC-2VJB64SDSIJUP5T6 -m comment --comment "ns2/svc2:p8080 -> 10.0.2.1:8080" -j KUBE-SEP-UHEGFW77JX3KXTOV
 		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
 		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.1:80" -j KUBE-SEP-BSWRHOQ77KEXZLNL
+		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.0.1.1:80" -j KUBE-SEP-SNQ3ZNILQDEJNDQO
 		COMMIT
 		`)
-	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
-
-	rulesSynced = countRules(utiliptables.TableNAT, expected)
-	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
-	if rulesSyncedMetric != rulesSynced {
-		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
-	}
+	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())

 	// We added 1 KUBE-SERVICES rule, 2 KUBE-SVC-X27LE4BHSL4DOUIK rules, and 2
 	// KUBE-SEP-BSWRHOQ77KEXZLNL rules.
@@ -6134,7 +6132,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
 	}

-	// Delete a service. (Won't update the other services.)
+	// Delete a service.
 	fp.OnServiceDelete(svc2)
 	fp.syncProxyRules()
@@ -6156,8 +6154,12 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		:KUBE-SERVICES - [0:0]
 		:KUBE-MARK-MASQ - [0:0]
 		:KUBE-POSTROUTING - [0:0]
+		:KUBE-SEP-BSWRHOQ77KEXZLNL - [0:0]
+		:KUBE-SEP-SNQ3ZNILQDEJNDQO - [0:0]
 		:KUBE-SEP-UHEGFW77JX3KXTOV - [0:0]
 		:KUBE-SVC-2VJB64SDSIJUP5T6 - [0:0]
+		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
+		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
 		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
 		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
 		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
@@ -6165,17 +6167,19 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
 		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
 		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
+		-A KUBE-SEP-BSWRHOQ77KEXZLNL -m comment --comment ns3/svc3:p80 -s 10.0.3.1 -j KUBE-MARK-MASQ
+		-A KUBE-SEP-BSWRHOQ77KEXZLNL -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.1:80
+		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -s 10.0.1.1 -j KUBE-MARK-MASQ
+		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
+		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.1:80" -j KUBE-SEP-BSWRHOQ77KEXZLNL
+		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.0.1.1:80" -j KUBE-SEP-SNQ3ZNILQDEJNDQO
 		-X KUBE-SEP-UHEGFW77JX3KXTOV
 		-X KUBE-SVC-2VJB64SDSIJUP5T6
 		COMMIT
 		`)
-	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
-
-	rulesSynced = countRules(utiliptables.TableNAT, expected)
-	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
-	if rulesSyncedMetric != rulesSynced {
-		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
-	}
+	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())

 	// We deleted 1 KUBE-SERVICES rule, 2 KUBE-SVC-2VJB64SDSIJUP5T6 rules, and 2
 	// KUBE-SEP-UHEGFW77JX3KXTOV rules
@@ -6185,12 +6189,9 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
 	}

-	// Add a service, sync, then add its endpoints. (The first sync will be a no-op other
-	// than adding the REJECT rule. The second sync will create the new service.)
-	var svc4 *v1.Service
+	// Add a service, sync, then add its endpoints.
 	makeServiceMap(fp,
 		makeTestService("ns4", "svc4", func(svc *v1.Service) {
-			svc4 = svc
 			svc.Spec.Type = v1.ServiceTypeClusterIP
 			svc.Spec.ClusterIP = "172.30.0.44"
 			svc.Spec.Ports = []v1.ServicePort{{
@@ -6220,6 +6221,10 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		:KUBE-SERVICES - [0:0]
 		:KUBE-MARK-MASQ - [0:0]
 		:KUBE-POSTROUTING - [0:0]
+		:KUBE-SEP-BSWRHOQ77KEXZLNL - [0:0]
+		:KUBE-SEP-SNQ3ZNILQDEJNDQO - [0:0]
+		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
+		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
 		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
 		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
 		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
@@ -6227,15 +6232,17 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
 		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
 		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
+		-A KUBE-SEP-BSWRHOQ77KEXZLNL -m comment --comment ns3/svc3:p80 -s 10.0.3.1 -j KUBE-MARK-MASQ
+		-A KUBE-SEP-BSWRHOQ77KEXZLNL -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.1:80
+		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -s 10.0.1.1 -j KUBE-MARK-MASQ
+		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
+		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.1:80" -j KUBE-SEP-BSWRHOQ77KEXZLNL
+		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.0.1.1:80" -j KUBE-SEP-SNQ3ZNILQDEJNDQO
 		COMMIT
 		`)
-	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
-
-	rulesSynced = countRules(utiliptables.TableNAT, expected)
-	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
-	if rulesSyncedMetric != rulesSynced {
-		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
-	}
+	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())

 	// The REJECT rule is in "filter", not NAT, so the number of NAT rules hasn't
 	// changed.
@@ -6277,7 +6284,11 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		:KUBE-MARK-MASQ - [0:0]
 		:KUBE-POSTROUTING - [0:0]
 		:KUBE-SEP-AYCN5HPXMIRJNJXU - [0:0]
+		:KUBE-SEP-BSWRHOQ77KEXZLNL - [0:0]
+		:KUBE-SEP-SNQ3ZNILQDEJNDQO - [0:0]
 		:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
+		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
+		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
 		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
 		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
 		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
@@ -6288,17 +6299,19 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
 		-A KUBE-SEP-AYCN5HPXMIRJNJXU -m comment --comment ns4/svc4:p80 -s 10.0.4.1 -j KUBE-MARK-MASQ
 		-A KUBE-SEP-AYCN5HPXMIRJNJXU -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.4.1:80
+		-A KUBE-SEP-BSWRHOQ77KEXZLNL -m comment --comment ns3/svc3:p80 -s 10.0.3.1 -j KUBE-MARK-MASQ
+		-A KUBE-SEP-BSWRHOQ77KEXZLNL -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.1:80
+		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -s 10.0.1.1 -j KUBE-MARK-MASQ
+		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
 		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
 		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.0.4.1:80" -j KUBE-SEP-AYCN5HPXMIRJNJXU
+		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.1:80" -j KUBE-SEP-BSWRHOQ77KEXZLNL
+		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.0.1.1:80" -j KUBE-SEP-SNQ3ZNILQDEJNDQO
 		COMMIT
 		`)
-	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
-
-	rulesSynced = countRules(utiliptables.TableNAT, expected)
-	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
-	if rulesSyncedMetric != rulesSynced {
-		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
-	}
+	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())

 	// We added 1 KUBE-SERVICES rule, 2 KUBE-SVC-4SW47YFZTEDKD3PK rules, and
 	// 2 KUBE-SEP-AYCN5HPXMIRJNJXU rules
@@ -6308,8 +6321,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
 	}

-	// Change an endpoint of an existing service. This will cause its SVC and SEP
-	// chains to be rewritten.
+	// Change an endpoint of an existing service.
 	eps3update := eps3.DeepCopy()
 	eps3update.Endpoints[0].Addresses[0] = "10.0.3.2"
 	fp.OnEndpointSliceUpdate(eps3, eps3update)
@@ -6333,9 +6345,13 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		:KUBE-SERVICES - [0:0]
 		:KUBE-MARK-MASQ - [0:0]
 		:KUBE-POSTROUTING - [0:0]
+		:KUBE-SEP-AYCN5HPXMIRJNJXU - [0:0]
 		:KUBE-SEP-BSWRHOQ77KEXZLNL - [0:0]
 		:KUBE-SEP-DKCFIS26GWF2WLWC - [0:0]
+		:KUBE-SEP-SNQ3ZNILQDEJNDQO - [0:0]
+		:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
 		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
+		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
 		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
 		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
 		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
@@ -6344,20 +6360,22 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
 		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
 		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
+		-A KUBE-SEP-AYCN5HPXMIRJNJXU -m comment --comment ns4/svc4:p80 -s 10.0.4.1 -j KUBE-MARK-MASQ
+		-A KUBE-SEP-AYCN5HPXMIRJNJXU -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.4.1:80
 		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -s 10.0.3.2 -j KUBE-MARK-MASQ
 		-A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.2:80
+		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -s 10.0.1.1 -j KUBE-MARK-MASQ
+		-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
+		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.0.4.1:80" -j KUBE-SEP-AYCN5HPXMIRJNJXU
 		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
 		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.2:80" -j KUBE-SEP-DKCFIS26GWF2WLWC
+		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.0.1.1:80" -j KUBE-SEP-SNQ3ZNILQDEJNDQO
 		-X KUBE-SEP-BSWRHOQ77KEXZLNL
 		COMMIT
 		`)
-	assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
-
-	rulesSynced = countRules(utiliptables.TableNAT, expected)
-	rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
-	if rulesSyncedMetric != rulesSynced {
-		t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
-	}
+	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())

 	// We rewrote existing rules but did not change the overall number of rules.
 	rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
@@ -6365,7 +6383,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
 	}

-	// Add an endpoint to a service. This will cause its SVC and SEP chains to be rewritten.
+	// Add an endpoint to a service.
 	eps3update2 := eps3update.DeepCopy()
 	eps3update2.Endpoints = append(eps3update2.Endpoints, discovery.Endpoint{Addresses: []string{"10.0.3.3"}})
 	fp.OnEndpointSliceUpdate(eps3update, eps3update2)
@@ -6389,9 +6407,13 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		:KUBE-SERVICES - [0:0]
 		:KUBE-MARK-MASQ - [0:0]
 		:KUBE-POSTROUTING - [0:0]
+		:KUBE-SEP-AYCN5HPXMIRJNJXU - [0:0]
 		:KUBE-SEP-DKCFIS26GWF2WLWC - [0:0]
 		:KUBE-SEP-JVVZVJ7BSEPPRNBS - [0:0]
+		:KUBE-SEP-SNQ3ZNILQDEJNDQO - [0:0]
+		:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
 		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
+		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
 		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
 		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
 		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
@ -6400,22 +6422,24 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
 -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
 -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
+-A KUBE-SEP-AYCN5HPXMIRJNJXU -m comment --comment ns4/svc4:p80 -s 10.0.4.1 -j KUBE-MARK-MASQ
+-A KUBE-SEP-AYCN5HPXMIRJNJXU -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.4.1:80
 -A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -s 10.0.3.2 -j KUBE-MARK-MASQ
 -A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.2:80
 -A KUBE-SEP-JVVZVJ7BSEPPRNBS -m comment --comment ns3/svc3:p80 -s 10.0.3.3 -j KUBE-MARK-MASQ
 -A KUBE-SEP-JVVZVJ7BSEPPRNBS -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.3:80
+-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -s 10.0.1.1 -j KUBE-MARK-MASQ
+-A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
+-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.0.4.1:80" -j KUBE-SEP-AYCN5HPXMIRJNJXU
 -A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
 -A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-DKCFIS26GWF2WLWC
 -A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.3:80" -j KUBE-SEP-JVVZVJ7BSEPPRNBS
+-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.0.1.1:80" -j KUBE-SEP-SNQ3ZNILQDEJNDQO
 COMMIT
 `)
-assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
-rulesSynced = countRules(utiliptables.TableNAT, expected)
-rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
-if rulesSyncedMetric != rulesSynced {
-t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
-}
+assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())
 // We added 2 KUBE-SEP-JVVZVJ7BSEPPRNBS rules and 1 KUBE-SVC-X27LE4BHSL4DOUIK rule
 // jumping to the new SEP chain. The other rules related to svc3 got rewritten,
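The false → true flip in assertIPTablesRulesEqual is the payoff of dropping partial syncing: every sync now writes a complete ruleset, so the helper can also verify the dump's internal consistency. Assuming its third argument is such a consistency flag, a self-contained sketch of the extra pass (every "-j KUBE-*" target must be declared as a chain in the same dump):

package main

import (
	"fmt"
	"strings"
)

// danglingJumps returns every "-j KUBE-*" target that is not declared as
// a chain (":KUBE-... - [0:0]") in the same dump. Under partial syncing
// the per-sync dump was incomplete, so a check like this had to stay off.
func danglingJumps(dump string) []string {
	declared := map[string]bool{}
	for _, line := range strings.Split(dump, "\n") {
		if strings.HasPrefix(line, ":KUBE-") {
			declared[strings.Fields(line)[0][1:]] = true
		}
	}
	var bad []string
	for _, line := range strings.Split(dump, "\n") {
		fields := strings.Fields(line)
		for i := 0; i+1 < len(fields); i++ {
			if fields[i] == "-j" && strings.HasPrefix(fields[i+1], "KUBE-") && !declared[fields[i+1]] {
				bad = append(bad, fields[i+1])
			}
		}
	}
	return bad
}

func main() {
	fmt.Println(danglingJumps("*nat\n:KUBE-SVC-AAA - [0:0]\n-A KUBE-SVC-AAA -j KUBE-SEP-BBB\nCOMMIT\n"))
	// prints [KUBE-SEP-BBB]
}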
@@ -6429,97 +6453,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 // Sync with no new changes... This will not rewrite any SVC or SEP chains
 fp.syncProxyRules()
-expected = dedent.Dedent(`
-*filter
-:KUBE-NODEPORTS - [0:0]
-:KUBE-SERVICES - [0:0]
-:KUBE-EXTERNAL-SERVICES - [0:0]
-:KUBE-FIREWALL - [0:0]
-:KUBE-FORWARD - [0:0]
-:KUBE-PROXY-FIREWALL - [0:0]
--A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
--A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
--A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
--A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-COMMIT
-*nat
-:KUBE-NODEPORTS - [0:0]
-:KUBE-SERVICES - [0:0]
-:KUBE-MARK-MASQ - [0:0]
-:KUBE-POSTROUTING - [0:0]
--A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
--A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
--A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
--A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
--A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
--A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
--A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
--A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-COMMIT
-`)
-assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
-rulesSynced = countRules(utiliptables.TableNAT, expected)
-rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
-if rulesSyncedMetric != rulesSynced {
-t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
-}
-// (No changes)
-rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
-if rulesTotalMetric != rulesTotal {
-t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)
-}
-// Now force a partial resync error and ensure that it recovers correctly
-if fp.needFullSync {
-t.Fatalf("Proxier unexpectedly already needs a full sync?")
-}
-prFailures, err := testutil.GetCounterMetricValue(metrics.IptablesPartialRestoreFailuresTotal)
-if err != nil {
-t.Fatalf("Could not get partial restore failures metric: %v", err)
-}
-if prFailures != 0.0 {
-t.Errorf("Already did a partial resync? Something failed earlier!")
-}
-// Add a rule jumping from svc3's service chain to svc4's endpoint, then try to
-// delete svc4. This will fail because the partial resync won't rewrite svc3's
-// rules and so the partial restore would leave a dangling jump from there to
-// svc4's endpoint. The proxier will then queue a full resync in response to the
-// partial resync failure, and the full resync will succeed (since it will rewrite
-// svc3's rules as well).
-//
-// This is an absurd scenario, but it has to be; partial resync failures are
-// supposed to be impossible; if we knew of any non-absurd scenario that would
-// cause such a failure, then that would be a bug and we would fix it.
-if _, err := fp.iptables.ChainExists(utiliptables.TableNAT, utiliptables.Chain("KUBE-SEP-AYCN5HPXMIRJNJXU")); err != nil {
-t.Fatalf("svc4's endpoint chain unexpected already does not exist!")
-}
-if _, err := fp.iptables.EnsureRule(utiliptables.Append, utiliptables.TableNAT, utiliptables.Chain("KUBE-SVC-X27LE4BHSL4DOUIK"), "-j", "KUBE-SEP-AYCN5HPXMIRJNJXU"); err != nil {
-t.Fatalf("Could not add bad iptables rule: %v", err)
-}
-fp.OnServiceDelete(svc4)
-fp.syncProxyRules()
-if _, err := fp.iptables.ChainExists(utiliptables.TableNAT, utiliptables.Chain("KUBE-SEP-AYCN5HPXMIRJNJXU")); err != nil {
-t.Errorf("svc4's endpoint chain was successfully deleted despite dangling references!")
-}
-if !fp.needFullSync {
-t.Errorf("Proxier did not fail on previous partial resync?")
-}
-updatedPRFailures, err := testutil.GetCounterMetricValue(metrics.IptablesPartialRestoreFailuresTotal)
-if err != nil {
-t.Errorf("Could not get partial restore failures metric: %v", err)
-}
-if updatedPRFailures != prFailures+1.0 {
-t.Errorf("Partial restore failures metric was not incremented after failed partial resync (expected %.02f, got %.02f)", prFailures+1.0, updatedPRFailures)
-}
-// On retry we should do a full resync, which should succeed (and delete svc4)
-fp.syncProxyRules()
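The whole block above — from the no-change sync's expected output through the forced partial-resync failure — is deleted here, since it only exercises needFullSync and the IptablesPartialRestoreFailuresTotal counter, both of which this commit removes. The contract it verified (a failed partial restore bumps the counter and forces the next sync to be full) can be modeled with a toy sketch; this assumes nothing about, and is not an excerpt of, the real proxier internals:

package main

import "fmt"

// proxier is a toy model of the old partial-sync recovery contract.
type proxier struct {
	needFullSync           bool
	partialRestoreFailures int
}

func (p *proxier) sync(restore func(partial bool) error) {
	partial := !p.needFullSync
	p.needFullSync = false
	if err := restore(partial); err != nil {
		if partial {
			p.partialRestoreFailures++ // the metric the deleted test read
		}
		p.needFullSync = true // retry as a full resync
	}
}

func main() {
	p := &proxier{}
	restore := func(partial bool) error {
		if partial {
			// e.g. iptables-restore rejecting a chain deletion that is
			// still referenced from a chain outside the partial update
			return fmt.Errorf("dangling jump")
		}
		return nil
	}
	p.sync(restore) // partial sync fails
	p.sync(restore) // full resync succeeds
	fmt.Println(p.partialRestoreFailures, p.needFullSync) // prints: 1 false
}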
 expected = dedent.Dedent(`
 *filter
 :KUBE-NODEPORTS - [0:0]
@@ -6547,37 +6480,32 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 :KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
 -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
 -A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
+-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
 -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
 -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
 -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
 -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
 -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
+-A KUBE-SEP-AYCN5HPXMIRJNJXU -m comment --comment ns4/svc4:p80 -s 10.0.4.1 -j KUBE-MARK-MASQ
+-A KUBE-SEP-AYCN5HPXMIRJNJXU -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.4.1:80
 -A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -s 10.0.3.2 -j KUBE-MARK-MASQ
 -A KUBE-SEP-DKCFIS26GWF2WLWC -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.2:80
 -A KUBE-SEP-JVVZVJ7BSEPPRNBS -m comment --comment ns3/svc3:p80 -s 10.0.3.3 -j KUBE-MARK-MASQ
 -A KUBE-SEP-JVVZVJ7BSEPPRNBS -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.3.3:80
 -A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -s 10.0.1.1 -j KUBE-MARK-MASQ
 -A KUBE-SEP-SNQ3ZNILQDEJNDQO -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
+-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.0.4.1:80" -j KUBE-SEP-AYCN5HPXMIRJNJXU
 -A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
 -A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-DKCFIS26GWF2WLWC
 -A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.0.3.3:80" -j KUBE-SEP-JVVZVJ7BSEPPRNBS
 -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
 -A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.0.1.1:80" -j KUBE-SEP-SNQ3ZNILQDEJNDQO
--X KUBE-SEP-AYCN5HPXMIRJNJXU
--X KUBE-SVC-4SW47YFZTEDKD3PK
 COMMIT
 `)
-assertIPTablesRulesEqual(t, getLine(), false, expected, fp.iptablesData.String())
-rulesSynced = countRules(utiliptables.TableNAT, expected)
-rulesSyncedMetric = countRulesFromLastSyncMetric(utiliptables.TableNAT)
-if rulesSyncedMetric != rulesSynced {
-t.Errorf("metric shows %d rules synced but iptables data shows %d", rulesSyncedMetric, rulesSynced)
-}
-// We deleted 1 KUBE-SERVICES rule, 2 KUBE-SVC-4SW47YFZTEDKD3PK rules, and 2
-// KUBE-SEP-AYCN5HPXMIRJNJXU rules
-rulesTotal -= 5
+assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())
+// (No changes)
 rulesTotalMetric = countRulesFromMetric(utiliptables.TableNAT)
 if rulesTotalMetric != rulesTotal {
 t.Errorf("metric shows %d rules total but expected %d", rulesTotalMetric, rulesTotal)