Redo LoadBalancerSourceRanges firewall using sets

Dan Winship 2023-05-24 11:39:32 -04:00
parent edaa1d735b
commit 4128631d0f
3 changed files with 196 additions and 109 deletions

View File

@@ -62,7 +62,8 @@ var objectOrder = map[string]int{
// with per-service rules, we don't know what order syncProxyRules is going to output them
// in, but the order doesn't matter anyway. So we sort the rules in those chains.
var sortedChains = sets.New(
	kubeFirewallChain,
	kubeServicesFilterChain,
	kubeExternalServicesChain,
	kubeServicesChain,
	kubeNodePortsChain,
)
@@ -232,6 +233,29 @@ func (tracer *nftablesTracer) matchDestIPOnly(elements []*knftables.Element, des
	return nil
}

// matchDest checks an "ip daddr . meta l4proto . th dport" against a set/map, and returns
// the matching Element, if found.
func (tracer *nftablesTracer) matchDest(elements []*knftables.Element, destIP, protocol, destPort string) *knftables.Element {
	for _, element := range elements {
		if element.Key[0] == destIP && element.Key[1] == protocol && element.Key[2] == destPort {
			return element
		}
	}
	return nil
}

// matchDestAndSource checks an "ip daddr . meta l4proto . th dport . ip saddr" against a
// set/map, where the source is allowed to be a CIDR, and returns the matching Element, if
// found.
func (tracer *nftablesTracer) matchDestAndSource(elements []*knftables.Element, destIP, protocol, destPort, sourceIP string) *knftables.Element {
	for _, element := range elements {
		if element.Key[0] == destIP && element.Key[1] == protocol && element.Key[2] == destPort && tracer.addressMatches(sourceIP, "", element.Key[3]) {
			return element
		}
	}
	return nil
}
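To make the new lookup semantics concrete, here is a small, self-contained sketch (an illustration, not code from this commit) of how a four-part firewall-allow key behaves; net.ParseCIDR stands in for tracer.addressMatches, which is assumed to accept both CIDRs and plain IPs:

package main

import (
	"fmt"
	"net"
)

// element stands in for knftables.Element, which carries the set key as a
// slice of strings (one entry per concatenated key part).
type element struct{ Key []string }

func matchDestAndSource(elements []element, destIP, protocol, destPort, sourceIP string) *element {
	for i, e := range elements {
		// Exact match on "destIP . protocol . destPort"...
		if e.Key[0] != destIP || e.Key[1] != protocol || e.Key[2] != destPort {
			continue
		}
		// ...and a CIDR-aware match on the source part.
		if _, cidr, err := net.ParseCIDR(e.Key[3]); err == nil {
			if cidr.Contains(net.ParseIP(sourceIP)) {
				return &elements[i]
			}
		} else if e.Key[3] == sourceIP {
			return &elements[i]
		}
	}
	return nil
}

func main() {
	// The element the proxier would add for a LB VIP 5.6.7.8, port 80,
	// with LoadBalancerSourceRanges ["203.0.113.0/25"].
	allow := []element{{Key: []string{"5.6.7.8", "tcp", "80", "203.0.113.0/25"}}}

	fmt.Println(matchDestAndSource(allow, "5.6.7.8", "tcp", "80", "203.0.113.5") != nil)  // true: source in range
	fmt.Println(matchDestAndSource(allow, "5.6.7.8", "tcp", "80", "198.51.100.9") != nil) // false: source not in range
}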
// We intentionally don't try to parse arbitrary nftables rules, as the syntax is quite
// complicated and context sensitive. (E.g., "ip daddr" could be the start of an address
// comparison, or it could be the start of a set/map lookup.) Instead, we just have
@@ -247,6 +271,8 @@ var destAddrRegexp = regexp.MustCompile(`^ip6* daddr (!= )?(\S+)`)
var destAddrLocalRegexp = regexp.MustCompile(`^fib daddr type local`)
var destPortRegexp = regexp.MustCompile(`^(tcp|udp|sctp) dport (\d+)`)
var destIPOnlyLookupRegexp = regexp.MustCompile(`^ip6* daddr @(\S+)`)
var destLookupRegexp = regexp.MustCompile(`^ip6* daddr \. meta l4proto \. th dport @(\S+)`)
var destSourceLookupRegexp = regexp.MustCompile(`^ip6* daddr \. meta l4proto \. th dport \. ip6* saddr @(\S+)`)
var sourceAddrRegexp = regexp.MustCompile(`^ip6* saddr (!= )?(\S+)`)
var sourceAddrLocalRegexp = regexp.MustCompile(`^fib saddr type local`)
@@ -256,6 +282,7 @@ var endpointVMapEntryRegexp = regexp.MustCompile(`\d+ : goto (\S+)`)
var masqueradeRegexp = regexp.MustCompile(`^jump ` + kubeMarkMasqChain + `$`)
var jumpRegexp = regexp.MustCompile(`^(jump|goto) (\S+)$`)
var returnRegexp = regexp.MustCompile(`^return$`)
var verdictRegexp = regexp.MustCompile(`^(drop|reject)$`)
var dnatRegexp = regexp.MustCompile(`^meta l4proto (tcp|udp|sctp) dnat to (\S+)$`)
@@ -271,10 +298,6 @@ var ignoredRegexp = regexp.MustCompile(strings.Join(
		// Likewise, this rule never matches and thus never drops anything, and so
		// can be ignored.
		`^ct state invalid drop$`,
		// We use a bare "continue" rule in the firewall chains as a place to
		// attach a comment.
		`^continue$`,
	},
	"|",
))
@@ -310,6 +333,30 @@ func (tracer *nftablesTracer) runChain(chname, sourceIP, protocol, destIP, destP
					break
				}

			case destSourceLookupRegexp.MatchString(rule):
				// `^ip6* daddr . meta l4proto . th dport . ip6* saddr @(\S+)`
				// Tests whether "destIP . protocol . destPort . sourceIP" is
				// a member of the indicated set.
				match := destSourceLookupRegexp.FindStringSubmatch(rule)
				rule = strings.TrimPrefix(rule, match[0])
				set := match[1]
				if tracer.matchDestAndSource(tracer.nft.Table.Sets[set].Elements, destIP, protocol, destPort, sourceIP) == nil {
					rule = ""
					break
				}

			case destLookupRegexp.MatchString(rule):
				// `^ip6* daddr . meta l4proto . th dport @(\S+)`
				// Tests whether "destIP . protocol . destPort" is a member
				// of the indicated set.
				match := destLookupRegexp.FindStringSubmatch(rule)
				rule = strings.TrimPrefix(rule, match[0])
				set := match[1]
				if tracer.matchDest(tracer.nft.Table.Sets[set].Elements, destIP, protocol, destPort) == nil {
					rule = ""
					break
				}

			case destAddrRegexp.MatchString(rule):
				// `^ip6* daddr (!= )?(\S+)`
				// Tests whether destIP does/doesn't match a literal.
@@ -404,6 +451,12 @@ func (tracer *nftablesTracer) runChain(chname, sourceIP, protocol, destIP, destP
				tracer.outputs = append(tracer.outputs, strings.ToUpper(verdict))
				return true

			case returnRegexp.MatchString(rule):
				// `^return$`
				// Returns to the calling chain.
				tracer.matches = append(tracer.matches, ruleObj.Rule)
				return false

			case dnatRegexp.MatchString(rule):
				// `meta l4proto (tcp|udp|sctp) dnat to (\S+)`
				// DNAT to an endpoint IP and terminate processing.

View File

@@ -73,7 +73,10 @@ const (
	kubeExternalServicesChain = "external-services"

	// LoadBalancerSourceRanges handling
	kubeFirewallChain = "firewall"
	kubeFirewallSet = "firewall"
	kubeFirewallCheckChain = "firewall-check"
	kubeFirewallAllowSet = "firewall-allow"
	kubeFirewallAllowCheckChain = "firewall-allow-check"

	// masquerading
	kubeMarkMasqChain = "mark-for-masquerade"
@@ -92,7 +95,6 @@ type servicePortInfo struct {
	nameString string
	clusterPolicyChainName string
	localPolicyChainName string
	firewallChainName string
	externalChainName string
}
@@ -108,7 +110,6 @@ func newServiceInfo(port *v1.ServicePort, service *v1.Service, bsvcPortInfo *pro
	chainNameBase := servicePortChainNameBase(&svcPortName, strings.ToLower(string(svcPort.Protocol())))
	svcPort.clusterPolicyChainName = servicePortPolicyClusterChainNamePrefix + chainNameBase
	svcPort.localPolicyChainName = servicePortPolicyLocalChainNamePrefix + chainNameBase
	svcPort.firewallChainName = serviceFirewallChainNamePrefix + chainNameBase
	svcPort.externalChainName = serviceExternalChainNamePrefix + chainNameBase

	return svcPort
@@ -319,9 +320,11 @@ type nftablesBaseChain struct {
}

var nftablesBaseChains = []nftablesBaseChain{
	{"filter-input", knftables.FilterType, knftables.InputHook, knftables.FilterPriority},
	{"filter-forward", knftables.FilterType, knftables.ForwardHook, knftables.FilterPriority},
	{"filter-output", knftables.FilterType, knftables.OutputHook, knftables.FilterPriority},
	// We want our filtering rules to operate on pre-DNAT dest IPs, so our filter
	// chains have to run before DNAT.
	{"filter-input", knftables.FilterType, knftables.InputHook, knftables.DNATPriority + "-1"},
	{"filter-forward", knftables.FilterType, knftables.ForwardHook, knftables.DNATPriority + "-1"},
	{"filter-output", knftables.FilterType, knftables.OutputHook, knftables.DNATPriority + "-1"},
	{"nat-prerouting", knftables.NATType, knftables.PreroutingHook, knftables.DNATPriority},
	{"nat-output", knftables.NATType, knftables.OutputHook, knftables.DNATPriority},
	{"nat-postrouting", knftables.NATType, knftables.PostroutingHook, knftables.SNATPriority},
@@ -342,9 +345,10 @@ var nftablesJumpChains = []nftablesJumpChain{
	{kubeServicesFilterChain, "filter-forward", "ct state new"},
	{kubeServicesFilterChain, "filter-output", "ct state new"},
	{kubeForwardChain, "filter-forward", ""},
	{kubeFirewallChain, "filter-input", "ct state new"},
	{kubeFirewallChain, "filter-output", "ct state new"},
	{kubeFirewallChain, "filter-forward", "ct state new"},
	{kubeFirewallCheckChain, "filter-input", "ct state new"},
	{kubeFirewallCheckChain, "filter-output", "ct state new"},
	{kubeFirewallCheckChain, "filter-forward", "ct state new"},

	{kubeServicesChain, "nat-output", ""},
	{kubeServicesChain, "nat-prerouting", ""},
@@ -368,8 +372,10 @@ func ensureChain(chain string, tx *knftables.Transaction, createdChains sets.Set
}

func (proxier *Proxier) setupNFTables(tx *knftables.Transaction) {
	ipX := "ip"
	ipvX_addr := "ipv4_addr" //nolint:stylecheck // var name intentionally resembles value
	if proxier.ipFamily == v1.IPv6Protocol {
		ipX = "ip6"
		ipvX_addr = "ipv6_addr"
	}
@@ -403,7 +409,7 @@ func (proxier *Proxier) setupNFTables(tx *knftables.Transaction) {
	}

	// Ensure all of our other "top-level" chains exist
	for _, chain := range []string{kubeServicesFilterChain, kubeServicesChain, kubeExternalServicesChain, kubeForwardChain, kubeNodePortsChain, kubeFirewallChain, kubeMasqueradingChain, kubeMarkMasqChain} {
	for _, chain := range []string{kubeServicesFilterChain, kubeServicesChain, kubeExternalServicesChain, kubeForwardChain, kubeNodePortsChain, kubeMasqueradingChain, kubeMarkMasqChain} {
		ensureChain(chain, tx, createdChains)
	}
@@ -477,6 +483,40 @@ func (proxier *Proxier) setupNFTables(tx *knftables.Transaction) {
			})
		}
	}

	// Set up LoadBalancerSourceRanges firewalling
	tx.Add(&knftables.Set{
		Name:    kubeFirewallSet,
		Type:    ipvX_addr + " . inet_proto . inet_service",
		Comment: ptr.To("destinations that are subject to LoadBalancerSourceRanges"),
	})
	tx.Add(&knftables.Set{
		Name:    kubeFirewallAllowSet,
		Type:    ipvX_addr + " . inet_proto . inet_service . " + ipvX_addr,
		Flags:   []knftables.SetFlag{knftables.IntervalFlag},
		Comment: ptr.To("destinations+sources that are allowed by LoadBalancerSourceRanges"),
	})

	ensureChain(kubeFirewallCheckChain, tx, createdChains)
	ensureChain(kubeFirewallAllowCheckChain, tx, createdChains)
	tx.Add(&knftables.Rule{
		Chain: kubeFirewallCheckChain,
		Rule: knftables.Concat(
			ipX, "daddr", ".", "meta l4proto", ".", "th dport", "@", kubeFirewallSet,
			"jump", kubeFirewallAllowCheckChain,
		),
	})
	tx.Add(&knftables.Rule{
		Chain: kubeFirewallAllowCheckChain,
		Rule: knftables.Concat(
			ipX, "daddr", ".", "meta l4proto", ".", "th dport", ".", ipX, "saddr", "@", kubeFirewallAllowSet,
			"return",
		),
	})
	tx.Add(&knftables.Rule{
		Chain: kubeFirewallAllowCheckChain,
		Rule:  "drop",
	})
}
// CleanupLeftovers removes all nftables rules and chains created by the Proxier
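Tracing the intended flow through the two new chains (a design sketch using the test's ns5/svc5 values, not literal tracer output): every new connection passes through firewall-check, but only destinations registered in the firewall set detour into firewall-allow-check, where the source must match an allowed tuple or the packet is dropped; traffic to services without LoadBalancerSourceRanges is untouched.

# new connection 203.0.113.5 -> 5.6.7.8:80/tcp (source inside 203.0.113.0/25):
#   firewall-check:       "5.6.7.8 . tcp . 80" is in @firewall -> jump firewall-allow-check
#   firewall-allow-check: "5.6.7.8 . tcp . 80 . 203.0.113.5" matches @firewall-allow -> return (accepted)
#
# new connection 198.51.100.9 -> 5.6.7.8:80/tcp (source in no allowed range):
#   firewall-check:       "5.6.7.8 . tcp . 80" is in @firewall -> jump firewall-allow-check
#   firewall-allow-check: no match in @firewall-allow -> drop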
@@ -684,7 +724,6 @@
const (
	servicePortPolicyClusterChainNamePrefix = "service-"
	servicePortPolicyLocalChainNamePrefix = "local-"
	serviceFirewallChainNamePrefix = "firewall-"
	serviceExternalChainNamePrefix = "external-"
	servicePortEndpointChainNamePrefix = "endpoint-"
	servicePortEndpointAffinityNamePrefix = "affinity-"
@@ -861,6 +900,14 @@ func (proxier *Proxier) syncProxyRules() {
		ipvX_addr = "ipv6_addr"
	}

	// We currently fully-rebuild our sets and maps on each resync
	tx.Flush(&knftables.Set{
		Name: kubeFirewallSet,
	})
	tx.Flush(&knftables.Set{
		Name: kubeFirewallAllowSet,
	})

	// Accumulate service/endpoint chains and affinity sets to keep.
	activeChains := sets.New[string]()
	activeAffinitySets := sets.New[string]()
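Because the sets themselves are declared once in setupNFTables, each sync only has to empty them and re-add every element; assuming knftables renders Set flushes as nft's flush set operation, the start of a sync transaction would look roughly like:

flush set ip kube-proxy firewall
flush set ip kube-proxy firewall-allow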
@@ -957,17 +1004,6 @@ func (proxier *Proxier) syncProxyRules() {
			ensureChain(externalTrafficChain, tx, activeChains)
		}

		// Traffic to LoadBalancer IPs can go directly to externalTrafficChain
		// unless LoadBalancerSourceRanges is in use in which case we will
		// create a firewall chain.
		loadBalancerTrafficChain := externalTrafficChain
		fwChain := svcInfo.firewallChainName
		usesFWChain := hasEndpoints && len(svcInfo.LoadBalancerVIPStrings()) > 0 && len(svcInfo.LoadBalancerSourceRanges()) > 0
		if usesFWChain {
			ensureChain(fwChain, tx, activeChains)
			loadBalancerTrafficChain = fwChain
		}

		var internalTrafficFilterVerdict, internalTrafficFilterComment string
		var externalTrafficFilterVerdict, externalTrafficFilterComment string
		if !hasEndpoints {
@@ -1061,21 +1097,58 @@ func (proxier *Proxier) syncProxyRules() {
					Rule: knftables.Concat(
						ipX, "daddr", lbip,
						protocol, "dport", svcInfo.Port(),
						"goto", loadBalancerTrafficChain,
						"goto", externalTrafficChain,
					),
				})
			}

			if usesFWChain {
				comment := fmt.Sprintf("%s traffic not accepted by %s", svcPortNameString, svcInfo.firewallChainName)
				tx.Add(&knftables.Rule{
					Chain: kubeFirewallChain,
					Rule: knftables.Concat(
						ipX, "daddr", lbip,
						protocol, "dport", svcInfo.Port(),
						"drop",
					),
					Comment: &comment,
			if len(svcInfo.LoadBalancerSourceRanges()) > 0 {
				tx.Add(&knftables.Element{
					Set: kubeFirewallSet,
					Key: []string{
						lbip,
						protocol,
						strconv.Itoa(svcInfo.Port()),
					},
					Comment: &svcPortNameString,
				})

				allowFromNode := false
				for _, src := range svcInfo.LoadBalancerSourceRanges() {
					_, cidr, _ := netutils.ParseCIDRSloppy(src)
					if cidr == nil {
						continue
					}
					tx.Add(&knftables.Element{
						Set: kubeFirewallAllowSet,
						Key: []string{
							lbip,
							protocol,
							strconv.Itoa(svcInfo.Port()),
							src,
						},
						Comment: &svcPortNameString,
					})
					if cidr.Contains(proxier.nodeIP) {
						allowFromNode = true
					}
				}

				// For VIP-like LBs, the VIP is often added as a local
				// address (via an IP route rule). In that case, a request
				// from a node to the VIP will not hit the loadbalancer but
				// will loop back with the source IP set to the VIP. We
				// need the following rules to allow requests from this node.
				if allowFromNode {
					tx.Add(&knftables.Element{
						Set: kubeFirewallAllowSet,
						Key: []string{
							lbip,
							protocol,
							strconv.Itoa(svcInfo.Port()),
							lbip,
						},
					})
				}
			}
		}

		if !hasExternalEndpoints {
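Concretely, for the test's ns5/svc5 (VIP 5.6.7.8, port 80, source range 203.0.113.0/25), this loop emits the two elements asserted in the test file below; if the node's own IP fell inside one of the ranges, the allowFromNode branch would add a third element keyed "5.6.7.8 . tcp . 80 . 5.6.7.8" so that node-originated requests looping back with the VIP as source are still allowed:

add element ip kube-proxy firewall { 5.6.7.8 . tcp . 80 comment "ns5/svc5:p80" }
add element ip kube-proxy firewall-allow { 5.6.7.8 . tcp . 80 . 203.0.113.0/25 comment "ns5/svc5:p80" }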
@@ -1223,57 +1296,6 @@ func (proxier *Proxier) syncProxyRules() {
			}
		}

		// Set up firewall chain, if needed
		if usesFWChain {
			// The service firewall rules are created based on the
			// loadBalancerSourceRanges field. This only works for VIP-like
			// loadbalancers that preserve source IPs. For loadbalancers which
			// direct traffic to service NodePort, the firewall rules will not
			// apply.

			// firewall filter based on each source range
			allowFromNode := false
			for _, src := range svcInfo.LoadBalancerSourceRanges() {
				tx.Add(&knftables.Rule{
					Chain: fwChain,
					Rule: knftables.Concat(
						ipX, "saddr", src,
						"goto", externalTrafficChain,
					),
				})
				_, cidr, err := netutils.ParseCIDRSloppy(src)
				if err != nil {
					klog.ErrorS(err, "Error parsing CIDR in LoadBalancerSourceRanges, dropping it", "cidr", cidr)
				} else if cidr.Contains(proxier.nodeIP) {
					allowFromNode = true
				}
			}

			// For VIP-like LBs, the VIP is often added as a local
			// address (via an IP route rule). In that case, a request
			// from a node to the VIP will not hit the loadbalancer but
			// will loop back with the source IP set to the VIP. We
			// need the following rules to allow requests from this node.
			if allowFromNode {
				for _, lbip := range svcInfo.LoadBalancerVIPStrings() {
					tx.Add(&knftables.Rule{
						Chain: fwChain,
						Rule: knftables.Concat(
							ipX, "saddr", lbip,
							"goto", externalTrafficChain,
						),
					})
				}
			}

			// If the packet was able to reach the end of firewall chain,
			// then it did not get DNATed, so it will match the
			// corresponding KUBE-PROXY-FIREWALL rule.
			tx.Add(&knftables.Rule{
				Chain:   fwChain,
				Rule:    "continue",
				Comment: ptr.To("other traffic will be dropped by firewall"),
			})
		}

		if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
			// Generate the per-endpoint affinity sets
			for _, ep := range allLocallyReachableEndpoints {
@@ -1323,7 +1345,7 @@ func (proxier *Proxier) syncProxyRules() {
			proxier.writeServiceToEndpointRules(tx, svcPortNameString, svcInfo, localPolicyChain, localEndpoints)
		}

		// Generate the per-endpoint chains and affinity sets
		// Generate the per-endpoint chains
		for _, ep := range allLocallyReachableEndpoints {
			epInfo, ok := ep.(*endpointInfo)
			if !ok {

View File

@@ -505,20 +505,18 @@ func TestOverallNFTablesRules(t *testing.T) {
add rule ip kube-proxy masquerading mark and 0x4000 == 0 return
add rule ip kube-proxy masquerading mark set mark xor 0x4000
add rule ip kube-proxy masquerading masquerade fully-random
add chain ip kube-proxy firewall
add chain ip kube-proxy services
add chain ip kube-proxy services-filter
add chain ip kube-proxy filter-forward { type filter hook forward priority 0 ; }
add chain ip kube-proxy firewall-check
add chain ip kube-proxy firewall-allow-check
add chain ip kube-proxy filter-forward { type filter hook forward priority -101 ; }
add rule ip kube-proxy filter-forward ct state new jump external-services
add rule ip kube-proxy filter-forward ct state new jump services-filter
add rule ip kube-proxy filter-forward jump forward
add rule ip kube-proxy filter-forward ct state new jump firewall
add chain ip kube-proxy filter-input { type filter hook input priority 0 ; }
add chain ip kube-proxy filter-input { type filter hook input priority -101 ; }
add rule ip kube-proxy filter-input ct state new jump external-services
add rule ip kube-proxy filter-input ct state new jump firewall
add chain ip kube-proxy filter-output { type filter hook output priority 0 ; }
add chain ip kube-proxy filter-output { type filter hook output priority -101 ; }
add rule ip kube-proxy filter-output ct state new jump services-filter
add rule ip kube-proxy filter-output ct state new jump firewall
add chain ip kube-proxy nat-output { type nat hook output priority -100 ; }
add rule ip kube-proxy nat-output jump services
add chain ip kube-proxy nat-postrouting { type nat hook postrouting priority 100 ; }
@@ -526,6 +524,15 @@ func TestOverallNFTablesRules(t *testing.T) {
add chain ip kube-proxy nat-prerouting { type nat hook prerouting priority -100 ; }
add rule ip kube-proxy nat-prerouting jump services
add set ip kube-proxy firewall { type ipv4_addr . inet_proto . inet_service ; comment "destinations that are subject to LoadBalancerSourceRanges" ; }
add set ip kube-proxy firewall-allow { type ipv4_addr . inet_proto . inet_service . ipv4_addr ; flags interval ; comment "destinations+sources that are allowed by LoadBalancerSourceRanges" ; }
add rule ip kube-proxy firewall-allow-check ip daddr . meta l4proto . th dport . ip saddr @firewall-allow return
add rule ip kube-proxy firewall-allow-check drop
add rule ip kube-proxy firewall-check ip daddr . meta l4proto . th dport @firewall jump firewall-allow-check
add rule ip kube-proxy filter-forward ct state new jump firewall-check
add rule ip kube-proxy filter-input ct state new jump firewall-check
add rule ip kube-proxy filter-output ct state new jump firewall-check
# svc1
add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
@@ -594,18 +601,16 @@ func TestOverallNFTablesRules(t *testing.T) {
add chain ip kube-proxy external-HVFWP5L3-ns5/svc5/tcp/p80
add rule ip kube-proxy external-HVFWP5L3-ns5/svc5/tcp/p80 jump mark-for-masquerade
add rule ip kube-proxy external-HVFWP5L3-ns5/svc5/tcp/p80 goto service-HVFWP5L3-ns5/svc5/tcp/p80
add chain ip kube-proxy firewall-HVFWP5L3-ns5/svc5/tcp/p80
add rule ip kube-proxy firewall-HVFWP5L3-ns5/svc5/tcp/p80 ip saddr 203.0.113.0/25 goto external-HVFWP5L3-ns5/svc5/tcp/p80
add rule ip kube-proxy firewall-HVFWP5L3-ns5/svc5/tcp/p80 continue comment "other traffic will be dropped by firewall"
add chain ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80
add rule ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 ip saddr 10.180.0.3 jump mark-for-masquerade
add rule ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 update @affinity-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 { ip saddr }
add rule ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 meta l4proto tcp dnat to 10.180.0.3:80
add rule ip kube-proxy firewall ip daddr 5.6.7.8 tcp dport 80 drop comment "ns5/svc5:p80 traffic not accepted by firewall-HVFWP5L3-ns5/svc5/tcp/p80"
add rule ip kube-proxy services ip daddr 172.30.0.45 tcp dport 80 goto service-HVFWP5L3-ns5/svc5/tcp/p80
add rule ip kube-proxy services ip daddr 5.6.7.8 tcp dport 80 goto firewall-HVFWP5L3-ns5/svc5/tcp/p80
add rule ip kube-proxy services ip daddr 5.6.7.8 tcp dport 80 goto external-HVFWP5L3-ns5/svc5/tcp/p80
add rule ip kube-proxy nodeports tcp dport 3002 goto external-HVFWP5L3-ns5/svc5/tcp/p80
add element ip kube-proxy firewall { 5.6.7.8 . tcp . 80 comment "ns5/svc5:p80" }
add element ip kube-proxy firewall-allow { 5.6.7.8 . tcp . 80 . 203.0.113.0/25 comment "ns5/svc5:p80" }
# svc6
add rule ip kube-proxy services-filter ip daddr 172.30.0.46 tcp dport 80 reject comment "ns6/svc6:p80 has no endpoints"
@@ -4242,10 +4247,11 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
add table ip kube-proxy { comment "rules for kube-proxy" ; }
add chain ip kube-proxy external-services
add chain ip kube-proxy filter-forward { type filter hook forward priority 0 ; }
add chain ip kube-proxy filter-input { type filter hook input priority 0 ; }
add chain ip kube-proxy filter-output { type filter hook output priority 0 ; }
add chain ip kube-proxy firewall
add chain ip kube-proxy filter-forward { type filter hook forward priority -101 ; }
add chain ip kube-proxy filter-input { type filter hook input priority -101 ; }
add chain ip kube-proxy filter-output { type filter hook output priority -101 ; }
add chain ip kube-proxy firewall-allow-check
add chain ip kube-proxy firewall-check
add chain ip kube-proxy forward
add chain ip kube-proxy mark-for-masquerade
add chain ip kube-proxy masquerading
@@ -4259,11 +4265,14 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
add rule ip kube-proxy filter-forward ct state new jump external-services
add rule ip kube-proxy filter-forward ct state new jump services-filter
add rule ip kube-proxy filter-forward jump forward
add rule ip kube-proxy filter-forward ct state new jump firewall
add rule ip kube-proxy filter-forward ct state new jump firewall-check
add rule ip kube-proxy filter-input ct state new jump external-services
add rule ip kube-proxy filter-input ct state new jump firewall
add rule ip kube-proxy filter-input ct state new jump firewall-check
add rule ip kube-proxy filter-output ct state new jump services-filter
add rule ip kube-proxy filter-output ct state new jump firewall
add rule ip kube-proxy filter-output ct state new jump firewall-check
add rule ip kube-proxy firewall-allow-check ip daddr . meta l4proto . th dport . ip saddr @firewall-allow return
add rule ip kube-proxy firewall-allow-check drop
add rule ip kube-proxy firewall-check ip daddr . meta l4proto . th dport @firewall jump firewall-allow-check
add rule ip kube-proxy forward ct state invalid drop
add rule ip kube-proxy mark-for-masquerade mark set mark or 0x4000
add rule ip kube-proxy masquerading mark and 0x4000 == 0 return
@@ -4272,6 +4281,9 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
add rule ip kube-proxy nat-output jump services
add rule ip kube-proxy nat-postrouting jump masquerading
add rule ip kube-proxy nat-prerouting jump services
add set ip kube-proxy firewall { type ipv4_addr . inet_proto . inet_service ; comment "destinations that are subject to LoadBalancerSourceRanges" ; }
add set ip kube-proxy firewall-allow { type ipv4_addr . inet_proto . inet_service . ipv4_addr ; flags interval ; comment "destinations+sources that are allowed by LoadBalancerSourceRanges" ; }
`)
// Helper function to make it look like time has passed (from the point of view of