mirror of https://github.com/k3s-io/kubernetes.git
Redo service dispatch with maps
commit 0993bb78ef
parent 9d71513ac1
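In brief: per-service dispatch moves from individual rules in the "services" and "nodeports" chains to nftables verdict maps. A minimal before/after sketch, using the chain, map, and address names taken from this commit's own test expectations:

# before: one rule per service port, walked linearly for every packet
add rule ip kube-proxy services ip daddr 172.30.0.41 tcp dport 80 goto service-ULMVA6XW-ns1/svc1/tcp/p80

# after: a single verdict-map lookup; each service port is a map element
add rule ip kube-proxy services ip daddr . meta l4proto . th dport vmap @service-ips
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }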
@@ -58,14 +58,6 @@ var objectOrder = map[string]int{
 	// anything else: 0
 }
 
-// For most chains we leave the rules in order (because the order matters), but for chains
-// with per-service rules, we don't know what order syncProxyRules is going to output them
-// in, but the order doesn't matter anyway. So we sort the rules in those chains.
-var sortedChains = sets.New(
-	kubeServicesChain,
-	kubeNodePortsChain,
-)
-
 // sortNFTablesTransaction sorts an nftables transaction into a standard order for comparison
 func sortNFTablesTransaction(tx string) string {
 	lines := strings.Split(tx, "\n")
@@ -104,13 +96,8 @@ func sortNFTablesTransaction(tx string) string {
 			return wi[4] < wj[4]
 		}
 
+		// Leave rules in the order they were originally added.
 		if wi[1] == "rule" {
-			// Sort rules in chains that need to be sorted
-			if sortedChains.Has(wi[4]) {
-				return li < lj
-			}
-
-			// Otherwise leave rules in the order they were originally added.
 			return false
 		}
 
@@ -63,7 +63,8 @@ const (
 
 	// service dispatch
 	kubeServicesChain = "services"
-	kubeNodePortsChain = "nodeports"
+	kubeServiceIPsMap = "service-ips"
+	kubeServiceNodePortsMap = "service-nodeports"
 
 	// set of IPs that accept NodePort traffic
 	kubeNodePortIPsSet = "nodeport-ips"
@@ -413,7 +414,7 @@ func (proxier *Proxier) setupNFTables(tx *knftables.Transaction) {
 	}
 
 	// Ensure all of our other "top-level" chains exist
-	for _, chain := range []string{kubeServicesChain, kubeForwardChain, kubeNodePortsChain, kubeMasqueradingChain, kubeMarkMasqChain} {
+	for _, chain := range []string{kubeServicesChain, kubeForwardChain, kubeMasqueradingChain, kubeMarkMasqChain} {
 		ensureChain(chain, tx, createdChains)
 	}
 
@@ -574,6 +575,45 @@ func (proxier *Proxier) setupNFTables(tx *knftables.Transaction) {
 		Chain: kubeFirewallAllowCheckChain,
 		Rule: "drop",
 	})
+
+	// Set up service dispatch
+	tx.Add(&knftables.Map{
+		Name: kubeServiceIPsMap,
+		Type: ipvX_addr + " . inet_proto . inet_service : verdict",
+		Comment: ptr.To("ClusterIP, ExternalIP and LoadBalancer IP traffic"),
+	})
+	tx.Add(&knftables.Map{
+		Name: kubeServiceNodePortsMap,
+		Type: "inet_proto . inet_service : verdict",
+		Comment: ptr.To("NodePort traffic"),
+	})
+	tx.Add(&knftables.Rule{
+		Chain: kubeServicesChain,
+		Rule: knftables.Concat(
+			ipX, "daddr", ".", "meta l4proto", ".", "th dport",
+			"vmap", "@", kubeServiceIPsMap,
+		),
+	})
+	if proxier.nodePortAddresses.MatchAll() {
+		tx.Add(&knftables.Rule{
+			Chain: kubeServicesChain,
+			Rule: knftables.Concat(
+				"fib daddr type local",
+				noLocalhost,
+				"meta l4proto . th dport",
+				"vmap", "@", kubeServiceNodePortsMap,
+			),
+		})
+	} else {
+		tx.Add(&knftables.Rule{
+			Chain: kubeServicesChain,
+			Rule: knftables.Concat(
+				ipX, "daddr @nodeport-ips",
+				"meta l4proto . th dport",
+				"vmap", "@", kubeServiceNodePortsMap,
+			),
+		})
+	}
 }
 
 // CleanupLeftovers removes all nftables rules and chains created by the Proxier
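With this in place, matching a packet to its service is a single map lookup on the daddr . l4proto . dport tuple rather than a linear walk of per-service rules. Assuming the IPv4 family (so ipvX_addr is "ipv4_addr" and ipX is "ip") and the nodePortAddresses.MatchAll() branch, the transaction above renders to the nft objects below, which match the test expectations later in this commit:

add map ip kube-proxy service-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "ClusterIP, ExternalIP and LoadBalancer IP traffic" ; }
add map ip kube-proxy service-nodeports { type inet_proto . inet_service : verdict ; comment "NodePort traffic" ; }
add rule ip kube-proxy services ip daddr . meta l4proto . th dport vmap @service-ips
add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 meta l4proto . th dport vmap @service-nodeports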
@@ -970,6 +1010,12 @@ func (proxier *Proxier) syncProxyRules() {
 	tx.Flush(&knftables.Map{
 		Name: kubeNoEndpointNodePortsMap,
 	})
+	tx.Flush(&knftables.Map{
+		Name: kubeServiceIPsMap,
+	})
+	tx.Flush(&knftables.Map{
+		Name: kubeServiceNodePortsMap,
+	})
 
 	// Accumulate service/endpoint chains and affinity sets to keep.
 	activeChains := sets.New[string]()
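syncProxyRules now flushes the two dispatch maps at the start of every sync; flushing a map empties its elements but keeps the map definition, so elements are re-added below only for services that still exist. A sketch of the equivalent nft operations, assuming the ip-family "kube-proxy" table the tests use:

flush map ip kube-proxy service-ips
flush map ip kube-proxy service-nodeports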
@@ -1096,13 +1142,16 @@ func (proxier *Proxier) syncProxyRules() {
 
 		// Capture the clusterIP.
 		if hasInternalEndpoints {
-			tx.Add(&knftables.Rule{
-				Chain: kubeServicesChain,
-				Rule: knftables.Concat(
-					ipX, "daddr", svcInfo.ClusterIP(),
-					protocol, "dport", svcInfo.Port(),
-					"goto", internalTrafficChain,
-				),
+			tx.Add(&knftables.Element{
+				Map: kubeServiceIPsMap,
+				Key: []string{
+					svcInfo.ClusterIP().String(),
+					protocol,
+					strconv.Itoa(svcInfo.Port()),
+				},
+				Value: []string{
+					fmt.Sprintf("goto %s", internalTrafficChain),
+				},
 			})
 		} else {
 			// No endpoints.
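Each ClusterIP:port thus becomes a single element in @service-ips whose value is a goto verdict into the service's chain. For svc1 in the test data below, the element renders as:

add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }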
@@ -1125,13 +1174,16 @@ func (proxier *Proxier) syncProxyRules() {
 			if hasEndpoints {
 				// Send traffic bound for external IPs to the "external
 				// destinations" chain.
-				tx.Add(&knftables.Rule{
-					Chain: kubeServicesChain,
-					Rule: knftables.Concat(
-						ipX, "daddr", externalIP,
-						protocol, "dport", svcInfo.Port(),
-						"goto", externalTrafficChain,
-					),
+				tx.Add(&knftables.Element{
+					Map: kubeServiceIPsMap,
+					Key: []string{
+						externalIP,
+						protocol,
+						strconv.Itoa(svcInfo.Port()),
+					},
+					Value: []string{
+						fmt.Sprintf("goto %s", externalTrafficChain),
+					},
 				})
 			}
 			if !hasExternalEndpoints {
@@ -1156,13 +1208,16 @@ func (proxier *Proxier) syncProxyRules() {
 		// Capture load-balancer ingress.
 		for _, lbip := range svcInfo.LoadBalancerVIPStrings() {
 			if hasEndpoints {
-				tx.Add(&knftables.Rule{
-					Chain: kubeServicesChain,
-					Rule: knftables.Concat(
-						ipX, "daddr", lbip,
-						protocol, "dport", svcInfo.Port(),
-						"goto", externalTrafficChain,
-					),
+				tx.Add(&knftables.Element{
+					Map: kubeServiceIPsMap,
+					Key: []string{
+						lbip,
+						protocol,
+						strconv.Itoa(svcInfo.Port()),
+					},
+					Value: []string{
+						fmt.Sprintf("goto %s", externalTrafficChain),
+					},
 				})
 			}
 
@@ -1241,12 +1296,15 @@ func (proxier *Proxier) syncProxyRules() {
 			// Jump to the external destination chain. For better or for
 			// worse, nodeports are not subect to loadBalancerSourceRanges,
 			// and we can't change that.
-			tx.Add(&knftables.Rule{
-				Chain: kubeNodePortsChain,
-				Rule: knftables.Concat(
-					protocol, "dport", svcInfo.NodePort(),
-					"goto", externalTrafficChain,
-				),
+			tx.Add(&knftables.Element{
+				Map: kubeServiceNodePortsMap,
+				Key: []string{
+					protocol,
+					strconv.Itoa(svcInfo.NodePort()),
+				},
+				Value: []string{
+					fmt.Sprintf("goto %s", externalTrafficChain),
+				},
 			})
 		}
 		if !hasExternalEndpoints {
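Note that the NodePort map key omits the destination address: @service-nodeports is keyed on inet_proto . inet_service only, because the address check (local destination, not localhost, or membership in @nodeport-ips) already happened in the dispatch rule added in setupNFTables. From the test expectations:

add element ip kube-proxy service-nodeports { tcp . 3001 : goto external-42NFTM6N-ns2/svc2/tcp/p80 }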
@@ -1455,37 +1513,6 @@ func (proxier *Proxier) syncProxyRules() {
 		}
 	}
 
-	// Finally, tail-call to the nodePorts chain. This needs to be after all
-	// other service portal rules.
-	if proxier.nodePortAddresses.MatchAll() {
-		// Block localhost nodePorts
-		var noLocalhost string
-		if proxier.ipFamily == v1.IPv6Protocol {
-			noLocalhost = "ip6 daddr != ::1"
-		} else {
-			noLocalhost = "ip daddr != 127.0.0.0/8"
-		}
-
-		tx.Add(&knftables.Rule{
-			Chain: kubeServicesChain,
-			Rule: knftables.Concat(
-				"fib daddr type local",
-				noLocalhost,
-				"jump", kubeNodePortsChain,
-			),
-			Comment: ptr.To("kubernetes service nodeports; NOTE: this must be the last rule in this chain"),
-		})
-	} else {
-		tx.Add(&knftables.Rule{
-			Chain: kubeServicesChain,
-			Rule: knftables.Concat(
-				ipX, "daddr", "@", kubeNodePortIPsSet,
-				"jump", kubeNodePortsChain,
-			),
-			Comment: ptr.To("kubernetes service nodeports; NOTE: this must be the last rule in this chain"),
-		})
-	}
-
 	// Figure out which chains are now stale. Unfortunately, we can't delete them
 	// right away, because with kernels before 6.2, if there is a map element pointing
 	// to a chain, and you delete that map element, the kernel doesn't notice until a
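The ordering-sensitive tail-call disappears entirely: nodeport dispatch is now the static vmap rule installed once in setupNFTables, and since map lookups do not depend on rule position, the "this must be the last rule in this chain" constraint goes away along with the per-sync rule churn. In the MatchAll() case that static rule is:

add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 meta l4proto . th dport vmap @service-nodeports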
@@ -499,7 +499,6 @@ func TestOverallNFTablesRules(t *testing.T) {
 add rule ip kube-proxy forward ct state invalid drop
 add chain ip kube-proxy mark-for-masquerade
 add rule ip kube-proxy mark-for-masquerade mark set mark or 0x4000
-add chain ip kube-proxy nodeports
 add chain ip kube-proxy masquerading
 add rule ip kube-proxy masquerading mark and 0x4000 == 0 return
 add rule ip kube-proxy masquerading mark set mark xor 0x4000
@@ -540,6 +539,11 @@ func TestOverallNFTablesRules(t *testing.T) {
 add rule ip kube-proxy endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services
 add rule ip kube-proxy endpoints-check fib daddr type local ip daddr != 127.0.0.0/8 meta l4proto . th dport vmap @no-endpoint-nodeports
 
+add map ip kube-proxy service-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "ClusterIP, ExternalIP and LoadBalancer IP traffic" ; }
+add map ip kube-proxy service-nodeports { type inet_proto . inet_service : verdict ; comment "NodePort traffic" ; }
+add rule ip kube-proxy services ip daddr . meta l4proto . th dport vmap @service-ips
+add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 meta l4proto . th dport vmap @service-nodeports
+
 # svc1
 add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
 add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
@@ -549,7 +553,7 @@ func TestOverallNFTablesRules(t *testing.T) {
 add rule ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 ip saddr 10.180.0.1 jump mark-for-masquerade
 add rule ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 meta l4proto tcp dnat to 10.180.0.1:80
 
-add rule ip kube-proxy services ip daddr 172.30.0.41 tcp dport 80 goto service-ULMVA6XW-ns1/svc1/tcp/p80
+add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
 
 # svc2
 add chain ip kube-proxy service-42NFTM6N-ns2/svc2/tcp/p80
@@ -562,10 +566,11 @@ func TestOverallNFTablesRules(t *testing.T) {
 add chain ip kube-proxy endpoint-SGOXE6O3-ns2/svc2/tcp/p80__10.180.0.2/80
 add rule ip kube-proxy endpoint-SGOXE6O3-ns2/svc2/tcp/p80__10.180.0.2/80 ip saddr 10.180.0.2 jump mark-for-masquerade
 add rule ip kube-proxy endpoint-SGOXE6O3-ns2/svc2/tcp/p80__10.180.0.2/80 meta l4proto tcp dnat to 10.180.0.2:80
-add rule ip kube-proxy services ip daddr 172.30.0.42 tcp dport 80 goto service-42NFTM6N-ns2/svc2/tcp/p80
-add rule ip kube-proxy services ip daddr 192.168.99.22 tcp dport 80 goto external-42NFTM6N-ns2/svc2/tcp/p80
-add rule ip kube-proxy services ip daddr 1.2.3.4 tcp dport 80 goto external-42NFTM6N-ns2/svc2/tcp/p80
-add rule ip kube-proxy nodeports tcp dport 3001 goto external-42NFTM6N-ns2/svc2/tcp/p80
 
+add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 80 : goto service-42NFTM6N-ns2/svc2/tcp/p80 }
+add element ip kube-proxy service-ips { 192.168.99.22 . tcp . 80 : goto external-42NFTM6N-ns2/svc2/tcp/p80 }
+add element ip kube-proxy service-ips { 1.2.3.4 . tcp . 80 : goto external-42NFTM6N-ns2/svc2/tcp/p80 }
+add element ip kube-proxy service-nodeports { tcp . 3001 : goto external-42NFTM6N-ns2/svc2/tcp/p80 }
+
 add element ip kube-proxy no-endpoint-nodeports { tcp . 3001 comment "ns2/svc2:p80" : drop }
 add element ip kube-proxy no-endpoint-services { 1.2.3.4 . tcp . 80 comment "ns2/svc2:p80" : drop }
@@ -581,8 +586,9 @@ func TestOverallNFTablesRules(t *testing.T) {
 add chain ip kube-proxy endpoint-UEIP74TE-ns3/svc3/tcp/p80__10.180.0.3/80
 add rule ip kube-proxy endpoint-UEIP74TE-ns3/svc3/tcp/p80__10.180.0.3/80 ip saddr 10.180.0.3 jump mark-for-masquerade
 add rule ip kube-proxy endpoint-UEIP74TE-ns3/svc3/tcp/p80__10.180.0.3/80 meta l4proto tcp dnat to 10.180.0.3:80
-add rule ip kube-proxy services ip daddr 172.30.0.43 tcp dport 80 goto service-4AT6LBPK-ns3/svc3/tcp/p80
-add rule ip kube-proxy nodeports tcp dport 3003 goto external-4AT6LBPK-ns3/svc3/tcp/p80
+
+add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
+add element ip kube-proxy service-nodeports { tcp . 3003 : goto external-4AT6LBPK-ns3/svc3/tcp/p80 }
 
 # svc4
 add chain ip kube-proxy service-LAUZTJTB-ns4/svc4/tcp/p80
@@ -597,8 +603,9 @@ func TestOverallNFTablesRules(t *testing.T) {
 add chain ip kube-proxy endpoint-UNZV3OEC-ns4/svc4/tcp/p80__10.180.0.4/80
 add rule ip kube-proxy endpoint-UNZV3OEC-ns4/svc4/tcp/p80__10.180.0.4/80 ip saddr 10.180.0.4 jump mark-for-masquerade
 add rule ip kube-proxy endpoint-UNZV3OEC-ns4/svc4/tcp/p80__10.180.0.4/80 meta l4proto tcp dnat to 10.180.0.4:80
-add rule ip kube-proxy services ip daddr 172.30.0.44 tcp dport 80 goto service-LAUZTJTB-ns4/svc4/tcp/p80
-add rule ip kube-proxy services ip daddr 192.168.99.33 tcp dport 80 goto external-LAUZTJTB-ns4/svc4/tcp/p80
+
+add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
+add element ip kube-proxy service-ips { 192.168.99.33 . tcp . 80 : goto external-LAUZTJTB-ns4/svc4/tcp/p80 }
 
 # svc5
 add set ip kube-proxy affinity-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 { type ipv4_addr ; flags dynamic,timeout ; timeout 10800s ; }
@@ -614,16 +621,15 @@ func TestOverallNFTablesRules(t *testing.T) {
 add rule ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 ip saddr 10.180.0.3 jump mark-for-masquerade
 add rule ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 update @affinity-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 { ip saddr }
 add rule ip kube-proxy endpoint-GTK6MW7G-ns5/svc5/tcp/p80__10.180.0.3/80 meta l4proto tcp dnat to 10.180.0.3:80
-add rule ip kube-proxy services ip daddr 172.30.0.45 tcp dport 80 goto service-HVFWP5L3-ns5/svc5/tcp/p80
-add rule ip kube-proxy services ip daddr 5.6.7.8 tcp dport 80 goto external-HVFWP5L3-ns5/svc5/tcp/p80
-add rule ip kube-proxy nodeports tcp dport 3002 goto external-HVFWP5L3-ns5/svc5/tcp/p80
 
+add element ip kube-proxy service-ips { 172.30.0.45 . tcp . 80 : goto service-HVFWP5L3-ns5/svc5/tcp/p80 }
+add element ip kube-proxy service-ips { 5.6.7.8 . tcp . 80 : goto external-HVFWP5L3-ns5/svc5/tcp/p80 }
+add element ip kube-proxy service-nodeports { tcp . 3002 : goto external-HVFWP5L3-ns5/svc5/tcp/p80 }
 add element ip kube-proxy firewall { 5.6.7.8 . tcp . 80 comment "ns5/svc5:p80" }
 add element ip kube-proxy firewall-allow { 5.6.7.8 . tcp . 80 . 203.0.113.0/25 comment "ns5/svc5:p80" }
 
 # svc6
 add element ip kube-proxy no-endpoint-services { 172.30.0.46 . tcp . 80 comment "ns6/svc6:p80" : goto reject-chain }
-
-add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 jump nodeports comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain"
 	`)
 
 	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
@@ -4266,7 +4272,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 add chain ip kube-proxy nat-output { type nat hook output priority -100 ; }
 add chain ip kube-proxy nat-postrouting { type nat hook postrouting priority 100 ; }
 add chain ip kube-proxy nat-prerouting { type nat hook prerouting priority -100 ; }
-add chain ip kube-proxy nodeports
 add chain ip kube-proxy reject-chain { comment "helper for @no-endpoint-services / @no-endpoint-nodeports" ; }
 add chain ip kube-proxy services
 
@@ -4291,11 +4296,15 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 add rule ip kube-proxy nat-postrouting jump masquerading
 add rule ip kube-proxy nat-prerouting jump services
 add rule ip kube-proxy reject-chain reject
+add rule ip kube-proxy services ip daddr . meta l4proto . th dport vmap @service-ips
+add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 meta l4proto . th dport vmap @service-nodeports
 
 add set ip kube-proxy firewall { type ipv4_addr . inet_proto . inet_service ; comment "destinations that are subject to LoadBalancerSourceRanges" ; }
 add set ip kube-proxy firewall-allow { type ipv4_addr . inet_proto . inet_service . ipv4_addr ; flags interval ; comment "destinations+sources that are allowed by LoadBalancerSourceRanges" ; }
 add map ip kube-proxy no-endpoint-nodeports { type inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to service nodeports with no endpoints" ; }
 add map ip kube-proxy no-endpoint-services { type ipv4_addr . inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to services with no endpoints" ; }
+add map ip kube-proxy service-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "ClusterIP, ExternalIP and LoadBalancer IP traffic" ; }
+add map ip kube-proxy service-nodeports { type inet_proto . inet_service : verdict ; comment "NodePort traffic" ; }
 	`)
 
 	// Helper function to make it look like time has passed (from the point of view of
@@ -4359,9 +4368,8 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 	fp.syncProxyRules()
 
 	expected := baseRules + dedent.Dedent(`
-add rule ip kube-proxy services ip daddr 172.30.0.41 tcp dport 80 goto service-ULMVA6XW-ns1/svc1/tcp/p80
-add rule ip kube-proxy services ip daddr 172.30.0.42 tcp dport 8080 goto service-MHHHYRWA-ns2/svc2/tcp/p8080
-add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 jump nodeports comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain"
+add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
+add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 }
 
 add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
 add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
@@ -4409,10 +4417,9 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 	fp.syncProxyRules()
 
 	expected = baseRules + dedent.Dedent(`
-add rule ip kube-proxy services ip daddr 172.30.0.41 tcp dport 80 goto service-ULMVA6XW-ns1/svc1/tcp/p80
-add rule ip kube-proxy services ip daddr 172.30.0.42 tcp dport 8080 goto service-MHHHYRWA-ns2/svc2/tcp/p8080
-add rule ip kube-proxy services ip daddr 172.30.0.43 tcp dport 80 goto service-4AT6LBPK-ns3/svc3/tcp/p80
-add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 jump nodeports comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain"
+add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
+add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 }
+add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
 
 add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
 add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
@@ -4441,9 +4448,8 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 	fp.OnServiceDelete(svc2)
 	fp.syncProxyRules()
 	expected = baseRules + dedent.Dedent(`
-add rule ip kube-proxy services ip daddr 172.30.0.41 tcp dport 80 goto service-ULMVA6XW-ns1/svc1/tcp/p80
-add rule ip kube-proxy services ip daddr 172.30.0.43 tcp dport 80 goto service-4AT6LBPK-ns3/svc3/tcp/p80
-add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 jump nodeports comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain"
+add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
+add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
 
 add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
 add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
@@ -4468,9 +4474,8 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 	ageStaleChains()
 	fp.syncProxyRules()
 	expected = baseRules + dedent.Dedent(`
-add rule ip kube-proxy services ip daddr 172.30.0.41 tcp dport 80 goto service-ULMVA6XW-ns1/svc1/tcp/p80
-add rule ip kube-proxy services ip daddr 172.30.0.43 tcp dport 80 goto service-4AT6LBPK-ns3/svc3/tcp/p80
-add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 jump nodeports comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain"
+add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
+add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
 
 add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
 add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
@@ -4502,9 +4507,8 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 	)
 	fp.syncProxyRules()
 	expected = baseRules + dedent.Dedent(`
-add rule ip kube-proxy services ip daddr 172.30.0.41 tcp dport 80 goto service-ULMVA6XW-ns1/svc1/tcp/p80
-add rule ip kube-proxy services ip daddr 172.30.0.43 tcp dport 80 goto service-4AT6LBPK-ns3/svc3/tcp/p80
-add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 jump nodeports comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain"
+add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
+add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
 
 add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
 add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
@@ -4539,10 +4543,9 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 	)
 	fp.syncProxyRules()
 	expected = baseRules + dedent.Dedent(`
-add rule ip kube-proxy services ip daddr 172.30.0.41 tcp dport 80 goto service-ULMVA6XW-ns1/svc1/tcp/p80
-add rule ip kube-proxy services ip daddr 172.30.0.43 tcp dport 80 goto service-4AT6LBPK-ns3/svc3/tcp/p80
-add rule ip kube-proxy services ip daddr 172.30.0.44 tcp dport 80 goto service-LAUZTJTB-ns4/svc4/tcp/p80
-add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 jump nodeports comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain"
+add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
+add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
+add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
 
 add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
 add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
@@ -4575,10 +4578,9 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 
 	// The old endpoint chain (for 10.0.3.1) will not be deleted yet.
 	expected = baseRules + dedent.Dedent(`
-add rule ip kube-proxy services ip daddr 172.30.0.41 tcp dport 80 goto service-ULMVA6XW-ns1/svc1/tcp/p80
-add rule ip kube-proxy services ip daddr 172.30.0.43 tcp dport 80 goto service-4AT6LBPK-ns3/svc3/tcp/p80
-add rule ip kube-proxy services ip daddr 172.30.0.44 tcp dport 80 goto service-LAUZTJTB-ns4/svc4/tcp/p80
-add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 jump nodeports comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain"
+add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
+add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
+add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
 
 add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
 add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
@@ -4614,10 +4616,9 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 	fp.syncProxyRules()
 
 	expected = baseRules + dedent.Dedent(`
-add rule ip kube-proxy services ip daddr 172.30.0.41 tcp dport 80 goto service-ULMVA6XW-ns1/svc1/tcp/p80
-add rule ip kube-proxy services ip daddr 172.30.0.43 tcp dport 80 goto service-4AT6LBPK-ns3/svc3/tcp/p80
-add rule ip kube-proxy services ip daddr 172.30.0.44 tcp dport 80 goto service-LAUZTJTB-ns4/svc4/tcp/p80
-add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 jump nodeports comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain"
+add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
+add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
+add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
 
 add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
 add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
@@ -4912,14 +4913,8 @@ func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) {
 
 			fp.syncProxyRules()
 
-			c := nft.Table.Chains[kubeServicesChain]
-			ruleExists := false
-			destCheck := fmt.Sprintf("ip daddr %s", testCase.svcLBIP)
-			for _, r := range c.Rules {
-				if strings.HasPrefix(r.Rule, destCheck) {
-					ruleExists = true
-				}
-			}
+			element := nft.Table.Maps["service-ips"].FindElement(testCase.svcLBIP, "tcp", "80")
+			ruleExists := element != nil
 			if ruleExists != testCase.expectedRule {
 				t.Errorf("unexpected rule for %s", testCase.svcLBIP)
 			}
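Since dispatch now lives in map elements rather than rules, the test checks map membership instead of scanning the rules of the services chain. A minimal sketch of the same check wrapped as a helper (hypothetical; it assumes the knftables fake's Table.Maps and FindElement seen in the diff, and hard-codes the tcp/80 key this test uses):

// lbDispatchExists reports whether the fake nftables state contains a
// service-ips element for the given LoadBalancer IP (hypothetical helper).
func lbDispatchExists(nft *knftables.Fake, lbIP string) bool {
	// A service-ips element keyed on (address . protocol . port) means
	// dispatch for that service port is installed.
	m := nft.Table.Maps["service-ips"]
	if m == nil {
		return false
	}
	return m.FindElement(lbIP, "tcp", "80") != nil
}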