From 11f55eae96f8ff8e99078614e5e7a14371dc93ed Mon Sep 17 00:00:00 2001
From: Dan Winship
Date: Thu, 18 Jul 2024 10:53:36 -0400
Subject: [PATCH 1/3] Reduce some duplication in nftables unit tests

---
 pkg/proxy/nftables/proxier_test.go | 165 ++++++++++------------------
 1 file changed, 55 insertions(+), 110 deletions(-)

diff --git a/pkg/proxy/nftables/proxier_test.go b/pkg/proxy/nftables/proxier_test.go
index 5bcbe242a36..1442f63ae60 100644
--- a/pkg/proxy/nftables/proxier_test.go
+++ b/pkg/proxy/nftables/proxier_test.go
@@ -137,6 +137,60 @@ func NewFakeProxier(ipFamily v1.IPFamily) (*knftables.Fake, *Proxier) {
 	return nft, p
 }
 
+var baseRules = dedent.Dedent(`
+	add table ip kube-proxy { comment "rules for kube-proxy" ; }
+
+	add chain ip kube-proxy cluster-ips-check
+	add chain ip kube-proxy filter-prerouting { type filter hook prerouting priority -110 ; }
+	add chain ip kube-proxy filter-forward { type filter hook forward priority -110 ; }
+	add chain ip kube-proxy filter-input { type filter hook input priority -110 ; }
+	add chain ip kube-proxy filter-output { type filter hook output priority -110 ; }
+	add chain ip kube-proxy filter-output-post-dnat { type filter hook output priority -90 ; }
+	add chain ip kube-proxy firewall-check
+	add chain ip kube-proxy mark-for-masquerade
+	add chain ip kube-proxy masquerading
+	add chain ip kube-proxy nat-output { type nat hook output priority -100 ; }
+	add chain ip kube-proxy nat-postrouting { type nat hook postrouting priority 100 ; }
+	add chain ip kube-proxy nat-prerouting { type nat hook prerouting priority -100 ; }
+	add chain ip kube-proxy nodeport-endpoints-check
+	add chain ip kube-proxy reject-chain { comment "helper for @no-endpoint-services / @no-endpoint-nodeports" ; }
+	add chain ip kube-proxy services
+	add chain ip kube-proxy service-endpoints-check
+
+	add rule ip kube-proxy cluster-ips-check ip daddr @cluster-ips reject comment "Reject traffic to invalid ports of ClusterIPs"
+	add rule ip kube-proxy cluster-ips-check ip daddr { 172.30.0.0/16 } drop comment "Drop traffic to unallocated ClusterIPs"
+	add rule ip kube-proxy filter-prerouting ct state new jump firewall-check
+	add rule ip kube-proxy filter-forward ct state new jump service-endpoints-check
+	add rule ip kube-proxy filter-forward ct state new jump cluster-ips-check
+	add rule ip kube-proxy filter-input ct state new jump nodeport-endpoints-check
+	add rule ip kube-proxy filter-input ct state new jump service-endpoints-check
+	add rule ip kube-proxy filter-output ct state new jump service-endpoints-check
+	add rule ip kube-proxy filter-output ct state new jump firewall-check
+	add rule ip kube-proxy filter-output-post-dnat ct state new jump cluster-ips-check
+	add rule ip kube-proxy firewall-check ip daddr . meta l4proto . th dport vmap @firewall-ips
+	add rule ip kube-proxy mark-for-masquerade mark set mark or 0x4000
+	add rule ip kube-proxy masquerading mark and 0x4000 == 0 return
+	add rule ip kube-proxy masquerading mark set mark xor 0x4000
+	add rule ip kube-proxy masquerading masquerade fully-random
+	add rule ip kube-proxy nat-output jump services
+	add rule ip kube-proxy nat-postrouting jump masquerading
+	add rule ip kube-proxy nat-prerouting jump services
+	add rule ip kube-proxy nodeport-endpoints-check ip daddr @nodeport-ips meta l4proto . th dport vmap @no-endpoint-nodeports
+	add rule ip kube-proxy reject-chain reject
+	add rule ip kube-proxy services ip daddr . meta l4proto . th dport vmap @service-ips
+	add rule ip kube-proxy services ip daddr @nodeport-ips meta l4proto . th dport vmap @service-nodeports
+	add set ip kube-proxy cluster-ips { type ipv4_addr ; comment "Active ClusterIPs" ; }
+	add set ip kube-proxy nodeport-ips { type ipv4_addr ; comment "IPs that accept NodePort traffic" ; }
+	add element ip kube-proxy nodeport-ips { 192.168.0.2 }
+	add rule ip kube-proxy service-endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services
+
+	add map ip kube-proxy firewall-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "destinations that are subject to LoadBalancerSourceRanges" ; }
+	add map ip kube-proxy no-endpoint-nodeports { type inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to service nodeports with no endpoints" ; }
+	add map ip kube-proxy no-endpoint-services { type ipv4_addr . inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to services with no endpoints" ; }
+	add map ip kube-proxy service-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "ClusterIP, ExternalIP and LoadBalancer IP traffic" ; }
+	add map ip kube-proxy service-nodeports { type inet_proto . inet_service : verdict ; comment "NodePort traffic" ; }
+	`)
+
 // TestOverallNFTablesRules creates a variety of services and verifies that the generated
 // rules are exactly as expected.
 func TestOverallNFTablesRules(t *testing.T) {
@@ -301,62 +355,7 @@ func TestOverallNFTablesRules(t *testing.T) {
 
 	fp.syncProxyRules()
 
-	expected := dedent.Dedent(`
-		add table ip kube-proxy { comment "rules for kube-proxy" ; }
-
-		add chain ip kube-proxy mark-for-masquerade
-		add rule ip kube-proxy mark-for-masquerade mark set mark or 0x4000
-		add chain ip kube-proxy masquerading
-		add rule ip kube-proxy masquerading mark and 0x4000 == 0 return
-		add rule ip kube-proxy masquerading mark set mark xor 0x4000
-		add rule ip kube-proxy masquerading masquerade fully-random
-		add chain ip kube-proxy services
-		add chain ip kube-proxy service-endpoints-check
-		add rule ip kube-proxy service-endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services
-		add chain ip kube-proxy filter-prerouting { type filter hook prerouting priority -110 ; }
-		add rule ip kube-proxy filter-prerouting ct state new jump firewall-check
-		add chain ip kube-proxy filter-forward { type filter hook forward priority -110 ; }
-		add rule ip kube-proxy filter-forward ct state new jump service-endpoints-check
-		add rule ip kube-proxy filter-forward ct state new jump cluster-ips-check
-		add chain ip kube-proxy filter-input { type filter hook input priority -110 ; }
-		add rule ip kube-proxy filter-input ct state new jump nodeport-endpoints-check
-		add rule ip kube-proxy filter-input ct state new jump service-endpoints-check
-		add chain ip kube-proxy filter-output { type filter hook output priority -110 ; }
-		add rule ip kube-proxy filter-output ct state new jump service-endpoints-check
-		add rule ip kube-proxy filter-output ct state new jump firewall-check
-		add chain ip kube-proxy filter-output-post-dnat { type filter hook output priority -90 ; }
-		add rule ip kube-proxy filter-output-post-dnat ct state new jump cluster-ips-check
-		add chain ip kube-proxy nat-output { type nat hook output priority -100 ; }
-		add rule ip kube-proxy nat-output jump services
-		add chain ip kube-proxy nat-postrouting { type nat hook postrouting priority 100 ; }
-		add rule ip kube-proxy nat-postrouting jump masquerading
-		add chain ip kube-proxy nat-prerouting { type nat hook prerouting priority -100 ; }
-		add rule ip kube-proxy nat-prerouting jump services
-		add chain ip kube-proxy nodeport-endpoints-check
-		add rule ip kube-proxy nodeport-endpoints-check ip daddr @nodeport-ips meta l4proto . th dport vmap @no-endpoint-nodeports
-
-		add set ip kube-proxy cluster-ips { type ipv4_addr ; comment "Active ClusterIPs" ; }
-		add chain ip kube-proxy cluster-ips-check
-		add rule ip kube-proxy cluster-ips-check ip daddr @cluster-ips reject comment "Reject traffic to invalid ports of ClusterIPs"
-		add rule ip kube-proxy cluster-ips-check ip daddr { 172.30.0.0/16 } drop comment "Drop traffic to unallocated ClusterIPs"
-
-		add set ip kube-proxy nodeport-ips { type ipv4_addr ; comment "IPs that accept NodePort traffic" ; }
-		add map ip kube-proxy firewall-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "destinations that are subject to LoadBalancerSourceRanges" ; }
-		add chain ip kube-proxy firewall-check
-		add rule ip kube-proxy firewall-check ip daddr . meta l4proto . th dport vmap @firewall-ips
-
-		add chain ip kube-proxy reject-chain { comment "helper for @no-endpoint-services / @no-endpoint-nodeports" ; }
-		add rule ip kube-proxy reject-chain reject
-
-		add map ip kube-proxy no-endpoint-services { type ipv4_addr . inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to services with no endpoints" ; }
-		add map ip kube-proxy no-endpoint-nodeports { type inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to service nodeports with no endpoints" ; }
-
-		add map ip kube-proxy service-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "ClusterIP, ExternalIP and LoadBalancer IP traffic" ; }
-		add map ip kube-proxy service-nodeports { type inet_proto . inet_service : verdict ; comment "NodePort traffic" ; }
-		add rule ip kube-proxy services ip daddr . meta l4proto . th dport vmap @service-ips
-		add rule ip kube-proxy services ip daddr @nodeport-ips meta l4proto . th dport vmap @service-nodeports
-		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
-
+	expected := baseRules + dedent.Dedent(`
 		# svc1
 		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
 		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
@@ -3942,60 +3941,6 @@ func TestInternalExternalMasquerade(t *testing.T) {
 func TestSyncProxyRulesRepeated(t *testing.T) {
 	nft, fp := NewFakeProxier(v1.IPv4Protocol)
 
-	baseRules := dedent.Dedent(`
-		add table ip kube-proxy { comment "rules for kube-proxy" ; }
-
-		add chain ip kube-proxy cluster-ips-check
-		add chain ip kube-proxy filter-prerouting { type filter hook prerouting priority -110 ; }
-		add chain ip kube-proxy filter-forward { type filter hook forward priority -110 ; }
-		add chain ip kube-proxy filter-input { type filter hook input priority -110 ; }
-		add chain ip kube-proxy filter-output { type filter hook output priority -110 ; }
-		add chain ip kube-proxy filter-output-post-dnat { type filter hook output priority -90 ; }
-		add chain ip kube-proxy firewall-check
-		add chain ip kube-proxy mark-for-masquerade
-		add chain ip kube-proxy masquerading
-		add chain ip kube-proxy nat-output { type nat hook output priority -100 ; }
-		add chain ip kube-proxy nat-postrouting { type nat hook postrouting priority 100 ; }
-		add chain ip kube-proxy nat-prerouting { type nat hook prerouting priority -100 ; }
-		add chain ip kube-proxy nodeport-endpoints-check
-		add chain ip kube-proxy reject-chain { comment "helper for @no-endpoint-services / @no-endpoint-nodeports" ; }
-		add chain ip kube-proxy services
-		add chain ip kube-proxy service-endpoints-check
-
-		add rule ip kube-proxy cluster-ips-check ip daddr @cluster-ips reject comment "Reject traffic to invalid ports of ClusterIPs"
-		add rule ip kube-proxy cluster-ips-check ip daddr { 172.30.0.0/16 } drop comment "Drop traffic to unallocated ClusterIPs"
-		add rule ip kube-proxy filter-prerouting ct state new jump firewall-check
-		add rule ip kube-proxy filter-forward ct state new jump service-endpoints-check
-		add rule ip kube-proxy filter-forward ct state new jump cluster-ips-check
-		add rule ip kube-proxy filter-input ct state new jump nodeport-endpoints-check
-		add rule ip kube-proxy filter-input ct state new jump service-endpoints-check
-		add rule ip kube-proxy filter-output ct state new jump service-endpoints-check
-		add rule ip kube-proxy filter-output ct state new jump firewall-check
-		add rule ip kube-proxy filter-output-post-dnat ct state new jump cluster-ips-check
-		add rule ip kube-proxy firewall-check ip daddr . meta l4proto . th dport vmap @firewall-ips
-		add rule ip kube-proxy mark-for-masquerade mark set mark or 0x4000
-		add rule ip kube-proxy masquerading mark and 0x4000 == 0 return
-		add rule ip kube-proxy masquerading mark set mark xor 0x4000
-		add rule ip kube-proxy masquerading masquerade fully-random
-		add rule ip kube-proxy nat-output jump services
-		add rule ip kube-proxy nat-postrouting jump masquerading
-		add rule ip kube-proxy nat-prerouting jump services
-		add rule ip kube-proxy nodeport-endpoints-check ip daddr @nodeport-ips meta l4proto . th dport vmap @no-endpoint-nodeports
-		add rule ip kube-proxy reject-chain reject
-		add rule ip kube-proxy services ip daddr . meta l4proto . th dport vmap @service-ips
-		add rule ip kube-proxy services ip daddr @nodeport-ips meta l4proto . th dport vmap @service-nodeports
-		add set ip kube-proxy cluster-ips { type ipv4_addr ; comment "Active ClusterIPs" ; }
-		add set ip kube-proxy nodeport-ips { type ipv4_addr ; comment "IPs that accept NodePort traffic" ; }
-		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
-		add rule ip kube-proxy service-endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services
-
-		add map ip kube-proxy firewall-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "destinations that are subject to LoadBalancerSourceRanges" ; }
-		add map ip kube-proxy no-endpoint-nodeports { type inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to service nodeports with no endpoints" ; }
-		add map ip kube-proxy no-endpoint-services { type ipv4_addr . inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to services with no endpoints" ; }
-		add map ip kube-proxy service-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "ClusterIP, ExternalIP and LoadBalancer IP traffic" ; }
-		add map ip kube-proxy service-nodeports { type inet_proto . inet_service : verdict ; comment "NodePort traffic" ; }
-		`)
-
 	// Helper function to make it look like time has passed (from the point of view of
 	// the stale-chain-deletion code).
 	ageStaleChains := func() {

From f762e5c8deb4377f0ffa6ad16dcac8b7405641ef Mon Sep 17 00:00:00 2001
From: Dan Winship
Date: Thu, 18 Jul 2024 10:54:28 -0400
Subject: [PATCH 2/3] Remove an unnecessary comment in nftables output

(It's redundant with the chain name.)
---
 pkg/proxy/nftables/proxier.go      | 1 -
 pkg/proxy/nftables/proxier_test.go | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/pkg/proxy/nftables/proxier.go b/pkg/proxy/nftables/proxier.go
index 12c5e2fbd96..c4b9aa803fa 100644
--- a/pkg/proxy/nftables/proxier.go
+++ b/pkg/proxy/nftables/proxier.go
@@ -1364,7 +1364,6 @@ func (proxier *Proxier) syncProxyRules() {
 				Value: []string{
 					fmt.Sprintf("goto %s", fwChain),
 				},
-				Comment: &svcPortNameString,
 			})
 		}
 	}
diff --git a/pkg/proxy/nftables/proxier_test.go b/pkg/proxy/nftables/proxier_test.go
index 1442f63ae60..b22ac0023b1 100644
--- a/pkg/proxy/nftables/proxier_test.go
+++ b/pkg/proxy/nftables/proxier_test.go
@@ -445,7 +445,7 @@ func TestOverallNFTablesRules(t *testing.T) {
 		add element ip kube-proxy service-ips { 172.30.0.45 . tcp . 80 : goto service-HVFWP5L3-ns5/svc5/tcp/p80 }
 		add element ip kube-proxy service-ips { 5.6.7.8 . tcp . 80 : goto external-HVFWP5L3-ns5/svc5/tcp/p80 }
 		add element ip kube-proxy service-nodeports { tcp . 3002 : goto external-HVFWP5L3-ns5/svc5/tcp/p80 }
-		add element ip kube-proxy firewall-ips { 5.6.7.8 . tcp . 80 comment "ns5/svc5:p80" : goto firewall-HVFWP5L3-ns5/svc5/tcp/p80 }
+		add element ip kube-proxy firewall-ips { 5.6.7.8 . tcp . 80 : goto firewall-HVFWP5L3-ns5/svc5/tcp/p80 }
 
 		# svc6
 		add element ip kube-proxy cluster-ips { 172.30.0.46 }

From 30bc1b59d70de99412dbe9d8944ab07a3d587463 Mon Sep 17 00:00:00 2001
From: Dan Winship
Date: Thu, 18 Jul 2024 10:55:13 -0400
Subject: [PATCH 3/3] Add unit tests to validate "bad IP/CIDR" handling in kube-proxy

Also, fix the handling of bad EndpointSlice IPs!
---
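A minimal standalone sketch of the normalization the fix relies on
(illustrative only, not part of the patch; it assumes the same
k8s.io/utils/net helper that endpointslicecache.go already imports
as utilnet):

    package main

    import (
    	"fmt"

    	utilnet "k8s.io/utils/net"
    )

    func main() {
    	// ParseIPSloppy tolerates legacy IPv4 octets with leading zeros
    	// and returns a canonical net.IP, so String() yields the
    	// normalized form that iptables-restore and nft expect.
    	for _, bad := range []string{"10.180.00.001", "172.30.0.041", "1.2.3.004"} {
    		fmt.Printf("%s -> %s\n", bad, utilnet.ParseIPSloppy(bad).String())
    	}
    	// Output:
    	// 10.180.00.001 -> 10.180.0.1
    	// 172.30.0.041 -> 172.30.0.41
    	// 1.2.3.004 -> 1.2.3.4
    }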
 pkg/proxy/endpointslicecache.go    |  3 +-
 pkg/proxy/iptables/proxier_test.go | 87 ++++++++++++++++++++++++++++++
 pkg/proxy/nftables/proxier_test.go | 67 +++++++++++++++++++++++
 3 files changed, 156 insertions(+), 1 deletion(-)

diff --git a/pkg/proxy/endpointslicecache.go b/pkg/proxy/endpointslicecache.go
index 089bf5a3710..c967f9f5172 100644
--- a/pkg/proxy/endpointslicecache.go
+++ b/pkg/proxy/endpointslicecache.go
@@ -235,7 +235,8 @@ func (cache *EndpointSliceCache) addEndpoints(svcPortName *ServicePortName, port
 		}
 	}
 
-	endpointInfo := newBaseEndpointInfo(endpoint.Addresses[0], portNum, isLocal,
+	endpointIP := utilnet.ParseIPSloppy(endpoint.Addresses[0]).String()
+	endpointInfo := newBaseEndpointInfo(endpointIP, portNum, isLocal,
 		ready, serving, terminating, zoneHints)
 
 	// This logic ensures we're deduplicating potential overlapping endpoints
diff --git a/pkg/proxy/iptables/proxier_test.go b/pkg/proxy/iptables/proxier_test.go
index 8323922fb1d..3f67fa66930 100644
--- a/pkg/proxy/iptables/proxier_test.go
+++ b/pkg/proxy/iptables/proxier_test.go
@@ -6775,3 +6775,90 @@ func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) {
 		})
 	}
 }
+
+// TestBadIPs tests that "bad" IPs and CIDRs in Services/Endpoints are rewritten to
+// be "good" in the input provided to iptables-restore
+func TestBadIPs(t *testing.T) {
+	ipt := iptablestest.NewFake()
+	fp := NewFakeProxier(ipt)
+	metrics.RegisterMetrics(kubeproxyconfig.ProxyModeIPTables)
+
+	makeServiceMap(fp,
+		makeTestService("ns1", "svc1", func(svc *v1.Service) {
+			svc.Spec.Type = "LoadBalancer"
+			svc.Spec.ClusterIP = "172.30.0.041"
+			svc.Spec.Ports = []v1.ServicePort{{
+				Name:     "p80",
+				Port:     80,
+				Protocol: v1.ProtocolTCP,
+				NodePort: 3001,
+			}}
+			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
+				IP: "1.2.3.004",
+			}}
+			svc.Spec.ExternalIPs = []string{"192.168.099.022"}
+			svc.Spec.LoadBalancerSourceRanges = []string{"203.0.113.000/025"}
+		}),
+	)
+	populateEndpointSlices(fp,
+		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
+			eps.AddressType = discovery.AddressTypeIPv4
+			eps.Endpoints = []discovery.Endpoint{{
+				Addresses: []string{"10.180.00.001"},
+			}}
+			eps.Ports = []discovery.EndpointPort{{
+				Name:     ptr.To("p80"),
+				Port:     ptr.To[int32](80),
+				Protocol: ptr.To(v1.ProtocolTCP),
+			}}
+		}),
+	)
+
+	fp.syncProxyRules()
+
+	expected := dedent.Dedent(`
+		*filter
+		:KUBE-NODEPORTS - [0:0]
+		:KUBE-SERVICES - [0:0]
+		:KUBE-EXTERNAL-SERVICES - [0:0]
+		:KUBE-FIREWALL - [0:0]
+		:KUBE-FORWARD - [0:0]
+		:KUBE-PROXY-FIREWALL - [0:0]
+		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
+		-A KUBE-FORWARD -m conntrack --ctstate INVALID -m nfacct --nfacct-name ct_state_invalid_dropped_pkts -j DROP
+		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
+		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
+		-A KUBE-PROXY-FIREWALL -m comment --comment "ns1/svc1:p80 traffic not accepted by KUBE-FW-XPGD46QRK7WJZT7O" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP
+		COMMIT
+		*nat
+		:KUBE-NODEPORTS - [0:0]
+		:KUBE-SERVICES - [0:0]
+		:KUBE-EXT-XPGD46QRK7WJZT7O - [0:0]
+		:KUBE-FW-XPGD46QRK7WJZT7O - [0:0]
+		:KUBE-MARK-MASQ - [0:0]
+		:KUBE-POSTROUTING - [0:0]
+		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
+		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
+		-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp -d 127.0.0.0/8 --dport 3001 -m nfacct --nfacct-name localhost_nps_accepted_pkts -j KUBE-EXT-XPGD46QRK7WJZT7O
+		-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-XPGD46QRK7WJZT7O
+		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
+		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-EXT-XPGD46QRK7WJZT7O
+		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-XPGD46QRK7WJZT7O
+		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
+		-A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade traffic for ns1/svc1:p80 external destinations" -j KUBE-MARK-MASQ
+		-A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVC-XPGD46QRK7WJZT7O
+		-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-XPGD46QRK7WJZT7O
+		-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "other traffic to ns1/svc1:p80 will be dropped by KUBE-PROXY-FIREWALL"
+		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
+		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
+		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
+		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
+		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
+		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
+		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ
+		COMMIT
+		`)
+
+	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())
+}
diff --git a/pkg/proxy/nftables/proxier_test.go b/pkg/proxy/nftables/proxier_test.go
index b22ac0023b1..937fcaaf1f0 100644
--- a/pkg/proxy/nftables/proxier_test.go
+++ b/pkg/proxy/nftables/proxier_test.go
@@ -4835,3 +4835,70 @@ func TestProxier_OnServiceCIDRsChanged(t *testing.T) {
 	proxier.OnServiceCIDRsChanged([]string{"172.30.0.0/16", "172.50.0.0/16", "fd00:10:96::/112", "fd00:172:30::/112"})
 	assert.Equal(t, proxier.serviceCIDRs, "fd00:10:96::/112,fd00:172:30::/112")
 }
+
+// TestBadIPs tests that "bad" IPs and CIDRs in Services/Endpoints are rewritten to
+// be "good" in the input provided to nft
+func TestBadIPs(t *testing.T) {
+	nft, fp := NewFakeProxier(v1.IPv4Protocol)
+	metrics.RegisterMetrics(kubeproxyconfig.ProxyModeNFTables)
+
+	makeServiceMap(fp,
+		makeTestService("ns1", "svc1", func(svc *v1.Service) {
+			svc.Spec.Type = "LoadBalancer"
+			svc.Spec.ClusterIP = "172.30.0.041"
+			svc.Spec.Ports = []v1.ServicePort{{
+				Name:     "p80",
+				Port:     80,
+				Protocol: v1.ProtocolTCP,
+				NodePort: 3001,
+			}}
+			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
+				IP: "1.2.3.004",
+			}}
+			svc.Spec.ExternalIPs = []string{"192.168.099.022"}
+			svc.Spec.LoadBalancerSourceRanges = []string{"203.0.113.000/025"}
+		}),
+	)
+	populateEndpointSlices(fp,
+		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
+			eps.AddressType = discovery.AddressTypeIPv4
+			eps.Endpoints = []discovery.Endpoint{{
+				Addresses: []string{"10.180.00.001"},
+			}}
+			eps.Ports = []discovery.EndpointPort{{
+				Name:     ptr.To("p80"),
+				Port:     ptr.To[int32](80),
+				Protocol: ptr.To(v1.ProtocolTCP),
+			}}
+		}),
+	)
+
+	fp.syncProxyRules()
+
+	expected := baseRules + dedent.Dedent(`
+		# svc1
+		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
+		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
+		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 }
+
+		add chain ip kube-proxy external-ULMVA6XW-ns1/svc1/tcp/p80
+		add rule ip kube-proxy external-ULMVA6XW-ns1/svc1/tcp/p80 jump mark-for-masquerade
+		add rule ip kube-proxy external-ULMVA6XW-ns1/svc1/tcp/p80 goto service-ULMVA6XW-ns1/svc1/tcp/p80
+
+		add chain ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80
+		add rule ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 ip saddr 10.180.0.1 jump mark-for-masquerade
+		add rule ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 meta l4proto tcp dnat to 10.180.0.1:80
+
+		add chain ip kube-proxy firewall-ULMVA6XW-ns1/svc1/tcp/p80
+		add rule ip kube-proxy firewall-ULMVA6XW-ns1/svc1/tcp/p80 ip saddr != { 203.0.113.0/25 } drop
+
+		add element ip kube-proxy cluster-ips { 172.30.0.41 }
+		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
+		add element ip kube-proxy service-ips { 192.168.99.22 . tcp . 80 : goto external-ULMVA6XW-ns1/svc1/tcp/p80 }
+		add element ip kube-proxy service-ips { 1.2.3.4 . tcp . 80 : goto external-ULMVA6XW-ns1/svc1/tcp/p80 }
+		add element ip kube-proxy service-nodeports { tcp . 3001 : goto external-ULMVA6XW-ns1/svc1/tcp/p80 }
+		add element ip kube-proxy firewall-ips { 1.2.3.4 . tcp . 80 : goto firewall-ULMVA6XW-ns1/svc1/tcp/p80 }
+		`)
+
+	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
+}
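-- 
To exercise just the new tests locally (assuming a kubernetes/kubernetes
checkout with these patches applied):

    go test ./pkg/proxy/iptables/ ./pkg/proxy/nftables/ -run TestBadIPs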