diff --git a/pkg/proxy/endpointslicecache.go b/pkg/proxy/endpointslicecache.go
index 089bf5a3710..c967f9f5172 100644
--- a/pkg/proxy/endpointslicecache.go
+++ b/pkg/proxy/endpointslicecache.go
@@ -235,7 +235,8 @@ func (cache *EndpointSliceCache) addEndpoints(svcPortName *ServicePortName, port
 		}
 	}
 
-	endpointInfo := newBaseEndpointInfo(endpoint.Addresses[0], portNum, isLocal,
+	endpointIP := utilnet.ParseIPSloppy(endpoint.Addresses[0]).String()
+	endpointInfo := newBaseEndpointInfo(endpointIP, portNum, isLocal,
 		ready, serving, terminating, zoneHints)
 
 	// This logic ensures we're deduplicating potential overlapping endpoints
diff --git a/pkg/proxy/iptables/proxier_test.go b/pkg/proxy/iptables/proxier_test.go
index 8323922fb1d..3f67fa66930 100644
--- a/pkg/proxy/iptables/proxier_test.go
+++ b/pkg/proxy/iptables/proxier_test.go
@@ -6775,3 +6775,90 @@ func TestLoadBalancerIngressRouteTypeProxy(t *testing.T) {
 		})
 	}
 }
+
+// TestBadIPs tests that "bad" IPs and CIDRs in Services/Endpoints are rewritten to
+// be "good" in the input provided to iptables-restore
+func TestBadIPs(t *testing.T) {
+	ipt := iptablestest.NewFake()
+	fp := NewFakeProxier(ipt)
+	metrics.RegisterMetrics(kubeproxyconfig.ProxyModeIPTables)
+
+	makeServiceMap(fp,
+		makeTestService("ns1", "svc1", func(svc *v1.Service) {
+			svc.Spec.Type = "LoadBalancer"
+			svc.Spec.ClusterIP = "172.30.0.041"
+			svc.Spec.Ports = []v1.ServicePort{{
+				Name:     "p80",
+				Port:     80,
+				Protocol: v1.ProtocolTCP,
+				NodePort: 3001,
+			}}
+			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
+				IP: "1.2.3.004",
+			}}
+			svc.Spec.ExternalIPs = []string{"192.168.099.022"}
+			svc.Spec.LoadBalancerSourceRanges = []string{"203.0.113.000/025"}
+		}),
+	)
+	populateEndpointSlices(fp,
+		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
+			eps.AddressType = discovery.AddressTypeIPv4
+			eps.Endpoints = []discovery.Endpoint{{
+				Addresses: []string{"10.180.00.001"},
+			}}
+			eps.Ports = []discovery.EndpointPort{{
+				Name:     ptr.To("p80"),
+				Port:     ptr.To[int32](80),
+				Protocol: ptr.To(v1.ProtocolTCP),
+			}}
+		}),
+	)
+
+	fp.syncProxyRules()
+
+	expected := dedent.Dedent(`
+		*filter
+		:KUBE-NODEPORTS - [0:0]
+		:KUBE-SERVICES - [0:0]
+		:KUBE-EXTERNAL-SERVICES - [0:0]
+		:KUBE-FIREWALL - [0:0]
+		:KUBE-FORWARD - [0:0]
+		:KUBE-PROXY-FIREWALL - [0:0]
+		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
+		-A KUBE-FORWARD -m conntrack --ctstate INVALID -m nfacct --nfacct-name ct_state_invalid_dropped_pkts -j DROP
+		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
+		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
+		-A KUBE-PROXY-FIREWALL -m comment --comment "ns1/svc1:p80 traffic not accepted by KUBE-FW-XPGD46QRK7WJZT7O" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP
+		COMMIT
+		*nat
+		:KUBE-NODEPORTS - [0:0]
+		:KUBE-SERVICES - [0:0]
+		:KUBE-EXT-XPGD46QRK7WJZT7O - [0:0]
+		:KUBE-FW-XPGD46QRK7WJZT7O - [0:0]
+		:KUBE-MARK-MASQ - [0:0]
+		:KUBE-POSTROUTING - [0:0]
+		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
+		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
+		-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp -d 127.0.0.0/8 --dport 3001 -m nfacct --nfacct-name localhost_nps_accepted_pkts -j KUBE-EXT-XPGD46QRK7WJZT7O
+		-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-XPGD46QRK7WJZT7O
+		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
+		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-EXT-XPGD46QRK7WJZT7O
+		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-XPGD46QRK7WJZT7O
+		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
+		-A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade traffic for ns1/svc1:p80 external destinations" -j KUBE-MARK-MASQ
+		-A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVC-XPGD46QRK7WJZT7O
+		-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-XPGD46QRK7WJZT7O
+		-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "other traffic to ns1/svc1:p80 will be dropped by KUBE-PROXY-FIREWALL"
+		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
+		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
+		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
+		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
+		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
+		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
+		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ
+		COMMIT
+		`)
+
+	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())
+}
diff --git a/pkg/proxy/nftables/proxier_test.go b/pkg/proxy/nftables/proxier_test.go
index b22ac0023b1..937fcaaf1f0 100644
--- a/pkg/proxy/nftables/proxier_test.go
+++ b/pkg/proxy/nftables/proxier_test.go
@@ -4835,3 +4835,70 @@ func TestProxier_OnServiceCIDRsChanged(t *testing.T) {
 	proxier.OnServiceCIDRsChanged([]string{"172.30.0.0/16", "172.50.0.0/16", "fd00:10:96::/112", "fd00:172:30::/112"})
 	assert.Equal(t, proxier.serviceCIDRs, "fd00:10:96::/112,fd00:172:30::/112")
 }
+
+// TestBadIPs tests that "bad" IPs and CIDRs in Services/Endpoints are rewritten to
+// be "good" in the input provided to nft
+func TestBadIPs(t *testing.T) {
+	nft, fp := NewFakeProxier(v1.IPv4Protocol)
+	metrics.RegisterMetrics(kubeproxyconfig.ProxyModeNFTables)
+
+	makeServiceMap(fp,
+		makeTestService("ns1", "svc1", func(svc *v1.Service) {
+			svc.Spec.Type = "LoadBalancer"
+			svc.Spec.ClusterIP = "172.30.0.041"
+			svc.Spec.Ports = []v1.ServicePort{{
+				Name:     "p80",
+				Port:     80,
+				Protocol: v1.ProtocolTCP,
+				NodePort: 3001,
+			}}
+			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
+				IP: "1.2.3.004",
+			}}
+			svc.Spec.ExternalIPs = []string{"192.168.099.022"}
+			svc.Spec.LoadBalancerSourceRanges = []string{"203.0.113.000/025"}
+		}),
+	)
+	populateEndpointSlices(fp,
+		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
+			eps.AddressType = discovery.AddressTypeIPv4
+			eps.Endpoints = []discovery.Endpoint{{
+				Addresses: []string{"10.180.00.001"},
+			}}
+			eps.Ports = []discovery.EndpointPort{{
+				Name:     ptr.To("p80"),
+				Port:     ptr.To[int32](80),
+				Protocol: ptr.To(v1.ProtocolTCP),
+			}}
+		}),
+	)
+
+	fp.syncProxyRules()
+
+	expected := baseRules + dedent.Dedent(`
+		# svc1
+		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
+		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 ip daddr 172.30.0.41 tcp dport 80 ip saddr != 10.0.0.0/8 jump mark-for-masquerade
+		add rule ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80 numgen random mod 1 vmap { 0 : goto endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 }
+
+		add chain ip kube-proxy external-ULMVA6XW-ns1/svc1/tcp/p80
+		add rule ip kube-proxy external-ULMVA6XW-ns1/svc1/tcp/p80 jump mark-for-masquerade
+		add rule ip kube-proxy external-ULMVA6XW-ns1/svc1/tcp/p80 goto service-ULMVA6XW-ns1/svc1/tcp/p80
+
+		add chain ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80
+		add rule ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 ip saddr 10.180.0.1 jump mark-for-masquerade
+		add rule ip kube-proxy endpoint-5OJB2KTY-ns1/svc1/tcp/p80__10.180.0.1/80 meta l4proto tcp dnat to 10.180.0.1:80
+
+		add chain ip kube-proxy firewall-ULMVA6XW-ns1/svc1/tcp/p80
+		add rule ip kube-proxy firewall-ULMVA6XW-ns1/svc1/tcp/p80 ip saddr != { 203.0.113.0/25 } drop
+
+		add element ip kube-proxy cluster-ips { 172.30.0.41 }
+		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
+		add element ip kube-proxy service-ips { 192.168.99.22 . tcp . 80 : goto external-ULMVA6XW-ns1/svc1/tcp/p80 }
+		add element ip kube-proxy service-ips { 1.2.3.4 . tcp . 80 : goto external-ULMVA6XW-ns1/svc1/tcp/p80 }
+		add element ip kube-proxy service-nodeports { tcp . 3001 : goto external-ULMVA6XW-ns1/svc1/tcp/p80 }
+		add element ip kube-proxy firewall-ips { 1.2.3.4 . tcp . 80 : goto firewall-ULMVA6XW-ns1/svc1/tcp/p80 }
+		`)
+
+	assertNFTablesTransactionEqual(t, getLine(), expected, nft.Dump())
+}
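
For reviewers, a minimal standalone sketch (not part of the patch) of the normalization behavior the new tests depend on. It assumes only k8s.io/utils/net, the utilnet package the endpointslicecache.go hunk calls into; ParseCIDRSloppy is shown for the LoadBalancerSourceRanges case even though this patch only changes the endpoint path, and the file/package names are illustrative.

// badips_example.go: hypothetical illustration, not part of the change above.
package main

import (
	"fmt"

	utilnet "k8s.io/utils/net"
)

func main() {
	// Legacy-style IPv4 addresses with leading zeros are parsed leniently
	// (components read as decimal, matching pre-Go-1.17 net.ParseIP) and
	// re-serialized in canonical form by String().
	for _, addr := range []string{"10.180.00.001", "172.30.0.041", "192.168.099.022", "1.2.3.004"} {
		fmt.Printf("%s -> %s\n", addr, utilnet.ParseIPSloppy(addr).String())
	}
	// Canonical forms, matching the iptables/nftables output in the tests:
	// 10.180.0.1, 172.30.0.41, 192.168.99.22, 1.2.3.4

	// CIDRs such as a LoadBalancerSourceRange get the analogous treatment.
	if _, cidr, err := utilnet.ParseCIDRSloppy("203.0.113.000/025"); err == nil {
		fmt.Println(cidr.String()) // 203.0.113.0/25
	}
}

The point of the sloppy parsers is that objects accepted by older API validation still resolve, while String() guarantees the value handed to iptables-restore or nft is in canonical form, which is what the expected rule dumps in both tests assert.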