From 791573ddb60121a51079ce4b6419726fc053ddf7 Mon Sep 17 00:00:00 2001 From: Antonio Ojea Date: Thu, 2 Mar 2023 22:18:33 +0000 Subject: [PATCH 1/2] promote ProxyTerminatingEndpoints to GA Change-Id: Ife524c831d905acbc606aa7631e1194f91199938 --- pkg/features/kube_features.go | 3 +- pkg/proxy/iptables/proxier_test.go | 519 +---------------------------- pkg/proxy/ipvs/proxier_test.go | 179 ---------- pkg/proxy/topology.go | 4 +- pkg/proxy/topology_test.go | 39 +-- pkg/proxy/winkernel/proxier.go | 4 +- test/e2e/network/service.go | 10 +- 7 files changed, 20 insertions(+), 738 deletions(-) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 53822723f2b..b955a0aad5d 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -653,6 +653,7 @@ const ( // kep: https://kep.k8s.io/1669 // alpha: v1.22 // beta: v1.26 + // GA: v1.28 // // Enable kube-proxy to handle terminating ednpoints when externalTrafficPolicy=Local ProxyTerminatingEndpoints featuregate.Feature = "ProxyTerminatingEndpoints" @@ -1035,7 +1036,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS ProcMountType: {Default: false, PreRelease: featuregate.Alpha}, - ProxyTerminatingEndpoints: {Default: true, PreRelease: featuregate.Beta}, + ProxyTerminatingEndpoints: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30 QOSReserved: {Default: false, PreRelease: featuregate.Alpha}, diff --git a/pkg/proxy/iptables/proxier_test.go b/pkg/proxy/iptables/proxier_test.go index 0e5e7f8b0d9..2d7f58f2e7f 100644 --- a/pkg/proxy/iptables/proxier_test.go +++ b/pkg/proxy/iptables/proxier_test.go @@ -5833,7 +5833,7 @@ func TestEndpointSliceWithTerminatingEndpointsTrafficPolicyLocal(t *testing.T) { flowTests []packetFlowTest }{ { - name: "feature gate ProxyTerminatingEndpoints enabled, ready endpoints exist", + name: "ready endpoints exist", line: getLine(), terminatingFeatureGate: true, endpointslice: 
&discovery.EndpointSlice{ @@ -5974,148 +5974,7 @@ func TestEndpointSliceWithTerminatingEndpointsTrafficPolicyLocal(t *testing.T) { }, }, { - name: "feature gate ProxyTerminatingEndpoints disabled, ready endpoints exist", - line: getLine(), - terminatingFeatureGate: false, - endpointslice: &discovery.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-1", "svc1"), - Namespace: "ns1", - Labels: map[string]string{discovery.LabelServiceName: "svc1"}, - }, - Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, - }}, - AddressType: discovery.AddressTypeIPv4, - Endpoints: []discovery.Endpoint{ - { - Addresses: []string{"10.0.1.1"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), - }, - NodeName: pointer.String(testHostname), - }, - { - Addresses: []string{"10.0.1.2"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), - }, - NodeName: pointer.String(testHostname), - }, - { - // this endpoint should be ignored since it is not ready and the feature gate is off - Addresses: []string{"10.0.1.3"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), - }, - NodeName: pointer.String(testHostname), - }, - { - // this endpoint should be ignored since it is not ready and the feature gate is off - Addresses: []string{"10.0.1.4"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), - }, - NodeName: pointer.String(testHostname), - }, - { - // this endpoint should be ignored for external since it's not local - Addresses: []string{"10.0.1.5"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - 
Terminating: pointer.Bool(false), - }, - NodeName: pointer.String("host-1"), - }, - }, - }, - expectedIPTables: dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0] - :KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0] - :KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0] - :KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0] - :KUBE-SVL-AQI2S6QIMU7PVVRP - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "pod traffic for ns1/svc1 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment 
--comment "route LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-EXT-AQI2S6QIMU7PVVRP -j KUBE-SVL-AQI2S6QIMU7PVVRP - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80 - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80 - -A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ - -A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! 
-s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --rcheck --seconds 10800 --reap -j KUBE-SEP-3JOIVZTXZZRGORX4 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY - -A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --rcheck --seconds 10800 --reap -j KUBE-SEP-3JOIVZTXZZRGORX4 - -A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ - -A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-3JOIVZTXZZRGORX4 - -A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -j KUBE-SEP-IO5XOSKPAXIFQXAJ - COMMIT - `), - flowTests: []packetFlowTest{ - { - name: "pod to clusterIP", - sourceIP: "10.0.0.2", - destIP: "172.30.1.1", - destPort: 80, - output: "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80", - masq: false, - }, - { - name: "external to LB", - sourceIP: testExternalClient, - destIP: "1.2.3.4", - destPort: 80, - output: "10.0.1.1:80, 10.0.1.2:80", - masq: 
false, - }, - }, - }, - { - name: "feature gate ProxyTerminatingEndpoints enabled, only terminating endpoints exist", + name: "only terminating endpoints exist", line: getLine(), terminatingFeatureGate: true, endpointslice: &discovery.EndpointSlice{ @@ -6244,129 +6103,7 @@ func TestEndpointSliceWithTerminatingEndpointsTrafficPolicyLocal(t *testing.T) { }, }, { - name: "with ProxyTerminatingEndpoints disabled, only non-local and terminating endpoints exist", - line: getLine(), - terminatingFeatureGate: false, - endpointslice: &discovery.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-1", "svc1"), - Namespace: "ns1", - Labels: map[string]string{discovery.LabelServiceName: "svc1"}, - }, - Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, - }}, - AddressType: discovery.AddressTypeIPv4, - Endpoints: []discovery.Endpoint{ - { - Addresses: []string{"10.0.1.1"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), - }, - NodeName: pointer.String(testHostname), - }, - { - Addresses: []string{"10.0.1.2"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), - }, - NodeName: pointer.String(testHostname), - }, - { - Addresses: []string{"10.0.1.3"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), - }, - NodeName: pointer.String(testHostname), - }, - { - Addresses: []string{"10.0.1.4"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), - }, - NodeName: pointer.String(testHostname), - }, - { - Addresses: []string{"10.0.1.5"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - 
Terminating: pointer.Bool(false), - }, - NodeName: pointer.String("host-1"), - }, - }, - }, - expectedIPTables: dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT - -A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1 has no local endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j DROP - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0] - :KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "pod traffic for ns1/svc1 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ - -A 
KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY - COMMIT - `), - flowTests: []packetFlowTest{ - { - name: "pod to clusterIP", - sourceIP: "10.0.0.2", - destIP: "172.30.1.1", - destPort: 80, - output: "10.0.1.5:80", - masq: false, - }, - { - name: "external to LB", - sourceIP: testExternalClient, - destIP: "1.2.3.4", - destPort: 80, - output: "DROP", - }, - }, - }, - { - name: "ProxyTerminatingEndpoints enabled, terminating endpoints on remote node", + name: "terminating endpoints on remote node", line: getLine(), terminatingFeatureGate: true, endpointslice: &discovery.EndpointSlice{ @@ -6541,8 +6278,6 @@ func TestEndpointSliceWithTerminatingEndpointsTrafficPolicyLocal(t *testing.T) { for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProxyTerminatingEndpoints, testcase.terminatingFeatureGate)() - ipt := 
iptablestest.NewFake() fp := NewFakeProxier(ipt) fp.OnServiceSynced() @@ -6630,7 +6365,7 @@ func TestEndpointSliceWithTerminatingEndpointsTrafficPolicyCluster(t *testing.T) flowTests []packetFlowTest }{ { - name: "feature gate ProxyTerminatingEndpoints enabled, ready endpoints exist", + name: "ready endpoints exist", line: getLine(), terminatingFeatureGate: true, endpointslice: &discovery.EndpointSlice{ @@ -6762,139 +6497,7 @@ func TestEndpointSliceWithTerminatingEndpointsTrafficPolicyCluster(t *testing.T) }, }, { - name: "feature gate ProxyTerminatingEndpoints disabled, ready endpoints exist", - line: getLine(), - terminatingFeatureGate: false, - endpointslice: &discovery.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-1", "svc1"), - Namespace: "ns1", - Labels: map[string]string{discovery.LabelServiceName: "svc1"}, - }, - Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, - }}, - AddressType: discovery.AddressTypeIPv4, - Endpoints: []discovery.Endpoint{ - { - Addresses: []string{"10.0.1.1"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), - }, - NodeName: pointer.String(testHostname), - }, - { - Addresses: []string{"10.0.1.2"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), - }, - NodeName: pointer.String(testHostname), - }, - { - // always ignored since feature gate is disabled - Addresses: []string{"10.0.1.3"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), - }, - NodeName: pointer.String("another-host"), - }, - { - // always ignored since serving=false - Addresses: []string{"10.0.1.4"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - 
Terminating: pointer.Bool(true), - }, - NodeName: pointer.String("another-host"), - }, - { - Addresses: []string{"10.0.1.5"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), - }, - NodeName: pointer.String("another-host"), - }, - }, - }, - expectedIPTables: dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - :KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0] - :KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0] - :KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0] - :KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade traffic for ns1/svc1 external destinations" -j KUBE-MARK-MASQ - -A KUBE-EXT-AQI2S6QIMU7PVVRP -j KUBE-SVC-AQI2S6QIMU7PVVRP - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark 
! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - -A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ - -A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80 - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ - -A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80 - -A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ - -A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! 
-s 10.0.0.0/8 -j KUBE-MARK-MASQ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --rcheck --seconds 10800 --reap -j KUBE-SEP-3JOIVZTXZZRGORX4 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4 - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ - -A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY - COMMIT - `), - flowTests: []packetFlowTest{ - { - name: "pod to clusterIP", - sourceIP: "10.0.0.2", - destIP: "172.30.1.1", - destPort: 80, - output: "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80", - masq: false, - }, - { - name: "external to LB", - sourceIP: testExternalClient, - destIP: "1.2.3.4", - destPort: 80, - output: "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80", - masq: true, - }, - }, - }, - { - name: "feature gate ProxyTerminatingEndpoints enabled, only terminating endpoints exist", + name: "only terminating endpoints exist", line: getLine(), terminatingFeatureGate: true, endpointslice: &discovery.EndpointSlice{ @@ -7019,116 +6622,7 @@ func TestEndpointSliceWithTerminatingEndpointsTrafficPolicyCluster(t *testing.T) }, }, { - name: "with ProxyTerminatingEndpoints disabled, only terminating endpoints exist", - line: getLine(), - terminatingFeatureGate: false, - endpointslice: &discovery.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-1", "svc1"), - 
Namespace: "ns1", - Labels: map[string]string{discovery.LabelServiceName: "svc1"}, - }, - Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, - }}, - AddressType: discovery.AddressTypeIPv4, - Endpoints: []discovery.Endpoint{ - { - Addresses: []string{"10.0.1.1"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), - }, - NodeName: pointer.String(testHostname), - }, - { - Addresses: []string{"10.0.1.2"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), - }, - NodeName: pointer.String(testHostname), - }, - { - Addresses: []string{"10.0.1.3"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), - }, - NodeName: pointer.String("another-host"), - }, - { - Addresses: []string{"10.0.1.4"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), - }, - NodeName: pointer.String("another-host"), - }, - { - Addresses: []string{"10.0.1.5"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), - }, - NodeName: pointer.String("another-host"), - }, - }, - }, - noUsableEndpoints: true, - expectedIPTables: dedent.Dedent(` - *filter - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-EXTERNAL-SERVICES - [0:0] - :KUBE-FIREWALL - [0:0] - :KUBE-FORWARD - [0:0] - :KUBE-PROXY-FIREWALL - [0:0] - -A KUBE-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j REJECT - -A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j REJECT - -A KUBE-FIREWALL -m comment --comment "block incoming 
localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP - -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT - -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT - COMMIT - *nat - :KUBE-NODEPORTS - [0:0] - :KUBE-SERVICES - [0:0] - :KUBE-MARK-MASQ - [0:0] - :KUBE-POSTROUTING - [0:0] - -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS - -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000 - -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN - -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000 - -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE - COMMIT - `), - flowTests: []packetFlowTest{ - { - name: "pod to clusterIP", - sourceIP: "10.0.0.2", - destIP: "172.30.1.1", - destPort: 80, - output: "REJECT", - }, - { - name: "external to LB", - sourceIP: testExternalClient, - destIP: "1.2.3.4", - destPort: 80, - output: "REJECT", - }, - }, - }, - { - name: "ProxyTerminatingEndpoints enabled, terminating endpoints on remote node", + name: "terminating endpoints on remote node", line: getLine(), terminatingFeatureGate: true, endpointslice: &discovery.EndpointSlice{ @@ -7299,7 +6793,6 @@ func TestEndpointSliceWithTerminatingEndpointsTrafficPolicyCluster(t *testing.T) for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProxyTerminatingEndpoints, testcase.terminatingFeatureGate)() ipt := iptablestest.NewFake() fp := NewFakeProxier(ipt) diff --git a/pkg/proxy/ipvs/proxier_test.go b/pkg/proxy/ipvs/proxier_test.go index 7899d7412da..ca348c75c68 100644 --- 
a/pkg/proxy/ipvs/proxier_test.go +++ b/pkg/proxy/ipvs/proxier_test.go @@ -36,10 +36,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" - utilfeature "k8s.io/apiserver/pkg/util/feature" - featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/component-base/metrics/testutil" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/proxy/healthcheck" netlinktest "k8s.io/kubernetes/pkg/proxy/ipvs/testing" @@ -4848,7 +4845,6 @@ func TestTestInternalTrafficPolicyE2E(t *testing.T) { // Test_EndpointSliceReadyAndTerminatingCluster tests that when there are ready and ready + terminating // endpoints and the traffic policy is "Cluster", only the ready endpoints are used. func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProxyTerminatingEndpoints, true)() ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() @@ -5025,7 +5021,6 @@ func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) { // Test_EndpointSliceReadyAndTerminatingLocal tests that when there are local ready and ready + terminating // endpoints, only the ready endpoints are used. func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProxyTerminatingEndpoints, true)() ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() @@ -5201,7 +5196,6 @@ func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) { // Test_EndpointSliceOnlyReadyTerminatingCluster tests that when there are only ready terminating // endpoints and the traffic policy is "Cluster", we fall back to terminating endpoints. 
func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProxyTerminatingEndpoints, true)() ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() @@ -5377,7 +5371,6 @@ func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) { // Test_EndpointSliceOnlyReadyTerminatingLocal tests that when there are only local ready terminating // endpoints, we fall back to those endpoints. func Test_EndpointSliceOnlyReadyAndTerminatingLocal(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProxyTerminatingEndpoints, true)() ipt := iptablestest.NewFake() ipvs := ipvstest.NewFake() @@ -5547,178 +5540,6 @@ func Test_EndpointSliceOnlyReadyAndTerminatingLocal(t *testing.T) { assert.Len(t, realServers2, 0, "Expected 0 real servers") } -// Test_EndpointSliceOnlyReadyTerminatingLocalWithFeatureGateDisabled tests that when there are only local ready terminating -// endpoints, we fall back to those endpoints. 
-func Test_EndpointSliceOnlyReadyAndTerminatingLocalWithFeatureGateDisabled(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProxyTerminatingEndpoints, false)() - - ipt := iptablestest.NewFake() - ipvs := ipvstest.NewFake() - ipset := ipsettest.NewFake(testIPSetVersion) - fp := NewFakeProxier(ipt, ipvs, ipset, nil, nil, v1.IPv4Protocol) - fp.servicesSynced = true - // fp.endpointsSynced = true - fp.endpointSlicesSynced = true - - clusterInternalTrafficPolicy := v1.ServiceInternalTrafficPolicyCluster - - // Add initial service - serviceName := "svc1" - namespaceName := "ns1" - fp.OnServiceAdd(&v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName}, - Spec: v1.ServiceSpec{ - ClusterIP: "172.20.1.1", - Selector: map[string]string{"foo": "bar"}, - Type: v1.ServiceTypeNodePort, - ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal, - InternalTrafficPolicy: &clusterInternalTrafficPolicy, - ExternalIPs: []string{ - "1.2.3.4", - }, - Ports: []v1.ServicePort{ - { - Name: "", - Port: 80, - TargetPort: intstr.FromInt32(80), - Protocol: v1.ProtocolTCP, - }, - }, - }, - }) - - // Add initial endpoint slice - tcpProtocol := v1.ProtocolTCP - endpointSlice := &discovery.EndpointSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-1", serviceName), - Namespace: namespaceName, - Labels: map[string]string{discovery.LabelServiceName: serviceName}, - }, - Ports: []discovery.EndpointPort{{ - Name: pointer.String(""), - Port: pointer.Int32(80), - Protocol: &tcpProtocol, - }}, - AddressType: discovery.AddressTypeIPv4, - Endpoints: []discovery.Endpoint{ - { - Addresses: []string{"10.0.1.1"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), - }, - NodeName: pointer.String(testHostname), - }, - { - Addresses: []string{"10.0.1.2"}, - Conditions: discovery.EndpointConditions{ - Ready: 
pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), - }, - NodeName: pointer.String(testHostname), - }, - { - Addresses: []string{"10.0.1.3"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(false), - Terminating: pointer.Bool(true), - }, - NodeName: pointer.String(testHostname), - }, - { - Addresses: []string{"10.0.1.4"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(false), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(true), - }, - NodeName: pointer.String("another-host"), - }, - { - Addresses: []string{"10.0.1.5"}, - Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), - }, - NodeName: pointer.String("another-host"), - }, - }, - } - - fp.OnEndpointSliceAdd(endpointSlice) - fp.syncProxyRules() - - // Ensure that Proxier updates ipvs appropriately after EndpointSlice update - assert.NotNil(t, fp.ipsetList["KUBE-LOOP-BACK"]) - activeEntries1 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries - assert.Equal(t, 3, activeEntries1.Len(), "Expected 3 active entry in KUBE-LOOP-BACK") - assert.Equal(t, true, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod") - assert.Equal(t, true, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod") - assert.Equal(t, true, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference second (local) pod") - - virtualServers, vsErr := ipvs.GetVirtualServers() - assert.Nil(t, vsErr, "Expected no error getting virtual servers") - assert.Len(t, virtualServers, 2, "Expected 2 virtual server") - - var clusterIPServer, externalIPServer *utilipvs.VirtualServer - for _, virtualServer := range virtualServers { - if virtualServer.Address.String() == "172.20.1.1" { - clusterIPServer = virtualServer - } - - if 
virtualServer.Address.String() == "1.2.3.4" { - externalIPServer = virtualServer - } - } - - // clusterIP should route to cluster-wide Ready endpoints - realServers1, rsErr1 := ipvs.GetRealServers(clusterIPServer) - assert.Nil(t, rsErr1, "Expected no error getting real servers") - assert.Len(t, realServers1, 1, "Expected 1 real servers") - assert.Equal(t, realServers1[0].String(), "10.0.1.5:80") - - // externalIP should have 1 (remote) endpoint since the feature gate is disabled. - realServers2, rsErr2 := ipvs.GetRealServers(externalIPServer) - assert.Nil(t, rsErr2, "Expected no error getting real servers") - assert.Len(t, realServers2, 1, "Expected 0 real servers") - assert.Equal(t, realServers2[0].String(), "10.0.1.5:80") - - fp.OnEndpointSliceDelete(endpointSlice) - fp.syncProxyRules() - - // Ensure that Proxier updates ipvs appropriately after EndpointSlice delete - assert.NotNil(t, fp.ipsetList["KUBE-LOOP-BACK"]) - activeEntries2 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries - assert.Equal(t, 0, activeEntries2.Len(), "Expected 0 active entries in KUBE-LOOP-BACK") - - virtualServers, vsErr = ipvs.GetVirtualServers() - assert.Nil(t, vsErr, "Expected no error getting virtual servers") - assert.Len(t, virtualServers, 2, "Expected 2 virtual server") - - for _, virtualServer := range virtualServers { - if virtualServer.Address.String() == "172.20.1.1" { - clusterIPServer = virtualServer - } - - if virtualServer.Address.String() == "1.2.3.4" { - externalIPServer = virtualServer - } - } - - realServers1, rsErr1 = ipvs.GetRealServers(clusterIPServer) - assert.Nil(t, rsErr1, "Expected no error getting real servers") - assert.Len(t, realServers1, 0, "Expected 0 real servers") - - realServers2, rsErr2 = ipvs.GetRealServers(externalIPServer) - assert.Nil(t, rsErr2, "Expected no error getting real servers") - assert.Len(t, realServers2, 0, "Expected 0 real servers") -} - func TestIpIsValidForSet(t *testing.T) { testCases := []struct { isIPv6 bool diff --git 
a/pkg/proxy/topology.go b/pkg/proxy/topology.go index e8248a93a7e..52f539b659f 100644 --- a/pkg/proxy/topology.go +++ b/pkg/proxy/topology.go @@ -55,7 +55,7 @@ func CategorizeEndpoints(endpoints []Endpoint, svcInfo ServicePort, nodeLabels m // if there are 0 cluster-wide endpoints, we can try to fallback to any terminating endpoints that are ready. // When falling back to terminating endpoints, we do NOT consider topology aware routing since this is a best // effort attempt to avoid dropping connections. - if len(clusterEndpoints) == 0 && utilfeature.DefaultFeatureGate.Enabled(features.ProxyTerminatingEndpoints) { + if len(clusterEndpoints) == 0 { clusterEndpoints = filterEndpoints(endpoints, func(ep Endpoint) bool { if ep.IsServing() && ep.IsTerminating() { return true @@ -87,7 +87,7 @@ func CategorizeEndpoints(endpoints []Endpoint, svcInfo ServicePort, nodeLabels m if ep.GetIsLocal() { hasLocalReadyEndpoints = true } - } else if ep.IsServing() && ep.IsTerminating() && utilfeature.DefaultFeatureGate.Enabled(features.ProxyTerminatingEndpoints) { + } else if ep.IsServing() && ep.IsTerminating() { hasAnyEndpoints = true if ep.GetIsLocal() { hasLocalServingTerminatingEndpoints = true diff --git a/pkg/proxy/topology_test.go b/pkg/proxy/topology_test.go index ad375c28e15..41fbcfb775c 100644 --- a/pkg/proxy/topology_test.go +++ b/pkg/proxy/topology_test.go @@ -359,7 +359,7 @@ func TestCategorizeEndpoints(t *testing.T) { clusterEndpoints: sets.NewString("10.0.0.1:80"), localEndpoints: nil, }, { - name: "Cluster traffic policy, PTE enabled, all endpoints are terminating", + name: "Cluster traffic policy, all endpoints are terminating", pteEnabled: true, serviceInfo: &BaseServicePortInfo{}, endpoints: []Endpoint{ @@ -368,16 +368,6 @@ func TestCategorizeEndpoints(t *testing.T) { }, clusterEndpoints: sets.NewString("10.0.0.0:80", "10.0.0.1:80"), localEndpoints: nil, - }, { - name: "Cluster traffic policy, PTE disabled, all endpoints are terminating", - pteEnabled: false, - 
serviceInfo: &BaseServicePortInfo{}, - endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", Ready: false, Serving: true, Terminating: true, IsLocal: true}, - &BaseEndpointInfo{Endpoint: "10.0.0.1:80", Ready: false, Serving: true, Terminating: true, IsLocal: false}, - }, - clusterEndpoints: sets.NewString(), - localEndpoints: nil, }, { name: "iTP: Local, eTP: Cluster, some endpoints local", serviceInfo: &BaseServicePortInfo{internalPolicyLocal: true, externalPolicyLocal: false, nodePort: 8080}, @@ -419,17 +409,7 @@ func TestCategorizeEndpoints(t *testing.T) { localEndpoints: sets.NewString(), allEndpoints: sets.NewString("10.0.0.0:80", "10.0.0.1:80"), }, { - name: "iTP: Local, eTP: Local, PTE disabled, all endpoints remote and terminating", - serviceInfo: &BaseServicePortInfo{internalPolicyLocal: true, externalPolicyLocal: true, nodePort: 8080}, - endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", Ready: false, Serving: true, Terminating: true, IsLocal: false}, - &BaseEndpointInfo{Endpoint: "10.0.0.1:80", Ready: false, Serving: true, Terminating: true, IsLocal: false}, - }, - clusterEndpoints: sets.NewString(), - localEndpoints: sets.NewString(), - allEndpoints: sets.NewString(), - }, { - name: "iTP: Local, eTP: Local, PTE enabled, all endpoints remote and terminating", + name: "iTP: Local, eTP: Local, all endpoints remote and terminating", pteEnabled: true, serviceInfo: &BaseServicePortInfo{internalPolicyLocal: true, externalPolicyLocal: true, nodePort: 8080}, endpoints: []Endpoint{ @@ -441,19 +421,7 @@ func TestCategorizeEndpoints(t *testing.T) { allEndpoints: sets.NewString("10.0.0.0:80", "10.0.0.1:80"), onlyRemoteEndpoints: true, }, { - name: "iTP: Cluster, eTP: Local, PTE disabled, with terminating endpoints", - serviceInfo: &BaseServicePortInfo{internalPolicyLocal: false, externalPolicyLocal: true, nodePort: 8080}, - endpoints: []Endpoint{ - &BaseEndpointInfo{Endpoint: "10.0.0.0:80", Ready: true, IsLocal: false}, - 
&BaseEndpointInfo{Endpoint: "10.0.0.1:80", Ready: false, Serving: false, IsLocal: true}, - &BaseEndpointInfo{Endpoint: "10.0.0.2:80", Ready: false, Serving: true, Terminating: true, IsLocal: true}, - &BaseEndpointInfo{Endpoint: "10.0.0.3:80", Ready: false, Serving: true, Terminating: true, IsLocal: false}, - }, - clusterEndpoints: sets.NewString("10.0.0.0:80"), - localEndpoints: sets.NewString(), - allEndpoints: sets.NewString("10.0.0.0:80"), - }, { - name: "iTP: Cluster, eTP: Local, PTE enabled, with terminating endpoints", + name: "iTP: Cluster, eTP: Local, with terminating endpoints", pteEnabled: true, serviceInfo: &BaseServicePortInfo{internalPolicyLocal: false, externalPolicyLocal: true, nodePort: 8080}, endpoints: []Endpoint{ @@ -490,7 +458,6 @@ func TestCategorizeEndpoints(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TopologyAwareHints, tc.hintsEnabled)() - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProxyTerminatingEndpoints, tc.pteEnabled)() clusterEndpoints, localEndpoints, allEndpoints, hasAnyEndpoints := CategorizeEndpoints(tc.endpoints, tc.serviceInfo, tc.nodeLabels) diff --git a/pkg/proxy/winkernel/proxier.go b/pkg/proxy/winkernel/proxier.go index e281796f223..063bc8ba48b 100644 --- a/pkg/proxy/winkernel/proxier.go +++ b/pkg/proxy/winkernel/proxier.go @@ -1255,7 +1255,7 @@ func (proxier *Proxier) syncProxyRules() { var allEndpointsTerminating, allEndpointsNonServing bool someEndpointsServing := true - if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.ProxyTerminatingEndpoints) && len(svcInfo.loadBalancerIngressIPs) > 0 { + if len(svcInfo.loadBalancerIngressIPs) > 0 { // Check should be done only if comes under the feature gate or enabled // The check should be done only if Spec.Type == Loadbalancer. 
allEndpointsTerminating = proxier.isAllEndpointsTerminating(svcName, svcInfo.localTrafficDSR) @@ -1263,7 +1263,7 @@ func (proxier *Proxier) syncProxyRules() { someEndpointsServing = !allEndpointsNonServing klog.V(4).InfoS("Terminating status checked for all endpoints", "svcClusterIP", svcInfo.ClusterIP(), "allEndpointsTerminating", allEndpointsTerminating, "allEndpointsNonServing", allEndpointsNonServing, "localTrafficDSR", svcInfo.localTrafficDSR) } else { - klog.V(4).InfoS("Skipped terminating status check for all endpoints", "svcClusterIP", svcInfo.ClusterIP(), "proxyEndpointsFeatureGateEnabled", utilfeature.DefaultFeatureGate.Enabled(kubefeatures.ProxyTerminatingEndpoints), "ingressLBCount", len(svcInfo.loadBalancerIngressIPs)) + klog.V(4).InfoS("Skipped terminating status check for all endpoints", "svcClusterIP", svcInfo.ClusterIP(), "ingressLBCount", len(svcInfo.loadBalancerIngressIPs)) } for _, epInfo := range proxier.endpointsMap[svcName] { diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 83d51f452ff..7808c58d3bd 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -2705,7 +2705,7 @@ var _ = common.SIGDescribe("Services", func() { } }) - ginkgo.It("should fail health check node port if there are only terminating endpoints [Feature:ProxyTerminatingEndpoints]", func(ctx context.Context) { + ginkgo.It("should fail health check node port if there are only terminating endpoints", func(ctx context.Context) { // windows kube-proxy does not support this feature yet e2eskipper.SkipIfNodeOSDistroIs("windows") @@ -2794,7 +2794,7 @@ var _ = common.SIGDescribe("Services", func() { execHostnameTest(*pausePod0, nodePortAddress, webserverPod0.Name) }) - ginkgo.It("should fallback to terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Cluster [Feature:ProxyTerminatingEndpoints]", func(ctx context.Context) { + ginkgo.It("should fallback to terminating endpoints when there are no ready 
endpoints with internalTrafficPolicy=Cluster", func(ctx context.Context) { // windows kube-proxy does not support this feature yet e2eskipper.SkipIfNodeOSDistroIs("windows") @@ -2867,7 +2867,7 @@ var _ = common.SIGDescribe("Services", func() { } }) - ginkgo.It("should fallback to local terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Local [Feature:ProxyTerminatingEndpoints]", func(ctx context.Context) { + ginkgo.It("should fallback to local terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Local", func(ctx context.Context) { // windows kube-proxy does not support this feature yet e2eskipper.SkipIfNodeOSDistroIs("windows") @@ -2945,7 +2945,7 @@ var _ = common.SIGDescribe("Services", func() { } }) - ginkgo.It("should fallback to terminating endpoints when there are no ready endpoints with externallTrafficPolicy=Cluster [Feature:ProxyTerminatingEndpoints]", func(ctx context.Context) { + ginkgo.It("should fallback to terminating endpoints when there are no ready endpoints with externallTrafficPolicy=Cluster", func(ctx context.Context) { // windows kube-proxy does not support this feature yet e2eskipper.SkipIfNodeOSDistroIs("windows") @@ -3020,7 +3020,7 @@ var _ = common.SIGDescribe("Services", func() { } }) - ginkgo.It("should fallback to local terminating endpoints when there are no ready endpoints with externalTrafficPolicy=Local [Feature:ProxyTerminatingEndpoints]", func(ctx context.Context) { + ginkgo.It("should fallback to local terminating endpoints when there are no ready endpoints with externalTrafficPolicy=Local", func(ctx context.Context) { // windows kube-proxy does not support this feature yet e2eskipper.SkipIfNodeOSDistroIs("windows") From b849ff57b850329331c439c67da61e24591bb7ad Mon Sep 17 00:00:00 2001 From: Antonio Ojea Date: Thu, 4 May 2023 13:04:52 +0000 Subject: [PATCH 2/2] e2e reasonable grace termination period timeout The existing termination period of 600 seconds for pods on the 
e2e test causes those pods to be kept running after the test has finished. 100 seconds is a good compromise to avoid leaving pods lingering and more than enough for the test to finish. Change-Id: I993162a77125345df1829044dc2514e03b13a407 --- test/e2e/network/service.go | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 7808c58d3bd..84c8bad8356 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -1765,7 +1765,7 @@ var _ = common.SIGDescribe("Services", func() { t.Name = "slow-terminating-unready-pod" t.Image = imageutils.GetE2EImage(imageutils.Agnhost) port := 80 - terminateSeconds := int64(600) + terminateSeconds := int64(100) service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -1987,7 +1987,7 @@ var _ = common.SIGDescribe("Services", func() { } // webserver should continue to serve traffic through the Service after delete since: - // - it has a 600s termination grace period + // - it has a 100s termination grace period // - it is unready but PublishNotReadyAddresses is true err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) @@ -2106,7 +2106,7 @@ var _ = common.SIGDescribe("Services", func() { } // webserver should stop to serve traffic through the Service after delete since: - // - it has a 600s termination grace period + // - it has a 100s termination grace period // - it is unready but PublishNotReadyAddresses is false err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) @@ -2733,9 +2733,9 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(err) ginkgo.By("Creating 1 webserver pod to be part of the TCP service") - webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "600") + 
webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "100") webserverPod0.Labels = jig.Labels - webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(600) + webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(100) e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name}) _, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{}) @@ -2821,9 +2821,9 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(err) ginkgo.By("Creating 1 webserver pod to be part of the TCP service") - webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "600") + webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "100") webserverPod0.Labels = jig.Labels - webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(600) + webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(100) e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name}) _, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{}) @@ -2847,7 +2847,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) // webserver should continue to serve traffic through the Service after delete since: - // - it has a 600s termination grace period + // - it has a 100s termination grace period // - it is the only ready endpoint err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) @@ -2896,9 +2896,9 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(err) 
ginkgo.By("Creating 1 webserver pod to be part of the TCP service") - webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "600") + webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "100") webserverPod0.Labels = jig.Labels - webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(600) + webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(100) e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name}) _, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{}) @@ -2922,7 +2922,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) // webserver should continue to serve traffic through the Service after delete since: - // - it has a 600s termination grace period + // - it has a 100s termination grace period // - it is the only ready endpoint err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) @@ -2973,9 +2973,9 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(err) ginkgo.By("Creating 1 webserver pod to be part of the TCP service") - webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "600") + webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "100") webserverPod0.Labels = jig.Labels - webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(600) + webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(100) e2epod.SetNodeSelection(&webserverPod0.Spec, 
e2epod.NodeSelection{Name: node0.Name}) _, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{}) @@ -2999,7 +2999,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) // webserver should continue to serve traffic through the Service after delete since: - // - it has a 600s termination grace period + // - it has a 100s termination grace period // - it is the only ready endpoint err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, metav1.DeleteOptions{}) framework.ExpectNoError(err) @@ -3049,9 +3049,9 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(err) ginkgo.By("Creating 1 webserver pod to be part of the TCP service") - webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "600") + webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "100") webserverPod0.Labels = jig.Labels - webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(600) + webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(100) e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name}) _, err = cs.CoreV1().Pods(ns).Create(ctx, webserverPod0, metav1.CreateOptions{}) @@ -3075,7 +3075,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout)) // webserver should continue to serve traffic through the Service after delete since: - // - it has a 600s termination grace period + // - it has a 100s termination grace period // - it is the only ready endpoint err = cs.CoreV1().Pods(ns).Delete(ctx, webserverPod0.Name, 
metav1.DeleteOptions{}) framework.ExpectNoError(err)