Belatedly clean up some "Endpoints" vs "EndpointSlice" distinctions in the unit tests

Remove "EndpointSlice" from some unit test names, because they don't
need to clarify that they use EndpointSlices now, because all of the
tests use EndpointSlices now.

Likewise, remove TestEndpointSliceE2E entirely; it was originally an
EndpointSlice version of one of the other tests, but the other test
uses EndpointSlices now too.
Dan Winship 2023-06-26 07:23:44 -04:00
parent da2d500c80
commit 4835d9e137


@@ -5080,109 +5080,6 @@ func TestUpdateEndpointsMap(t *testing.T) {
	}
}
// The majority of EndpointSlice specific tests are not iptables specific and focus on
// the shared EndpointChangeTracker and EndpointSliceCache. This test ensures that the
// iptables proxier supports translating EndpointSlices to iptables output.
func TestEndpointSliceE2E(t *testing.T) {
	expectedIPTablesWithSlice := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FIREWALL - [0:0]
		:KUBE-FORWARD - [0:0]
		:KUBE-PROXY-FIREWALL - [0:0]
		-A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" -d 127.0.0.0/8 ! -s 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
		:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
		:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
		:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
		-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 0 -j KUBE-SVC-AQI2S6QIMU7PVVRP
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
		-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ
		-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
		-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3 -j KUBE-MARK-MASQ
		-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
		-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 0 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
		-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
		-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.3:80" -j KUBE-SEP-XGJFVO3L2O5SRFNT
		COMMIT
		`)
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.OnServiceSynced()
	fp.OnEndpointSlicesSynced()
	serviceName := "svc1"
	namespaceName := "ns1"
	fp.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
		Spec: v1.ServiceSpec{
			ClusterIP: "172.30.1.1",
			Selector:  map[string]string{"foo": "bar"},
			Ports:     []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt32(80), Protocol: v1.ProtocolTCP}},
		},
	})
	tcpProtocol := v1.ProtocolTCP
	endpointSlice := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     pointer.String(""),
			Port:     pointer.Int32(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses:  []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)},
			NodeName:   pointer.String(testHostname),
		}, {
			Addresses:  []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)},
			NodeName:   pointer.String("node2"),
		}, {
			Addresses:  []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)},
			NodeName:   pointer.String("node3"),
		}, {
			Addresses:  []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{Ready: pointer.Bool(false)},
			NodeName:   pointer.String("node4"),
		}},
	}
	fp.OnEndpointSliceAdd(endpointSlice)
	fp.syncProxyRules()
	assertIPTablesRulesEqual(t, getLine(), true, expectedIPTablesWithSlice, fp.iptablesData.String())
	fp.OnEndpointSliceDelete(endpointSlice)
	fp.syncProxyRules()
	assertIPTablesRulesNotEqual(t, getLine(), expectedIPTablesWithSlice, fp.iptablesData.String())
}
// TestHealthCheckNodePortWhenTerminating tests that health check node ports are not enabled when all local endpoints are terminating
func TestHealthCheckNodePortWhenTerminating(t *testing.T) {
	ipt := iptablestest.NewFake()
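
The --probability values in the removed expected output above (0.3333333333, then 0.5000000000, then an unconditional jump) are how kube-proxy spreads traffic evenly across several endpoints: each KUBE-SEP rule only sees the traffic that fell through the rules before it, so the i-th rule must claim 1/(n-i) of what remains, and the last rule needs no statistic match at all. A minimal, self-contained sketch of that arithmetic (sepProbabilities is a hypothetical helper for illustration, not the proxier's own code):

package main

import "fmt"

// sepProbabilities sketches the per-rule --probability values that give a
// uniform 1/n split across n endpoints: rule i only sees traffic that fell
// through the earlier rules, so it must match 1/(n-i) of what remains.
// (Hypothetical helper for illustration; not the proxier's implementation.)
func sepProbabilities(n int) []float64 {
	probs := make([]float64, n)
	for i := 0; i < n; i++ {
		probs[i] = 1.0 / float64(n-i)
	}
	return probs
}

func main() {
	// For the three ready endpoints in the removed test this prints
	// 0.3333333333, 0.5000000000 and 1.0000000000; the first two match the
	// --probability values in the expected rules, and the final 1 corresponds
	// to the last KUBE-SEP rule, which needs no statistic match.
	for _, p := range sepProbabilities(3) {
		fmt.Printf("%.10f\n", p)
	}
}
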
@@ -5786,9 +5683,9 @@ func TestInternalTrafficPolicyE2E(t *testing.T) {
	}
}
-// TestEndpointSliceWithTerminatingEndpointsTrafficPolicyLocal tests that when there are local ready and ready + terminating
-// endpoints, only the ready endpoints are used.
-func TestEndpointSliceWithTerminatingEndpointsTrafficPolicyLocal(t *testing.T) {
+// TestTerminatingEndpointsTrafficPolicyLocal tests that when there are local ready and
+// ready + terminating endpoints, only the ready endpoints are used.
+func TestTerminatingEndpointsTrafficPolicyLocal(t *testing.T) {
	tcpProtocol := v1.ProtocolTCP
	timeout := v1.DefaultClientIPServiceAffinitySeconds
	service := &v1.Service{
@@ -6318,9 +6215,9 @@ func TestEndpointSliceWithTerminatingEndpointsTrafficPolicyLocal(t *testing.T) {
	}
}
-// TestEndpointSliceWithTerminatingEndpointsTrafficPolicyCluster tests that when there are cluster-wide ready and ready + terminating
-// endpoints, only the ready endpoints are used.
-func TestEndpointSliceWithTerminatingEndpointsTrafficPolicyCluster(t *testing.T) {
+// TestTerminatingEndpointsTrafficPolicyCluster tests that when there are cluster-wide
+// ready and ready + terminating endpoints, only the ready endpoints are used.
+func TestTerminatingEndpointsTrafficPolicyCluster(t *testing.T) {
	tcpProtocol := v1.ProtocolTCP
	timeout := v1.DefaultClientIPServiceAffinitySeconds
	service := &v1.Service{
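
For context on what the two renamed tests cover: a "terminating" endpoint in these fixtures is one whose EndpointConditions mark it as not ready but still serving while it shuts down. A minimal sketch of that shape, assuming the test file's existing discovery and pointer imports (the addresses are illustrative, not taken from the tests):

// One ready endpoint plus one terminating endpoint; the renamed tests verify
// that only the ready endpoint receives traffic while any ready ones remain.
endpoints := []discovery.Endpoint{{
	Addresses:  []string{"10.0.1.1"},
	Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)},
}, {
	Addresses: []string{"10.0.1.2"},
	Conditions: discovery.EndpointConditions{
		Ready:       pointer.Bool(false),
		Serving:     pointer.Bool(true),
		Terminating: pointer.Bool(true),
	},
}}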