Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 11:50:44 +00:00
Replace iptables-proxy-specific SCTP e2e test with a unit test
We had an e2e test asserting that creating a Service with an SCTP port would create an iptables rule with "-p sctp" in it, which let us check that kube-proxy was doing roughly the right thing with SCTP even if the e2e environment didn't have SCTP support. But that check makes much more sense as a unit test.
This commit is contained in:
parent
233f9c210a
commit
c6cc056675
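
The check the old e2e test performed (removed in the second file below) boils down to scanning iptables-save output for a KUBE-SERVICES rule that matches SCTP. A minimal, self-contained sketch of that check; hasSCTPServiceRule is a hypothetical helper name, not part of the e2e framework:

package main

import (
	"fmt"
	"strings"
)

// hasSCTPServiceRule reports whether an iptables-save dump contains a
// KUBE-SERVICES rule matching SCTP — the same string matching the old
// e2e test performed against each node's ruleset.
func hasSCTPServiceRule(iptablesSave string) bool {
	for _, line := range strings.Split(iptablesSave, "\n") {
		if strings.Contains(line, "-A KUBE-SERVICES") && strings.Contains(line, "-p sctp") {
			return true
		}
	}
	return false
}

func main() {
	dump := `-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m sctp -p sctp -d 172.30.0.41 --dport 80 -j KUBE-SVC-GFCIFIA5VTFSTMSM`
	fmt.Println(hasSCTPServiceRule(dump)) // true
}

The unit test below exercises the same behavior directly against a fake iptables implementation, so it needs no cluster, no nodes, and no SCTP kernel module.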
@@ -2117,7 +2117,7 @@ func TestClusterIPReject(t *testing.T) {
 	})
 }
 
-func TestClusterIPEndpointsJump(t *testing.T) {
+func TestClusterIPEndpointsMore(t *testing.T) {
 	ipt := iptablestest.NewFake()
 	fp := NewFakeProxier(ipt)
 	svcIP := "172.30.0.41"
@@ -2125,7 +2125,7 @@ func TestClusterIPEndpointsJump(t *testing.T) {
 	svcPortName := proxy.ServicePortName{
 		NamespacedName: makeNSN("ns1", "svc1"),
 		Port:           "p80",
-		Protocol:       v1.ProtocolTCP,
+		Protocol:       v1.ProtocolSCTP,
 	}
 
 	makeServiceMap(fp,
@@ -2134,13 +2134,13 @@ func TestClusterIPEndpointsJump(t *testing.T) {
 			svc.Spec.Ports = []v1.ServicePort{{
 				Name:     svcPortName.Port,
 				Port:     int32(svcPort),
-				Protocol: v1.ProtocolTCP,
+				Protocol: v1.ProtocolSCTP,
 			}}
 		}),
 	)
 
 	epIP := "10.180.0.1"
-	tcpProtocol := v1.ProtocolTCP
+	sctpProtocol := v1.ProtocolSCTP
 	populateEndpointSlices(fp,
 		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
 			eps.AddressType = discovery.AddressTypeIPv4
@@ -2150,7 +2150,7 @@ func TestClusterIPEndpointsJump(t *testing.T) {
 			eps.Ports = []discovery.EndpointPort{{
 				Name:     pointer.String(svcPortName.Port),
 				Port:     pointer.Int32(int32(svcPort)),
-				Protocol: &tcpProtocol,
+				Protocol: &sctpProtocol,
 			}}
 		}),
 	)
@@ -2173,18 +2173,18 @@ func TestClusterIPEndpointsJump(t *testing.T) {
 		:KUBE-SERVICES - [0:0]
 		:KUBE-MARK-MASQ - [0:0]
 		:KUBE-POSTROUTING - [0:0]
-		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
-		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
-		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
+		:KUBE-SEP-RFW33Y6OHVBQ4W3M - [0:0]
+		:KUBE-SVC-GFCIFIA5VTFSTMSM - [0:0]
+		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m sctp -p sctp -d 172.30.0.41 --dport 80 -j KUBE-SVC-GFCIFIA5VTFSTMSM
 		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
 		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
 		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
 		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
 		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
-		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
-		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ
+		-A KUBE-SEP-RFW33Y6OHVBQ4W3M -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
+		-A KUBE-SEP-RFW33Y6OHVBQ4W3M -m comment --comment ns1/svc1:p80 -m sctp -p sctp -j DNAT --to-destination 10.180.0.1:80
+		-A KUBE-SVC-GFCIFIA5VTFSTMSM -m comment --comment "ns1/svc1:p80 cluster IP" -m sctp -p sctp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+		-A KUBE-SVC-GFCIFIA5VTFSTMSM -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-RFW33Y6OHVBQ4W3M
 		COMMIT
 		`)
 	assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())
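
Note that the expected chain names change along with the protocol: kube-proxy derives per-service chain names from a hash of the service port name and protocol. A minimal sketch of that derivation, assuming it mirrors kube-proxy's portProtoHash helper (SHA-256 of the concatenated name and lowercase protocol, base32-encoded, truncated to 16 characters):

package main

import (
	"crypto/sha256"
	"encoding/base32"
	"fmt"
)

// portProtoHash sketches how kube-proxy names per-service iptables chains:
// hash the "namespace/name:port" string plus the lowercase protocol,
// base32-encode the digest, and keep the first 16 characters.
func portProtoHash(servicePortName, protocol string) string {
	hash := sha256.Sum256([]byte(servicePortName + protocol))
	encoded := base32.StdEncoding.EncodeToString(hash[:])
	return encoded[:16]
}

func main() {
	// Assuming this matches the real implementation, the TCP-to-SCTP switch
	// moves the KUBE-SVC- suffix from XPGD46QRK7WJZT7O to GFCIFIA5VTFSTMSM,
	// matching the expected output above.
	fmt.Println("KUBE-SVC-" + portProtoHash("ns1/svc1:p80", "tcp"))
	fmt.Println("KUBE-SVC-" + portProtoHash("ns1/svc1:p80", "sctp"))
}

This is why the diff rewrites every KUBE-SVC and KUBE-SEP line rather than only the protocol flags.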
@@ -4429,66 +4429,4 @@ var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() {
 			framework.Failf("The state of the sctp module has changed due to the test case")
 		}
 	})
-	ginkgo.It("should create a ClusterIP Service with SCTP ports", func(ctx context.Context) {
-		ginkgo.By("checking that kube-proxy is in iptables mode")
-		if proxyMode, err := proxyMode(f); err != nil {
-			e2eskipper.Skipf("Couldn't detect KubeProxy mode - skip, %v", err)
-		} else if proxyMode != "iptables" {
-			e2eskipper.Skipf("The test doesn't work if kube-proxy is not in iptables mode")
-		}
-
-		serviceName := "sctp-clusterip"
-		ns := f.Namespace.Name
-		jig := e2eservice.NewTestJig(cs, ns, serviceName)
-
-		ginkgo.By("getting the state of the sctp module on nodes")
-		nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2)
-		framework.ExpectNoError(err)
-		sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(f, nodes)
-
-		ginkgo.By("creating service " + serviceName + " in namespace " + ns)
-		_, err = jig.CreateSCTPServiceWithPort(func(svc *v1.Service) {
-			svc.Spec.Type = v1.ServiceTypeClusterIP
-			svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolSCTP, Port: 5060}}
-		}, 5060)
-		framework.ExpectNoError(err)
-		ginkgo.DeferCleanup(func(ctx context.Context) {
-			err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
-			framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
-		})
-
-		err = e2enetwork.WaitForService(f.ClientSet, ns, serviceName, true, 5*time.Second, e2eservice.TestTimeout)
-		framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceName, err))
-		hostExec := utils.NewHostExec(f)
-		ginkgo.DeferCleanup(hostExec.Cleanup)
-		node := &nodes.Items[0]
-		cmd := "iptables-save"
-		if framework.TestContext.ClusterIsIPv6() {
-			cmd = "ip6tables-save"
-		}
-		err = wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
-			framework.Logf("Executing cmd %q on node %v", cmd, node.Name)
-			result, err := hostExec.IssueCommandWithResult(cmd, node)
-			if err != nil {
-				framework.Logf("Interrogation of iptables rules failed on node %v", node.Name)
-				return false, nil
-			}
-
-			for _, line := range strings.Split(result, "\n") {
-				if strings.Contains(line, "-A KUBE-SERVICES") && strings.Contains(line, "-p sctp") {
-					return true, nil
-				}
-			}
-			framework.Logf("retrying ... no iptables rules found for service with sctp ports on node %v", node.Name)
-			return false, nil
-		})
-		if err != nil {
-			framework.Failf("iptables rules are not set for a clusterip service with sctp ports")
-		}
-		ginkgo.By("validating sctp module is still not loaded")
-		sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(f, nodes)
-		if !sctpLoadedAtStart && sctpLoadedAtEnd {
-			framework.Failf("The state of the sctp module has changed due to the test case")
-		}
-	})
 })