Merge pull request #113335 from danwinship/cleanup-sctp-tests

Clean up SCTP tests
Kubernetes Prow Robot 2022-12-13 18:33:34 -08:00 committed by GitHub
commit fa8ef76a8b
7 changed files with 114 additions and 210 deletions

View File

@@ -2117,7 +2117,7 @@ func TestClusterIPReject(t *testing.T) {
})
}
-func TestClusterIPEndpointsJump(t *testing.T) {
+func TestClusterIPEndpointsMore(t *testing.T) {
ipt := iptablestest.NewFake()
fp := NewFakeProxier(ipt)
svcIP := "172.30.0.41"
@@ -2125,7 +2125,7 @@ func TestClusterIPEndpointsJump(t *testing.T) {
svcPortName := proxy.ServicePortName{
NamespacedName: makeNSN("ns1", "svc1"),
Port: "p80",
-Protocol: v1.ProtocolTCP,
+Protocol: v1.ProtocolSCTP,
}
makeServiceMap(fp,
@@ -2134,13 +2134,13 @@ func TestClusterIPEndpointsJump(t *testing.T) {
svc.Spec.Ports = []v1.ServicePort{{
Name: svcPortName.Port,
Port: int32(svcPort),
-Protocol: v1.ProtocolTCP,
+Protocol: v1.ProtocolSCTP,
}}
}),
)
epIP := "10.180.0.1"
-tcpProtocol := v1.ProtocolTCP
+sctpProtocol := v1.ProtocolSCTP
populateEndpointSlices(fp,
makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
eps.AddressType = discovery.AddressTypeIPv4
@@ -2150,7 +2150,7 @@ func TestClusterIPEndpointsJump(t *testing.T) {
eps.Ports = []discovery.EndpointPort{{
Name: pointer.String(svcPortName.Port),
Port: pointer.Int32(int32(svcPort)),
-Protocol: &tcpProtocol,
+Protocol: &sctpProtocol,
}}
}),
)
@@ -2173,18 +2173,18 @@ func TestClusterIPEndpointsJump(t *testing.T) {
:KUBE-SERVICES - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-POSTROUTING - [0:0]
-:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
-:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
--A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
+:KUBE-SEP-RFW33Y6OHVBQ4W3M - [0:0]
+:KUBE-SVC-GFCIFIA5VTFSTMSM - [0:0]
+-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m sctp -p sctp -d 172.30.0.41 --dport 80 -j KUBE-SVC-GFCIFIA5VTFSTMSM
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
--A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
--A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
--A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
--A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ
+-A KUBE-SEP-RFW33Y6OHVBQ4W3M -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
+-A KUBE-SEP-RFW33Y6OHVBQ4W3M -m comment --comment ns1/svc1:p80 -m sctp -p sctp -j DNAT --to-destination 10.180.0.1:80
+-A KUBE-SVC-GFCIFIA5VTFSTMSM -m comment --comment "ns1/svc1:p80 cluster IP" -m sctp -p sctp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
+-A KUBE-SVC-GFCIFIA5VTFSTMSM -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-RFW33Y6OHVBQ4W3M
COMMIT
`)
assertIPTablesRulesEqual(t, getLine(), true, expected, fp.iptablesData.String())
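
The new KUBE-SVC-*/KUBE-SEP-* names in the expected output above are not arbitrary: kube-proxy derives each chain name by hashing the service port name together with its protocol, so switching "ns1/svc1:p80" from TCP to SCTP changes every derived chain name. A minimal sketch of that derivation (modeled on kube-proxy's internal portProtoHash helper; treat it as illustrative rather than the canonical implementation):

package main

import (
	"crypto/sha256"
	"encoding/base32"
	"fmt"
)

// portProtoHash sketches kube-proxy's chain-name derivation: hash the
// service port name plus protocol, base32-encode, keep 16 characters.
func portProtoHash(servicePortName, protocol string) string {
	hash := sha256.Sum256([]byte(servicePortName + protocol))
	return base32.StdEncoding.EncodeToString(hash[:])[:16]
}

func main() {
	// Changing only the protocol yields a different suffix, which is
	// why every KUBE-SVC-/KUBE-SEP- name in the expected rules changed.
	fmt.Println("KUBE-SVC-" + portProtoHash("ns1/svc1:p80", "tcp"))
	fmt.Println("KUBE-SVC-" + portProtoHash("ns1/svc1:p80", "sctp"))
}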

View File

@@ -131,14 +131,12 @@ var _ = SIGDescribe("Networking", func() {
}
})
-// [Disruptive] because it conflicts with tests that call CheckSCTPModuleLoadedOnNodes
-ginkgo.It("should function for intra-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity][Disruptive]", func(ctx context.Context) {
+ginkgo.It("should function for intra-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity]", func(ctx context.Context) {
config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP)
checkPodToPodConnectivity(config, "sctp", e2enetwork.EndpointSCTPPort)
})
-// [Disruptive] because it conflicts with tests that call CheckSCTPModuleLoadedOnNodes
-ginkgo.It("should function for node-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity][Disruptive]", func(ctx context.Context) {
+ginkgo.It("should function for node-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity]", func(ctx context.Context) {
ginkgo.Skip("Skipping SCTP node to pod test until DialFromNode supports SCTP #96482")
config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP)
for _, endpointPod := range config.EndpointPods {
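
Most of the tag changes in this file hinge on whether a test might load sctp.ko as a side effect, which is what CheckSCTPModuleLoadedOnNodes detects. A self-contained sketch of the underlying idea (reading /proc/modules directly is an assumption about the mechanics; the e2e helper runs its check on each node via a host-exec pod):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// sctpModuleLoaded reports whether the sctp kernel module appears in
// /proc/modules, whose lines look like "sctp 434176 2 ..., Live ...".
func sctpModuleLoaded() (bool, error) {
	f, err := os.Open("/proc/modules")
	if err != nil {
		return false, err
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) > 0 && fields[0] == "sctp" {
			return true, nil
		}
	}
	return false, scanner.Err()
}

func main() {
	loaded, err := sctpModuleLoaded()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("sctp loaded:", loaded)
}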

View File

@@ -462,8 +462,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() {
}
})
-// [Disruptive] because it conflicts with tests that call CheckSCTPModuleLoadedOnNodes
-ginkgo.It("should function for pod-Service: sctp [Feature:SCTPConnectivity][Disruptive]", func(ctx context.Context) {
+ginkgo.It("should function for pod-Service: sctp [Feature:SCTPConnectivity]", func(ctx context.Context) {
config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableDualStack, e2enetwork.EnableSCTP)
ginkgo.By(fmt.Sprintf("dialing(sctp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterSCTPPort))
err := config.DialFromTestContainer("sctp", config.SecondaryClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames())

View File

@@ -1686,7 +1686,10 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
})
cleanupServerPodAndService(f, podA, serviceA)
})
ginkgo.It("should not allow access by TCP when a policy specifies only SCTP [Feature:NetworkPolicy]", func(ctx context.Context) {
// This is [Serial] because it can't run at the same time as the
// [Feature:SCTPConnectivity] tests, since they may cause sctp.ko to be loaded.
ginkgo.It("should not allow access by TCP when a policy specifies only SCTP [Feature:NetworkPolicy] [Serial]", func(ctx context.Context) {
ginkgo.By("getting the state of the sctp module on nodes")
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
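
For context, the policy this test applies admits ingress only for SCTP, so TCP connections to the same pod must be rejected even though no rule mentions TCP. A hedged sketch of such a policy object (the name, selector, and port are placeholders, not values copied from the test):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	sctp := v1.ProtocolSCTP
	port := intstr.FromInt(80)
	// Ingress to the selected pods is allowed only for SCTP on port 80;
	// any TCP connection attempt must therefore be dropped.
	policy := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "allow-only-sctp-ingress"},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{"pod-name": "server"},
			},
			Ingress: []networkingv1.NetworkPolicyIngressRule{{
				Ports: []networkingv1.NetworkPolicyPort{{
					Protocol: &sctp,
					Port:     &port,
				}},
			}},
		},
	}
	fmt.Printf("%+v\n", policy.Spec)
}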
@@ -1729,7 +1732,7 @@ var _ = common.SIGDescribe("NetworkPolicyLegacy [LinuxOnly]", func() {
})
})
var _ = common.SIGDescribe("NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly][Disruptive]", func() {
var _ = common.SIGDescribe("NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly]", func() {
var service *v1.Service
var podServer *v1.Pod
var podServerLabelSelector string

View File

@@ -1312,7 +1312,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() {
})
})
var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly][Disruptive]", func() {
var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly]", func() {
f := framework.NewDefaultFramework("sctp-network-policy")
f.SkipNamespaceCreation = true
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline

View File

@@ -29,10 +29,12 @@ import (
"k8s.io/kubernetes/pkg/cluster/ports"
"k8s.io/kubernetes/test/e2e/framework"
e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/network/common"
"k8s.io/kubernetes/test/e2e/storage/utils"
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
@@ -174,8 +176,7 @@ var _ = common.SIGDescribe("Networking", func() {
}
})
-// [Disruptive] because it conflicts with tests that call CheckSCTPModuleLoadedOnNodes
-ginkgo.It("should function for pod-Service: sctp [Feature:SCTPConnectivity][Disruptive]", func(ctx context.Context) {
+ginkgo.It("should function for pod-Service: sctp [Feature:SCTPConnectivity]", func(ctx context.Context) {
config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP)
ginkgo.By(fmt.Sprintf("dialing(sctp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterSCTPPort))
err := config.DialFromTestContainer("sctp", config.ClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames())
@@ -217,8 +218,7 @@ var _ = common.SIGDescribe("Networking", func() {
}
})
-// [Disruptive] because it conflicts with tests that call CheckSCTPModuleLoadedOnNodes
-ginkgo.It("should function for node-Service: sctp [Feature:SCTPConnectivity][Disruptive]", func(ctx context.Context) {
+ginkgo.It("should function for node-Service: sctp [Feature:SCTPConnectivity]", func(ctx context.Context) {
ginkgo.Skip("Skipping SCTP node to service test until DialFromNode supports SCTP #96482")
config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP)
ginkgo.By(fmt.Sprintf("dialing(sctp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterSCTPPort))
@@ -262,8 +262,7 @@ var _ = common.SIGDescribe("Networking", func() {
}
})
-// [Disruptive] because it conflicts with tests that call CheckSCTPModuleLoadedOnNodes
-ginkgo.It("should function for endpoint-Service: sctp [Feature:SCTPConnectivity][Disruptive]", func(ctx context.Context) {
+ginkgo.It("should function for endpoint-Service: sctp [Feature:SCTPConnectivity]", func(ctx context.Context) {
config := e2enetwork.NewNetworkingTestConfig(f, e2enetwork.EnableSCTP)
ginkgo.By(fmt.Sprintf("dialing(sctp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterSCTPPort))
err := config.DialFromEndpointContainer("sctp", config.ClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames())
@@ -632,4 +631,37 @@ var _ = common.SIGDescribe("Networking", func() {
}
framework.ExpectNoError(err, "kubelet did not recreate its iptables rules")
})
+// This is [Serial] because it can't run at the same time as the
+// [Feature:SCTPConnectivity] tests, since they may cause sctp.ko to be loaded.
+ginkgo.It("should allow creating a Pod with an SCTP HostPort [LinuxOnly] [Serial]", func(ctx context.Context) {
+node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+framework.ExpectNoError(err)
+hostExec := utils.NewHostExec(f)
+ginkgo.DeferCleanup(hostExec.Cleanup)
+ginkgo.By("getting the state of the sctp module on the selected node")
+nodes := &v1.NodeList{}
+nodes.Items = append(nodes.Items, *node)
+sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(f, nodes)
+ginkgo.By("creating a pod with hostport on the selected node")
+podName := "hostport"
+ports := []v1.ContainerPort{{Protocol: v1.ProtocolSCTP, ContainerPort: 5060, HostPort: 5060}}
+podSpec := e2epod.NewAgnhostPod(f.Namespace.Name, podName, nil, nil, ports)
+nodeSelection := e2epod.NodeSelection{Name: node.Name}
+e2epod.SetNodeSelection(&podSpec.Spec, nodeSelection)
+ginkgo.By(fmt.Sprintf("Launching the pod on node %v", node.Name))
+e2epod.NewPodClient(f).CreateSync(podSpec)
+ginkgo.DeferCleanup(func(ctx context.Context) {
+err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, metav1.DeleteOptions{})
+framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", podName, f.Namespace.Name)
+})
+ginkgo.By("validating sctp module is still not loaded")
+sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(f, nodes)
+if !sctpLoadedAtStart && sctpLoadedAtEnd {
+framework.Failf("The state of the sctp module has changed due to the test case")
+}
+})
})

View File

@@ -70,7 +70,6 @@ import (
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/network/common"
"k8s.io/kubernetes/test/e2e/storage/utils"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -3781,6 +3780,61 @@ var _ = common.SIGDescribe("Services", func() {
e2epod.DeletePodOrFail(cs, ns, podname1)
})
+// This is [Serial] because it can't run at the same time as the
+// [Feature:SCTPConnectivity] tests, since they may cause sctp.ko to be loaded.
ginkgo.It("should allow creating a basic SCTP service with pod and endpoints [LinuxOnly] [Serial]", func(ctx context.Context) {
serviceName := "sctp-endpoint-test"
ns := f.Namespace.Name
jig := e2eservice.NewTestJig(cs, ns, serviceName)
ginkgo.By("getting the state of the sctp module on nodes")
nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2)
framework.ExpectNoError(err)
sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(f, nodes)
ginkgo.By("creating service " + serviceName + " in namespace " + ns)
_, err = jig.CreateSCTPServiceWithPort(nil, 5060)
framework.ExpectNoError(err)
ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
})
err = e2enetwork.WaitForService(f.ClientSet, ns, serviceName, true, 5*time.Second, e2eservice.TestTimeout)
framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceName, err))
ginkgo.By("validating endpoints do not exist yet")
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{})
ginkgo.By("creating a pod for the service")
names := map[string]bool{}
name1 := "pod1"
createPodOrFail(f, ns, name1, jig.Labels, []v1.ContainerPort{{ContainerPort: 5060, Protocol: v1.ProtocolSCTP}})
names[name1] = true
ginkgo.DeferCleanup(func(ctx context.Context) {
for name := range names {
err := cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
}
})
ginkgo.By("validating endpoints exists")
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{name1: {5060}})
ginkgo.By("deleting the pod")
e2epod.DeletePodOrFail(cs, ns, name1)
delete(names, name1)
ginkgo.By("validating endpoints do not exist anymore")
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{})
ginkgo.By("validating sctp module is still not loaded")
sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(f, nodes)
if !sctpLoadedAtStart && sctpLoadedAtEnd {
framework.Failf("The state of the sctp module has changed due to the test case")
}
})
})
// execAffinityTestForSessionAffinityTimeout is a helper function that wraps the logic of
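
The validateEndpointsPortsOrFail calls in the migrated test boil down to polling the service's Endpoints object until the advertised ports match expectations. A rough sketch of that loop (the function name, interval, and timeout are assumptions, not the framework helper itself):

package endpointcheck

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForEndpointPorts polls until the Endpoints object for svc exposes
// exactly the given ports; calling it with no ports waits for the
// endpoints to be empty, as the test does before the pod exists.
func waitForEndpointPorts(ctx context.Context, cs clientset.Interface, ns, svc string, want ...int32) error {
	return wait.PollImmediate(2*time.Second, 2*time.Minute, func() (bool, error) {
		ep, err := cs.CoreV1().Endpoints(ns).Get(ctx, svc, metav1.GetOptions{})
		if err != nil {
			return false, nil // not created yet; keep polling
		}
		got := sets.NewInt32()
		for _, subset := range ep.Subsets {
			for _, port := range subset.Ports {
				got.Insert(port.Port)
			}
		}
		return got.Equal(sets.NewInt32(want...)), nil
	})
}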
@@ -4310,185 +4364,3 @@ func validatePortsAndProtocols(ep, expectedEndpoints fullPortsByPodUID) error {
}
return nil
}
var _ = common.SIGDescribe("SCTP [LinuxOnly]", func() {
f := framework.NewDefaultFramework("sctp")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
var cs clientset.Interface
ginkgo.BeforeEach(func() {
cs = f.ClientSet
})
ginkgo.It("should allow creating a basic SCTP service with pod and endpoints", func(ctx context.Context) {
serviceName := "sctp-endpoint-test"
ns := f.Namespace.Name
jig := e2eservice.NewTestJig(cs, ns, serviceName)
ginkgo.By("getting the state of the sctp module on nodes")
nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2)
framework.ExpectNoError(err)
sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(f, nodes)
ginkgo.By("creating service " + serviceName + " in namespace " + ns)
_, err = jig.CreateSCTPServiceWithPort(nil, 5060)
framework.ExpectNoError(err)
ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
})
err = e2enetwork.WaitForService(f.ClientSet, ns, serviceName, true, 5*time.Second, e2eservice.TestTimeout)
framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceName, err))
ginkgo.By("validating endpoints do not exist yet")
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{})
ginkgo.By("creating a pod for the service")
names := map[string]bool{}
name1 := "pod1"
createPodOrFail(f, ns, name1, jig.Labels, []v1.ContainerPort{{ContainerPort: 5060, Protocol: v1.ProtocolSCTP}})
names[name1] = true
ginkgo.DeferCleanup(func(ctx context.Context) {
for name := range names {
err := cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
}
})
ginkgo.By("validating endpoints exists")
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{name1: {5060}})
ginkgo.By("deleting the pod")
e2epod.DeletePodOrFail(cs, ns, name1)
delete(names, name1)
ginkgo.By("validating endpoints do not exist anymore")
validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{})
ginkgo.By("validating sctp module is still not loaded")
sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(f, nodes)
if !sctpLoadedAtStart && sctpLoadedAtEnd {
framework.Failf("The state of the sctp module has changed due to the test case")
}
})
ginkgo.It("should create a Pod with SCTP HostPort", func(ctx context.Context) {
node, err := e2enode.GetRandomReadySchedulableNode(cs)
framework.ExpectNoError(err)
hostExec := utils.NewHostExec(f)
ginkgo.DeferCleanup(hostExec.Cleanup)
ginkgo.By("getting the state of the sctp module on the selected node")
nodes := &v1.NodeList{}
nodes.Items = append(nodes.Items, *node)
sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(f, nodes)
ginkgo.By("creating a pod with hostport on the selected node")
podName := "hostport"
ports := []v1.ContainerPort{{Protocol: v1.ProtocolSCTP, ContainerPort: 5060, HostPort: 5060}}
podSpec := e2epod.NewAgnhostPod(f.Namespace.Name, podName, nil, nil, ports)
nodeSelection := e2epod.NodeSelection{Name: node.Name}
e2epod.SetNodeSelection(&podSpec.Spec, nodeSelection)
ginkgo.By(fmt.Sprintf("Launching the pod on node %v", node.Name))
e2epod.NewPodClient(f).CreateSync(podSpec)
ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", podName, f.Namespace.Name)
})
// wait until host port manager syncs rules
cmd := "iptables-save"
if framework.TestContext.ClusterIsIPv6() {
cmd = "ip6tables-save"
}
err = wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
framework.Logf("Executing cmd %q on node %v", cmd, node.Name)
result, err := hostExec.IssueCommandWithResult(cmd, node)
if err != nil {
framework.Logf("Interrogation of iptables rules failed on node %v", node.Name)
return false, nil
}
for _, line := range strings.Split(result, "\n") {
if strings.Contains(line, "-p sctp") && strings.Contains(line, "--dport 5060") {
return true, nil
}
}
framework.Logf("retrying ... not hostport sctp iptables rules found on node %v", node.Name)
return false, nil
})
if err != nil {
framework.Failf("iptables rules are not set for a pod with sctp hostport")
}
ginkgo.By("validating sctp module is still not loaded")
sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(f, nodes)
if !sctpLoadedAtStart && sctpLoadedAtEnd {
framework.Failf("The state of the sctp module has changed due to the test case")
}
})
ginkgo.It("should create a ClusterIP Service with SCTP ports", func(ctx context.Context) {
ginkgo.By("checking that kube-proxy is in iptables mode")
if proxyMode, err := proxyMode(f); err != nil {
e2eskipper.Skipf("Couldn't detect KubeProxy mode - skip, %v", err)
} else if proxyMode != "iptables" {
e2eskipper.Skipf("The test doesn't work if kube-proxy is not in iptables mode")
}
serviceName := "sctp-clusterip"
ns := f.Namespace.Name
jig := e2eservice.NewTestJig(cs, ns, serviceName)
ginkgo.By("getting the state of the sctp module on nodes")
nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2)
framework.ExpectNoError(err)
sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(f, nodes)
ginkgo.By("creating service " + serviceName + " in namespace " + ns)
_, err = jig.CreateSCTPServiceWithPort(func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeClusterIP
svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolSCTP, Port: 5060}}
}, 5060)
framework.ExpectNoError(err)
ginkgo.DeferCleanup(func(ctx context.Context) {
err := cs.CoreV1().Services(ns).Delete(ctx, serviceName, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
})
err = e2enetwork.WaitForService(f.ClientSet, ns, serviceName, true, 5*time.Second, e2eservice.TestTimeout)
framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceName, err))
hostExec := utils.NewHostExec(f)
ginkgo.DeferCleanup(hostExec.Cleanup)
node := &nodes.Items[0]
cmd := "iptables-save"
if framework.TestContext.ClusterIsIPv6() {
cmd = "ip6tables-save"
}
err = wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
framework.Logf("Executing cmd %q on node %v", cmd, node.Name)
result, err := hostExec.IssueCommandWithResult(cmd, node)
if err != nil {
framework.Logf("Interrogation of iptables rules failed on node %v", node.Name)
return false, nil
}
for _, line := range strings.Split(result, "\n") {
if strings.Contains(line, "-A KUBE-SERVICES") && strings.Contains(line, "-p sctp") {
return true, nil
}
}
framework.Logf("retrying ... no iptables rules found for service with sctp ports on node %v", node.Name)
return false, nil
})
if err != nil {
framework.Failf("iptables rules are not set for a clusterip service with sctp ports")
}
ginkgo.By("validating sctp module is still not loaded")
sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(f, nodes)
if !sctpLoadedAtStart && sctpLoadedAtEnd {
framework.Failf("The state of the sctp module has changed due to the test case")
}
})
})
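
The removed e2e checks above share one idiom: dump the node's iptables state and scan it for an SCTP rule. Stripped of the framework plumbing, that check reduces to a small pure function over the iptables-save output; a sketch (the function name is an assumption):

package main

import (
	"fmt"
	"strings"
)

// hasSCTPServiceRule reports whether an iptables-save dump contains a
// KUBE-SERVICES rule for the SCTP protocol, the condition the removed
// ClusterIP test polled for on the node.
func hasSCTPServiceRule(dump string) bool {
	for _, line := range strings.Split(dump, "\n") {
		if strings.Contains(line, "-A KUBE-SERVICES") && strings.Contains(line, "-p sctp") {
			return true
		}
	}
	return false
}

func main() {
	dump := `-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m sctp -p sctp -d 172.30.0.41 --dport 80 -j KUBE-SVC-GFCIFIA5VTFSTMSM`
	fmt.Println(hasSCTPServiceRule(dump))
}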