Add SCTP e2e connectivity tests.
Pod-to-pod connectivity, and pod-to-service.

Signed-off-by: Federico Paolinelli <fpaoline@redhat.com>
commit c42b1ca783
parent 55d3408211
@@ -48,12 +48,16 @@ const (
 	// EndpointHTTPPort is an endpoint HTTP port for testing.
 	EndpointHTTPPort = 8080
 	// EndpointUDPPort is an endpoint UDP port for testing.
 	EndpointUDPPort = 8081
+	// EndpointSCTPPort is an endpoint SCTP port for testing.
+	EndpointSCTPPort = 8082
 	testContainerHTTPPort = 8080
 	// ClusterHTTPPort is a cluster HTTP port for testing.
 	ClusterHTTPPort = 80
 	// ClusterUDPPort is a cluster UDP port for testing.
 	ClusterUDPPort = 90
+	// ClusterSCTPPort is a cluster SCTP port for testing.
+	ClusterSCTPPort = 95
 	testPodName = "test-container-pod"
 	hostTestPodName = "host-test-container-pod"
 	nodePortServiceName = "node-port-service"
@@ -83,8 +87,8 @@ const (
 var NetexecImageName = imageutils.GetE2EImage(imageutils.Agnhost)
 
 // NewNetworkingTestConfig creates and sets up a new test config helper.
-func NewNetworkingTestConfig(f *framework.Framework, hostNetwork bool) *NetworkingTestConfig {
-	config := &NetworkingTestConfig{f: f, Namespace: f.Namespace.Name, HostNetwork: hostNetwork}
+func NewNetworkingTestConfig(f *framework.Framework, hostNetwork, SCTPEnabled bool) *NetworkingTestConfig {
+	config := &NetworkingTestConfig{f: f, Namespace: f.Namespace.Name, HostNetwork: hostNetwork, SCTPEnabled: SCTPEnabled}
 	ginkgo.By(fmt.Sprintf("Performing setup for networking test in namespace %v", config.Namespace))
 	config.setup(getServiceSelector())
 	return config
@@ -117,6 +121,9 @@ type NetworkingTestConfig struct {
 	HostTestContainerPod *v1.Pod
 	// if the HostTestContainerPod is running with HostNetwork=true.
 	HostNetwork bool
+	// if the test pods are listening on sctp port. We need this as sctp tests
+	// are marked as disruptive as they may load the sctp module.
+	SCTPEnabled bool
 	// EndpointPods are the pods belonging to the Service created by this
 	// test config. Each invocation of `setup` creates a service with
 	// 1 pod per node running the netexecImage.
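The comment on SCTPEnabled above is the reason the new tests are opt-in: on Linux, merely creating an SCTP socket is typically enough to make the kernel auto-load the sctp module on a node where it is not already loaded. A minimal, Linux-only sketch of that behaviour (not part of this patch; golang.org/x/sys/unix is assumed to be available):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Creating an SCTP socket is usually enough to trigger a modprobe of the
	// sctp module; it fails with EPROTONOSUPPORT if the module cannot be loaded.
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, unix.IPPROTO_SCTP)
	if err != nil {
		fmt.Println("SCTP unavailable:", err)
		return
	}
	defer unix.Close(fd)
	fmt.Println("SCTP socket created; the sctp kernel module is loaded")
}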
@@ -140,9 +147,10 @@ type NetworkingTestConfig struct {
 	ClusterIP string
 	// External ip of first node for use in nodePort testing.
 	NodeIP string
-	// The http/udp nodePorts of the Service.
+	// The http/udp/sctp nodePorts of the Service.
 	NodeHTTPPort int
 	NodeUDPPort int
+	NodeSCTPPort int
 	// The kubernetes namespace within which all resources for this
 	// config are created
 	Namespace string
@@ -484,6 +492,15 @@ func (config *NetworkingTestConfig) createNetShellPodSpec(podName, hostname stri
 			},
 		},
 	}
+	// we want sctp to be optional as it will load the sctp kernel module
+	if config.SCTPEnabled {
+		pod.Spec.Containers[0].Args = append(pod.Spec.Containers[0].Args, fmt.Sprintf("--sctp-port=%d", EndpointSCTPPort))
+		pod.Spec.Containers[0].Ports = append(pod.Spec.Containers[0].Ports, v1.ContainerPort{
+			Name:          "sctp",
+			ContainerPort: EndpointSCTPPort,
+			Protocol:      v1.ProtocolSCTP,
+		})
+	}
 	return pod
 }
 
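For reference, this is roughly what the SCTP entry appended to the endpoint container above serializes to in the pod spec. A standalone sketch (not part of the patch), reusing the EndpointSCTPPort value from the constants:

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// The port appended by createNetShellPodSpec when SCTPEnabled is true.
	port := v1.ContainerPort{
		Name:          "sctp",
		ContainerPort: 8082, // EndpointSCTPPort
		Protocol:      v1.ProtocolSCTP,
	}
	out, _ := json.Marshal(port)
	fmt.Println(string(out))
	// Prints approximately: {"name":"sctp","containerPort":8082,"protocol":"SCTP"}
}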
@@ -518,6 +535,10 @@ func (config *NetworkingTestConfig) createTestPodSpec() *v1.Pod {
 			},
 		},
 	}
+	// we want sctp to be optional as it will load the sctp kernel module
+	if config.SCTPEnabled {
+		pod.Spec.Containers[0].Args = append(pod.Spec.Containers[0].Args, fmt.Sprintf("--sctp-port=%d", EndpointSCTPPort))
+	}
 	return pod
 }
 
@@ -526,7 +547,7 @@ func (config *NetworkingTestConfig) createNodePortServiceSpec(svcName string, se
 	if enableSessionAffinity {
 		sessionAffinity = v1.ServiceAffinityClientIP
 	}
-	return &v1.Service{
+	res := &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: svcName,
 		},
@@ -540,6 +561,11 @@ func (config *NetworkingTestConfig) createNodePortServiceSpec(svcName string, se
 			SessionAffinity: sessionAffinity,
 		},
 	}
+
+	if config.SCTPEnabled {
+		res.Spec.Ports = append(res.Spec.Ports, v1.ServicePort{Port: ClusterSCTPPort, Name: "sctp", Protocol: v1.ProtocolSCTP, TargetPort: intstr.FromInt(EndpointSCTPPort)})
+	}
+	return res
 }
 
 func (config *NetworkingTestConfig) createNodePortService(selector map[string]string) {
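Taken together with the constants above, the NodePort Service ends up exposing three ports when SCTPEnabled is true. A standalone sketch of that layout (not part of the patch; it assumes the pre-existing http/udp ports keep the ClusterHTTPPort to EndpointHTTPPort and ClusterUDPPort to EndpointUDPPort mappings):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	ports := []v1.ServicePort{
		{Name: "http", Port: 80, Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(8080)},
		{Name: "udp", Port: 90, Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt(8081)},
		// Appended only when config.SCTPEnabled is true.
		{Name: "sctp", Port: 95, Protocol: v1.ProtocolSCTP, TargetPort: intstr.FromInt(8082)},
	}
	for _, p := range ports {
		fmt.Printf("%-5s %-5s %d -> %s\n", p.Name, p.Protocol, p.Port, p.TargetPort.String())
	}
}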
@@ -633,6 +659,8 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
 			config.NodeUDPPort = int(p.NodePort)
 		case v1.ProtocolTCP:
 			config.NodeHTTPPort = int(p.NodePort)
+		case v1.ProtocolSCTP:
+			config.NodeSCTPPort = int(p.NodePort)
 		default:
 			continue
 		}
@@ -149,7 +149,7 @@ var _ = SIGDescribe("Networking", func() {
 	ginkgo.It("should check kube-proxy urls", func() {
 		// TODO: this is overkill we just need the host networking pod
 		// to hit kube-proxy urls.
-		config := e2enetwork.NewNetworkingTestConfig(f, true)
+		config := e2enetwork.NewNetworkingTestConfig(f, true, false)
 
 		ginkgo.By("checking kube-proxy URLs")
 		config.GetSelfURL(ports.ProxyHealthzPort, "/healthz", "200 OK")
@@ -162,7 +162,7 @@ var _ = SIGDescribe("Networking", func() {
 	ginkgo.Describe("Granular Checks: Services", func() {
 
 		ginkgo.It("should function for pod-Service: http", func() {
-			config := e2enetwork.NewNetworkingTestConfig(f, false)
+			config := e2enetwork.NewNetworkingTestConfig(f, false, false)
 			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
 			config.DialFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
 
@@ -171,7 +171,7 @@ var _ = SIGDescribe("Networking", func() {
 		})
 
 		ginkgo.It("should function for pod-Service: udp", func() {
-			config := e2enetwork.NewNetworkingTestConfig(f, false)
+			config := e2enetwork.NewNetworkingTestConfig(f, false, false)
 			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
 			config.DialFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
 
@@ -179,8 +179,19 @@ var _ = SIGDescribe("Networking", func() {
 			config.DialFromTestContainer("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
 		})
 
+		// Once basic tests checking for the sctp module not to be loaded are implemented, this
+		// needs to be marked as [Disruptive]
+		ginkgo.It("should function for pod-Service: sctp [Feature:SCTPConnectivity]", func() {
+			config := e2enetwork.NewNetworkingTestConfig(f, false, true)
+			ginkgo.By(fmt.Sprintf("dialing(sctp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterSCTPPort))
+			config.DialFromTestContainer("sctp", config.ClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames())
+
+			ginkgo.By(fmt.Sprintf("dialing(sctp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeSCTPPort))
+			config.DialFromTestContainer("sctp", config.NodeIP, config.NodeSCTPPort, config.MaxTries, 0, config.EndpointHostnames())
+		})
+
 		ginkgo.It("should function for node-Service: http", func() {
-			config := e2enetwork.NewNetworkingTestConfig(f, true)
+			config := e2enetwork.NewNetworkingTestConfig(f, true, false)
 			ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterHTTPPort))
 			config.DialFromNode("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
 
@@ -189,7 +200,7 @@ var _ = SIGDescribe("Networking", func() {
 		})
 
 		ginkgo.It("should function for node-Service: udp", func() {
-			config := e2enetwork.NewNetworkingTestConfig(f, true)
+			config := e2enetwork.NewNetworkingTestConfig(f, true, false)
 			ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterUDPPort))
 			config.DialFromNode("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
 
@@ -198,7 +209,7 @@ var _ = SIGDescribe("Networking", func() {
 		})
 
 		ginkgo.It("should function for endpoint-Service: http", func() {
-			config := e2enetwork.NewNetworkingTestConfig(f, false)
+			config := e2enetwork.NewNetworkingTestConfig(f, false, false)
 			ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
 			config.DialFromEndpointContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
 
@@ -207,7 +218,7 @@ var _ = SIGDescribe("Networking", func() {
 		})
 
 		ginkgo.It("should function for endpoint-Service: udp", func() {
-			config := e2enetwork.NewNetworkingTestConfig(f, false)
+			config := e2enetwork.NewNetworkingTestConfig(f, false, false)
 			ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
 			config.DialFromEndpointContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
 
@@ -216,7 +227,7 @@ var _ = SIGDescribe("Networking", func() {
 		})
 
 		ginkgo.It("should update endpoints: http", func() {
-			config := e2enetwork.NewNetworkingTestConfig(f, false)
+			config := e2enetwork.NewNetworkingTestConfig(f, false, false)
 			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
 			config.DialFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
 
@@ -227,7 +238,7 @@ var _ = SIGDescribe("Networking", func() {
 		})
 
 		ginkgo.It("should update endpoints: udp", func() {
-			config := e2enetwork.NewNetworkingTestConfig(f, false)
+			config := e2enetwork.NewNetworkingTestConfig(f, false, false)
 			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
 			config.DialFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
 
@@ -239,7 +250,7 @@ var _ = SIGDescribe("Networking", func() {
 
 		// Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling.
 		ginkgo.It("should update nodePort: http [Slow]", func() {
-			config := e2enetwork.NewNetworkingTestConfig(f, true)
+			config := e2enetwork.NewNetworkingTestConfig(f, true, false)
 			ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort))
 			config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
 
@@ -251,7 +262,7 @@ var _ = SIGDescribe("Networking", func() {
 
 		// Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling.
 		ginkgo.It("should update nodePort: udp [Slow]", func() {
-			config := e2enetwork.NewNetworkingTestConfig(f, true)
+			config := e2enetwork.NewNetworkingTestConfig(f, true, false)
 			ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort))
 			config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
 
@@ -263,7 +274,7 @@ var _ = SIGDescribe("Networking", func() {
 
 		// [LinuxOnly]: Windows does not support session affinity.
 		ginkgo.It("should function for client IP based session affinity: http [LinuxOnly]", func() {
-			config := e2enetwork.NewNetworkingTestConfig(f, false)
+			config := e2enetwork.NewNetworkingTestConfig(f, false, false)
 			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterHTTPPort))
 
 			// Check if number of endpoints returned are exactly one.
@@ -281,7 +292,7 @@ var _ = SIGDescribe("Networking", func() {
 
 		// [LinuxOnly]: Windows does not support session affinity.
 		ginkgo.It("should function for client IP based session affinity: udp [LinuxOnly]", func() {
-			config := e2enetwork.NewNetworkingTestConfig(f, false)
+			config := e2enetwork.NewNetworkingTestConfig(f, false, false)
 			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterUDPPort))
 
 			// Check if number of endpoints returned are exactly one.
@@ -298,20 +309,29 @@ var _ = SIGDescribe("Networking", func() {
 		})
 
 		ginkgo.It("should be able to handle large requests: http", func() {
-			config := e2enetwork.NewNetworkingTestConfig(f, false)
+			config := e2enetwork.NewNetworkingTestConfig(f, false, false)
 			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
 			message := strings.Repeat("42", 1000)
 			config.DialEchoFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, message)
 		})
 
 		ginkgo.It("should be able to handle large requests: udp", func() {
-			config := e2enetwork.NewNetworkingTestConfig(f, false)
+			config := e2enetwork.NewNetworkingTestConfig(f, false, false)
 			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
 			message := "n" + strings.Repeat("o", 1999)
 			config.DialEchoFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, message)
 		})
 	})
 
+	// Once basic tests checking for the sctp module not to be loaded are implemented, this
+	// needs to be marked as [Disruptive]
+	ginkgo.It("should function for pod-pod: sctp [Feature:SCTPConnectivity]", func() {
+		config := e2enetwork.NewNetworkingTestConfig(f, false, true)
+		ginkgo.By(fmt.Sprintf("dialing(sctp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterSCTPPort))
+		message := "hello"
+		config.DialEchoFromTestContainer("sctp", config.TestContainerPod.Status.PodIP, e2enetwork.EndpointSCTPPort, config.MaxTries, 0, message)
+	})
+
 	ginkgo.It("should recreate its iptables rules if they are deleted [Disruptive]", func() {
 		e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
 		e2eskipper.SkipUnlessSSHKeyPresent()
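The pod-pod spec above dials the test pod's own PodIP on EndpointSCTPPort and expects the "hello" payload to be echoed back. A Linux-only sketch of that kind of SCTP round trip at the socket level (not part of the patch; 10.0.0.10 is a placeholder for the pod IP, and golang.org/x/sys/unix is assumed to be available):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// One-to-one SCTP socket; with SOCK_STREAM it is used much like TCP.
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, unix.IPPROTO_SCTP)
	if err != nil {
		log.Fatalf("sctp socket: %v", err)
	}
	defer unix.Close(fd)

	// Placeholder target: the test pod's IP and EndpointSCTPPort (8082).
	sa := &unix.SockaddrInet4{Port: 8082, Addr: [4]byte{10, 0, 0, 10}}
	if err := unix.Connect(fd, sa); err != nil {
		log.Fatalf("connect: %v", err)
	}
	if _, err := unix.Write(fd, []byte("hello")); err != nil {
		log.Fatalf("write: %v", err)
	}
	buf := make([]byte, 64)
	n, err := unix.Read(fd, buf)
	if err != nil {
		log.Fatalf("read: %v", err)
	}
	fmt.Printf("echoed back: %q\n", buf[:n])
}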