e2e NodePorts should be tested within the cluster

The NodePort functionality can be tested within the cluster.

Testing from outside the cluster assumes that there is connectivity
between the e2e.test binary and the cluster under test. That is not
always true, and in some cases the test is exposed to external
factors or misconfigurations, such as wrong routes or firewall
rules, that affect the result.

Change-Id: Ie2fc8929723e80273c0933dbaeb6a42729c819d0
Antonio Ojea 2022-12-20 10:21:59 +00:00
parent 0fdb14f63a
commit f333e5b4c5
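
As a rough sketch of the in-cluster pattern the message describes: the
jig helpers (e2eservice.NewTestJig, CreateTCPService,
CheckServiceReachability) all appear in the diff below, while the
exec-pod helper e2epod.CreateExecPodOrFail and the surrounding
variables (cs, ns, serviceName) are assumed from the e2e framework.

	// Create a NodePort service, then verify it from an exec pod running
	// inside the cluster, so the check does not depend on routes or
	// firewall rules between the e2e.test binary and the nodes.
	jig := e2eservice.NewTestJig(cs, ns, serviceName)
	svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeNodePort
	})
	framework.ExpectNoError(err)

	execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil)
	err = jig.CheckServiceReachability(ctx, svc, execPod)
	framework.ExpectNoError(err)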

@@ -156,9 +156,6 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 ns1 := f.Namespace.Name // LB1 in ns1 on TCP
 framework.Logf("namespace for TCP test: %s", ns1)
-nodeIP, err := getRandomNodeIP(ctx, cs)
-framework.ExpectNoError(err, "Could not obtain a valid Node IP")
 ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1)
 tcpJig := e2eservice.NewTestJig(cs, ns1, serviceName)
 tcpService, err := tcpJig.CreateTCPService(ctx, nil)
@@ -188,9 +185,6 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 err = tcpJig.CheckServiceReachability(ctx, tcpService, execPod)
 framework.ExpectNoError(err)
-ginkgo.By("hitting the TCP service's NodePort")
-e2eservice.TestReachableHTTP(ctx, nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
 // Change the services to LoadBalancer.
 // Here we test that LoadBalancers can receive static IP addresses. This isn't
@@ -261,9 +255,6 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 err = tcpJig.CheckServiceReachability(ctx, tcpService, execPod)
 framework.ExpectNoError(err)
-ginkgo.By("hitting the TCP service's NodePort")
-e2eservice.TestReachableHTTP(ctx, nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
 ginkgo.By("hitting the TCP service's LoadBalancer")
 e2eservice.TestReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerLagTimeout)
@@ -282,12 +273,6 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 }
 framework.Logf("TCP node port: %d", tcpNodePort)
-ginkgo.By("hitting the TCP service's new NodePort")
-e2eservice.TestReachableHTTP(ctx, nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
-ginkgo.By("checking the old TCP NodePort is closed")
-testNotReachableHTTP(nodeIP, tcpNodePortOld, e2eservice.KubeProxyLagTimeout)
 ginkgo.By("hitting the TCP service's LoadBalancer")
 e2eservice.TestReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerLagTimeout)
@@ -312,9 +297,6 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 framework.Logf("service port TCP: %d", svcPort)
-ginkgo.By("hitting the TCP service's NodePort")
-e2eservice.TestReachableHTTP(ctx, nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
 ginkgo.By("hitting the TCP service's LoadBalancer")
 e2eservice.TestReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerCreateTimeout)
@@ -322,9 +304,6 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 err = tcpJig.Scale(ctx, 0)
 framework.ExpectNoError(err)
-ginkgo.By("looking for ICMP REJECT on the TCP service's NodePort")
-testRejectedHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
 ginkgo.By("looking for ICMP REJECT on the TCP service's LoadBalancer")
 testRejectedHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
@@ -332,9 +311,6 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 err = tcpJig.Scale(ctx, 1)
 framework.ExpectNoError(err)
-ginkgo.By("hitting the TCP service's NodePort")
-e2eservice.TestReachableHTTP(ctx, nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
 ginkgo.By("hitting the TCP service's LoadBalancer")
 e2eservice.TestReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerCreateTimeout)
@@ -352,9 +328,6 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 _, err = tcpJig.WaitForLoadBalancerDestroy(ctx, tcpIngressIP, svcPort, loadBalancerCreateTimeout)
 framework.ExpectNoError(err)
-ginkgo.By("checking the TCP NodePort is closed")
-testNotReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
 ginkgo.By("checking the TCP LoadBalancer is closed")
 testNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
 })
@@ -373,9 +346,6 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 ns2 := f.Namespace.Name // LB1 in ns2 on TCP
 framework.Logf("namespace for TCP test: %s", ns2)
-nodeIP, err := getRandomNodeIP(ctx, cs)
-framework.ExpectNoError(err, "Could not obtain a valid Node IP")
 ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2)
 udpJig := e2eservice.NewTestJig(cs, ns2, serviceName)
 udpService, err := udpJig.CreateUDPService(ctx, nil)
@@ -405,9 +375,6 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 err = udpJig.CheckServiceReachability(ctx, udpService, execPod)
 framework.ExpectNoError(err)
-ginkgo.By("hitting the UDP service's NodePort")
-testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
 // Change the services to LoadBalancer.
 // Here we test that LoadBalancers can receive static IP addresses. This isn't
@@ -471,9 +438,6 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 err = udpJig.CheckServiceReachability(ctx, udpService, execPod)
 framework.ExpectNoError(err)
-ginkgo.By("hitting the UDP service's NodePort")
-testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
 ginkgo.By("hitting the UDP service's LoadBalancer")
 testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
@@ -492,11 +456,8 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 }
 framework.Logf("UDP node port: %d", udpNodePort)
-ginkgo.By("hitting the UDP service's new NodePort")
-testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
-ginkgo.By("checking the old UDP NodePort is closed")
-testNotReachableUDP(nodeIP, udpNodePortOld, e2eservice.KubeProxyLagTimeout)
+err = udpJig.CheckServiceReachability(ctx, udpService, execPod)
+framework.ExpectNoError(err)
 ginkgo.By("hitting the UDP service's LoadBalancer")
 testReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
@@ -523,7 +484,8 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 framework.Logf("service port UDP: %d", svcPort)
-ginkgo.By("hitting the UDP service's NodePort")
-testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
+err = udpJig.CheckServiceReachability(ctx, udpService, execPod)
+framework.ExpectNoError(err)
 ginkgo.By("hitting the UDP service's LoadBalancer")
 testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
@@ -532,9 +494,6 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 err = udpJig.Scale(ctx, 0)
 framework.ExpectNoError(err)
-ginkgo.By("looking for ICMP REJECT on the UDP service's NodePort")
-testRejectedUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
 ginkgo.By("looking for ICMP REJECT on the UDP service's LoadBalancer")
 testRejectedUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
@@ -543,7 +502,8 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 framework.ExpectNoError(err)
-ginkgo.By("hitting the UDP service's NodePort")
-testReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
+err = udpJig.CheckServiceReachability(ctx, udpService, execPod)
+framework.ExpectNoError(err)
 ginkgo.By("hitting the UDP service's LoadBalancer")
 testReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
@@ -562,9 +522,6 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 _, err = udpJig.WaitForLoadBalancerDestroy(ctx, udpIngressIP, svcPort, loadBalancerCreateTimeout)
 framework.ExpectNoError(err)
-ginkgo.By("checking the UDP NodePort is closed")
-testNotReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
 ginkgo.By("checking the UDP LoadBalancer is closed")
 testNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
 })
@@ -956,9 +913,6 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 ns1 := f.Namespace.Name // LB1 in ns1 on TCP
 framework.Logf("namespace for TCP test: %s", ns1)
-nodeIP, err := e2enode.PickIP(ctx, cs) // for later
-framework.ExpectNoError(err)
 ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1)
 tcpJig := e2eservice.NewTestJig(cs, ns1, serviceName)
 tcpService, err := tcpJig.CreateTCPService(ctx, nil)
@@ -1058,9 +1012,6 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 }
 framework.Logf("TCP node port: %d", tcpNodePort)
-ginkgo.By("hitting the TCP service's new NodePort")
-e2eservice.TestReachableHTTP(ctx, nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
 ginkgo.By("hitting the TCP service's LoadBalancer")
 e2eservice.TestReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerLagTimeout)
 })
@@ -1939,28 +1890,3 @@ func testRollingUpdateLBConnectivityDisruption(ctx context.Context, f *framework
 // assert that the load balancer address is still reachable after the rolling updates are finished
 e2eservice.TestReachableHTTP(ctx, lbNameOrAddress, svcPort, timeout)
 }
-// getRandomNodeIP gets an IP address from a random worker node.
-// These tests exercise traffic coming from outside the cluster,
-// so it prefers ExternalIPs over InternalIPs.
-func getRandomNodeIP(ctx context.Context, cs clientset.Interface) (string, error) {
-	family := v1.IPv4Protocol
-	if framework.TestContext.ClusterIsIPv6() {
-		family = v1.IPv6Protocol
-	}
-	node, err := e2enode.GetRandomReadySchedulableNode(ctx, cs)
-	if err != nil {
-		return "", err
-	}
-	ips := e2enode.GetAddressesByTypeAndFamily(node, v1.NodeExternalIP, family)
-	if len(ips) > 0 {
-		return ips[0], nil
-	}
-	ips = e2enode.GetAddressesByTypeAndFamily(node, v1.NodeInternalIP, family)
-	if len(ips) > 0 {
-		return ips[0], nil
-	}
-	return "", fmt.Errorf("node %v does not contain any valid IP", node)
-}
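
For context, the call sites removed in the hunks above paired this
helper with the external reachability checks, reassembled here from
the deleted lines:

	nodeIP, err := getRandomNodeIP(ctx, cs)
	framework.ExpectNoError(err, "Could not obtain a valid Node IP")
	e2eservice.TestReachableHTTP(ctx, nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)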