Mirror of https://github.com/k3s-io/kubernetes.git
e2e: Don't try to create a UDP LoadBalancer on AWS
AWS doesn't support type=LoadBalancer for UDP services. For now, we simply skip the type=LoadBalancer steps of the test for the UDP service when running on AWS. Fixes #20911
parent 41a98b43e4
commit a0093eb503
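Before the diff, here is the shape of the change in one place: a minimal, self-contained Go sketch of the gating pattern this commit threads through the Services e2e test. The hard-coded provider value and the Println placeholders are illustrative assumptions; providerIs and loadBalancerSupportsUDP are the names the diff actually uses.

package main

import "fmt"

// provider stands in for the e2e framework's configured cloud provider;
// hard-coded here purely for illustration (assumption).
var provider = "aws"

// providerIs mirrors the e2e helper of the same name: it reports whether
// the current provider matches any of the given names.
func providerIs(providers ...string) bool {
	for _, p := range providers {
		if p == provider {
			return true
		}
	}
	return false
}

func main() {
	// Compute UDP load-balancer support once, then gate every UDP
	// LoadBalancer step of the test lifecycle on it.
	loadBalancerSupportsUDP := !providerIs("aws")

	if loadBalancerSupportsUDP {
		fmt.Println("would create, hit, and tear down the UDP LoadBalancer")
	} else {
		// On AWS the UDP service is exercised as a plain NodePort service.
		fmt.Println("skipping UDP LoadBalancer checks; NodePort checks only")
	}
}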
@@ -200,7 +200,7 @@ func (s *AWSCloud) ensureLoadBalancerHealthCheck(loadBalancer *elb.LoadBalancerD
 	expectedTimeout := int64(5)
 	expectedInterval := int64(10)
 
-	// We only a TCP health-check on the first port
+	// We only configure a TCP health-check on the first port
 	expectedTarget := ""
 	for _, listener := range listeners {
 		if listener.InstancePort == nil {
@@ -418,6 +418,8 @@ var _ = Describe("Services", func() {
 		// requires cloud load-balancer support
 		SkipUnlessProviderIs("gce", "gke", "aws")
 
+		loadBalancerSupportsUDP := !providerIs("aws")
+
 		// This test is more monolithic than we'd like because LB turnup can be
 		// very slow, so we lumped all the tests into one LB lifecycle.
@@ -510,10 +512,12 @@ var _ = Describe("Services", func() {
 			s.Spec.Type = api.ServiceTypeLoadBalancer
 		})
 
-		By("changing the UDP service to type=LoadBalancer")
-		udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) {
-			s.Spec.Type = api.ServiceTypeLoadBalancer
-		})
+		if loadBalancerSupportsUDP {
+			By("changing the UDP service to type=LoadBalancer")
+			udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) {
+				s.Spec.Type = api.ServiceTypeLoadBalancer
+			})
+		}
 
 		By("waiting for the TCP service to have a load balancer")
 		// Wait for the load balancer to be created asynchronously
@@ -528,7 +532,6 @@ var _ = Describe("Services", func() {
 		tcpIngressIP := getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
 		Logf("TCP load balancer: %s", tcpIngressIP)
 
-		By("waiting for the UDP service " + serviceName + " to have a load balancer")
 		if providerIs("gce", "gke") {
 			// Do this as early as possible, which overrides the `defer` above.
 			// This is mostly out of fear of leaking the IP in a timeout case
@@ -545,19 +548,22 @@ var _ = Describe("Services", func() {
 			}
 		}
 
-		By("waiting for the UDP service to have a load balancer")
-		// 2nd one should be faster since they ran in parallel.
-		udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name)
-		jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
-		if udpService.Spec.Ports[0].NodePort != udpNodePort {
-			Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
-		}
-		udpIngressIP := getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])
-		Logf("UDP load balancer: %s", udpIngressIP)
+		var udpIngressIP string
+		if loadBalancerSupportsUDP {
+			By("waiting for the UDP service to have a load balancer")
+			// 2nd one should be faster since they ran in parallel.
+			udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name)
+			jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
+			if udpService.Spec.Ports[0].NodePort != udpNodePort {
+				Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
+			}
+			udpIngressIP = getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])
+			Logf("UDP load balancer: %s", udpIngressIP)
 
-		By("verifying that TCP and UDP use different load balancers")
-		if tcpIngressIP == udpIngressIP {
-			Failf("Load balancers are not different: %s", getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
+			By("verifying that TCP and UDP use different load balancers")
+			if tcpIngressIP == udpIngressIP {
+				Failf("Load balancers are not different: %s", getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
+			}
 		}
 
 		By("hitting the TCP service's NodePort")
@@ -569,8 +575,10 @@ var _ = Describe("Services", func() {
 		By("hitting the TCP service's LoadBalancer")
 		jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
 
-		By("hitting the UDP service's LoadBalancer")
-		jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+		if loadBalancerSupportsUDP {
+			By("hitting the UDP service's LoadBalancer")
+			jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+		}
 
 		// Change the services' node ports.
 
@@ -589,13 +597,17 @@ var _ = Describe("Services", func() {
 
 		By("changing the UDP service's NodePort")
 		udpService = jig.ChangeServiceNodePortOrFail(ns2, udpService.Name, udpNodePort)
-		jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
+		if loadBalancerSupportsUDP {
+			jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
+		} else {
+			jig.SanityCheckService(udpService, api.ServiceTypeNodePort)
+		}
 		udpNodePortOld := udpNodePort
 		udpNodePort = udpService.Spec.Ports[0].NodePort
 		if udpNodePort == udpNodePortOld {
 			Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort)
 		}
-		if getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
+		if loadBalancerSupportsUDP && getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
 			Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
 		}
 		Logf("UDP node port: %d", udpNodePort)
@@ -615,8 +627,10 @@ var _ = Describe("Services", func() {
 		By("hitting the TCP service's LoadBalancer")
 		jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
 
-		By("hitting the UDP service's LoadBalancer")
-		jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+		if loadBalancerSupportsUDP {
+			By("hitting the UDP service's LoadBalancer")
+			jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+		}
 
 		// Change the services' main ports.
 
@@ -641,14 +655,18 @@ var _ = Describe("Services", func() {
 		udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) {
 			s.Spec.Ports[0].Port++
 		})
-		jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
+		if loadBalancerSupportsUDP {
+			jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
+		} else {
+			jig.SanityCheckService(udpService, api.ServiceTypeNodePort)
+		}
 		if udpService.Spec.Ports[0].Port != svcPort {
 			Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port)
 		}
 		if udpService.Spec.Ports[0].NodePort != udpNodePort {
 			Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort)
 		}
-		if getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
+		if loadBalancerSupportsUDP && getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
 			Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
 		}
 
@@ -663,8 +681,10 @@ var _ = Describe("Services", func() {
 		By("hitting the TCP service's LoadBalancer")
 		jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB
 
-		By("hitting the UDP service's LoadBalancer")
-		jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB)
+		if loadBalancerSupportsUDP {
+			By("hitting the UDP service's LoadBalancer")
+			jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB)
+		}
 
 		// Change the services back to ClusterIP.
 
@@ -682,9 +702,11 @@ var _ = Describe("Services", func() {
 			s.Spec.Type = api.ServiceTypeClusterIP
 			s.Spec.Ports[0].NodePort = 0
 		})
-		// Wait for the load balancer to be destroyed asynchronously
-		udpService = jig.WaitForLoadBalancerDestroyOrFail(ns2, udpService.Name, udpIngressIP, svcPort)
-		jig.SanityCheckService(udpService, api.ServiceTypeClusterIP)
+		if loadBalancerSupportsUDP {
+			// Wait for the load balancer to be destroyed asynchronously
+			udpService = jig.WaitForLoadBalancerDestroyOrFail(ns2, udpService.Name, udpIngressIP, svcPort)
+			jig.SanityCheckService(udpService, api.ServiceTypeClusterIP)
+		}
 
 		By("checking the TCP NodePort is closed")
 		jig.TestNotReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout)
@@ -695,8 +717,10 @@ var _ = Describe("Services", func() {
 		By("checking the TCP LoadBalancer is closed")
 		jig.TestNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
 
-		By("checking the UDP LoadBalancer is closed")
-		jig.TestNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+		if loadBalancerSupportsUDP {
+			By("checking the UDP LoadBalancer is closed")
+			jig.TestNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+		}
 	})
 
 	It("should prevent NodePort collisions", func() {
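For completeness, one way to run just this lifecycle test, assuming the hack/e2e.go workflow used by the tree at this time (the exact flags may differ in your checkout):

	go run hack/e2e.go -v --test --test_args="--ginkgo.focus=Services"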