From a0093eb5038ca78e971c9f33cc206c679a842017 Mon Sep 17 00:00:00 2001
From: Justin Santa Barbara
Date: Tue, 9 Feb 2016 21:59:35 -0500
Subject: [PATCH 1/2] e2e: Don't try to create a UDP LoadBalancer on AWS

AWS doesn't support type=LoadBalancer with UDP services.

For now, we simply skip the type=LoadBalancer parts of the test for the
UDP service on AWS.

Fix #20911
---
 .../providers/aws/aws_loadbalancer.go |  2 +-
 test/e2e/service.go                   | 88 ++++++++++++-------
 2 files changed, 57 insertions(+), 33 deletions(-)

diff --git a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go
index 7f7b5dd3e22..aedbafd1dc3 100644
--- a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go
+++ b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go
@@ -200,7 +200,7 @@ func (s *AWSCloud) ensureLoadBalancerHealthCheck(loadBalancer *elb.LoadBalancerD
 	expectedTimeout := int64(5)
 	expectedInterval := int64(10)
 
-	// We only a TCP health-check on the first port
+	// We only configure a TCP health-check on the first port
 	expectedTarget := ""
 	for _, listener := range listeners {
 		if listener.InstancePort == nil {
diff --git a/test/e2e/service.go b/test/e2e/service.go
index e83eb7f97b6..d0136e05e9e 100644
--- a/test/e2e/service.go
+++ b/test/e2e/service.go
@@ -418,6 +418,8 @@ var _ = Describe("Services", func() {
 		// requires cloud load-balancer support
 		SkipUnlessProviderIs("gce", "gke", "aws")
 
+		loadBalancerSupportsUDP := !providerIs("aws")
+
 		// This test is more monolithic than we'd like because LB turnup can be
 		// very slow, so we lumped all the tests into one LB lifecycle.
 
@@ -510,10 +512,12 @@ var _ = Describe("Services", func() {
 			s.Spec.Type = api.ServiceTypeLoadBalancer
 		})
 
-		By("changing the UDP service to type=LoadBalancer")
-		udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) {
-			s.Spec.Type = api.ServiceTypeLoadBalancer
-		})
+		if loadBalancerSupportsUDP {
+			By("changing the UDP service to type=LoadBalancer")
+			udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) {
+				s.Spec.Type = api.ServiceTypeLoadBalancer
+			})
+		}
 
 		By("waiting for the TCP service to have a load balancer")
 		// Wait for the load balancer to be created asynchronously
@@ -528,7 +532,6 @@ var _ = Describe("Services", func() {
 		tcpIngressIP := getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
 		Logf("TCP load balancer: %s", tcpIngressIP)
 
-		By("waiting for the UDP service " + serviceName + " to have a load balancer")
 		if providerIs("gce", "gke") {
 			// Do this as early as possible, which overrides the `defer` above.
 			// This is mostly out of fear of leaking the IP in a timeout case
@@ -545,19 +548,22 @@ var _ = Describe("Services", func() {
 			}
 		}
 
-		By("waiting for the UDP service to have a load balancer")
-		// 2nd one should be faster since they ran in parallel.
-		udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name)
-		jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
-		if udpService.Spec.Ports[0].NodePort != udpNodePort {
-			Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
-		}
-		udpIngressIP := getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])
-		Logf("UDP load balancer: %s", udpIngressIP)
+		var udpIngressIP string
+		if loadBalancerSupportsUDP {
+			By("waiting for the UDP service to have a load balancer")
+			// 2nd one should be faster since they ran in parallel.
+			udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name)
+			jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
+			if udpService.Spec.Ports[0].NodePort != udpNodePort {
+				Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
+			}
+			udpIngressIP = getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])
+			Logf("UDP load balancer: %s", udpIngressIP)
 
-		By("verifying that TCP and UDP use different load balancers")
-		if tcpIngressIP == udpIngressIP {
-			Failf("Load balancers are not different: %s", getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
+			By("verifying that TCP and UDP use different load balancers")
+			if tcpIngressIP == udpIngressIP {
+				Failf("Load balancers are not different: %s", getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
+			}
 		}
 
 		By("hitting the TCP service's NodePort")
@@ -569,8 +575,10 @@ var _ = Describe("Services", func() {
 		By("hitting the TCP service's LoadBalancer")
 		jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
 
-		By("hitting the UDP service's LoadBalancer")
-		jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+		if loadBalancerSupportsUDP {
+			By("hitting the UDP service's LoadBalancer")
+			jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+		}
 
 		// Change the services' node ports.
 
@@ -589,13 +597,17 @@ var _ = Describe("Services", func() {
 
 		By("changing the UDP service's NodePort")
 		udpService = jig.ChangeServiceNodePortOrFail(ns2, udpService.Name, udpNodePort)
-		jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
+		if loadBalancerSupportsUDP {
+			jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
+		} else {
+			jig.SanityCheckService(udpService, api.ServiceTypeNodePort)
+		}
 		udpNodePortOld := udpNodePort
 		udpNodePort = udpService.Spec.Ports[0].NodePort
 		if udpNodePort == udpNodePortOld {
 			Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort)
 		}
-		if getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
+		if loadBalancerSupportsUDP && getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
 			Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
 		}
 		Logf("UDP node port: %d", udpNodePort)
@@ -615,8 +627,10 @@ var _ = Describe("Services", func() {
 		By("hitting the TCP service's LoadBalancer")
 		jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
 
-		By("hitting the UDP service's LoadBalancer")
-		jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+		if loadBalancerSupportsUDP {
+			By("hitting the UDP service's LoadBalancer")
+			jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+		}
 
 		// Change the services' main ports.
 
@@ -641,14 +655,18 @@ var _ = Describe("Services", func() {
 		udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) {
 			s.Spec.Ports[0].Port++
 		})
-		jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
+		if loadBalancerSupportsUDP {
+			jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
+		} else {
+			jig.SanityCheckService(udpService, api.ServiceTypeNodePort)
+		}
 		if udpService.Spec.Ports[0].Port != svcPort {
 			Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port)
 		}
 		if udpService.Spec.Ports[0].NodePort != udpNodePort {
 			Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort)
 		}
-		if getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
+		if loadBalancerSupportsUDP && getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
 			Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
 		}
 
@@ -663,8 +681,10 @@ var _ = Describe("Services", func() {
 		By("hitting the TCP service's LoadBalancer")
 		jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB
 
-		By("hitting the UDP service's LoadBalancer")
-		jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB)
+		if loadBalancerSupportsUDP {
+			By("hitting the UDP service's LoadBalancer")
+			jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB)
+		}
 
 		// Change the services back to ClusterIP.
 
@@ -682,9 +702,11 @@ var _ = Describe("Services", func() {
 			s.Spec.Type = api.ServiceTypeClusterIP
 			s.Spec.Ports[0].NodePort = 0
 		})
-		// Wait for the load balancer to be destroyed asynchronously
-		udpService = jig.WaitForLoadBalancerDestroyOrFail(ns2, udpService.Name, udpIngressIP, svcPort)
-		jig.SanityCheckService(udpService, api.ServiceTypeClusterIP)
+		if loadBalancerSupportsUDP {
+			// Wait for the load balancer to be destroyed asynchronously
+			udpService = jig.WaitForLoadBalancerDestroyOrFail(ns2, udpService.Name, udpIngressIP, svcPort)
+			jig.SanityCheckService(udpService, api.ServiceTypeClusterIP)
+		}
 
 		By("checking the TCP NodePort is closed")
 		jig.TestNotReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout)
@@ -695,8 +717,10 @@ var _ = Describe("Services", func() {
 		By("checking the TCP LoadBalancer is closed")
 		jig.TestNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
 
-		By("checking the UDP LoadBalancer is closed")
-		jig.TestNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+		if loadBalancerSupportsUDP {
+			By("checking the UDP LoadBalancer is closed")
+			jig.TestNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
+		}
 	})
 
 	It("should prevent NodePort collisions", func() {

From 46b89464fd567a9b7c691ebf6b8a2c10ad204c5f Mon Sep 17 00:00:00 2001
From: Justin Santa Barbara
Date: Wed, 10 Feb 2016 09:31:52 -0500
Subject: [PATCH 2/2] e2e: Allow longer for AWS LoadBalancers to start serving
 traffic

When we create a LoadBalancer on AWS, there is a longer delay between
creating the LB and it starting to serve traffic than there is on GCE;
a delay of a few minutes is normal. Therefore, use a longer timeout
when waiting for the LB on AWS.
---
 test/e2e/cluster_upgrade.go |  4 ++++
 test/e2e/service.go         | 11 ++++++++++-
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/test/e2e/cluster_upgrade.go b/test/e2e/cluster_upgrade.go
index 35294ad7e9b..54a1d422909 100644
--- a/test/e2e/cluster_upgrade.go
+++ b/test/e2e/cluster_upgrade.go
@@ -611,6 +611,10 @@ func migRollingUpdatePoll(id string, nt time.Duration) error {
 }
 
 func testLoadBalancerReachable(ingress api.LoadBalancerIngress, port int) bool {
+	loadBalancerLagTimeout := loadBalancerLagTimeoutDefault
+	if providerIs("aws") {
+		loadBalancerLagTimeout = loadBalancerLagTimeoutAWS
+	}
 	return testLoadBalancerReachableInTime(ingress, port, loadBalancerLagTimeout)
 }
 
diff --git a/test/e2e/service.go b/test/e2e/service.go
index d0136e05e9e..8549fc53206 100644
--- a/test/e2e/service.go
+++ b/test/e2e/service.go
@@ -48,7 +48,11 @@ import (
 const kubeProxyLagTimeout = 5 * time.Minute
 
 // Maximum time a load balancer is allowed to not respond after creation.
-const loadBalancerLagTimeout = 2 * time.Minute
+const loadBalancerLagTimeoutDefault = 2 * time.Minute
+
+// On AWS there is a delay between ELB creation and serving traffic;
+// a few minutes is typical, so use 10m.
+const loadBalancerLagTimeoutAWS = 10 * time.Minute
 
 // How long to wait for a load balancer to be created/modified.
 //TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
@@ -420,6 +424,11 @@ var _ = Describe("Services", func() {
 
 		loadBalancerSupportsUDP := !providerIs("aws")
 
+		loadBalancerLagTimeout := loadBalancerLagTimeoutDefault
+		if providerIs("aws") {
+			loadBalancerLagTimeout = loadBalancerLagTimeoutAWS
+		}
+
 		// This test is more monolithic than we'd like because LB turnup can be
 		// very slow, so we lumped all the tests into one LB lifecycle.
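
Reviewer note: the AWS-vs-default lag-timeout selection now appears in two
places (testLoadBalancerReachable in cluster_upgrade.go and the Services test
in service.go). A minimal sketch of how a follow-up could share it; the helper
name loadBalancerLagTimeoutForProvider is hypothetical and not part of these
patches, and the sketch assumes the existing providerIs helper and the two
constants added in patch 2:

    package e2e

    import "time"

    // loadBalancerLagTimeoutForProvider is a hypothetical helper that picks
    // the lag timeout for the current provider. AWS ELBs can take a few
    // minutes after creation before they serve traffic, so AWS gets the
    // longer timeout.
    func loadBalancerLagTimeoutForProvider() time.Duration {
    	if providerIs("aws") {
    		return loadBalancerLagTimeoutAWS
    	}
    	return loadBalancerLagTimeoutDefault
    }

Call sites would then read, e.g.,
testLoadBalancerReachableInTime(ingress, port, loadBalancerLagTimeoutForProvider()).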