e2e disable node port on loadbalancers

This commit is contained in:
Antonio Ojea 2021-11-18 09:45:26 +01:00
parent 2ea3d320b0
commit 020cf2d7aa
2 changed files with 147 additions and 4 deletions

View File

@ -473,10 +473,7 @@ func (j *TestJig) sanityCheckService(svc *v1.Service, svcType v1.ServiceType) (*
} }
} }
expectNodePorts := false expectNodePorts := needsNodePorts(svc)
if svcType != v1.ServiceTypeClusterIP && svcType != v1.ServiceTypeExternalName {
expectNodePorts = true
}
for i, port := range svc.Spec.Ports { for i, port := range svc.Spec.Ports {
hasNodePort := (port.NodePort != 0) hasNodePort := (port.NodePort != 0)
if hasNodePort != expectNodePorts { if hasNodePort != expectNodePorts {
@ -499,6 +496,24 @@ func (j *TestJig) sanityCheckService(svc *v1.Service, svcType v1.ServiceType) (*
return svc, nil return svc, nil
} }
// needsNodePorts reports whether node ports are expected to be allocated
// for the given Service. A nil Service never needs node ports. Services of
// type NodePort always do; LoadBalancer Services do unless the
// AllocateLoadBalancerNodePorts field explicitly opts out (a nil field
// keeps the legacy behavior of allocating them). All other types do not.
func needsNodePorts(svc *v1.Service) bool {
	if svc == nil {
		return false
	}
	switch svc.Spec.Type {
	case v1.ServiceTypeNodePort:
		return true
	case v1.ServiceTypeLoadBalancer:
		// nil means the field was not set: back-compat default is true.
		if opt := svc.Spec.AllocateLoadBalancerNodePorts; opt != nil {
			return *opt
		}
		return true
	default:
		return false
	}
}
// UpdateService fetches a service, calls the update function on it, and // UpdateService fetches a service, calls the update function on it, and
// then attempts to send the updated service. It tries up to 3 times in the // then attempts to send the updated service. It tries up to 3 times in the
// face of timeouts and conflicts. // face of timeouts and conflicts.

View File

@ -44,6 +44,7 @@ import (
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/network/common" "k8s.io/kubernetes/test/e2e/network/common"
gcecloud "k8s.io/legacy-cloud-providers/gce" gcecloud "k8s.io/legacy-cloud-providers/gce"
utilpointer "k8s.io/utils/pointer"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
"github.com/onsi/gomega" "github.com/onsi/gomega"
@ -846,6 +847,133 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2eservice.WaitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, true) e2eservice.WaitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, true)
}) })
// Verifies that a LoadBalancer Service created with
// Spec.AllocateLoadBalancerNodePorts=false gets no node port allocated,
// and that flipping the field to true afterwards allocates one — without
// the load balancer ingress IP changing. Requires real cloud LB support,
// so it only runs on gce/gke/aws.
ginkgo.It("should be able to create LoadBalancer Service without NodePort and change it [Slow]", func() {
// requires cloud load-balancer support
e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
// AWS load balancers propagate changes more slowly, so use its longer lag timeout.
loadBalancerLagTimeout := e2eservice.LoadBalancerLagTimeoutDefault
if framework.ProviderIs("aws") {
loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS
}
loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
// This test is more monolithic than we'd like because LB turnup can be
// very slow, so we lumped all the tests into one LB lifecycle.
serviceName := "reallocate-nodeport-test"
ns1 := f.Namespace.Name // LB1 in ns1 on TCP
framework.Logf("namespace for TCP test: %s", ns1)
// Pick a node IP now; used later to probe the allocated NodePort directly.
nodeIP, err := e2enode.PickIP(cs) // for later
framework.ExpectNoError(err)
// Start as ClusterIP and convert to LoadBalancer below, mirroring the
// common user workflow of upgrading an existing Service.
ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1)
tcpJig := e2eservice.NewTestJig(cs, ns1, serviceName)
tcpService, err := tcpJig.CreateTCPService(nil)
framework.ExpectNoError(err)
svcPort := int(tcpService.Spec.Ports[0].Port)
framework.Logf("service port TCP: %d", svcPort)
ginkgo.By("creating a pod to be part of the TCP service " + serviceName)
_, err = tcpJig.Run(nil)
framework.ExpectNoError(err)
// Change the services to LoadBalancer.
// Here we test that LoadBalancers can receive static IP addresses. This isn't
// necessary, but is an additional feature this monolithic test checks.
requestedIP := ""
staticIPName := ""
if framework.ProviderIs("gce", "gke") {
ginkgo.By("creating a static load balancer IP")
staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunID)
gceCloud, err := gce.GetGCECloud()
framework.ExpectNoError(err, "failed to get GCE cloud provider")
err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region())
// The defer is registered BEFORE the reservation error is checked so the
// address is released even if ExpectNoError aborts this test body.
defer func() {
if staticIPName != "" {
// Release GCE static IP - this is not kube-managed and will not be automatically released.
if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
framework.Logf("failed to release static IP %s: %v", staticIPName, err)
}
}
}()
framework.ExpectNoError(err, "failed to create region address: %s", staticIPName)
reservedAddr, err := gceCloud.GetRegionAddress(staticIPName, gceCloud.Region())
framework.ExpectNoError(err, "failed to get region address: %s", staticIPName)
requestedIP = reservedAddr.Address
framework.Logf("Allocated static load balancer IP: %s", requestedIP)
}
// Convert to LoadBalancer with node-port allocation explicitly disabled —
// the core behavior under test.
ginkgo.By("changing the TCP service to type=LoadBalancer")
tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable
s.Spec.Type = v1.ServiceTypeLoadBalancer
s.Spec.AllocateLoadBalancerNodePorts = utilpointer.BoolPtr(false)
})
framework.ExpectNoError(err)
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(tcpService))
ginkgo.By("waiting for the TCP service to have a load balancer")
// Wait for the load balancer to be created asynchronously
tcpService, err = tcpJig.WaitForLoadBalancer(loadBalancerCreateTimeout)
framework.ExpectNoError(err)
// With AllocateLoadBalancerNodePorts=false, no node port must be assigned.
if int(tcpService.Spec.Ports[0].NodePort) != 0 {
framework.Failf("TCP Spec.Ports[0].NodePort allocated %d when not expected", tcpService.Spec.Ports[0].NodePort)
}
if requestedIP != "" && e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP {
framework.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
}
tcpIngressIP := e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
framework.Logf("TCP load balancer: %s", tcpIngressIP)
if framework.ProviderIs("gce", "gke") {
// Do this as early as possible, which overrides the `defer` above.
// This is mostly out of fear of leaking the IP in a timeout case
// (as of this writing we're not 100% sure where the leaks are
// coming from, so this is first-aid rather than surgery).
ginkgo.By("demoting the static IP to ephemeral")
if staticIPName != "" {
gceCloud, err := gce.GetGCECloud()
framework.ExpectNoError(err, "failed to get GCE cloud provider")
// Deleting it after it is attached "demotes" it to an
// ephemeral IP, which can be auto-released.
if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
framework.Failf("failed to release static IP %s: %v", staticIPName, err)
}
// Clearing the name makes the deferred cleanup above a no-op.
staticIPName = ""
}
}
// The LB must be reachable even though no node ports were allocated.
ginkgo.By("hitting the TCP service's LoadBalancer")
e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
// Change the services' node ports.
ginkgo.By("adding a TCP service's NodePort")
tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
s.Spec.AllocateLoadBalancerNodePorts = utilpointer.BoolPtr(true)
})
framework.ExpectNoError(err)
// Flipping the field back to true must allocate a node port...
tcpNodePort := int(tcpService.Spec.Ports[0].NodePort)
if tcpNodePort == 0 {
framework.Failf("TCP Spec.Ports[0].NodePort (%d) not allocated", tcpNodePort)
}
// ...without disturbing the existing load balancer ingress IP.
if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
}
framework.Logf("TCP node port: %d", tcpNodePort)
// Probe both paths: the freshly allocated NodePort and the LB itself.
ginkgo.By("hitting the TCP service's new NodePort")
e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
ginkgo.By("hitting the TCP service's LoadBalancer")
e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
})
}) })
var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {