From 2ea3d320b04c9ed792edcf23f6de4ec65a4d98c1 Mon Sep 17 00:00:00 2001
From: Antonio Ojea
Date: Thu, 18 Nov 2021 09:25:52 +0100
Subject: [PATCH 1/2] integration tests for service node port control

When a Service is created with AllocateLoadBalancerNodePorts set to
false, it should not allocate node ports. If the same Service is later
updated to set AllocateLoadBalancerNodePorts to true, it should
allocate node ports.

When a Service is updated from type ClusterIP to type LoadBalancer and
AllocateLoadBalancerNodePorts is set to false, it should not allocate
node ports.
---
 test/integration/service/loadbalancer_test.go | 108 ++++++++++++++++++
 1 file changed, 108 insertions(+)

diff --git a/test/integration/service/loadbalancer_test.go b/test/integration/service/loadbalancer_test.go
index 480d9f43594..18ad2a16567 100644
--- a/test/integration/service/loadbalancer_test.go
+++ b/test/integration/service/loadbalancer_test.go
@@ -78,6 +78,60 @@ func Test_ServiceLoadBalancerDisableAllocateNodePorts(t *testing.T) {
 	}
 }
 
+// Test_ServiceUpdateLoadBalancerDisableAllocateNodePorts tests that a Service that is updated from ClusterIP to LoadBalancer
+// with spec.allocateLoadBalancerNodePorts=false does not allocate node ports for the Service.
+func Test_ServiceUpdateLoadBalancerDisableAllocateNodePorts(t *testing.T) {
+	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ServiceLBNodePortControl, true)()
+
+	controlPlaneConfig := framework.NewIntegrationTestControlPlaneConfig()
+	_, server, closeFn := framework.RunAnAPIServer(controlPlaneConfig)
+	defer closeFn()
+
+	config := restclient.Config{Host: server.URL}
+	client, err := clientset.NewForConfig(&config)
+	if err != nil {
+		t.Fatalf("Error creating clientset: %v", err)
+	}
+
+	ns := framework.CreateTestingNamespace("test-service-allocate-node-ports", server, t)
+	defer framework.DeleteTestingNamespace(ns, server, t)
+
+	service := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test-123",
+		},
+		Spec: corev1.ServiceSpec{
+			Type: corev1.ServiceTypeClusterIP,
+			Ports: []corev1.ServicePort{{
+				Port: int32(80),
+			}},
+			Selector: map[string]string{
+				"foo": "bar",
+			},
+		},
+	}
+
+	service, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{})
+	if err != nil {
+		t.Fatalf("Error creating test service: %v", err)
+	}
+
+	if serviceHasNodePorts(service) {
+		t.Error("found node ports when none was expected")
+	}
+
+	service.Spec.Type = corev1.ServiceTypeLoadBalancer
+	service.Spec.AllocateLoadBalancerNodePorts = utilpointer.BoolPtr(false)
+	service, err = client.CoreV1().Services(ns.Name).Update(context.TODO(), service, metav1.UpdateOptions{})
+	if err != nil {
+		t.Fatalf("Error updating test service: %v", err)
+	}
+
+	if serviceHasNodePorts(service) {
+		t.Error("found node ports when none was expected")
+	}
+}
+
 // Test_ServiceLoadBalancerSwitchToDeallocatedNodePorts test that switching a Service
 // to spec.allocateLoadBalancerNodePorts=false, does not de-allocate existing node ports.
 func Test_ServiceLoadBalancerEnableThenDisableAllocatedNodePorts(t *testing.T) {
@@ -132,6 +186,60 @@ func Test_ServiceLoadBalancerEnableThenDisableAllocatedNodePorts(t *testing.T) {
 	}
 }
 
+// Test_ServiceLoadBalancerDisableThenEnableAllocatedNodePorts tests that switching a Service
+// to spec.allocateLoadBalancerNodePorts=true from false allocates new node ports.
+func Test_ServiceLoadBalancerDisableThenEnableAllocatedNodePorts(t *testing.T) {
+	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ServiceLBNodePortControl, true)()
+
+	controlPlaneConfig := framework.NewIntegrationTestControlPlaneConfig()
+	_, server, closeFn := framework.RunAnAPIServer(controlPlaneConfig)
+	defer closeFn()
+
+	config := restclient.Config{Host: server.URL}
+	client, err := clientset.NewForConfig(&config)
+	if err != nil {
+		t.Fatalf("Error creating clientset: %v", err)
+	}
+
+	ns := framework.CreateTestingNamespace("test-service-reallocate-node-ports", server, t)
+	defer framework.DeleteTestingNamespace(ns, server, t)
+
+	service := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test-123",
+		},
+		Spec: corev1.ServiceSpec{
+			Type:                          corev1.ServiceTypeLoadBalancer,
+			AllocateLoadBalancerNodePorts: utilpointer.BoolPtr(false),
+			Ports: []corev1.ServicePort{{
+				Port: int32(80),
+			}},
+			Selector: map[string]string{
+				"foo": "bar",
+			},
+		},
+	}
+
+	service, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{})
+	if err != nil {
+		t.Fatalf("Error creating test service: %v", err)
+	}
+
+	if serviceHasNodePorts(service) {
+		t.Error("found node ports when none was expected")
+	}
+
+	service.Spec.AllocateLoadBalancerNodePorts = utilpointer.BoolPtr(true)
+	service, err = client.CoreV1().Services(ns.Name).Update(context.TODO(), service, metav1.UpdateOptions{})
+	if err != nil {
+		t.Fatalf("Error updating test service: %v", err)
+	}
+
+	if !serviceHasNodePorts(service) {
+		t.Error("expected node ports but found none")
+	}
+}
+
 func serviceHasNodePorts(svc *corev1.Service) bool {
 	for _, port := range svc.Spec.Ports {
 		if port.NodePort > 0 {
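Note on the behavior this patch exercises: the same semantics map onto ordinary client-go usage. Below is a minimal sketch, not part of the patch; the function name createLBWithoutNodePorts, the "demo" Service, and the "default" namespace are hypothetical, and a configured clientset is assumed.

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	utilpointer "k8s.io/utils/pointer"
)

// createLBWithoutNodePorts creates a LoadBalancer Service that opts out of
// node port allocation; with the ServiceLBNodePortControl gate enabled, the
// API server should leave spec.ports[*].nodePort unset.
func createLBWithoutNodePorts(client kubernetes.Interface) (*corev1.Service, error) {
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"}, // hypothetical name
		Spec: corev1.ServiceSpec{
			Type:                          corev1.ServiceTypeLoadBalancer,
			AllocateLoadBalancerNodePorts: utilpointer.BoolPtr(false),
			Ports:                         []corev1.ServicePort{{Port: 80}},
			Selector:                      map[string]string{"app": "demo"},
		},
	}
	return client.CoreV1().Services("default").Create(context.TODO(), svc, metav1.CreateOptions{})
}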
From 020cf2d7aa2011147a89c9bd4063c8237b71a08f Mon Sep 17 00:00:00 2001
From: Antonio Ojea
Date: Thu, 18 Nov 2021 09:45:26 +0100
Subject: [PATCH 2/2] e2e: disable node ports on LoadBalancer Services
---
 test/e2e/framework/service/jig.go |  23 +++++-
 test/e2e/network/loadbalancer.go  | 128 ++++++++++++++++++++++++++++++
 2 files changed, 147 insertions(+), 4 deletions(-)

diff --git a/test/e2e/framework/service/jig.go b/test/e2e/framework/service/jig.go
index ae2be4f60e9..d97543b94f3 100644
--- a/test/e2e/framework/service/jig.go
+++ b/test/e2e/framework/service/jig.go
@@ -473,10 +473,7 @@ func (j *TestJig) sanityCheckService(svc *v1.Service, svcType v1.ServiceType) (*
 		}
 	}
 
-	expectNodePorts := false
-	if svcType != v1.ServiceTypeClusterIP && svcType != v1.ServiceTypeExternalName {
-		expectNodePorts = true
-	}
+	expectNodePorts := needsNodePorts(svc)
 	for i, port := range svc.Spec.Ports {
 		hasNodePort := (port.NodePort != 0)
 		if hasNodePort != expectNodePorts {
@@ -499,6 +496,24 @@ func (j *TestJig) sanityCheckService(svc *v1.Service, svcType v1.ServiceType) (*
 	return svc, nil
 }
 
+func needsNodePorts(svc *v1.Service) bool {
+	if svc == nil {
+		return false
+	}
+	// Type NodePort
+	if svc.Spec.Type == v1.ServiceTypeNodePort {
+		return true
+	}
+	if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
+		return false
+	}
+	// Type LoadBalancer
+	if svc.Spec.AllocateLoadBalancerNodePorts == nil {
+		return true // back-compat
+	}
+	return *svc.Spec.AllocateLoadBalancerNodePorts
+}
+
 // UpdateService fetches a service, calls the update function on it, and
 // then attempts to send the updated service. It tries up to 3 times in the
 // face of timeouts and conflicts.
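The back-compat branch above is the subtle part: a Service created before the ServiceLBNodePortControl gate existed carries a nil AllocateLoadBalancerNodePorts and must keep receiving node ports. A small sketch of the decision table, not part of the patch (it assumes the same package as needsNodePorts, so the v1 and utilpointer imports are already available):

// exampleNeedsNodePorts illustrates the three interesting cases the helper handles.
func exampleNeedsNodePorts() []bool {
	clusterIP := &v1.Service{Spec: v1.ServiceSpec{Type: v1.ServiceTypeClusterIP}}
	lbNil := &v1.Service{Spec: v1.ServiceSpec{Type: v1.ServiceTypeLoadBalancer}}
	lbOptOut := &v1.Service{Spec: v1.ServiceSpec{
		Type:                          v1.ServiceTypeLoadBalancer,
		AllocateLoadBalancerNodePorts: utilpointer.BoolPtr(false),
	}}
	return []bool{
		needsNodePorts(clusterIP), // false: ClusterIP never has node ports
		needsNodePorts(lbNil),     // true: nil field keeps the pre-gate behavior
		needsNodePorts(lbOptOut),  // false: explicit opt-out
	}
}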
diff --git a/test/e2e/network/loadbalancer.go b/test/e2e/network/loadbalancer.go
index 7fca3460250..aa2eb3ac435 100644
--- a/test/e2e/network/loadbalancer.go
+++ b/test/e2e/network/loadbalancer.go
@@ -44,6 +44,7 @@ import (
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/network/common"
 	gcecloud "k8s.io/legacy-cloud-providers/gce"
+	utilpointer "k8s.io/utils/pointer"
 
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
@@ -846,6 +847,133 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
 		framework.ExpectNoError(err)
 		e2eservice.WaitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, true)
 	})
+
+	ginkgo.It("should be able to create a LoadBalancer Service without a NodePort and change it [Slow]", func() {
+		// requires cloud load-balancer support
+		e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
+
+		loadBalancerLagTimeout := e2eservice.LoadBalancerLagTimeoutDefault
+		if framework.ProviderIs("aws") {
+			loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS
+		}
+		loadBalancerCreateTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
+
+		// This test is more monolithic than we'd like because LB turnup can be
+		// very slow, so we lumped all the tests into one LB lifecycle.
+
+		serviceName := "reallocate-nodeport-test"
+		ns1 := f.Namespace.Name // LB1 in ns1 on TCP
+		framework.Logf("namespace for TCP test: %s", ns1)
+
+		nodeIP, err := e2enode.PickIP(cs) // for later
+		framework.ExpectNoError(err)
+
+		ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1)
+		tcpJig := e2eservice.NewTestJig(cs, ns1, serviceName)
+		tcpService, err := tcpJig.CreateTCPService(nil)
+		framework.ExpectNoError(err)
+
+		svcPort := int(tcpService.Spec.Ports[0].Port)
+		framework.Logf("service port TCP: %d", svcPort)
+
+		ginkgo.By("creating a pod to be part of the TCP service " + serviceName)
+		_, err = tcpJig.Run(nil)
+		framework.ExpectNoError(err)
+
+		// Change the service to type LoadBalancer.
+
+		// Here we test that LoadBalancers can receive static IP addresses. This isn't
+		// necessary, but is an additional feature this monolithic test checks.
+		requestedIP := ""
+		staticIPName := ""
+		if framework.ProviderIs("gce", "gke") {
+			ginkgo.By("creating a static load balancer IP")
+			staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunID)
+			gceCloud, err := gce.GetGCECloud()
+			framework.ExpectNoError(err, "failed to get GCE cloud provider")
+
+			err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region())
+			defer func() {
+				if staticIPName != "" {
+					// Release the GCE static IP - this is not kube-managed and will not be automatically released.
+					if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
+						framework.Logf("failed to release static IP %s: %v", staticIPName, err)
+					}
+				}
+			}()
+			framework.ExpectNoError(err, "failed to create region address: %s", staticIPName)
+			reservedAddr, err := gceCloud.GetRegionAddress(staticIPName, gceCloud.Region())
+			framework.ExpectNoError(err, "failed to get region address: %s", staticIPName)
+
+			requestedIP = reservedAddr.Address
+			framework.Logf("Allocated static load balancer IP: %s", requestedIP)
+		}
+
+		ginkgo.By("changing the TCP service to type=LoadBalancer")
+		tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
+			s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable
+			s.Spec.Type = v1.ServiceTypeLoadBalancer
+			s.Spec.AllocateLoadBalancerNodePorts = utilpointer.BoolPtr(false)
+		})
+		framework.ExpectNoError(err)
+
+		serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(tcpService))
+		ginkgo.By("waiting for the TCP service to have a load balancer")
+		// Wait for the load balancer to be created asynchronously
+		tcpService, err = tcpJig.WaitForLoadBalancer(loadBalancerCreateTimeout)
+		framework.ExpectNoError(err)
+		if int(tcpService.Spec.Ports[0].NodePort) != 0 {
+			framework.Failf("TCP Spec.Ports[0].NodePort allocated %d when not expected", tcpService.Spec.Ports[0].NodePort)
+		}
+		if requestedIP != "" && e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP {
+			framework.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
+		}
+		tcpIngressIP := e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
+		framework.Logf("TCP load balancer: %s", tcpIngressIP)
+
+		if framework.ProviderIs("gce", "gke") {
+			// Do this as early as possible, which overrides the `defer` above.
+			// This is mostly out of fear of leaking the IP in a timeout case
+			// (as of this writing we're not 100% sure where the leaks are
+			// coming from, so this is first-aid rather than surgery).
+			ginkgo.By("demoting the static IP to ephemeral")
+			if staticIPName != "" {
+				gceCloud, err := gce.GetGCECloud()
+				framework.ExpectNoError(err, "failed to get GCE cloud provider")
+				// Deleting it after it is attached "demotes" it to an
+				// ephemeral IP, which can be auto-released.
+				if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
+					framework.Failf("failed to release static IP %s: %v", staticIPName, err)
+				}
+				staticIPName = ""
+			}
+		}
+
+		ginkgo.By("hitting the TCP service's LoadBalancer")
+		e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
+
+		// Change the service's node ports.
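+		// Outside of the test jig, the same flip can be done with a JSON merge
+		// patch; a sketch, not part of this test (client and ns are hypothetical
+		// variables, and k8s.io/apimachinery/pkg/types is assumed imported):
+		//
+		//   patch := []byte(`{"spec":{"allocateLoadBalancerNodePorts":true}}`)
+		//   svc, err := client.CoreV1().Services(ns).Patch(context.TODO(),
+		//       serviceName, types.MergePatchType, patch, metav1.PatchOptions{})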
+
+		ginkgo.By("adding a NodePort to the TCP service")
+		tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
+			s.Spec.AllocateLoadBalancerNodePorts = utilpointer.BoolPtr(true)
+		})
+		framework.ExpectNoError(err)
+		tcpNodePort := int(tcpService.Spec.Ports[0].NodePort)
+		if tcpNodePort == 0 {
+			framework.Failf("TCP Spec.Ports[0].NodePort (%d) not allocated", tcpNodePort)
+		}
+		if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
+			framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
+		}
+		framework.Logf("TCP node port: %d", tcpNodePort)
+
+		ginkgo.By("hitting the TCP service's new NodePort")
+		e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
+
+		ginkgo.By("hitting the TCP service's LoadBalancer")
+		e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
+	})
 })
 
 var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {