mirror of https://github.com/k3s-io/kubernetes.git
KEP-1435 Mixed Protocol values in LoadBalancer Service GA
- Removed the unit tests that covered the cases where the MixedProtocolLBService feature flag was false - the feature flag is locked to true with GA.
- Added an integration test to verify that the API server accepts an LB Service with different protocols.
- Added an e2e test to verify that a service exposed by a multi-protocol LB Service is accessible via both ports.
- Removed the conditional validation that compared the new and the old Service definitions during an update - the feature flag is locked to true with GA.
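For context, here is a minimal client-go sketch (illustrative only, not part of this commit; the service name, selector, namespace, and kubeconfig handling are assumptions) of the kind of spec the GA feature allows: a single LoadBalancer Service exposing the same port number over both TCP and UDP.

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load the local kubeconfig (assumption: running outside the cluster).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// One LoadBalancer Service, two ports with the same number but different protocols.
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "dns-lb"}, // hypothetical name
		Spec: corev1.ServiceSpec{
			Type:     corev1.ServiceTypeLoadBalancer,
			Selector: map[string]string{"app": "dns"}, // hypothetical selector
			Ports: []corev1.ServicePort{
				{Name: "dns-tcp", Port: 53, TargetPort: intstr.FromInt(5353), Protocol: corev1.ProtocolTCP},
				{Name: "dns-udp", Port: 53, TargetPort: intstr.FromInt(5353), Protocol: corev1.ProtocolUDP},
			},
		},
	}

	created, err := client.CoreV1().Services("default").Create(context.TODO(), svc, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("created service %s with %d ports\n", created.Name, len(created.Spec.Ports))
}

Before GA, such a spec was rejected by validation unless the MixedProtocolLBService feature gate was enabled; with the gate locked to true, the API server accepts it unconditionally.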
@@ -1030,6 +1030,8 @@ func (j *TestJig) CheckServiceReachability(svc *v1.Service, pod *v1.Pod) error {
		return j.checkNodePortServiceReachability(svc, pod)
	case v1.ServiceTypeExternalName:
		return j.checkExternalServiceReachability(svc, pod)
	case v1.ServiceTypeLoadBalancer:
		return j.checkClusterIPServiceReachability(svc, pod)
	default:
		return fmt.Errorf("unsupported service type \"%s\" to verify service reachability for \"%s\" service. This may due to diverse implementation of the service type", svcType, svc.Name)
	}

@@ -1065,3 +1067,36 @@ func (j *TestJig) CreateSCTPServiceWithPort(tweak func(svc *v1.Service), port in
	}
	return j.sanityCheckService(result, svc.Spec.Type)
}

// CreateLoadBalancerServiceWaitForClusterIPOnly creates a loadbalancer service and waits
// for it to acquire a cluster IP
func (j *TestJig) CreateLoadBalancerServiceWaitForClusterIPOnly(timeout time.Duration, tweak func(svc *v1.Service)) (*v1.Service, error) {
	ginkgo.By("creating a service " + j.Namespace + "/" + j.Name + " with type=LoadBalancer")
	svc := j.newServiceTemplate(v1.ProtocolTCP, 80)
	svc.Spec.Type = v1.ServiceTypeLoadBalancer
	// We need to turn affinity off for our LB distribution tests
	svc.Spec.SessionAffinity = v1.ServiceAffinityNone
	if tweak != nil {
		tweak(svc)
	}
	_, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %v", svc.Name, err)
	}

	ginkgo.By("waiting for cluster IP for loadbalancer service " + j.Namespace + "/" + j.Name)
	return j.WaitForLoadBalancerClusterIP(timeout)
}

// WaitForLoadBalancerClusterIP waits for the given LoadBalancer service to have a ClusterIP, or returns an error after the given timeout
func (j *TestJig) WaitForLoadBalancerClusterIP(timeout time.Duration) (*v1.Service, error) {
	framework.Logf("Waiting up to %v for LoadBalancer service %q to have a ClusterIP", timeout, j.Name)
	service, err := j.waitForCondition(timeout, "have a ClusterIP", func(svc *v1.Service) bool {
		return len(svc.Spec.ClusterIP) > 0
	})
	if err != nil {
		return nil, err
	}

	return j.sanityCheckService(service, v1.ServiceTypeLoadBalancer)
}

@@ -3731,6 +3731,78 @@ var _ = common.SIGDescribe("Services", func() {

		framework.Logf("Collection of services has been deleted")
	})

	/*
		Release: v1.26
		Testname: Service, same ports with different protocols on a Load Balancer Service
		Description: Create a LoadBalancer service with two ports that have the same value but use different protocols. Add a Pod that listens on both ports. The Pod must be reachable via the ClusterIP and both ports.
	*/
	ginkgo.It("should serve endpoints on same port and different protocol for internal traffic on Type LoadBalancer ", func() {
		serviceName := "multiprotocol-lb-test"
		ns := f.Namespace.Name
		jig := e2eservice.NewTestJig(cs, ns, serviceName)

		defer func() {
			err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{})
			framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
		}()

		svc1port := "svc1"
		svc2port := "svc2"

		ginkgo.By("creating service " + serviceName + " in namespace " + ns)
		svc, err := jig.CreateLoadBalancerServiceWaitForClusterIPOnly(2*time.Minute, func(service *v1.Service) {
			service.Spec.Ports = []v1.ServicePort{
				{
					Name:       "portname1",
					Port:       80,
					TargetPort: intstr.FromString(svc1port),
					Protocol:   v1.ProtocolTCP,
				},
				{
					Name:       "portname2",
					Port:       81,
					TargetPort: intstr.FromString(svc2port),
					Protocol:   v1.ProtocolUDP,
				},
			}
		})
		framework.ExpectNoError(err)

		port1 := 100
		port2 := 101

		names := map[string]bool{}
		defer func() {
			for name := range names {
				err := cs.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
				framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
			}
		}()

		containerPorts := []v1.ContainerPort{
			{
				Name:          svc1port,
				ContainerPort: int32(port1),
				Protocol:      v1.ProtocolTCP,
			},
			{
				Name:          svc2port,
				ContainerPort: int32(port2),
				Protocol:      v1.ProtocolUDP,
			},
		}

		podname1 := "pod1"

		createPodOrFail(f, ns, podname1, jig.Labels, containerPorts, "netexec", "--http-port", strconv.Itoa(port1), "--udp-port", strconv.Itoa(port2))
		validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{podname1: {port1, port2}})

		ginkgo.By("Checking if the Service forwards traffic to pods")
		execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod", nil)
		err = jig.CheckServiceReachability(svc, execPod)
		framework.ExpectNoError(err)
		e2epod.DeletePodOrFail(cs, ns, podname1)
	})

})

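To make the reachability claim above concrete, here is a rough standalone sketch (my own, not the framework's CheckServiceReachability implementation; the ClusterIP is a placeholder, and it assumes the agnhost netexec container answers a "hostname" command on its UDP port) of what "reachable via the ClusterIP and both ports" means when run from inside the cluster, for example from the exec pod.

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	clusterIP := "10.0.0.10" // placeholder: substitute the Service's actual ClusterIP

	// TCP check: Service port 80 forwards to the netexec HTTP port, so a successful
	// TCP connect is enough to show the TCP path works.
	tcpConn, err := net.DialTimeout("tcp", net.JoinHostPort(clusterIP, "80"), 5*time.Second)
	if err != nil {
		fmt.Println("TCP port 80 unreachable:", err)
	} else {
		fmt.Println("TCP port 80 reachable")
		tcpConn.Close()
	}

	// UDP check: Service port 81 forwards to the netexec UDP port; send a "hostname"
	// command and wait briefly for a reply.
	udpConn, err := net.Dial("udp", net.JoinHostPort(clusterIP, "81"))
	if err != nil {
		fmt.Println("UDP dial failed:", err)
		return
	}
	defer udpConn.Close()
	udpConn.SetDeadline(time.Now().Add(5 * time.Second))
	if _, err := udpConn.Write([]byte("hostname")); err != nil {
		fmt.Println("UDP write failed:", err)
		return
	}
	buf := make([]byte, 1024)
	n, err := udpConn.Read(buf)
	if err != nil {
		fmt.Println("UDP port 81 unreachable:", err)
		return
	}
	fmt.Printf("UDP port 81 reachable, reply: %q\n", buf[:n])
}
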
@@ -381,6 +381,59 @@ func Test_UpdateLoadBalancerWithLoadBalancerClass(t *testing.T) {
	}
}

// Test_ServiceLoadBalancerMixedProtocolSetup tests that a LoadBalancer Service with different protocol values
// can be created.
func Test_ServiceLoadBalancerMixedProtocolSetup(t *testing.T) {
	server := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
	defer server.TearDownFn()

	client, err := clientset.NewForConfig(server.ClientConfig)
	if err != nil {
		t.Fatalf("Error creating clientset: %v", err)
	}

	ns := framework.CreateNamespaceOrDie(client, "test-service-mixed-protocols", t)
	defer framework.DeleteNamespaceOrDie(client, ns, t)

	controller, cloud, informer := newServiceController(t, client)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	informer.Start(ctx.Done())
	go controller.Run(ctx, 1, controllersmetrics.NewControllerManagerMetrics("loadbalancer-test"))

	service := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-123",
		},
		Spec: corev1.ServiceSpec{
			Type: corev1.ServiceTypeLoadBalancer,
			Ports: []corev1.ServicePort{
				{
					Name:     "tcpport",
					Port:     int32(53),
					Protocol: corev1.ProtocolTCP,
				},
				{
					Name:     "udpport",
					Port:     int32(53),
					Protocol: corev1.ProtocolUDP,
				},
			},
		},
	}

	_, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Error creating test service: %v", err)
	}

	time.Sleep(5 * time.Second) // sleep 5 seconds to wait for the service controller reconcile
	if len(cloud.Calls) == 0 {
		t.Errorf("expected cloud provider calls to create load balancer")
	}
}

func newServiceController(t *testing.T, client *clientset.Clientset) (*servicecontroller.Controller, *fakecloud.Cloud, informers.SharedInformerFactory) {
	cloud := &fakecloud.Cloud{}
	informerFactory := informers.NewSharedInformerFactory(client, 0)