mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-08-08 11:38:15 +00:00
Merge pull request #122320 from armstrongli/master
allow service NodePort to be updated to 0 in case AllocateLoadBalancerNodePorts is false
This commit is contained in:
commit
439f7df65b
@ -648,7 +648,11 @@ func needsClusterIP(svc *api.Service) bool {
|
||||
}
|
||||
|
||||
func needsNodePort(svc *api.Service) bool {
|
||||
if svc.Spec.Type == api.ServiceTypeNodePort || svc.Spec.Type == api.ServiceTypeLoadBalancer {
|
||||
if svc.Spec.Type == api.ServiceTypeNodePort {
|
||||
return true
|
||||
}
|
||||
if svc.Spec.Type == api.ServiceTypeLoadBalancer &&
|
||||
(svc.Spec.AllocateLoadBalancerNodePorts == nil || *svc.Spec.AllocateLoadBalancerNodePorts) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
|
@ -502,6 +502,62 @@ func TestPatchAllocatedValues(t *testing.T) {
|
||||
update: svctest.MakeService("foo",
|
||||
svctest.SetTypeExternalName,
|
||||
svctest.SetExternalTrafficPolicy(api.ServiceExternalTrafficPolicyLocal)),
|
||||
}, {
|
||||
name: "reset_NodePort",
|
||||
before: svctest.MakeService("foo",
|
||||
svctest.SetTypeLoadBalancer,
|
||||
svctest.SetAllocateLoadBalancerNodePorts(false),
|
||||
svctest.SetClusterIPs("10.0.0.93", "2000::76"),
|
||||
svctest.SetUniqueNodePorts,
|
||||
svctest.SetHealthCheckNodePort(31234)),
|
||||
update: svctest.MakeService("foo",
|
||||
svctest.SetTypeLoadBalancer,
|
||||
svctest.SetAllocateLoadBalancerNodePorts(false),
|
||||
svctest.SetClusterIPs("10.0.0.93", "2000::76"),
|
||||
svctest.SetNodePorts(0)),
|
||||
expectSameClusterIPs: true,
|
||||
expectSameNodePort: false,
|
||||
}, {
|
||||
name: "reset_partial_NodePorts",
|
||||
before: svctest.MakeService("foo",
|
||||
svctest.SetTypeLoadBalancer,
|
||||
svctest.SetAllocateLoadBalancerNodePorts(false),
|
||||
svctest.SetClusterIPs("10.0.0.93", "2000::76"),
|
||||
svctest.SetPorts(
|
||||
svctest.MakeServicePort("", 93, intstr.FromInt32(76), api.ProtocolTCP),
|
||||
svctest.MakeServicePort("", 94, intstr.FromInt32(76), api.ProtocolTCP),
|
||||
),
|
||||
svctest.SetUniqueNodePorts,
|
||||
svctest.SetHealthCheckNodePort(31234)),
|
||||
update: svctest.MakeService("foo",
|
||||
svctest.SetTypeLoadBalancer,
|
||||
svctest.SetAllocateLoadBalancerNodePorts(false),
|
||||
svctest.SetClusterIPs("10.0.0.93", "2000::76"),
|
||||
svctest.SetPorts(
|
||||
svctest.MakeServicePort("", 93, intstr.FromInt32(76), api.ProtocolTCP),
|
||||
svctest.MakeServicePort("", 94, intstr.FromInt32(76), api.ProtocolTCP),
|
||||
),
|
||||
svctest.SetUniqueNodePorts,
|
||||
func(service *api.Service) {
|
||||
service.Spec.Ports[1].NodePort = 0
|
||||
}),
|
||||
expectSameClusterIPs: true,
|
||||
expectSameNodePort: false,
|
||||
}, {
|
||||
name: "keep_NodePort",
|
||||
before: svctest.MakeService("foo",
|
||||
svctest.SetTypeLoadBalancer,
|
||||
svctest.SetAllocateLoadBalancerNodePorts(true),
|
||||
svctest.SetClusterIPs("10.0.0.93", "2000::76"),
|
||||
svctest.SetUniqueNodePorts,
|
||||
svctest.SetHealthCheckNodePort(31234)),
|
||||
update: svctest.MakeService("foo",
|
||||
svctest.SetTypeLoadBalancer,
|
||||
svctest.SetAllocateLoadBalancerNodePorts(true),
|
||||
svctest.SetClusterIPs("10.0.0.93", "2000::76"),
|
||||
svctest.SetNodePorts(0)),
|
||||
expectSameClusterIPs: true,
|
||||
expectSameNodePort: true,
|
||||
}}
|
||||
|
||||
for _, tc := range testCases {
|
||||
@ -532,10 +588,18 @@ func TestPatchAllocatedValues(t *testing.T) {
|
||||
} else if cmp.Equal(beforeIPs, updateIPs) {
|
||||
t.Errorf("expected clusterIPs to not be patched: %q == %q", beforeIPs, updateIPs)
|
||||
}
|
||||
if b, u := tc.before.Spec.Ports[0].NodePort, update.Spec.Ports[0].NodePort; tc.expectSameNodePort && b != u {
|
||||
t.Errorf("expected nodePort to be patched: %d != %d", b, u)
|
||||
} else if !tc.expectSameNodePort && b == u {
|
||||
t.Errorf("expected nodePort to not be patched: %d == %d", b, u)
|
||||
|
||||
bNodePorts, uNodePorts := make([]int32, 0), make([]int32, 0)
|
||||
for _, item := range tc.before.Spec.Ports {
|
||||
bNodePorts = append(bNodePorts, item.NodePort)
|
||||
}
|
||||
for _, item := range update.Spec.Ports {
|
||||
uNodePorts = append(uNodePorts, item.NodePort)
|
||||
}
|
||||
if tc.expectSameNodePort && !reflect.DeepEqual(bNodePorts, uNodePorts) {
|
||||
t.Errorf("expected nodePort to be patched: %v != %v", bNodePorts, uNodePorts)
|
||||
} else if !tc.expectSameNodePort && reflect.DeepEqual(bNodePorts, uNodePorts) {
|
||||
t.Errorf("expected nodePort to not be patched: %v == %v", bNodePorts, uNodePorts)
|
||||
}
|
||||
|
||||
if b, u := tc.before.Spec.HealthCheckNodePort, update.Spec.HealthCheckNodePort; tc.expectSameHCNP && b != u {
|
||||
|
@ -18,12 +18,15 @@ package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/strategicpatch"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
@ -178,6 +181,188 @@ func Test_ServiceLoadBalancerEnableThenDisableAllocatedNodePorts(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Test_ServiceLoadBalancerDisableAllocatedNodePort test that switching a Service
// to spec.allocateLoadBalancerNodePorts=false can de-allocate existing node ports.
func Test_ServiceLoadBalancerDisableAllocatedNodePort(t *testing.T) {
	// Integration test: runs against a real kube-apiserver backed by shared etcd.
	server := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
	defer server.TearDownFn()

	client, err := clientset.NewForConfig(server.ClientConfig)
	if err != nil {
		t.Fatalf("Error creating clientset: %v", err)
	}

	ns := framework.CreateNamespaceOrDie(client, "test-service-deallocate-node-ports", t)
	defer framework.DeleteNamespaceOrDie(client, ns, t)

	// LoadBalancer Service with node-port allocation explicitly enabled, so
	// creation is expected to assign a node port to the single port.
	service := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-123",
		},
		Spec: corev1.ServiceSpec{
			Type:                          corev1.ServiceTypeLoadBalancer,
			AllocateLoadBalancerNodePorts: utilpointer.Bool(true),
			Ports: []corev1.ServicePort{{
				Port: int32(80),
			}},
			Selector: map[string]string{
				"foo": "bar",
			},
		},
	}

	service, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Error creating test service: %v", err)
	}

	if !serviceHasNodePorts(service) {
		t.Error("expected node ports but found none")
	}

	// Disable allocation and zero the node port in the same update; the
	// apiserver is expected to honor the 0 and release the port instead of
	// re-defaulting it back to the previously allocated value.
	service.Spec.AllocateLoadBalancerNodePorts = utilpointer.Bool(false)
	service.Spec.Ports[0].NodePort = 0
	service, err = client.CoreV1().Services(ns.Name).Update(context.TODO(), service, metav1.UpdateOptions{})
	if err != nil {
		t.Fatalf("Error updating test service: %v", err)
	}

	if serviceHasNodePorts(service) {
		t.Error("node ports were expected to be deallocated")
	}
}
|
||||
|
||||
// Test_ServiceLoadBalancerDisableAllocatedNodePorts test that switching a Service
|
||||
// to spec.allocateLoadBalancerNodePorts=false can de-allocate one of existing node ports.
|
||||
func Test_ServiceLoadBalancerDisableAllocatedNodePorts(t *testing.T) {
|
||||
server := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
|
||||
defer server.TearDownFn()
|
||||
|
||||
client, err := clientset.NewForConfig(server.ClientConfig)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating clientset: %v", err)
|
||||
}
|
||||
|
||||
ns := framework.CreateNamespaceOrDie(client, "test-service-deallocate-node-ports", t)
|
||||
defer framework.DeleteNamespaceOrDie(client, ns, t)
|
||||
|
||||
service := &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-123",
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
Type: corev1.ServiceTypeLoadBalancer,
|
||||
AllocateLoadBalancerNodePorts: utilpointer.Bool(true),
|
||||
Ports: []corev1.ServicePort{{
|
||||
Name: "np-1",
|
||||
Port: int32(80),
|
||||
}, {
|
||||
Name: "np-2",
|
||||
Port: int32(81),
|
||||
}},
|
||||
Selector: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
service, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating test service: %v", err)
|
||||
}
|
||||
|
||||
if !serviceHasNodePorts(service) {
|
||||
t.Error("expected node ports but found none")
|
||||
}
|
||||
|
||||
service.Spec.AllocateLoadBalancerNodePorts = utilpointer.Bool(false)
|
||||
service.Spec.Ports[0].NodePort = 0
|
||||
service, err = client.CoreV1().Services(ns.Name).Update(context.TODO(), service, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Error updating test service: %v", err)
|
||||
}
|
||||
|
||||
if service.Spec.Ports[0].NodePort != 0 {
|
||||
t.Error("node ports[0] was expected to be deallocated")
|
||||
}
|
||||
if service.Spec.Ports[1].NodePort == 0 {
|
||||
t.Error("node ports was not expected to be deallocated")
|
||||
}
|
||||
}
|
||||
|
||||
// Test_ServiceLoadBalancerDisableAllocatedNodePortsByPatch test that switching a Service
|
||||
// to spec.allocateLoadBalancerNodePorts=false with path can de-allocate one of existing node ports.
|
||||
func Test_ServiceLoadBalancerDisableAllocatedNodePortsByPatch(t *testing.T) {
|
||||
server := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
|
||||
defer server.TearDownFn()
|
||||
|
||||
client, err := clientset.NewForConfig(server.ClientConfig)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating clientset: %v", err)
|
||||
}
|
||||
|
||||
ns := framework.CreateNamespaceOrDie(client, "test-service-deallocate-node-ports", t)
|
||||
defer framework.DeleteNamespaceOrDie(client, ns, t)
|
||||
|
||||
service := &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-123",
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
Type: corev1.ServiceTypeLoadBalancer,
|
||||
AllocateLoadBalancerNodePorts: utilpointer.Bool(true),
|
||||
Ports: []corev1.ServicePort{{
|
||||
Name: "np-1",
|
||||
Port: int32(80),
|
||||
}, {
|
||||
Name: "np-2",
|
||||
Port: int32(81),
|
||||
}},
|
||||
Selector: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
service, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating test service: %v", err)
|
||||
}
|
||||
|
||||
if !serviceHasNodePorts(service) {
|
||||
t.Error("expected node ports but found none")
|
||||
}
|
||||
|
||||
clone := service.DeepCopy()
|
||||
clone.Spec.AllocateLoadBalancerNodePorts = utilpointer.Bool(false)
|
||||
clone.Spec.Ports[0].NodePort = 0
|
||||
|
||||
oldData, err := json.Marshal(service)
|
||||
if err != nil {
|
||||
t.Fatalf("Error marshalling test service: %v", err)
|
||||
}
|
||||
newData, err := json.Marshal(clone)
|
||||
if err != nil {
|
||||
t.Fatalf("Error marshalling test service: %v", err)
|
||||
}
|
||||
patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, corev1.Service{})
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating patch: %v", err)
|
||||
}
|
||||
|
||||
service, err = client.CoreV1().Services(ns.Name).Patch(context.TODO(), service.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Error updating test service: %v", err)
|
||||
}
|
||||
|
||||
if service.Spec.Ports[0].NodePort != 0 {
|
||||
t.Error("node ports[0] was expected to be deallocated")
|
||||
}
|
||||
if service.Spec.Ports[1].NodePort == 0 {
|
||||
t.Error("node ports was not expected to be deallocated")
|
||||
}
|
||||
}
|
||||
|
||||
// Test_ServiceLoadBalancerDisableThenEnableAllocatedNodePorts test that switching a Service
|
||||
// to spec.allocateLoadBalancerNodePorts=true from false, allocate new node ports.
|
||||
func Test_ServiceLoadBalancerDisableThenEnableAllocatedNodePorts(t *testing.T) {
|
||||
|
Loading…
Reference in New Issue
Block a user