From d3e8572d706158806f98968d0a87b530990ffde8 Mon Sep 17 00:00:00 2001
From: Antonio Ojea
Date: Fri, 24 Jan 2025 10:37:43 +0000
Subject: [PATCH] cluster ip allocator should check the legacy allocators first

Kubernetes clusters allow an IPv6 Service CIDR as large as /108, even
though the old allocators only use the first /112 of that range. The new
allocators do not have this limitation, so they can allocate IPs across
the whole space. The problem happens on upgrades of clusters that were
already using this size, since the new allocators by default try to
allocate addresses that work for both the new and the old allocators, to
allow safe upgrades.

The new allocators, when configured to keep compatibility with the old
allocators, must first try to allocate an IP that is compatible with the
old allocators, and only fall back to the new behavior if that is not
possible.
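To make the size mismatch concrete, here is a minimal standalone sketch
(illustrative only, not part of the patch) that uses the standard library
net/netip to show how much of a /108 the old bitmap-based allocator can
actually address:

    package main

    import (
        "fmt"
        "net/netip"
    )

    func main() {
        // The /108 IPv6 Service CIDR size referenced in the commit message.
        prefix := netip.MustParsePrefix("2001:db8:1::/108")

        // 2^(128-108) = 1048576 addresses in the whole range.
        total := uint64(1) << (128 - prefix.Bits())

        // The old bitmap allocator only uses the first /112 of the range,
        // i.e. 2^(128-112) = 65536 addresses.
        legacy := netip.PrefixFrom(prefix.Addr(), 112)
        usable := uint64(1) << (128 - legacy.Bits())

        fmt.Printf("range %s: %d addresses\n", prefix, total)               // 1048576
        fmt.Printf("old allocator sees %s: %d addresses\n", legacy, usable) // 65536
    }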
---
 .../core/service/ipallocator/cidrallocator.go | 23 ++++++++++++++---------
 .../service/ipallocator/cidrallocator_test.go |  6 ------
 test/integration/dualstack/dualstack_test.go  | 16 ++++++++--------
 3 files changed, 22 insertions(+), 23 deletions(-)

diff --git a/pkg/registry/core/service/ipallocator/cidrallocator.go b/pkg/registry/core/service/ipallocator/cidrallocator.go
index 53d9bb2de66..7ec7e9cebd9 100644
--- a/pkg/registry/core/service/ipallocator/cidrallocator.go
+++ b/pkg/registry/core/service/ipallocator/cidrallocator.go
@@ -361,6 +361,20 @@ func (c *MetaAllocator) Allocate(ip net.IP) error {
 }
 
 func (c *MetaAllocator) AllocateNextService(service *api.Service) (net.IP, error) {
+	// If the cluster is still using the old allocators use them first to try to
+	// get an IP address to keep backwards compatibility.
+	if !utilfeature.DefaultFeatureGate.Enabled(features.DisableAllocatorDualWrite) {
+		ip, err := c.bitmapAllocator.AllocateNext()
+		if err == nil {
+			allocator, err := c.getAllocator(ip, true)
+			if err != nil {
+				return nil, err
+			}
+			return ip, allocator.AllocateService(service, ip)
+		} else {
+			klog.Infof("no IP address available on the old ClusterIP allocator, trying to get a new address using the new allocators")
+		}
+	}
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	// TODO(aojea) add strategy to return a random allocator but
@@ -376,15 +390,6 @@ func (c *MetaAllocator) AllocateNextService(service *api.Service) (net.IP, error) {
 		}
 		ip, err := item.allocator.AllocateNextService(service)
 		if err == nil {
-			if !utilfeature.DefaultFeatureGate.Enabled(features.DisableAllocatorDualWrite) {
-				cidr := c.bitmapAllocator.CIDR()
-				if cidr.Contains(ip) {
-					err := c.bitmapAllocator.Allocate(ip)
-					if err != nil {
-						continue
-					}
-				}
-			}
 			return ip, nil
 		}
 	}
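The reordering above is the heart of the patch. For quick reference, it
boils down to the following simplified sketch, with hypothetical function
values standing in for the real MetaAllocator types (assumes net/netip;
not the actual upstream code):

    // allocateNext mirrors the new ordering: while dual-write compatibility
    // is enabled, consult the legacy bitmap allocator first, so every
    // returned IP is valid for both allocator generations; use the new
    // ServiceCIDR allocators only once the legacy range is exhausted.
    func allocateNext(dualWrite bool, legacyNext, newNext func() (netip.Addr, error)) (netip.Addr, error) {
        if dualWrite {
            if ip, err := legacyNext(); err == nil {
                return ip, nil // compatible with old and new allocators
            }
            // legacy /112 exhausted: fall through to the new allocators
        }
        return newNext()
    }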
"--service-cluster-ip-range=192.168.0.0/24,2001:db8:1::/112", + "--service-cluster-ip-range=192.168.0.0/24,2001:db8:1::/108", "--advertise-address=10.0.0.1", "--disable-admission-plugins=ServiceAccount", },