cluster ip allocator should check the legacy allocators first

Kubernetes clusters allow defining an IPv6 service range as large as /108,
even though the old allocators only use the first /112 of that range.
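
For scale: a /108 prefix spans 2^20 = 1,048,576 addresses, while a /112
spans only 2^16 = 65,536. A minimal Go snippet, not part of this commit,
to verify the arithmetic:

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	prefix := netip.MustParsePrefix("2001:db8:1::/108")
	// An IPv6 prefix of length n contains 2^(128-n) addresses.
	newSpace := uint64(1) << (128 - prefix.Bits()) // /108 -> 1048576
	oldSpace := uint64(1) << (128 - 112)           // /112 ->   65536
	fmt.Printf("new allocators: %d addresses, old allocators: %d addresses\n", newSpace, oldSpace)
}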

The new allocators do not have this limitation, so they can allocate IPs
across the whole space. The problem appears when upgrading clusters that
were already using a range of this size, since the new allocators by
default try to allocate addresses that work for both the new and the old
allocators, to allow safe upgrades.

When configured to keep compatibility with the old allocators, the new
allocators must first try to allocate an IP that is compatible with the
old allocators, and only fall back to the new behavior if that is not
possible.
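
For illustration only, a compact sketch of the allocation order this
change establishes while the DisableAllocatorDualWrite feature gate is
off. The Allocator interface and allocateDualWrite function are
hypothetical stand-ins; the real change, shown in the diff below, lives
in MetaAllocator.AllocateNextService and retries other allocators instead
of returning an error when the mirror write fails.

package sketch

import "net"

// Allocator is a hypothetical interface covering only the calls used here.
type Allocator interface {
	AllocateNext() (net.IP, error)
	Allocate(ip net.IP) error
}

// allocateDualWrite prefers the legacy bitmap range so that every address
// handed out is valid for both allocator generations, and only spills into
// the wider range of the new allocators once the legacy range is exhausted.
func allocateDualWrite(legacy, modern Allocator, legacyCIDR *net.IPNet) (net.IP, error) {
	// 1. Try the legacy allocator first; mirror the result into the new one.
	if ip, err := legacy.AllocateNext(); err == nil {
		return ip, modern.Allocate(ip)
	}
	// 2. Legacy range exhausted: allocate from the new allocators.
	ip, err := modern.AllocateNext()
	if err != nil {
		return nil, err
	}
	// 3. If the address still falls inside the legacy CIDR, reserve it there
	// too, so a downgrade never sees an address the bitmap believes is free.
	if legacyCIDR.Contains(ip) {
		if err := legacy.Allocate(ip); err != nil {
			return nil, err
		}
	}
	return ip, nil
}
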
Antonio Ojea 2025-01-24 10:37:43 +00:00
parent f6f06806cc
commit d3e8572d70
3 changed files with 22 additions and 23 deletions


@@ -361,6 +361,20 @@ func (c *MetaAllocator) Allocate(ip net.IP) error {
 }
 
 func (c *MetaAllocator) AllocateNextService(service *api.Service) (net.IP, error) {
+	// If the cluster is still using the old allocators use them first to try to
+	// get an IP address to keep backwards compatibility.
+	if !utilfeature.DefaultFeatureGate.Enabled(features.DisableAllocatorDualWrite) {
+		ip, err := c.bitmapAllocator.AllocateNext()
+		if err == nil {
+			allocator, err := c.getAllocator(ip, true)
+			if err != nil {
+				return nil, err
+			}
+			return ip, allocator.AllocateService(service, ip)
+		} else {
+			klog.Infof("no IP address available on the old ClusterIP allocator, trying to get a new address using the new allocators")
+		}
+	}
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	// TODO(aojea) add strategy to return a random allocator but
@@ -376,15 +390,6 @@ func (c *MetaAllocator) AllocateNextService(service *api.Service) (net.IP, error) {
 		}
 		ip, err := item.allocator.AllocateNextService(service)
 		if err == nil {
-			if !utilfeature.DefaultFeatureGate.Enabled(features.DisableAllocatorDualWrite) {
-				cidr := c.bitmapAllocator.CIDR()
-				if cidr.Contains(ip) {
-					err := c.bitmapAllocator.Allocate(ip)
-					if err != nil {
-						continue
-					}
-				}
-			}
 			return ip, nil
 		}
 	}


@@ -484,9 +484,6 @@ func TestCIDRAllocateDualWrite(t *testing.T) {
 	if f := r.Free(); f != 0 {
 		t.Errorf("free: %d", f)
 	}
-	if _, err := r.AllocateNext(); err == nil {
-		t.Error(err)
-	}
 
 	cidr := newServiceCIDR("test", "192.168.0.0/28")
 	_, err = r.client.ServiceCIDRs().Create(context.Background(), cidr, metav1.CreateOptions{})
@@ -554,9 +551,6 @@ func TestCIDRAllocateDualWriteCollision(t *testing.T) {
 	if f := r.Free(); f != 0 {
 		t.Errorf("free: %d", f)
 	}
-	if _, err := r.AllocateNext(); err == nil {
-		t.Error(err)
-	}
 
 	cidr := newServiceCIDR("test", "192.168.0.0/28")
 	_, err = r.client.ServiceCIDRs().Create(context.Background(), cidr, metav1.CreateOptions{})


@@ -298,7 +298,7 @@ func TestCreateServiceSingleStackIPv6(t *testing.T) {
 		apiServerOptions,
 		[]string{
 			fmt.Sprintf("--runtime-config=networking.k8s.io/v1beta1=%v", enableMultiServiceCIDR),
-			"--service-cluster-ip-range=2001:db8:1::/112",
+			"--service-cluster-ip-range=2001:db8:1::/108",
 			"--advertise-address=2001:db8::10",
 			"--disable-admission-plugins=ServiceAccount",
 			fmt.Sprintf("--feature-gates=%s=%v,%s=%v", features.MultiCIDRServiceAllocator, enableMultiServiceCIDR, features.DisableAllocatorDualWrite, disableAllocatorDualWrite),
@@ -529,7 +529,7 @@ func TestCreateServiceDualStackIPv4IPv6(t *testing.T) {
 		apiServerOptions,
 		[]string{
 			fmt.Sprintf("--runtime-config=networking.k8s.io/v1beta1=%v", enableMultiServiceCIDR),
-			"--service-cluster-ip-range=10.0.0.0/16,2001:db8:1::/112",
+			"--service-cluster-ip-range=10.0.0.0/16,2001:db8:1::/108",
 			"--advertise-address=10.0.0.1",
 			"--disable-admission-plugins=ServiceAccount",
 			fmt.Sprintf("--feature-gates=%s=%v,%s=%v", features.MultiCIDRServiceAllocator, enableMultiServiceCIDR, features.DisableAllocatorDualWrite, disableAllocatorDualWrite),
@@ -808,7 +808,7 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) {
 		apiServerOptions,
 		[]string{
 			fmt.Sprintf("--runtime-config=networking.k8s.io/v1beta1=%v", enableMultiServiceCIDR),
-			"--service-cluster-ip-range=2001:db8:1::/112,10.0.0.0/16",
+			"--service-cluster-ip-range=2001:db8:1::/108,10.0.0.0/16",
 			"--advertise-address=2001:db8::10",
 			"--disable-admission-plugins=ServiceAccount",
 			fmt.Sprintf("--feature-gates=%s=%v,%s=%v", features.MultiCIDRServiceAllocator, enableMultiServiceCIDR, features.DisableAllocatorDualWrite, disableAllocatorDualWrite),
@@ -1038,7 +1038,7 @@ func TestUpgradeDowngrade(t *testing.T) {
 	s := kubeapiservertesting.StartTestServerOrDie(t,
 		apiServerOptions,
 		[]string{
-			"--service-cluster-ip-range=10.0.0.0/16,2001:db8:1::/112",
+			"--service-cluster-ip-range=10.0.0.0/16,2001:db8:1::/108",
 			"--advertise-address=10.0.0.1",
 			"--disable-admission-plugins=ServiceAccount",
 		},
@@ -1149,7 +1149,7 @@ func TestConvertToFromExternalName(t *testing.T) {
 	s := kubeapiservertesting.StartTestServerOrDie(t,
 		apiServerOptions,
 		[]string{
-			"--service-cluster-ip-range=10.0.0.0/16,2001:db8:1::/112",
+			"--service-cluster-ip-range=10.0.0.0/16,2001:db8:1::/108",
 			"--advertise-address=10.0.0.1",
 			"--disable-admission-plugins=ServiceAccount",
 		},
@@ -1238,7 +1238,7 @@ func TestPreferDualStack(t *testing.T) {
 	s := kubeapiservertesting.StartTestServerOrDie(t,
 		apiServerOptions,
 		[]string{
-			"--service-cluster-ip-range=10.0.0.0/16,2001:db8:1::/112",
+			"--service-cluster-ip-range=10.0.0.0/16,2001:db8:1::/108",
 			"--advertise-address=10.0.0.1",
 			"--disable-admission-plugins=ServiceAccount",
 		},
@@ -1552,7 +1552,7 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) {
 	s = kubeapiservertesting.StartTestServerOrDie(t,
 		apiServerOptions,
 		[]string{
-			"--service-cluster-ip-range=192.168.0.0/24,2001:db8:1::/112",
+			"--service-cluster-ip-range=192.168.0.0/24,2001:db8:1::/108",
 			"--disable-admission-plugins=ServiceAccount",
 		},
 		sharedEtcd)
@@ -1593,7 +1593,7 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) {
 	s := kubeapiservertesting.StartTestServerOrDie(t,
 		apiServerOptions,
 		[]string{
-			"--service-cluster-ip-range=192.168.0.0/24,2001:db8:1::/112",
+			"--service-cluster-ip-range=192.168.0.0/24,2001:db8:1::/108",
 			"--advertise-address=10.0.0.1",
 			"--disable-admission-plugins=ServiceAccount",
 		},