From ea99593fa1ef102d8a08b0884477693137ae7aec Mon Sep 17 00:00:00 2001 From: Antonio Ojea Date: Wed, 25 Jan 2023 20:16:49 +0000 Subject: [PATCH] Fix panic on ClusterIP allocation for /28 subnets The ClusterIP allocator tries to reserve on part of the ServiceCIDR to allocate static IPs to the Services. The heuristic of the allocator to obtain the offset was taking into account the whole range size, not the IPs available in the range, the subnet address and the broadcast address for IPv4 are not available. This caused that for CIDRs with 16 hosts, /28 for IPv4 and /124 for IPv6, the offset calculated was higher than the max number of available addresses on the allocator, causing this to panic. Change-Id: I6c6f527b0a600b3612be37769e405b8fb3dd33a8 --- pkg/registry/core/service/allocator/bitmap.go | 1 + .../core/service/ipallocator/bitmap.go | 10 +++- .../core/service/ipallocator/bitmap_test.go | 10 ++++ test/integration/network/services_test.go | 46 +++++++++++++++++++ 4 files changed, 66 insertions(+), 1 deletion(-) diff --git a/pkg/registry/core/service/allocator/bitmap.go b/pkg/registry/core/service/allocator/bitmap.go index b492b4a1bb6..c176c578cdc 100644 --- a/pkg/registry/core/service/allocator/bitmap.go +++ b/pkg/registry/core/service/allocator/bitmap.go @@ -68,6 +68,7 @@ func NewAllocationMap(max int, rangeSpec string) *AllocationBitmap { // allows to pass an offset that divides the allocation bitmap in two blocks. // The first block of values will not be used for random value assigned by the AllocateNext() // method until the second block of values has been exhausted. +// The offset value must be always smaller than the bitmap size. 
func NewAllocationMapWithOffset(max int, rangeSpec string, offset int) *AllocationBitmap { a := AllocationBitmap{ strategy: randomScanStrategyWithOffset{ diff --git a/pkg/registry/core/service/ipallocator/bitmap.go b/pkg/registry/core/service/ipallocator/bitmap.go index 038487e7de8..02878ea48c4 100644 --- a/pkg/registry/core/service/ipallocator/bitmap.go +++ b/pkg/registry/core/service/ipallocator/bitmap.go @@ -83,6 +83,11 @@ func New(cidr *net.IPNet, allocatorFactory allocator.AllocatorWithOffsetFactory) base.Add(base, big.NewInt(1)) max-- + // cidr with whole mask can be negative + if max < 0 { + max = 0 + } + r := Range{ net: cidr, base: base, @@ -357,7 +362,10 @@ func calculateRangeOffset(cidr *net.IPNet) int { ) cidrSize := netutils.RangeSize(cidr) - if cidrSize < min { + // available addresses are always less than the cidr size + // A /28 CIDR returns 16 addresses, but 2 of them, the network + // and broadcast addresses are not available. + if cidrSize <= min { return 0 } diff --git a/pkg/registry/core/service/ipallocator/bitmap_test.go b/pkg/registry/core/service/ipallocator/bitmap_test.go index 7978a0e03a2..a736c739031 100644 --- a/pkg/registry/core/service/ipallocator/bitmap_test.go +++ b/pkg/registry/core/service/ipallocator/bitmap_test.go @@ -798,8 +798,18 @@ func Test_calculateRangeOffset(t *testing.T) { { name: "small mask IPv4", cidr: "192.168.1.1/28", + want: 0, + }, + { + name: "small mask IPv4", + cidr: "192.168.1.1/27", want: 16, }, + { + name: "small mask IPv6", + cidr: "fd00::1/124", + want: 0, + }, { name: "small mask IPv6", cidr: "fd00::1/122", diff --git a/test/integration/network/services_test.go b/test/integration/network/services_test.go index 19e3c9a30e2..5a93a5d856a 100644 --- a/test/integration/network/services_test.go +++ b/test/integration/network/services_test.go @@ -128,3 +128,49 @@ func TestServicesFinalizersRepairLoop(t *testing.T) { } t.Logf("Created service: %s", svcNodePort.Name) } + +// Regression test for 
https://issues.k8s.io/115316 +func TestServiceCIDR28bits(t *testing.T) { + serviceCIDR := "10.0.0.0/28" + + client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + ModifyServerRunOptions: func(opts *options.ServerRunOptions) { + opts.ServiceClusterIPRanges = serviceCIDR + }, + }) + defer tearDownFn() + + // Wait until the default "kubernetes" service is created. + if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) { + _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + if err != nil { + return false, err + } + return true, nil + }); err != nil { + t.Fatalf("creating kubernetes service timed out") + } + + ns := framework.CreateNamespaceOrDie(client, "test-regression", t) + defer framework.DeleteNamespaceOrDie(client, ns, t) + + service := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1234", + }, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeClusterIP, + Ports: []v1.ServicePort{{ + Port: int32(80), + }}, + Selector: map[string]string{ + "foo": "bar", + }, + }, + } + + _, err := client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Error creating test service: %v", err) + } +}