Fix panic on ClusterIP allocation for /28 subnets

The ClusterIP allocator tries to reserve one part of the ServiceCIDR
to allocate static IPs to Services.

The allocator's heuristic for obtaining the offset took the whole
range size into account, not the number of IPs available in the
range: the subnet address and, for IPv4, the broadcast address are
not available for allocation.

As a result, for CIDRs with 16 addresses (/28 for IPv4 and /124 for
IPv6) the calculated offset was higher than the maximum number of
available addresses in the allocator, causing it to panic.
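
To make the off-by-two concrete, here is a minimal, self-contained sketch; availableIPv4 is a hypothetical helper written for this note, not the allocator's code. The raw range size of an IPv4 /28 is 16, but only 14 addresses are allocatable once the subnet and broadcast addresses are excluded, so an offset derived from the raw size can point past the end of the allocator's bitmap.

package main

import "fmt"

// availableIPv4 returns the number of allocatable addresses in an
// IPv4 subnet of the given prefix length, excluding the subnet
// (network) and broadcast addresses.
func availableIPv4(prefixLen int) int {
	size := 1 << (32 - prefixLen) // raw range size, what the old heuristic used
	if size <= 2 {
		return 0
	}
	return size - 2 // drop the network and broadcast addresses
}

func main() {
	fmt.Println(1<<(32-28), availableIPv4(28)) // prints: 16 14
}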

Change-Id: I6c6f527b0a600b3612be37769e405b8fb3dd33a8
Antonio Ojea 2023-01-25 20:16:49 +00:00
parent 0fcc3dbd55
commit ea99593fa1
4 changed files with 66 additions and 1 deletion


@@ -68,6 +68,7 @@ func NewAllocationMap(max int, rangeSpec string) *AllocationBitmap {
 // allows to pass an offset that divides the allocation bitmap in two blocks.
 // The first block of values will not be used for random value assigned by the AllocateNext()
 // method until the second block of values has been exhausted.
+// The offset value must always be smaller than the bitmap size.
 func NewAllocationMapWithOffset(max int, rangeSpec string, offset int) *AllocationBitmap {
 	a := AllocationBitmap{
 		strategy: randomScanStrategyWithOffset{
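
As a rough illustration of the two-block behavior described in the new doc comment, here is a toy, self-contained sketch; allocateNext below is hypothetical and scans linearly, while the real strategy assigns random values, but the block ordering is the same:

// Toy sketch of the two-block allocation order; not the real
// AllocationBitmap. The low block [0, offset) is used only after the
// high block [offset, len) has been exhausted.
func allocateNext(bitmap []bool, offset int) (int, bool) {
	for i := offset; i < len(bitmap); i++ { // high block first
		if !bitmap[i] {
			bitmap[i] = true
			return i, true
		}
	}
	for i := 0; i < offset; i++ { // then the reserved low block
		if !bitmap[i] {
			bitmap[i] = true
			return i, true
		}
	}
	return 0, false // bitmap exhausted
}

It also shows why the offset must stay smaller than the bitmap size: with offset at or above len(bitmap), the high block is empty from the start, which is exactly the situation the /28 fix rules out.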


@@ -83,6 +83,11 @@ func New(cidr *net.IPNet, allocatorFactory allocator.AllocatorWithOffsetFactory)
 	base.Add(base, big.NewInt(1))
 	max--
+
+	// a cidr with the whole mask can make max negative
+	if max < 0 {
+		max = 0
+	}
 	r := Range{
 		net:  cidr,
 		base: base,
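
To see why max can dip below zero, here is a condensed, hypothetical rendering of the bookkeeping; the exclusions follow the commit message, while the real New spreads them across several steps:

// usableMax condenses the size bookkeeping: for a /32 (range size 1)
// the two exclusions drive max to -1, which the new clamp resets to 0.
func usableMax(rangeSize int64, ipv4 bool) int64 {
	max := rangeSize
	if ipv4 {
		max-- // the broadcast address is never allocatable
	}
	max-- // the network address is never allocatable (base is advanced past it)
	if max < 0 {
		max = 0 // a cidr with the whole mask would otherwise go negative
	}
	return max
}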
@@ -357,7 +362,10 @@ func calculateRangeOffset(cidr *net.IPNet) int {
 	)
 	cidrSize := netutils.RangeSize(cidr)
-	if cidrSize < min {
+	// available addresses are always less than the cidr size
+	// A /28 CIDR returns 16 addresses, but 2 of them, the network
+	// and broadcast addresses, are not available.
+	if cidrSize <= min {
 		return 0
 	}
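
For context, a sketch of how calculateRangeOffset behaves after the fix. The constants below, a 16-address minimum, a divisor of 16 and a 256 cap, are assumptions inferred from this commit's tests rather than shown in the hunk:

// rangeOffset mimics the fixed check: a range whose size is at most
// min gets no static block at all, because an IPv4 range of exactly
// min addresses has only min-2 allocatable IPs.
func rangeOffset(cidrSize int64) int64 {
	const (
		minOffset = 16  // assumed value of min
		step      = 16  // assumed divisor
		maxOffset = 256 // assumed cap
	)
	if cidrSize <= minOffset { // was "<", which let a /28 (size 16) through
		return 0
	}
	offset := cidrSize / step
	if offset < minOffset {
		offset = minOffset // e.g. a /27: 32/16 = 2, clamped up to 16
	}
	if offset > maxOffset {
		offset = maxOffset
	}
	return offset
}

Under the old strict comparison a /28 fell through to the clamp and received an offset of 16, more than its 14 allocatable addresses; the new <= takes the early return instead, as the tests below pin down.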


@@ -798,8 +798,18 @@ func Test_calculateRangeOffset(t *testing.T) {
+		{
+			name: "small mask IPv4",
+			cidr: "192.168.1.1/28",
+			want: 0,
+		},
 		{
 			name: "small mask IPv4",
 			cidr: "192.168.1.1/27",
 			want: 16,
 		},
+		{
+			name: "small mask IPv6",
+			cidr: "fd00::1/124",
+			want: 0,
+		},
 		{
 			name: "small mask IPv6",
 			cidr: "fd00::1/122",


@@ -128,3 +128,49 @@ func TestServicesFinalizersRepairLoop(t *testing.T) {
 	}
 	t.Logf("Created service: %s", svcNodePort.Name)
 }
+
+// Regression test for https://issues.k8s.io/115316
+func TestServiceCIDR28bits(t *testing.T) {
+	serviceCIDR := "10.0.0.0/28"
+
+	client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
+		ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
+			opts.ServiceClusterIPRanges = serviceCIDR
+		},
+	})
+	defer tearDownFn()
+
+	// Wait until the default "kubernetes" service is created.
+	if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
+		_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		return true, nil
+	}); err != nil {
+		t.Fatalf("creating kubernetes service timed out")
+	}
+
+	ns := framework.CreateNamespaceOrDie(client, "test-regression", t)
+	defer framework.DeleteNamespaceOrDie(client, ns, t)
+
+	service := &v1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test-1234",
+		},
+		Spec: v1.ServiceSpec{
+			Type: v1.ServiceTypeClusterIP,
+			Ports: []v1.ServicePort{{
+				Port: int32(80),
+			}},
+			Selector: map[string]string{
+				"foo": "bar",
+			},
+		},
+	}
+
+	_, err := client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{})
+	if err != nil {
+		t.Fatalf("Error creating test service: %v", err)
+	}
+}