Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 19:01:49 +00:00
Merge pull request #115322 from aojea/hotfix

Fix panic on ClusterIP allocation for /28 subnets

This commit is contained in: 3d6c7efc23
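In short, as the hunks below show: a /28 CIDR spans 16 addresses, but the network and broadcast addresses are never allocatable, so the usable bitmap holds only 14 values. The old guard in calculateRangeOffset (`if cidrSize < min`, where min is evidently 16 given the tests below) let a /28 through and produced an allocation offset at least as large as the bitmap itself, which is what panicked the allocator. The fix tightens the comparison to `<=`, clamps a related underflow in New, and documents the invariant that the offset must stay strictly smaller than the bitmap size.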
@@ -68,6 +68,7 @@ func NewAllocationMap(max int, rangeSpec string) *AllocationBitmap {
 // allows to pass an offset that divides the allocation bitmap in two blocks.
 // The first block of values will not be used for random value assigned by the AllocateNext()
 // method until the second block of values has been exhausted.
+// The offset value must be always smaller than the bitmap size.
 func NewAllocationMapWithOffset(max int, rangeSpec string, offset int) *AllocationBitmap {
 	a := AllocationBitmap{
 		strategy: randomScanStrategyWithOffset{
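The comment being added documents the behavior that matters for this bug: values below the offset are reserved, random assignment draws from [offset, max) first, and an offset that reaches the bitmap size leaves nothing to draw from. A minimal illustrative sketch of the two-block idea, not the Kubernetes implementation (all names here are invented):

package main

import (
	"fmt"
	"math/rand"
)

// allocateNext returns a random free value, preferring the upper block
// [offset, max) and falling back to the lower block [0, offset) only
// once the upper block is exhausted.
func allocateNext(allocated map[int]bool, max, offset int) (int, bool) {
	for _, block := range [][2]int{{offset, max}, {0, offset}} {
		var free []int
		for i := block[0]; i < block[1]; i++ {
			if !allocated[i] {
				free = append(free, i)
			}
		}
		if len(free) > 0 {
			v := free[rand.Intn(len(free))]
			allocated[v] = true
			return v, true
		}
	}
	return 0, false
}

func main() {
	// 14 usable values, as for a /28; the offset must stay strictly below max.
	allocated := map[int]bool{}
	for {
		v, ok := allocateNext(allocated, 14, 8)
		if !ok {
			break
		}
		fmt.Println(v) // values 8..13 come out first, then 0..7
	}
}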
@@ -83,6 +83,11 @@ func New(cidr *net.IPNet, allocatorFactory allocator.AllocatorWithOffsetFactory)
 	base.Add(base, big.NewInt(1))
 	max--
 
+	// cidr with whole mask can be negative
+	if max < 0 {
+		max = 0
+	}
+
 	r := Range{
 		net:  cidr,
 		base: base,
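Why the clamp is needed: for a whole-mask CIDR (/32 or /128) there is a single address, so once the surrounding code skips the base address (and, for IPv4, a second reserved address), the count goes negative. A hedged sketch of the arithmetic, not the exact accounting in this file:

package main

import (
	"fmt"
	"net"
)

// usable estimates allocatable addresses in cidr after dropping the
// network address and one more reserved address (e.g. IPv4 broadcast).
func usable(cidr string) int64 {
	_, ipnet, err := net.ParseCIDR(cidr)
	if err != nil {
		panic(err)
	}
	ones, bits := ipnet.Mask.Size()
	size := int64(1) << uint(bits-ones) // total addresses in the CIDR
	max := size - 2                     // two addresses are reserved
	if max < 0 {                        // a /32 has size 1, so max underflows to -1
		max = 0
	}
	return max
}

func main() {
	fmt.Println(usable("10.0.0.0/28")) // 14
	fmt.Println(usable("10.0.0.1/32")) // 0, clamped from -1
}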
@@ -357,7 +362,10 @@ func calculateRangeOffset(cidr *net.IPNet) int {
 	)
 
 	cidrSize := netutils.RangeSize(cidr)
-	if cidrSize < min {
+	// available addresses are always less than the cidr size
+	// A /28 CIDR returns 16 addresses, but 2 of them, the network
+	// and broadcast addresses are not available.
+	if cidrSize <= min {
 		return 0
 	}
 
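The one-character change moves the boundary so that a CIDR whose total size equals min no longer receives an offset. A simplified sketch of just this guard; the constant and the non-zero fallback are assumptions taken from the tests below, not the real graduated formula:

package main

import (
	"fmt"
	"net"
)

func rangeSize(cidr *net.IPNet) int64 {
	ones, bits := cidr.Mask.Size()
	return int64(1) << uint(bits-ones)
}

func rangeOffset(cidr *net.IPNet) int64 {
	const min = 16
	if rangeSize(cidr) <= min { // pre-fix this was `<`, letting /28 and /124 through
		return 0
	}
	return min // stand-in for the real formula; matches the /27 and /122 tests
}

func main() {
	for _, s := range []string{"192.168.1.0/28", "192.168.1.0/27", "fd00::/124"} {
		_, cidr, _ := net.ParseCIDR(s)
		fmt.Printf("%-16s size=%-3d offset=%d\n", s, rangeSize(cidr), rangeOffset(cidr))
	}
}

With the old `<` comparison, the /28 and /124 cases fell through to the non-zero branch even though their bitmaps hold only 14 allocatable values.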
@@ -798,8 +798,18 @@ func Test_calculateRangeOffset(t *testing.T) {
+		{
+			name: "small mask IPv4",
+			cidr: "192.168.1.1/28",
+			want: 0,
+		},
 		{
 			name: "small mask IPv4",
 			cidr: "192.168.1.1/27",
 			want: 16,
 		},
+		{
+			name: "small mask IPv6",
+			cidr: "fd00::1/124",
+			want: 0,
+		},
 		{
 			name: "small mask IPv6",
 			cidr: "fd00::1/122",
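These table entries exercise both sides of the new boundary: size 16 (/28 and /124) now returns 0, while sizes 32 and 64 (/27 and /122) keep a non-zero offset. Assuming the usual tree layout for this allocator, something like `go test -run Test_calculateRangeOffset ./pkg/registry/core/service/ipallocator/` should run them; the package path is an assumption, since the diff does not name the file.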
@@ -128,3 +128,49 @@ func TestServicesFinalizersRepairLoop(t *testing.T) {
 	}
 	t.Logf("Created service: %s", svcNodePort.Name)
 }
+
+// Regression test for https://issues.k8s.io/115316
+func TestServiceCIDR28bits(t *testing.T) {
+	serviceCIDR := "10.0.0.0/28"
+
+	client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
+		ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
+			opts.ServiceClusterIPRanges = serviceCIDR
+		},
+	})
+	defer tearDownFn()
+
+	// Wait until the default "kubernetes" service is created.
+	if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
+		_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
+		if err != nil {
+			return false, nil // keep polling; a transient NotFound must not abort the wait
+		}
+		return true, nil
+	}); err != nil {
+		t.Fatalf("creating kubernetes service timed out")
+	}
+
+	ns := framework.CreateNamespaceOrDie(client, "test-regression", t)
+	defer framework.DeleteNamespaceOrDie(client, ns, t)
+
+	service := &v1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test-1234",
+		},
+		Spec: v1.ServiceSpec{
+			Type: v1.ServiceTypeClusterIP,
+			Ports: []v1.ServicePort{{
+				Port: int32(80),
+			}},
+			Selector: map[string]string{
+				"foo": "bar",
+			},
+		},
+	}
+
+	_, err := client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{})
+	if err != nil {
+		t.Fatalf("Error creating test service: %v", err)
+	}
+}
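TestServiceCIDR28bits boots a real API server via framework.StartTestServer, so it runs under the integration suite and needs a local etcd on the path (for example via the repository's hack/install-etcd.sh). Assuming the file lives under test/integration/service, `go test -run TestServiceCIDR28bits ./test/integration/service/...` should reproduce the pre-fix panic on an unpatched tree.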