Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 03:41:45 +00:00)

Merge pull request #86534 from liggitt/ipallocator-range

Restore IPAllocator ipv4 range handling

Commit 5e31799701

pkg/registry/core/service/ipallocator/BUILD

@@ -13,8 +13,6 @@ go_library(
     deps = [
         "//pkg/apis/core:go_default_library",
         "//pkg/registry/core/service/allocator:go_default_library",
-        "//vendor/k8s.io/utils/integer:go_default_library",
-        "//vendor/k8s.io/utils/net:go_default_library",
     ],
 )

pkg/registry/core/service/ipallocator/allocator.go

@@ -19,14 +19,10 @@ package ipallocator
 import (
 	"errors"
 	"fmt"
+	"math/big"
+	"net"

 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/registry/core/service/allocator"
-	"k8s.io/utils/integer"
-	utilnet "k8s.io/utils/net"
-	"math/big"
-	"net"
 )

 // Interface manages the allocation of IP addresses out of a range. Interface

@@ -84,8 +80,8 @@ type Range struct {

 // NewAllocatorCIDRRange creates a Range over a net.IPNet, calling allocatorFactory to construct the backing store.
 func NewAllocatorCIDRRange(cidr *net.IPNet, allocatorFactory allocator.AllocatorFactory) (*Range, error) {
-	max := integer.Int64Min(utilnet.RangeSize(cidr), 1<<16)
-	base := utilnet.BigForIP(cidr.IP)
+	max := RangeSize(cidr)
+	base := bigForIP(cidr.IP)
 	rangeSpec := cidr.String()

 	r := Range{
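
The two replaced lines above are the core of the fix: the previous code passed the CIDR size through integer.Int64Min(..., 1<<16), which silently capped IPv4 service ranges at 65536 addresses, whereas the restored RangeSize and bigForIP helpers only apply that cap to IPv6. A minimal standalone sketch of the arithmetic (not part of the commit; the 10.96.0.0/12 service CIDR is just an assumed example):

package main

import (
	"fmt"
	"net"
)

func main() {
	// A /12 leaves 20 host bits, i.e. 1<<20 = 1048576 allocatable slots.
	_, cidr, _ := net.ParseCIDR("10.96.0.0/12")
	ones, bits := cidr.Mask.Size()
	full := int64(1) << uint(bits-ones)

	// What the old expression effectively computed: min(full, 1<<16).
	capped := full
	if capped > 1<<16 {
		capped = 1 << 16
	}

	fmt.Println(full, capped) // 1048576 65536 -- the cap is only appropriate for IPv6
}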

@@ -173,7 +169,7 @@ func (r *Range) AllocateNext() (net.IP, error) {
 	if !ok {
 		return nil, ErrFull
 	}
-	return utilnet.AddIPOffset(r.base, offset), nil
+	return addIPOffset(r.base, offset), nil
 }

 // Release releases the IP back to the pool. Releasing an

@@ -191,7 +187,7 @@ func (r *Range) Release(ip net.IP) error {
 // ForEach calls the provided function for each allocated IP.
 func (r *Range) ForEach(fn func(net.IP)) {
 	r.alloc.ForEach(func(offset int) {
-		ip, _ := utilnet.GetIndexedIP(r.net, offset+1) // +1 because Range doesn't store IP 0
+		ip, _ := GetIndexedIP(r.net, offset+1) // +1 because Range doesn't store IP 0
 		fn(ip)
 	})
 }

@@ -249,8 +245,49 @@ func (r *Range) contains(ip net.IP) (bool, int) {
 	return true, offset
 }

+// bigForIP creates a big.Int based on the provided net.IP
+func bigForIP(ip net.IP) *big.Int {
+	b := ip.To4()
+	if b == nil {
+		b = ip.To16()
+	}
+	return big.NewInt(0).SetBytes(b)
+}
+
+// addIPOffset adds the provided integer offset to a base big.Int representing a
+// net.IP
+func addIPOffset(base *big.Int, offset int) net.IP {
+	return net.IP(big.NewInt(0).Add(base, big.NewInt(int64(offset))).Bytes())
+}
+
 // calculateIPOffset calculates the integer offset of ip from base such that
 // base + offset = ip. It requires ip >= base.
 func calculateIPOffset(base *big.Int, ip net.IP) int {
-	return int(big.NewInt(0).Sub(utilnet.BigForIP(ip), base).Int64())
+	return int(big.NewInt(0).Sub(bigForIP(ip), base).Int64())
 }
+
+// RangeSize returns the size of a range in valid addresses.
+func RangeSize(subnet *net.IPNet) int64 {
+	ones, bits := subnet.Mask.Size()
+	if bits == 32 && (bits-ones) >= 31 || bits == 128 && (bits-ones) >= 127 {
+		return 0
+	}
+	// For IPv6, the max size will be limited to 65536
+	// This is due to the allocator keeping track of all the
+	// allocated IP's in a bitmap. This will keep the size of
+	// the bitmap to 64k.
+	if bits == 128 && (bits-ones) >= 16 {
+		return int64(1) << uint(16)
+	} else {
+		return int64(1) << uint(bits-ones)
+	}
+}
+
+// GetIndexedIP returns a net.IP that is subnet.IP + index in the contiguous IP space.
+func GetIndexedIP(subnet *net.IPNet, index int) (net.IP, error) {
+	ip := addIPOffset(bigForIP(subnet.IP), index)
+	if !subnet.Contains(ip) {
+		return nil, fmt.Errorf("can't generate IP with index %d from subnet. subnet too small. subnet: %q", index, subnet)
+	}
+	return ip, nil
+}
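
The restored helpers treat an address as an arbitrary-precision integer: bigForIP converts the raw IP bytes to a big.Int, addIPOffset adds an allocator offset and converts back, and GetIndexedIP adds a bounds check on top. A small self-contained sketch of that round trip (illustration only, not part of the diff; it re-declares the two helpers so it compiles outside the package):

package main

import (
	"fmt"
	"math/big"
	"net"
)

// Local copies of the helpers above, included only to make the sketch self-contained.
func bigForIP(ip net.IP) *big.Int {
	b := ip.To4()
	if b == nil {
		b = ip.To16()
	}
	return big.NewInt(0).SetBytes(b)
}

func addIPOffset(base *big.Int, offset int) net.IP {
	return net.IP(big.NewInt(0).Add(base, big.NewInt(int64(offset))).Bytes())
}

func main() {
	_, cidr, _ := net.ParseCIDR("192.168.1.0/24")
	base := bigForIP(cidr.IP)

	// Offset 0 in the allocator's bitmap corresponds to .1, not .0, which is why
	// ForEach above calls GetIndexedIP(r.net, offset+1): the network address is
	// never handed out.
	fmt.Println(addIPOffset(base, 0+1)) // 192.168.1.1
	fmt.Println(addIPOffset(base, 10))  // 192.168.1.10
}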

pkg/registry/core/service/ipallocator/allocator_test.go

@@ -66,7 +66,6 @@ func TestAllocate(t *testing.T) {
 			t.Fatal(err)
 		}
 		t.Logf("base: %v", r.base.Bytes())
-		t.Logf("max: %v", r.max)
 		if f := r.Free(); f != tc.free {
 			t.Errorf("Test %s unexpected free %d", tc.name, f)
 		}

@@ -214,6 +213,51 @@ func TestAllocateSmall(t *testing.T) {
 	t.Logf("allocated: %v", found)
 }

+func TestRangeSize(t *testing.T) {
+	testCases := []struct {
+		name  string
+		cidr  string
+		addrs int64
+	}{
+		{
+			name:  "supported IPv4 cidr",
+			cidr:  "192.168.1.0/24",
+			addrs: 256,
+		},
+		{
+			name:  "supported large IPv4 cidr",
+			cidr:  "10.96.0.0/12",
+			addrs: 1048576,
+		},
+		{
+			name:  "unsupported IPv4 cidr",
+			cidr:  "192.168.1.0/1",
+			addrs: 0,
+		},
+		{
+			name:  "supported IPv6 cidr",
+			cidr:  "2001:db8::/48",
+			addrs: 65536,
+		},
+		{
+			name:  "unsupported IPv6 mask",
+			cidr:  "2001:db8::/1",
+			addrs: 0,
+		},
+	}
+
+	for _, tc := range testCases {
+		_, cidr, err := net.ParseCIDR(tc.cidr)
+		if err != nil {
+			t.Errorf("failed to parse cidr for test %s, unexpected error: '%s'", tc.name, err)
+		}
+		if size := RangeSize(cidr); size != tc.addrs {
+			t.Errorf("test %s failed. %s should have a range size of %d, got %d",
+				tc.name, tc.cidr, tc.addrs, size)
+		}
+	}
+}
+
 func TestForEach(t *testing.T) {
 	_, cidr, err := net.ParseCIDR("192.168.1.0/24")
 	if err != nil {