make podCIDR mask size configurable
@@ -110,7 +110,11 @@ func (r *rangeAllocator) Occupy(cidr *net.IPNet) (err error) {
 	cidrMask := cidr.Mask
 	maskSize, _ := cidrMask.Size()
 
-	if r.clusterCIDR.Contains(cidr.IP.Mask(r.clusterCIDR.Mask)) && r.clusterMaskSize < maskSize {
+	if !r.clusterCIDR.Contains(cidr.IP.Mask(r.clusterCIDR.Mask)) && !cidr.Contains(r.clusterCIDR.IP.Mask(cidr.Mask)) {
+		return fmt.Errorf("cidr %v is out the range of cluster cidr %v", cidr, r.clusterCIDR)
+	}
+
+	if r.clusterMaskSize < maskSize {
 		subNetMask := net.CIDRMask(r.subNetMaskSize, 32)
 		begin, err = r.getIndexForCIDR(&net.IPNet{
 			IP: cidr.IP.To4().Mask(subNetMask),
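The reworked guard accepts a CIDR when either range contains the other, and only rejects truly disjoint ranges. A standalone sketch of the same predicate (illustrative code, not from this commit; the repo's version lives on rangeAllocator and returns an error rather than a bool):

	package main

	import (
		"fmt"
		"net"
	)

	// outOfRange mirrors the new Occupy guard: reject only when
	// neither range contains the other.
	func outOfRange(clusterCIDR, cidr *net.IPNet) bool {
		return !clusterCIDR.Contains(cidr.IP.Mask(clusterCIDR.Mask)) &&
			!cidr.Contains(clusterCIDR.IP.Mask(cidr.Mask))
	}

	func main() {
		_, cluster, _ := net.ParseCIDR("10.128.0.0/16")
		_, inside, _ := net.ParseCIDR("10.128.1.0/24")   // subset of the cluster range
		_, wider, _ := net.ParseCIDR("10.0.0.0/8")       // superset of the cluster range
		_, outside, _ := net.ParseCIDR("192.168.0.0/24") // disjoint

		fmt.Println(outOfRange(cluster, inside))  // false: accepted
		fmt.Println(outOfRange(cluster, wider))   // false: accepted, via the second clause
		fmt.Println(outOfRange(cluster, outside)) // true: rejected
	}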
@@ -127,7 +131,6 @@ func (r *rangeAllocator) Occupy(cidr *net.IPNet) (err error) {
 			IP: net.IP(ip).To4().Mask(subNetMask),
 			Mask: subNetMask,
 		})
-
 		if err != nil {
 			return err
 		}
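getIndexForCIDR itself is not part of this diff. A plausible reading of what it computes — the bitmap slot of a /subNetMaskSize block within the cluster CIDR — is sketched below; the name and arithmetic here are assumptions for intuition, not the repo's code:

	package main

	import (
		"encoding/binary"
		"fmt"
		"net"
	)

	// indexForCIDR is a hypothetical stand-in for r.getIndexForCIDR:
	// the offset of cidr from the cluster base, in units of one subnet.
	func indexForCIDR(clusterCIDR *net.IPNet, subNetMaskSize int, cidr *net.IPNet) uint32 {
		base := binary.BigEndian.Uint32(clusterCIDR.IP.To4())
		ip := binary.BigEndian.Uint32(cidr.IP.To4())
		return (ip - base) >> uint(32-subNetMaskSize)
	}

	func main() {
		_, cluster, _ := net.ParseCIDR("10.0.0.0/16")
		_, pod, _ := net.ParseCIDR("10.0.5.0/24")
		fmt.Println(indexForCIDR(cluster, 24, pod)) // 5: the sixth /24 inside 10.0.0.0/16
	}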
@@ -336,6 +336,7 @@ func TestOccupy(t *testing.T) {
 		}
 		if err != nil && !tc.expectErr {
 			t.Errorf("unexpected error: %v", err)
+			continue
 		}
 
 		expectedUsed := big.Int{}
@@ -148,6 +148,7 @@ func NewNodeController(
 	nodeMonitorPeriod time.Duration,
 	clusterCIDR *net.IPNet,
 	serviceCIDR *net.IPNet,
+	nodeCIDRMaskSize int,
 	allocateNodeCIDRs bool) *NodeController {
 	eventBroadcaster := record.NewBroadcaster()
 	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
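Judging from the updated call sites in the tests below, nodeCIDRMaskSize slots in between serviceCIDR and allocateNodeCIDRs, so a caller would now look roughly like this (argument names are illustrative; the controller-manager wiring is not part of this diff):

	nc := NewNodeController(cloud, kubeClient, podEvictionTimeout,
		deletionEvictionLimiter, terminationEvictionLimiter,
		nodeMonitorGracePeriod, nodeStartupGracePeriod, nodeMonitorPeriod,
		clusterCIDR, serviceCIDR, nodeCIDRMaskSize, allocateNodeCIDRs)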
@@ -168,11 +169,8 @@ func NewNodeController(
 			glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.")
 		}
 		mask := clusterCIDR.Mask
-		// TODO(mqliang): Make pod CIDR mask size configurable.
-		// For now, we assume podCIDR mask size is 24, so make sure the
-		// clusterCIDR mask size is larger than 24.
-		if maskSize, _ := mask.Size(); maskSize > 24 {
-			glog.Fatal("NodeController: Invalid clusterCIDR, mask size must be less than 24.")
+		if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
+			glog.Fatal("NodeController: Invalid clusterCIDR, mask size of clusterCIDR must be less than nodeCIDRMaskSize.")
 		}
 	}
 	evictorLock := sync.Mutex{}
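The validation now compares against the configured size instead of the literal 24: a /16 cluster CIDR can still be carved into /24 node CIDRs, while a cluster CIDR narrower than the node mask is fatal. A quick standalone check of the predicate (sketch, outside the controller):

	package main

	import (
		"log"
		"net"
	)

	func main() {
		_, cluster, _ := net.ParseCIDR("10.0.0.0/16")
		nodeCIDRMaskSize := 24
		if maskSize, _ := cluster.Mask.Size(); maskSize > nodeCIDRMaskSize {
			// Would trip for, say, a /26 cluster CIDR with /24 node CIDRs.
			log.Fatalf("cluster mask /%d is longer than node mask /%d", maskSize, nodeCIDRMaskSize)
		}
		log.Printf("ok: /16 cluster CIDR can host /%d node CIDRs", nodeCIDRMaskSize)
	}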
@@ -258,8 +256,7 @@ func NewNodeController(
 	)
 
 	if allocateNodeCIDRs {
-		// TODO(mqliang): make pod CIDR mask size configurable, for now set it to 24.
-		nc.cidrAllocator = NewCIDRRangeAllocator(clusterCIDR, 24)
+		nc.cidrAllocator = NewCIDRRangeAllocator(clusterCIDR, nodeCIDRMaskSize)
 	}
 
 	return nc
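NewCIDRRangeAllocator's second argument now controls how finely the cluster CIDR is subdivided. The number of allocatable node CIDRs follows directly from the two mask sizes; the arithmetic below is shown for intuition and is an assumption about the allocator's capacity, not code from the repo:

	package main

	import (
		"fmt"
		"net"
	)

	func main() {
		_, clusterCIDR, _ := net.ParseCIDR("10.244.0.0/16")
		nodeCIDRMaskSize := 24
		clusterMaskSize, _ := clusterCIDR.Mask.Size()
		// Each extra mask bit doubles the subnet count.
		maxCIDRs := 1 << uint(nodeCIDRMaskSize-clusterMaskSize)
		fmt.Printf("%v with /%d node CIDRs => %d nodes, 2^%d pod IPs each\n",
			clusterCIDR, nodeCIDRMaskSize, maxCIDRs, 32-nodeCIDRMaskSize)
		// 10.244.0.0/16 with /24 node CIDRs => 256 nodes, 2^8 pod IPs each
	}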
@@ -267,8 +264,9 @@ func NewNodeController(
 
 // Run starts an asynchronous loop that monitors the status of cluster nodes.
 func (nc *NodeController) Run(period time.Duration) {
-	nc.filterOutServiceRange()
+	if nc.allocateNodeCIDRs {
+		nc.filterOutServiceRange()
+	}
 
 	go nc.nodeController.Run(wait.NeverStop)
 	go nc.podController.Run(wait.NeverStop)
@@ -341,7 +339,7 @@ func (nc *NodeController) Run(period time.Duration) {
 }
 
 func (nc *NodeController) filterOutServiceRange() {
-	if !nc.clusterCIDR.Contains(nc.serviceCIDR.IP.Mask(nc.clusterCIDR.Mask)) {
+	if !nc.clusterCIDR.Contains(nc.serviceCIDR.IP.Mask(nc.clusterCIDR.Mask)) && !nc.serviceCIDR.Contains(nc.clusterCIDR.IP.Mask(nc.serviceCIDR.Mask)) {
 		return
 	}
 
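The appended clause covers a service CIDR wider than the cluster CIDR, which the old single Contains test missed because the masked service IP falls outside the cluster range. Concretely (sketch with made-up ranges):

	package main

	import (
		"fmt"
		"net"
	)

	func main() {
		_, cluster, _ := net.ParseCIDR("10.10.0.0/16")
		_, service, _ := net.ParseCIDR("10.0.0.0/8") // wider than the cluster CIDR

		// Old check alone: sees no overlap and would skip filtering.
		fmt.Println(cluster.Contains(service.IP.Mask(cluster.Mask))) // false
		// New second clause: detects that the service range covers the cluster.
		fmt.Println(service.Contains(cluster.IP.Mask(service.Mask))) // true
	}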
@@ -660,7 +660,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
 	for _, item := range table {
 		nodeController := NewNodeController(nil, item.fakeNodeHandler,
 			evictionTimeout, flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(), testNodeMonitorGracePeriod,
-			testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, false)
+			testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
 		nodeController.now = func() unversioned.Time { return fakeNow }
 		for _, ds := range item.daemonSets {
 			nodeController.daemonSetStore.Add(&ds)
@@ -731,7 +731,7 @@ func TestCloudProviderNoRateLimit(t *testing.T) {
 	nodeController := NewNodeController(nil, fnh, 10*time.Minute,
 		flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(),
 		testNodeMonitorGracePeriod, testNodeStartupGracePeriod,
-		testNodeMonitorPeriod, nil, nil, false)
+		testNodeMonitorPeriod, nil, nil, 0, false)
 	nodeController.cloud = &fakecloud.FakeCloud{}
 	nodeController.now = func() unversioned.Time { return unversioned.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) }
 	nodeController.nodeExistsInCloudProvider = func(nodeName string) (bool, error) {
@@ -963,7 +963,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
 
 	for i, item := range table {
 		nodeController := NewNodeController(nil, item.fakeNodeHandler, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(),
-			flowcontrol.NewFakeAlwaysRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, false)
+			flowcontrol.NewFakeAlwaysRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
 		nodeController.now = func() unversioned.Time { return fakeNow }
 		if err := nodeController.monitorNodeStatus(); err != nil {
 			t.Errorf("unexpected error: %v", err)
@@ -1113,7 +1113,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
 
 	for i, item := range table {
 		nodeController := NewNodeController(nil, item.fakeNodeHandler, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(),
-			flowcontrol.NewFakeAlwaysRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, false)
+			flowcontrol.NewFakeAlwaysRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
 		nodeController.now = func() unversioned.Time { return fakeNow }
 		if err := nodeController.monitorNodeStatus(); err != nil {
 			t.Errorf("Case[%d] unexpected error: %v", i, err)
@@ -1195,7 +1195,7 @@ func TestNodeDeletion(t *testing.T) {
 	}
 
 	nodeController := NewNodeController(nil, fakeNodeHandler, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(),
-		testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, false)
+		testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
 	nodeController.now = func() unversioned.Time { return fakeNow }
 	if err := nodeController.monitorNodeStatus(); err != nil {
 		t.Errorf("unexpected error: %v", err)
@@ -1298,7 +1298,7 @@ func TestCheckPod(t *testing.T) {
 		},
 	}
 
-	nc := NewNodeController(nil, nil, 0, nil, nil, 0, 0, 0, nil, nil, false)
+	nc := NewNodeController(nil, nil, 0, nil, nil, 0, 0, 0, nil, nil, 0, false)
 	nc.nodeStore.Store = cache.NewStore(cache.MetaNamespaceKeyFunc)
 	nc.nodeStore.Store.Add(&api.Node{
 		ObjectMeta: api.ObjectMeta{
@@ -1375,7 +1375,7 @@ func TestCleanupOrphanedPods(t *testing.T) {
 		newPod("b", "bar"),
 		newPod("c", "gone"),
 	}
-	nc := NewNodeController(nil, nil, 0, nil, nil, 0, 0, 0, nil, nil, false)
+	nc := NewNodeController(nil, nil, 0, nil, nil, 0, 0, 0, nil, nil, 0, false)
 
 	nc.nodeStore.Store.Add(newNode("foo"))
 	nc.nodeStore.Store.Add(newNode("bar"))