Merge pull request #63049 from andrewsykim/kcm-nodeipam

Automatic merge from submit-queue (batch tested with PRs 63049, 59731). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

re-enable nodeipam in kube-controller-manager

**What this PR does / why we need it**:
Re-enables the nodeipam controller for external clouds. Also does a small refactor so that we no longer need to pass `allocateNodeCIDRs` into the controller.

In v1.10 we made a change (commit 9187b343e1, in https://github.com/kubernetes/kubernetes/pull/57492) that disabled nodeipam for any cluster that sets `--cloud-provider=external`. The original intention was that the nodeipam controller is cloud-specific for some clouds (only GCE at the moment), so it should move to the CCM (cloud controller manager). After discussions with wg-cloud-provider, it makes sense to re-enable the nodeipam controller in the KCM and have the GCE CCM enable its own cloud-specific IPAM controller as part of [Initialize()](https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/cloud.go#L33-L35). This allows GCE to run nodeipam both in the KCM (by setting `--cloud-provider=gce` and `--allocate-node-cidrs`) and in the CCM (once implemented in `Initialize()`), without disabling nodeipam in the KCM for all external clouds and without having to implement nodeipam in the CCM.
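For context, `Initialize()` is the hook the CCM calls once at provider startup, which is what makes it a natural place to spin up cloud-specific controllers. The sketch below is illustrative only and not code from this PR: the `gceCloud` type and `startCloudSpecificIPAM` helper are hypothetical, and the hook's exact signature should be checked against the `cloudprovider.Interface` definition linked above.

```go
// Illustrative sketch only (not code from this PR): a cloud provider
// starting its own cloud-specific IPAM loop from Initialize().
package gce

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/controller"
)

type gceCloud struct{} // provider state elided

// Initialize is the cloudprovider.Interface hook mentioned above: the CCM
// calls it once at startup, handing the provider a client builder it can
// use to start controllers that only make sense for this cloud.
func (g *gceCloud) Initialize(clientBuilder controller.ControllerClientBuilder) {
	client := clientBuilder.ClientOrDie("cloud-node-ipam")
	go startCloudSpecificIPAM(g, client)
}

// startCloudSpecificIPAM is a placeholder; a real implementation would
// construct an IPAM controller in SyncFromCloud mode and run it.
func startCloudSpecificIPAM(g *gceCloud, client clientset.Interface) {}
```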

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes # 

**Special notes for your reviewer**:


**Release note**:
```release-note
Re-enable nodeipam controller for external clouds. 
```
Merged by Kubernetes Submit Queue on 2018-05-11 11:07:12 -07:00, committed by GitHub (commit 5a54555f59).
4 changed files with 58 additions and 66 deletions


```diff
@@ -337,9 +337,9 @@ func NewControllerInitializers(loopMode ControllerLoopMode) map[string]InitFunc
 	controllers["ttl"] = startTTLController
 	controllers["bootstrapsigner"] = startBootstrapSignerController
 	controllers["tokencleaner"] = startTokenCleanerController
+	controllers["nodeipam"] = startNodeIpamController
 	if loopMode == IncludeCloudLoops {
 		controllers["service"] = startServiceController
-		controllers["nodeipam"] = startNodeIpamController
 		controllers["route"] = startRouteController
 		// TODO: volume controller into the IncludeCloudLoops only set.
 		// TODO: Separate cluster in cloud check from node lifecycle controller.
```


```diff
@@ -79,20 +79,23 @@ func startServiceController(ctx ControllerContext) (bool, error) {
 func startNodeIpamController(ctx ControllerContext) (bool, error) {
 	var clusterCIDR *net.IPNet = nil
 	var serviceCIDR *net.IPNet = nil
-	if ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs {
-		var err error
-		if len(strings.TrimSpace(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)) != 0 {
-			_, clusterCIDR, err = net.ParseCIDR(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)
-			if err != nil {
-				glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.KubeCloudShared.ClusterCIDR, err)
-			}
-		}
-		if len(strings.TrimSpace(ctx.ComponentConfig.NodeIpamController.ServiceCIDR)) != 0 {
-			_, serviceCIDR, err = net.ParseCIDR(ctx.ComponentConfig.NodeIpamController.ServiceCIDR)
-			if err != nil {
-				glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.ComponentConfig.NodeIpamController.ServiceCIDR, err)
-			}
+
+	if !ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs {
+		return false, nil
+	}
+
+	var err error
+	if len(strings.TrimSpace(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)) != 0 {
+		_, clusterCIDR, err = net.ParseCIDR(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)
+		if err != nil {
+			glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.KubeCloudShared.ClusterCIDR, err)
+		}
+	}
+	if len(strings.TrimSpace(ctx.ComponentConfig.NodeIpamController.ServiceCIDR)) != 0 {
+		_, serviceCIDR, err = net.ParseCIDR(ctx.ComponentConfig.NodeIpamController.ServiceCIDR)
+		if err != nil {
+			glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.ComponentConfig.NodeIpamController.ServiceCIDR, err)
 		}
 	}
@@ -103,7 +106,6 @@ func startNodeIpamController(ctx ControllerContext) (bool, error) {
 		clusterCIDR,
 		serviceCIDR,
 		int(ctx.ComponentConfig.NodeIpamController.NodeCIDRMaskSize),
-		ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs,
 		ipam.CIDRAllocatorType(ctx.ComponentConfig.KubeCloudShared.CIDRAllocatorType),
 	)
 	if err != nil {
```
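One detail worth calling out in the hunk above: `startNodeIpamController` returns `(bool, error)`, the controller manager's `InitFunc` shape, and the new early return reports `false, nil` so the KCM records the controller as intentionally disabled rather than failed. A minimal sketch of the same gate pattern (the controller name is made up for illustration):

```go
// Sketch of the InitFunc gate pattern used above; startExampleController
// is a made-up name, not a controller in this PR.
func startExampleController(ctx ControllerContext) (bool, error) {
	if !ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs {
		// Feature flag off: report "not enabled" rather than an error,
		// so the controller manager skips this loop cleanly.
		return false, nil
	}
	// ... construct and start the controller here ...
	return true, nil
}
```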


```diff
@@ -58,8 +58,7 @@ const (
 // Controller is the controller that manages node ipam state.
 type Controller struct {
-	allocateNodeCIDRs bool
-	allocatorType     ipam.CIDRAllocatorType
+	allocatorType ipam.CIDRAllocatorType
 
 	cloud       cloudprovider.Interface
 	clusterCIDR *net.IPNet
@@ -88,7 +87,6 @@ func NewNodeIpamController(
 	clusterCIDR *net.IPNet,
 	serviceCIDR *net.IPNet,
 	nodeCIDRMaskSize int,
-	allocateNodeCIDRs bool,
 	allocatorType ipam.CIDRAllocatorType) (*Controller, error) {
 
 	if kubeClient == nil {
@@ -108,54 +106,49 @@ func NewNodeIpamController(
 		metrics.RegisterMetricAndTrackRateLimiterUsage("node_ipam_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
 	}
 
-	if allocateNodeCIDRs {
-		if clusterCIDR == nil {
-			glog.Fatal("Controller: Must specify clusterCIDR if allocateNodeCIDRs == true.")
-		}
-		mask := clusterCIDR.Mask
-		if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
-			glog.Fatal("Controller: Invalid clusterCIDR, mask size of clusterCIDR must be less than nodeCIDRMaskSize.")
-		}
+	if clusterCIDR == nil {
+		glog.Fatal("Controller: Must specify --cluster-cidr if --allocate-node-cidrs is set")
+	}
+	mask := clusterCIDR.Mask
+	if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
+		glog.Fatal("Controller: Invalid --cluster-cidr, mask size of cluster CIDR must be less than --node-cidr-mask-size")
 	}
 
 	ic := &Controller{
-		cloud:             cloud,
-		kubeClient:        kubeClient,
-		lookupIP:          net.LookupIP,
-		clusterCIDR:       clusterCIDR,
-		serviceCIDR:       serviceCIDR,
-		allocateNodeCIDRs: allocateNodeCIDRs,
-		allocatorType:     allocatorType,
+		cloud:         cloud,
+		kubeClient:    kubeClient,
+		lookupIP:      net.LookupIP,
+		clusterCIDR:   clusterCIDR,
+		serviceCIDR:   serviceCIDR,
+		allocatorType: allocatorType,
 	}
 
 	// TODO: Abstract this check into a generic controller manager should run method.
-	if ic.allocateNodeCIDRs {
-		if ic.allocatorType == ipam.IPAMFromClusterAllocatorType || ic.allocatorType == ipam.IPAMFromCloudAllocatorType {
-			cfg := &ipam.Config{
-				Resync:       ipamResyncInterval,
-				MaxBackoff:   ipamMaxBackoff,
-				InitialRetry: ipamInitialBackoff,
-			}
-			switch ic.allocatorType {
-			case ipam.IPAMFromClusterAllocatorType:
-				cfg.Mode = nodesync.SyncFromCluster
-			case ipam.IPAMFromCloudAllocatorType:
-				cfg.Mode = nodesync.SyncFromCloud
-			}
-			ipamc, err := ipam.NewController(cfg, kubeClient, cloud, clusterCIDR, serviceCIDR, nodeCIDRMaskSize)
-			if err != nil {
-				glog.Fatalf("Error creating ipam controller: %v", err)
-			}
-			if err := ipamc.Start(nodeInformer); err != nil {
-				glog.Fatalf("Error trying to Init(): %v", err)
-			}
-		} else {
-			var err error
-			ic.cidrAllocator, err = ipam.New(
-				kubeClient, cloud, nodeInformer, ic.allocatorType, ic.clusterCIDR, ic.serviceCIDR, nodeCIDRMaskSize)
-			if err != nil {
-				return nil, err
-			}
+	if ic.allocatorType == ipam.IPAMFromClusterAllocatorType || ic.allocatorType == ipam.IPAMFromCloudAllocatorType {
+		cfg := &ipam.Config{
+			Resync:       ipamResyncInterval,
+			MaxBackoff:   ipamMaxBackoff,
+			InitialRetry: ipamInitialBackoff,
+		}
+		switch ic.allocatorType {
+		case ipam.IPAMFromClusterAllocatorType:
+			cfg.Mode = nodesync.SyncFromCluster
+		case ipam.IPAMFromCloudAllocatorType:
+			cfg.Mode = nodesync.SyncFromCloud
+		}
+		ipamc, err := ipam.NewController(cfg, kubeClient, cloud, clusterCIDR, serviceCIDR, nodeCIDRMaskSize)
+		if err != nil {
+			glog.Fatalf("Error creating ipam controller: %v", err)
+		}
+		if err := ipamc.Start(nodeInformer); err != nil {
+			glog.Fatalf("Error trying to Init(): %v", err)
+		}
+	} else {
+		var err error
+		ic.cidrAllocator, err = ipam.New(
+			kubeClient, cloud, nodeInformer, ic.allocatorType, ic.clusterCIDR, ic.serviceCIDR, nodeCIDRMaskSize)
+		if err != nil {
+			return nil, err
 		}
 	}
@@ -176,11 +169,8 @@ func (nc *Controller) Run(stopCh <-chan struct{}) {
 		return
 	}
 
-	// TODO: Abstract this check into a generic controller manager should run method.
-	if nc.allocateNodeCIDRs {
-		if nc.allocatorType != ipam.IPAMFromClusterAllocatorType && nc.allocatorType != ipam.IPAMFromCloudAllocatorType {
-			go nc.cidrAllocator.Run(stopCh)
-		}
+	if nc.allocatorType != ipam.IPAMFromClusterAllocatorType && nc.allocatorType != ipam.IPAMFromCloudAllocatorType {
+		go nc.cidrAllocator.Run(stopCh)
 	}
 
 	<-stopCh
```
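With the boolean gone, the constructor is simpler to call; the integration-test hunk below shows the real updated call site. As a hedged sketch of the wiring, with the surrounding variable names assumed to be set up by the caller as in `startNodeIpamController` above:

```go
// Hedged sketch of calling the refactored constructor; sharedInformers,
// cloud, kubeClient, clusterCIDR, serviceCIDR, and stopCh are assumed to
// exist in the caller. Compare the test hunk below for a real call site.
ipamController, err := nodeipam.NewNodeIpamController(
	sharedInformers.Core().V1().Nodes(), // node informer
	cloud,                               // cloudprovider.Interface
	kubeClient,
	clusterCIDR,             // *net.IPNet; must be non-nil (glog.Fatal above otherwise)
	serviceCIDR,             // *net.IPNet; may be nil
	24,                      // node CIDR mask size
	ipam.RangeAllocatorType, // the allocateNodeCIDRs bool is no longer passed
)
if err != nil {
	glog.Fatalf("Error creating node ipam controller: %v", err)
}
go ipamController.Run(stopCh)
```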


```diff
@@ -52,7 +52,7 @@ func setupAllocator(apiURL string, config *Config, clusterCIDR, serviceCIDR *net
 	sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour)
 	ipamController, err := nodeipam.NewNodeIpamController(
 		sharedInformer.Core().V1().Nodes(), config.Cloud, clientSet,
-		clusterCIDR, serviceCIDR, subnetMaskSize, true, config.AllocatorType,
+		clusterCIDR, serviceCIDR, subnetMaskSize, config.AllocatorType,
 	)
 	if err != nil {
 		return nil, shutdownFunc, err
```