From 796faba4ac96db07194c4811585abe52fd682064 Mon Sep 17 00:00:00 2001 From: Anish Ramasekar Date: Tue, 9 Jul 2019 14:47:01 -0700 Subject: [PATCH] Allow multiple node cidr masks in cm update tests add comment amend var name update comment add check for empty slice fix tests fix mask size in test review feedback add ipv4 and ipv6 flag for mask sizes add to violation exception list remove import alias run update-openapi-spec review feedback run update-bazel review feedback review feedback --- api/api-rules/violation_exceptions.list | 2 + cmd/kube-controller-manager/app/BUILD | 1 + cmd/kube-controller-manager/app/core.go | 75 +++- .../app/options/nodeipamcontroller.go | 4 +- .../app/options/options_test.go | 6 +- pkg/controller/nodeipam/config/types.go | 6 +- .../nodeipam/config/v1alpha1/defaults.go | 5 +- .../v1alpha1/zz_generated.conversion.go | 4 + .../nodeipam/ipam/cidr_allocator.go | 17 +- .../nodeipam/ipam/cidrset/cidr_set.go | 2 + .../nodeipam/ipam/range_allocator.go | 20 +- .../nodeipam/ipam/range_allocator_test.go | 338 ++++++++++-------- pkg/controller/nodeipam/legacyprovider.go | 4 +- .../nodeipam/node_ipam_controller.go | 22 +- .../nodeipam/node_ipam_controller_test.go | 29 +- .../config/v1alpha1/types.go | 4 + test/integration/ipamperf/ipam_test.go | 2 +- 17 files changed, 343 insertions(+), 198 deletions(-) diff --git a/api/api-rules/violation_exceptions.list b/api/api-rules/violation_exceptions.list index 10211214c1c..e3e4e24154a 100644 --- a/api/api-rules/violation_exceptions.list +++ b/api/api-rules/violation_exceptions.list @@ -662,6 +662,8 @@ API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,K API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NamespaceControllerConfiguration,ConcurrentNamespaceSyncs API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NamespaceControllerConfiguration,NamespaceSyncPeriod API rule violation: 
names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeIPAMControllerConfiguration,NodeCIDRMaskSize +API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeIPAMControllerConfiguration,NodeCIDRMaskSizeIPv4 +API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeIPAMControllerConfiguration,NodeCIDRMaskSizeIPv6 API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeIPAMControllerConfiguration,SecondaryServiceCIDR API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeIPAMControllerConfiguration,ServiceCIDR API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeLifecycleControllerConfiguration,EnableTaintManager diff --git a/cmd/kube-controller-manager/app/BUILD b/cmd/kube-controller-manager/app/BUILD index 5b99320fd24..ab799901a32 100644 --- a/cmd/kube-controller-manager/app/BUILD +++ b/cmd/kube-controller-manager/app/BUILD @@ -60,6 +60,7 @@ go_library( "//pkg/controller/job:go_default_library", "//pkg/controller/namespace:go_default_library", "//pkg/controller/nodeipam:go_default_library", + "//pkg/controller/nodeipam/config:go_default_library", "//pkg/controller/nodeipam/ipam:go_default_library", "//pkg/controller/nodelifecycle:go_default_library", "//pkg/controller/podautoscaler:go_default_library", diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go index ff592155a72..b5fbfc5ddde 100644 --- a/cmd/kube-controller-manager/app/core.go +++ b/cmd/kube-controller-manager/app/core.go @@ -21,6 +21,7 @@ limitations under the License. 
package app import ( + "errors" "fmt" "net" "net/http" @@ -29,7 +30,7 @@ import ( "k8s.io/klog" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" utilfeature "k8s.io/apiserver/pkg/util/feature" cacheddiscovery "k8s.io/client-go/discovery/cached/memory" @@ -46,6 +47,7 @@ import ( "k8s.io/kubernetes/pkg/controller/garbagecollector" namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace" nodeipamcontroller "k8s.io/kubernetes/pkg/controller/nodeipam" + nodeipamconfig "k8s.io/kubernetes/pkg/controller/nodeipam/config" "k8s.io/kubernetes/pkg/controller/nodeipam/ipam" lifecyclecontroller "k8s.io/kubernetes/pkg/controller/nodelifecycle" "k8s.io/kubernetes/pkg/controller/podgc" @@ -68,6 +70,13 @@ import ( netutils "k8s.io/utils/net" ) +const ( + // defaultNodeMaskCIDRIPv4 is default mask size for IPv4 node cidr + defaultNodeMaskCIDRIPv4 = 24 + // defaultNodeMaskCIDRIPv6 is default mask size for IPv6 node cidr + defaultNodeMaskCIDRIPv6 = 64 +) + func startServiceController(ctx ControllerContext) (http.Handler, bool, error) { serviceController, err := servicecontroller.New( ctx.Cloud, @@ -84,6 +93,7 @@ func startServiceController(ctx ControllerContext) (http.Handler, bool, error) { go serviceController.Run(ctx.Stop, int(ctx.ComponentConfig.ServiceController.ConcurrentServiceSyncs)) return nil, true, nil } + func startNodeIpamController(ctx ControllerContext) (http.Handler, bool, error) { var serviceCIDR *net.IPNet var secondaryServiceCIDR *net.IPNet @@ -146,6 +156,20 @@ func startNodeIpamController(ctx ControllerContext) (http.Handler, bool, error) } } + var nodeCIDRMaskSizeIPv4, nodeCIDRMaskSizeIPv6 int + if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.IPv6DualStack) { + nodeCIDRMaskSizeIPv4, nodeCIDRMaskSizeIPv6, err = setNodeCIDRMaskSizesDualStack(ctx.ComponentConfig.NodeIPAMController) + } else { + nodeCIDRMaskSizeIPv4, nodeCIDRMaskSizeIPv6, err = setNodeCIDRMaskSizes(ctx.ComponentConfig.NodeIPAMController) + } + + 
if err != nil { + return nil, false, err + } + + // get list of node cidr mask sizes + nodeCIDRMaskSizes := getNodeCIDRMaskSizes(clusterCIDRs, nodeCIDRMaskSizeIPv4, nodeCIDRMaskSizeIPv6) + nodeIpamController, err := nodeipamcontroller.NewNodeIpamController( ctx.InformerFactory.Core().V1().Nodes(), ctx.Cloud, @@ -153,7 +177,7 @@ func startNodeIpamController(ctx ControllerContext) clusterCIDRs, serviceCIDR, secondaryServiceCIDR, - int(ctx.ComponentConfig.NodeIPAMController.NodeCIDRMaskSize), + nodeCIDRMaskSizes, ipam.CIDRAllocatorType(ctx.ComponentConfig.KubeCloudShared.CIDRAllocatorType), ) if err != nil { @@ -546,3 +570,50 @@ func processCIDRs(cidrsList string) ([]*net.IPNet, bool, error) { return cidrs, dualstack, nil } + +// setNodeCIDRMaskSizes returns the IPv4 and IPv6 node cidr mask sizes. +// If --node-cidr-mask-size is not set, then it will return default IPv4 and IPv6 cidr mask sizes. +func setNodeCIDRMaskSizes(cfg nodeipamconfig.NodeIPAMControllerConfiguration) (int, int, error) { + ipv4Mask, ipv6Mask := defaultNodeMaskCIDRIPv4, defaultNodeMaskCIDRIPv6 + // NodeCIDRMaskSizeIPv4 and NodeCIDRMaskSizeIPv6 can be used only for dual-stack clusters + if cfg.NodeCIDRMaskSizeIPv4 != 0 || cfg.NodeCIDRMaskSizeIPv6 != 0 { + return ipv4Mask, ipv6Mask, errors.New("usage of --node-cidr-mask-size-ipv4 and --node-cidr-mask-size-ipv6 are not allowed with non dual-stack clusters") + } + if cfg.NodeCIDRMaskSize != 0 { + ipv4Mask = int(cfg.NodeCIDRMaskSize) + ipv6Mask = int(cfg.NodeCIDRMaskSize) + } + return ipv4Mask, ipv6Mask, nil +} + +// setNodeCIDRMaskSizesDualStack returns the IPv4 and IPv6 node cidr mask sizes to the value provided +// for --node-cidr-mask-size-ipv4 and --node-cidr-mask-size-ipv6 respectively. If a value is not provided, +// then it will return default IPv4 and IPv6 cidr mask sizes. 
+func setNodeCIDRMaskSizesDualStack(cfg nodeipamconfig.NodeIPAMControllerConfiguration) (int, int, error) { + ipv4Mask, ipv6Mask := defaultNodeMaskCIDRIPv4, defaultNodeMaskCIDRIPv6 + // NodeCIDRMaskSize can be used only for single stack clusters + if cfg.NodeCIDRMaskSize != 0 { + return ipv4Mask, ipv6Mask, errors.New("usage of --node-cidr-mask-size is not allowed with dual-stack clusters") + } + if cfg.NodeCIDRMaskSizeIPv4 != 0 { + ipv4Mask = int(cfg.NodeCIDRMaskSizeIPv4) + } + if cfg.NodeCIDRMaskSizeIPv6 != 0 { + ipv6Mask = int(cfg.NodeCIDRMaskSizeIPv6) + } + return ipv4Mask, ipv6Mask, nil +} + +// getNodeCIDRMaskSizes is a helper function that helps generate the node cidr mask +// sizes slice based on the cluster cidr slice +func getNodeCIDRMaskSizes(clusterCIDRs []*net.IPNet, maskSizeIPv4, maskSizeIPv6 int) []int { + nodeMaskCIDRs := []int{} + for _, clusterCIDR := range clusterCIDRs { + if netutils.IsIPv6CIDR(clusterCIDR) { + nodeMaskCIDRs = append(nodeMaskCIDRs, maskSizeIPv6) + } else { + nodeMaskCIDRs = append(nodeMaskCIDRs, maskSizeIPv4) + } + } + return nodeMaskCIDRs +} diff --git a/cmd/kube-controller-manager/app/options/nodeipamcontroller.go b/cmd/kube-controller-manager/app/options/nodeipamcontroller.go index 158ce08be79..a7a90bfa765 100644 --- a/cmd/kube-controller-manager/app/options/nodeipamcontroller.go +++ b/cmd/kube-controller-manager/app/options/nodeipamcontroller.go @@ -36,7 +36,9 @@ func (o *NodeIPAMControllerOptions) AddFlags(fs *pflag.FlagSet) { return } fs.StringVar(&o.ServiceCIDR, "service-cluster-ip-range", o.ServiceCIDR, "CIDR Range for Services in cluster. Requires --allocate-node-cidrs to be true") - fs.Int32Var(&o.NodeCIDRMaskSize, "node-cidr-mask-size", o.NodeCIDRMaskSize, "Mask size for node cidr in cluster.") + fs.Int32Var(&o.NodeCIDRMaskSize, "node-cidr-mask-size", o.NodeCIDRMaskSize, "Mask size for node cidr in cluster. 
Default is 24 for IPv4 and 64 for IPv6.") + fs.Int32Var(&o.NodeCIDRMaskSizeIPv4, "node-cidr-mask-size-ipv4", o.NodeCIDRMaskSizeIPv4, "Mask size for IPv4 node cidr in dual-stack cluster. Default is 24.") + fs.Int32Var(&o.NodeCIDRMaskSizeIPv6, "node-cidr-mask-size-ipv6", o.NodeCIDRMaskSizeIPv6, "Mask size for IPv6 node cidr in dual-stack cluster. Default is 64.") } // ApplyTo fills up NodeIpamController config with options. diff --git a/cmd/kube-controller-manager/app/options/options_test.go b/cmd/kube-controller-manager/app/options/options_test.go index d1ede9e882e..3e215249731 100644 --- a/cmd/kube-controller-manager/app/options/options_test.go +++ b/cmd/kube-controller-manager/app/options/options_test.go @@ -117,6 +117,8 @@ func TestAddFlags(t *testing.T) { "--min-resync-period=8h", "--namespace-sync-period=10m", "--node-cidr-mask-size=48", + "--node-cidr-mask-size-ipv4=48", + "--node-cidr-mask-size-ipv6=108", "--node-eviction-rate=0.2", "--node-monitor-grace-period=30s", "--node-monitor-period=10s", @@ -279,7 +281,9 @@ func TestAddFlags(t *testing.T) { }, NodeIPAMController: &NodeIPAMControllerOptions{ &nodeipamconfig.NodeIPAMControllerConfiguration{ - NodeCIDRMaskSize: 48, + NodeCIDRMaskSize: 48, + NodeCIDRMaskSizeIPv4: 48, + NodeCIDRMaskSizeIPv6: 108, }, }, NodeLifecycleController: &NodeLifecycleControllerOptions{ diff --git a/pkg/controller/nodeipam/config/types.go b/pkg/controller/nodeipam/config/types.go index 66094406385..84431b4e3c2 100644 --- a/pkg/controller/nodeipam/config/types.go +++ b/pkg/controller/nodeipam/config/types.go @@ -22,6 +22,10 @@ type NodeIPAMControllerConfiguration struct { ServiceCIDR string // secondaryServiceCIDR is CIDR Range for Services in cluster. This is used in dual stack clusters. SecondaryServiceCIDR must be of different IP family than ServiceCIDR SecondaryServiceCIDR string - // NodeCIDRMaskSize is the mask size for node cidr in cluster. + // NodeCIDRMaskSize is the mask size for node cidr in single-stack cluster. 
NodeCIDRMaskSize int32 + // NodeCIDRMaskSizeIPv4 is the mask size for IPv4 node cidr in dual-stack cluster. + NodeCIDRMaskSizeIPv4 int32 + // NodeCIDRMaskSizeIPv6 is the mask size for IPv6 node cidr in dual-stack cluster. + NodeCIDRMaskSizeIPv6 int32 } diff --git a/pkg/controller/nodeipam/config/v1alpha1/defaults.go b/pkg/controller/nodeipam/config/v1alpha1/defaults.go index 24c84835349..fa6d17ac227 100644 --- a/pkg/controller/nodeipam/config/v1alpha1/defaults.go +++ b/pkg/controller/nodeipam/config/v1alpha1/defaults.go @@ -30,7 +30,6 @@ import ( // be no easy way to opt-out. Instead, if you want to use this defaulting method // run it in your wrapper struct of this type in its `SetDefaults_` method. func RecommendedDefaultNodeIPAMControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.NodeIPAMControllerConfiguration) { - if obj.NodeCIDRMaskSize == 0 { - obj.NodeCIDRMaskSize = 24 - } + // The default mask size is not set here because we need to determine the cluster cidr family before setting the + // appropriate mask size. 
} diff --git a/pkg/controller/nodeipam/config/v1alpha1/zz_generated.conversion.go b/pkg/controller/nodeipam/config/v1alpha1/zz_generated.conversion.go index 6878b70e8ce..1ba089f8055 100644 --- a/pkg/controller/nodeipam/config/v1alpha1/zz_generated.conversion.go +++ b/pkg/controller/nodeipam/config/v1alpha1/zz_generated.conversion.go @@ -84,6 +84,8 @@ func autoConvert_v1alpha1_NodeIPAMControllerConfiguration_To_config_NodeIPAMCont out.ServiceCIDR = in.ServiceCIDR out.SecondaryServiceCIDR = in.SecondaryServiceCIDR out.NodeCIDRMaskSize = in.NodeCIDRMaskSize + out.NodeCIDRMaskSizeIPv4 = in.NodeCIDRMaskSizeIPv4 + out.NodeCIDRMaskSizeIPv6 = in.NodeCIDRMaskSizeIPv6 return nil } @@ -91,5 +93,7 @@ func autoConvert_config_NodeIPAMControllerConfiguration_To_v1alpha1_NodeIPAMCont out.ServiceCIDR = in.ServiceCIDR out.SecondaryServiceCIDR = in.SecondaryServiceCIDR out.NodeCIDRMaskSize = in.NodeCIDRMaskSize + out.NodeCIDRMaskSizeIPv4 = in.NodeCIDRMaskSizeIPv4 + out.NodeCIDRMaskSizeIPv6 = in.NodeCIDRMaskSizeIPv6 return nil } diff --git a/pkg/controller/nodeipam/ipam/cidr_allocator.go b/pkg/controller/nodeipam/ipam/cidr_allocator.go index 080d71b9cd5..997e3545d59 100644 --- a/pkg/controller/nodeipam/ipam/cidr_allocator.go +++ b/pkg/controller/nodeipam/ipam/cidr_allocator.go @@ -88,8 +88,21 @@ type CIDRAllocator interface { Run(stopCh <-chan struct{}) } +// CIDRAllocatorParams is parameters that's required for creating new +// cidr range allocator. +type CIDRAllocatorParams struct { + // ClusterCIDRs is list of cluster cidrs + ClusterCIDRs []*net.IPNet + // ServiceCIDR is primary service cidr for cluster + ServiceCIDR *net.IPNet + // SecondaryServiceCIDR is secondary service cidr for cluster + SecondaryServiceCIDR *net.IPNet + // NodeCIDRMaskSizes is list of node cidr mask sizes + NodeCIDRMaskSizes []int +} + // New creates a new CIDR range allocator. 
-func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, allocatorType CIDRAllocatorType, clusterCIDRs []*net.IPNet, serviceCIDR *net.IPNet, secondaryServiceCIDR *net.IPNet, nodeCIDRMaskSize int) (CIDRAllocator, error) { +func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, allocatorType CIDRAllocatorType, allocatorParams CIDRAllocatorParams) (CIDRAllocator, error) { nodeList, err := listNodes(kubeClient) if err != nil { return nil, err @@ -97,7 +110,7 @@ func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInfo switch allocatorType { case RangeAllocatorType: - return NewCIDRRangeAllocator(kubeClient, nodeInformer, clusterCIDRs, serviceCIDR, secondaryServiceCIDR, nodeCIDRMaskSize, nodeList) + return NewCIDRRangeAllocator(kubeClient, nodeInformer, allocatorParams, nodeList) case CloudAllocatorType: return NewCloudCIDRAllocator(kubeClient, cloud, nodeInformer) default: diff --git a/pkg/controller/nodeipam/ipam/cidrset/cidr_set.go b/pkg/controller/nodeipam/ipam/cidrset/cidr_set.go index 939e581f82d..f2be0cc6832 100644 --- a/pkg/controller/nodeipam/ipam/cidrset/cidr_set.go +++ b/pkg/controller/nodeipam/ipam/cidrset/cidr_set.go @@ -43,6 +43,8 @@ const ( // The subnet mask size cannot be greater than 16 more than the cluster mask size // TODO: https://github.com/kubernetes/kubernetes/issues/44918 // clusterSubnetMaxDiff limited to 16 due to the uncompressed bitmap + // Due to this limitation the subnet mask for IPv6 cluster cidr needs to be >= 48 + // as default mask size for IPv6 is 64. 
clusterSubnetMaxDiff = 16 // halfIPv6Len is the half of the IPv6 length halfIPv6Len = net.IPv6len / 2 diff --git a/pkg/controller/nodeipam/ipam/range_allocator.go b/pkg/controller/nodeipam/ipam/range_allocator.go index e72437c9e03..49568020183 100644 --- a/pkg/controller/nodeipam/ipam/range_allocator.go +++ b/pkg/controller/nodeipam/ipam/range_allocator.go @@ -21,9 +21,9 @@ import ( "net" "sync" + "k8s.io/api/core/v1" "k8s.io/klog" - v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -71,7 +71,7 @@ type rangeAllocator struct { // Caller must always pass in a list of existing nodes so the new allocator. // Caller must ensure that ClusterCIDRs are semantically correct e.g (1 for non DualStack, 2 for DualStack etc..) // can initialize its CIDR map. NodeList is only nil in testing. -func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.NodeInformer, clusterCIDRs []*net.IPNet, serviceCIDR *net.IPNet, secondaryServiceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) { +func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.NodeInformer, allocatorParams CIDRAllocatorParams, nodeList *v1.NodeList) (CIDRAllocator, error) { if client == nil { klog.Fatalf("kubeClient is nil when starting NodeController") } @@ -84,9 +84,9 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No // create a cidrSet for each cidr we operate on // cidrSet are mapped to clusterCIDR by index - cidrSets := make([]*cidrset.CidrSet, len(clusterCIDRs)) - for idx, cidr := range clusterCIDRs { - cidrSet, err := cidrset.NewCIDRSet(cidr, subNetMaskSize) + cidrSets := make([]*cidrset.CidrSet, len(allocatorParams.ClusterCIDRs)) + for idx, cidr := range allocatorParams.ClusterCIDRs { + cidrSet, err := cidrset.NewCIDRSet(cidr, allocatorParams.NodeCIDRMaskSizes[idx]) if err != nil { return nil, err } 
@@ -95,7 +95,7 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No ra := &rangeAllocator{ client: client, - clusterCIDRs: clusterCIDRs, + clusterCIDRs: allocatorParams.ClusterCIDRs, cidrSets: cidrSets, nodeLister: nodeInformer.Lister(), nodesSynced: nodeInformer.Informer().HasSynced, @@ -104,14 +104,14 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No nodesInProcessing: sets.NewString(), } - if serviceCIDR != nil { - ra.filterOutServiceRange(serviceCIDR) + if allocatorParams.ServiceCIDR != nil { + ra.filterOutServiceRange(allocatorParams.ServiceCIDR) } else { klog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.") } - if secondaryServiceCIDR != nil { - ra.filterOutServiceRange(secondaryServiceCIDR) + if allocatorParams.SecondaryServiceCIDR != nil { + ra.filterOutServiceRange(allocatorParams.SecondaryServiceCIDR) } else { klog.V(0).Info("No Secondary Service CIDR provided. Skipping filtering out secondary service addresses.") } diff --git a/pkg/controller/nodeipam/ipam/range_allocator_test.go b/pkg/controller/nodeipam/ipam/range_allocator_test.go index 7dcf376c81a..c017fb0bb41 100644 --- a/pkg/controller/nodeipam/ipam/range_allocator_test.go +++ b/pkg/controller/nodeipam/ipam/range_allocator_test.go @@ -60,12 +60,9 @@ func getFakeNodeInformer(fakeNodeHandler *testutil.FakeNodeHandler) coreinformer } type testCase struct { - description string - fakeNodeHandler *testutil.FakeNodeHandler - clusterCIDRs []*net.IPNet - serviceCIDR *net.IPNet - secondaryServiceCIDR *net.IPNet - subNetMaskSize int + description string + fakeNodeHandler *testutil.FakeNodeHandler + allocatorParams CIDRAllocatorParams // key is index of the cidr allocated expectedAllocatedCIDR map[int]string allocatedCIDRs map[int][]string @@ -88,13 +85,15 @@ func TestOccupyPreExistingCIDR(t *testing.T) { }, Clientset: fake.NewSimpleClientset(), }, - clusterCIDRs: func() []*net.IPNet { - _, clusterCIDRv4, _ := 
net.ParseCIDR("10.10.0.0/16") - return []*net.IPNet{clusterCIDRv4} - }(), - serviceCIDR: nil, - secondaryServiceCIDR: nil, - subNetMaskSize: 24, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") + return []*net.IPNet{clusterCIDRv4} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{24}, + }, allocatedCIDRs: nil, expectedAllocatedCIDR: nil, ctrlCreateFail: false, @@ -111,14 +110,16 @@ func TestOccupyPreExistingCIDR(t *testing.T) { }, Clientset: fake.NewSimpleClientset(), }, - clusterCIDRs: func() []*net.IPNet { - _, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") - _, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8") - return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} - }(), - serviceCIDR: nil, - secondaryServiceCIDR: nil, - subNetMaskSize: 24, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") + _, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8") + return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{24, 24}, + }, allocatedCIDRs: nil, expectedAllocatedCIDR: nil, ctrlCreateFail: false, @@ -138,13 +139,15 @@ func TestOccupyPreExistingCIDR(t *testing.T) { }, Clientset: fake.NewSimpleClientset(), }, - clusterCIDRs: func() []*net.IPNet { - _, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") - return []*net.IPNet{clusterCIDRv4} - }(), - serviceCIDR: nil, - secondaryServiceCIDR: nil, - subNetMaskSize: 24, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") + return []*net.IPNet{clusterCIDRv4} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{24}, + }, allocatedCIDRs: nil, expectedAllocatedCIDR: nil, ctrlCreateFail: false, @@ -164,14 +167,16 @@ func TestOccupyPreExistingCIDR(t 
*testing.T) { }, Clientset: fake.NewSimpleClientset(), }, - clusterCIDRs: func() []*net.IPNet { - _, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") - _, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8") - return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} - }(), - serviceCIDR: nil, - secondaryServiceCIDR: nil, - subNetMaskSize: 24, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") + _, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8") + return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{24, 24}, + }, allocatedCIDRs: nil, expectedAllocatedCIDR: nil, ctrlCreateFail: false, @@ -192,13 +197,15 @@ func TestOccupyPreExistingCIDR(t *testing.T) { }, Clientset: fake.NewSimpleClientset(), }, - clusterCIDRs: func() []*net.IPNet { - _, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") - return []*net.IPNet{clusterCIDRv4} - }(), - serviceCIDR: nil, - secondaryServiceCIDR: nil, - subNetMaskSize: 24, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") + return []*net.IPNet{clusterCIDRv4} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{24}, + }, allocatedCIDRs: nil, expectedAllocatedCIDR: nil, ctrlCreateFail: true, @@ -219,13 +226,15 @@ func TestOccupyPreExistingCIDR(t *testing.T) { }, Clientset: fake.NewSimpleClientset(), }, - clusterCIDRs: func() []*net.IPNet { - _, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") - return []*net.IPNet{clusterCIDRv4} - }(), - serviceCIDR: nil, - secondaryServiceCIDR: nil, - subNetMaskSize: 24, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") + return []*net.IPNet{clusterCIDRv4} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{24}, + }, 
allocatedCIDRs: nil, expectedAllocatedCIDR: nil, ctrlCreateFail: true, @@ -246,14 +255,16 @@ func TestOccupyPreExistingCIDR(t *testing.T) { }, Clientset: fake.NewSimpleClientset(), }, - clusterCIDRs: func() []*net.IPNet { - _, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") - _, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8") - return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} - }(), - serviceCIDR: nil, - secondaryServiceCIDR: nil, - subNetMaskSize: 24, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") + _, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8") + return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{24, 24}, + }, allocatedCIDRs: nil, expectedAllocatedCIDR: nil, ctrlCreateFail: true, @@ -274,14 +285,16 @@ func TestOccupyPreExistingCIDR(t *testing.T) { }, Clientset: fake.NewSimpleClientset(), }, - clusterCIDRs: func() []*net.IPNet { - _, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") - _, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8") - return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} - }(), - serviceCIDR: nil, - secondaryServiceCIDR: nil, - subNetMaskSize: 24, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") + _, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8") + return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{24, 24}, + }, allocatedCIDRs: nil, expectedAllocatedCIDR: nil, ctrlCreateFail: true, @@ -294,7 +307,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) { // Initialize the range allocator. 
fakeNodeInformer := getFakeNodeInformer(tc.fakeNodeHandler) nodeList, _ := tc.fakeNodeHandler.List(metav1.ListOptions{}) - _, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, fakeNodeInformer, tc.clusterCIDRs, tc.serviceCIDR, tc.secondaryServiceCIDR, tc.subNetMaskSize, nodeList) + _, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList) if err == nil && tc.ctrlCreateFail { t.Fatalf("creating range allocator was expected to fail, but it did not") } @@ -320,13 +333,15 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) { }, Clientset: fake.NewSimpleClientset(), }, - clusterCIDRs: func() []*net.IPNet { - _, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24") - return []*net.IPNet{clusterCIDR} - }(), - serviceCIDR: nil, - secondaryServiceCIDR: nil, - subNetMaskSize: 30, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24") + return []*net.IPNet{clusterCIDR} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{30}, + }, expectedAllocatedCIDR: map[int]string{ 0: "127.123.234.0/30", }, @@ -343,16 +358,18 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) { }, Clientset: fake.NewSimpleClientset(), }, - clusterCIDRs: func() []*net.IPNet { - _, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24") - return []*net.IPNet{clusterCIDR} - }(), - serviceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26") - return serviceCIDR - }(), - secondaryServiceCIDR: nil, - subNetMaskSize: 30, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24") + return []*net.IPNet{clusterCIDR} + }(), + ServiceCIDR: func() *net.IPNet { + _, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26") + return serviceCIDR + }(), + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{30}, + }, // it should return first /30 CIDR after service range 
expectedAllocatedCIDR: map[int]string{ 0: "127.123.234.64/30", @@ -370,16 +387,18 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) { }, Clientset: fake.NewSimpleClientset(), }, - clusterCIDRs: func() []*net.IPNet { - _, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24") - return []*net.IPNet{clusterCIDR} - }(), - serviceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26") - return serviceCIDR - }(), - secondaryServiceCIDR: nil, - subNetMaskSize: 30, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24") + return []*net.IPNet{clusterCIDR} + }(), + ServiceCIDR: func() *net.IPNet { + _, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26") + return serviceCIDR + }(), + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{30}, + }, allocatedCIDRs: map[int][]string{ 0: {"127.123.234.64/30", "127.123.234.68/30", "127.123.234.72/30", "127.123.234.80/30"}, }, @@ -399,16 +418,19 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) { }, Clientset: fake.NewSimpleClientset(), }, - clusterCIDRs: func() []*net.IPNet { - _, clusterCIDRv4, _ := net.ParseCIDR("127.123.234.0/8") - _, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8") - return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} - }(), - serviceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26") - return serviceCIDR - }(), - secondaryServiceCIDR: nil, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDRv4, _ := net.ParseCIDR("127.123.234.0/8") + _, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/84") + return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} + }(), + ServiceCIDR: func() *net.IPNet { + _, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26") + return serviceCIDR + }(), + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{24, 98}, + }, }, { description: "Dualstack CIDRs v6,v4", @@ -422,18 +444,20 @@ func 
TestAllocateOrOccupyCIDRSuccess(t *testing.T) { }, Clientset: fake.NewSimpleClientset(), }, - clusterCIDRs: func() []*net.IPNet { - _, clusterCIDRv4, _ := net.ParseCIDR("127.123.234.0/8") - _, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8") - return []*net.IPNet{clusterCIDRv6, clusterCIDRv4} - }(), - serviceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26") - return serviceCIDR - }(), - secondaryServiceCIDR: nil, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDRv4, _ := net.ParseCIDR("127.123.234.0/8") + _, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/84") + return []*net.IPNet{clusterCIDRv6, clusterCIDRv4} + }(), + ServiceCIDR: func() *net.IPNet { + _, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26") + return serviceCIDR + }(), + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{98, 24}, + }, }, - { description: "Dualstack CIDRs, more than two", fakeNodeHandler: &testutil.FakeNodeHandler{ @@ -446,24 +470,27 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) { }, Clientset: fake.NewSimpleClientset(), }, - clusterCIDRs: func() []*net.IPNet { - _, clusterCIDRv4, _ := net.ParseCIDR("127.123.234.0/8") - _, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8") - _, clusterCIDRv4_2, _ := net.ParseCIDR("10.0.0.0/8") - return []*net.IPNet{clusterCIDRv4, clusterCIDRv6, clusterCIDRv4_2} - }(), - serviceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26") - return serviceCIDR - }(), - secondaryServiceCIDR: nil, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDRv4, _ := net.ParseCIDR("127.123.234.0/8") + _, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/84") + _, clusterCIDRv4_2, _ := net.ParseCIDR("10.0.0.0/8") + return []*net.IPNet{clusterCIDRv4, clusterCIDRv6, clusterCIDRv4_2} + }(), + ServiceCIDR: func() *net.IPNet { + _, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26") + return serviceCIDR + }(), 
+ SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{24, 98, 24}, + }, }, } // test function testFunc := func(tc testCase) { // Initialize the range allocator. - allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDRs, tc.serviceCIDR, tc.secondaryServiceCIDR, tc.subNetMaskSize, nil) + allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil) if err != nil { t.Errorf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err) return @@ -535,13 +562,15 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) { }, Clientset: fake.NewSimpleClientset(), }, - clusterCIDRs: func() []*net.IPNet { - _, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28") - return []*net.IPNet{clusterCIDR} - }(), - serviceCIDR: nil, - secondaryServiceCIDR: nil, - subNetMaskSize: 30, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28") + return []*net.IPNet{clusterCIDR} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{30}, + }, allocatedCIDRs: map[int][]string{ 0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, }, @@ -550,7 +579,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) { testFunc := func(tc testCase) { // Initialize the range allocator. 
- allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDRs, tc.serviceCIDR, tc.secondaryServiceCIDR, tc.subNetMaskSize, nil) + allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil) if err != nil { t.Logf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err) } @@ -609,10 +638,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) { type releaseTestCase struct { description string fakeNodeHandler *testutil.FakeNodeHandler - clusterCIDRs []*net.IPNet - serviceCIDR *net.IPNet - secondaryServiceCIDR *net.IPNet - subNetMaskSize int + allocatorParams CIDRAllocatorParams expectedAllocatedCIDRFirstRound map[int]string expectedAllocatedCIDRSecondRound map[int]string allocatedCIDRs map[int][]string @@ -633,13 +659,15 @@ func TestReleaseCIDRSuccess(t *testing.T) { }, Clientset: fake.NewSimpleClientset(), }, - clusterCIDRs: func() []*net.IPNet { - _, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28") - return []*net.IPNet{clusterCIDR} - }(), - serviceCIDR: nil, - secondaryServiceCIDR: nil, - subNetMaskSize: 30, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28") + return []*net.IPNet{clusterCIDR} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{30}, + }, allocatedCIDRs: map[int][]string{ 0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, }, @@ -663,13 +691,15 @@ func TestReleaseCIDRSuccess(t *testing.T) { }, Clientset: fake.NewSimpleClientset(), }, - clusterCIDRs: func() []*net.IPNet { - _, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28") - return []*net.IPNet{clusterCIDR} - }(), - serviceCIDR: nil, - secondaryServiceCIDR: nil, - subNetMaskSize: 30, + allocatorParams: CIDRAllocatorParams{ + ClusterCIDRs: func() []*net.IPNet { + _, clusterCIDR, _ := 
net.ParseCIDR("127.123.234.0/28") + return []*net.IPNet{clusterCIDR} + }(), + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{30}, + }, allocatedCIDRs: map[int][]string{ 0: {"127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, }, @@ -687,7 +717,7 @@ func TestReleaseCIDRSuccess(t *testing.T) { testFunc := func(tc releaseTestCase) { // Initialize the range allocator. - allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDRs, tc.serviceCIDR, tc.secondaryServiceCIDR, tc.subNetMaskSize, nil) + allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil) rangeAllocator, ok := allocator.(*rangeAllocator) if !ok { t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) diff --git a/pkg/controller/nodeipam/legacyprovider.go b/pkg/controller/nodeipam/legacyprovider.go index c0c99c85d12..ff01aec147c 100644 --- a/pkg/controller/nodeipam/legacyprovider.go +++ b/pkg/controller/nodeipam/legacyprovider.go @@ -37,7 +37,7 @@ func startLegacyIPAM( kubeClient clientset.Interface, clusterCIDRs []*net.IPNet, serviceCIDR *net.IPNet, - nodeCIDRMaskSize int, + nodeCIDRMaskSizes []int, ) { cfg := &ipam.Config{ Resync: ipamResyncInterval, @@ -59,7 +59,7 @@ func startLegacyIPAM( if len(clusterCIDRs) > 1 { klog.Warningf("Multiple cidrs were configured with FromCluster or FromCloud. 
cidrs except first one were discarded") } - ipamc, err := ipam.NewController(cfg, kubeClient, cloud, cidr, serviceCIDR, nodeCIDRMaskSize) + ipamc, err := ipam.NewController(cfg, kubeClient, cloud, cidr, serviceCIDR, nodeCIDRMaskSizes[0]) if err != nil { klog.Fatalf("Error creating ipam controller: %v", err) } diff --git a/pkg/controller/nodeipam/node_ipam_controller.go b/pkg/controller/nodeipam/node_ipam_controller.go index a8214b6dcce..99d13e8d175 100644 --- a/pkg/controller/nodeipam/node_ipam_controller.go +++ b/pkg/controller/nodeipam/node_ipam_controller.go @@ -81,7 +81,7 @@ func NewNodeIpamController( clusterCIDRs []*net.IPNet, serviceCIDR *net.IPNet, secondaryServiceCIDR *net.IPNet, - nodeCIDRMaskSize int, + nodeCIDRMaskSizes []int, allocatorType ipam.CIDRAllocatorType) (*Controller, error) { if kubeClient == nil { @@ -111,11 +111,11 @@ func NewNodeIpamController( // - modify mask to allow flexible masks for IPv4 and IPv6 // - for alpha status they are the same - // for each cidr, cidr mask size must be <= node mask - for _, cidr := range clusterCIDRs { + // for each cidr, node mask size must be <= cidr mask + for idx, cidr := range clusterCIDRs { mask := cidr.Mask - if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize { - klog.Fatal("Controller: Invalid --cluster-cidr, mask size of cluster CIDR must be less than or equal to --node-cidr-mask-size") + if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSizes[idx] { + klog.Fatal("Controller: Invalid --cluster-cidr, mask size of cluster CIDR must be less than or equal to --node-cidr-mask-size configured for CIDR family") } } } @@ -132,10 +132,18 @@ func NewNodeIpamController( // TODO: Abstract this check into a generic controller manager should run method. 
if ic.allocatorType == ipam.IPAMFromClusterAllocatorType || ic.allocatorType == ipam.IPAMFromCloudAllocatorType { - startLegacyIPAM(ic, nodeInformer, cloud, kubeClient, clusterCIDRs, serviceCIDR, nodeCIDRMaskSize) + startLegacyIPAM(ic, nodeInformer, cloud, kubeClient, clusterCIDRs, serviceCIDR, nodeCIDRMaskSizes) } else { var err error - ic.cidrAllocator, err = ipam.New(kubeClient, cloud, nodeInformer, ic.allocatorType, clusterCIDRs, ic.serviceCIDR, ic.secondaryServiceCIDR, nodeCIDRMaskSize) + + allocatorParams := ipam.CIDRAllocatorParams{ + ClusterCIDRs: clusterCIDRs, + ServiceCIDR: ic.serviceCIDR, + SecondaryServiceCIDR: ic.secondaryServiceCIDR, + NodeCIDRMaskSizes: nodeCIDRMaskSizes, + } + + ic.cidrAllocator, err = ipam.New(kubeClient, cloud, nodeInformer, ic.allocatorType, allocatorParams) if err != nil { return nil, err } diff --git a/pkg/controller/nodeipam/node_ipam_controller_test.go b/pkg/controller/nodeipam/node_ipam_controller_test.go index 50d0b63a192..1b435f4ab73 100644 --- a/pkg/controller/nodeipam/node_ipam_controller_test.go +++ b/pkg/controller/nodeipam/node_ipam_controller_test.go @@ -34,7 +34,7 @@ import ( netutils "k8s.io/utils/net" ) -func newTestNodeIpamController(clusterCIDR []*net.IPNet, serviceCIDR *net.IPNet, secondaryServiceCIDR *net.IPNet, nodeCIDRMaskSize int, allocatorType ipam.CIDRAllocatorType) (*Controller, error) { +func newTestNodeIpamController(clusterCIDR []*net.IPNet, serviceCIDR *net.IPNet, secondaryServiceCIDR *net.IPNet, nodeCIDRMaskSizes []int, allocatorType ipam.CIDRAllocatorType) (*Controller, error) { clientSet := fake.NewSimpleClientset() fakeNodeHandler := &testutil.FakeNodeHandler{ Existing: []*v1.Node{ @@ -53,7 +53,7 @@ func newTestNodeIpamController(clusterCIDR []*net.IPNet, serviceCIDR *net.IPNet, fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) return NewNodeIpamController( fakeNodeInformer, fakeGCE, clientSet, - clusterCIDR, serviceCIDR, secondaryServiceCIDR, nodeCIDRMaskSize, allocatorType, + 
clusterCIDR, serviceCIDR, secondaryServiceCIDR, nodeCIDRMaskSizes, allocatorType, ) } @@ -66,23 +66,24 @@ func TestNewNodeIpamControllerWithCIDRMasks(t *testing.T) { clusterCIDR string serviceCIDR string secondaryServiceCIDR string - maskSize int + maskSize []int allocatorType ipam.CIDRAllocatorType wantFatal bool }{ - {"valid_range_allocator", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, 24, ipam.RangeAllocatorType, false}, + {"valid_range_allocator", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.RangeAllocatorType, false}, - {"valid_range_allocator_dualstack", "10.0.0.0/21,2000::/10", "10.1.0.0/21", emptyServiceCIDR, 24, ipam.RangeAllocatorType, false}, - {"valid_range_allocator_dualstack_dualstackservice", "10.0.0.0/21,2000::/10", "10.1.0.0/21", "3000::/10", 24, ipam.RangeAllocatorType, false}, + {"valid_range_allocator_dualstack", "10.0.0.0/21,2000::/10", "10.1.0.0/21", emptyServiceCIDR, []int{24, 98}, ipam.RangeAllocatorType, false}, + {"valid_range_allocator_dualstack_dualstackservice", "10.0.0.0/21,2000::/10", "10.1.0.0/21", "3000::/10", []int{24, 98}, ipam.RangeAllocatorType, false}, - {"valid_cloud_allocator", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, 24, ipam.CloudAllocatorType, false}, - {"valid_ipam_from_cluster", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, 24, ipam.IPAMFromClusterAllocatorType, false}, - {"valid_ipam_from_cloud", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, 24, ipam.IPAMFromCloudAllocatorType, false}, - {"valid_skip_cluster_CIDR_validation_for_cloud_allocator", "invalid", "10.1.0.0/21", emptyServiceCIDR, 24, ipam.CloudAllocatorType, false}, - {"invalid_cluster_CIDR", "invalid", "10.1.0.0/21", emptyServiceCIDR, 24, ipam.IPAMFromClusterAllocatorType, true}, - {"valid_CIDR_smaller_than_mask_cloud_allocator", "10.0.0.0/26", "10.1.0.0/21", emptyServiceCIDR, 24, ipam.CloudAllocatorType, false}, - {"invalid_CIDR_smaller_than_mask_other_allocators", "10.0.0.0/26", "10.1.0.0/21", emptyServiceCIDR, 24, 
ipam.IPAMFromCloudAllocatorType, true}, - {"invalid_serviceCIDR_contains_clusterCIDR", "10.0.0.0/23", "10.0.0.0/21", emptyServiceCIDR, 24, ipam.IPAMFromClusterAllocatorType, true}, + {"valid_cloud_allocator", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.CloudAllocatorType, false}, + {"valid_ipam_from_cluster", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.IPAMFromClusterAllocatorType, false}, + {"valid_ipam_from_cloud", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.IPAMFromCloudAllocatorType, false}, + {"valid_skip_cluster_CIDR_validation_for_cloud_allocator", "invalid", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.CloudAllocatorType, false}, + {"invalid_cluster_CIDR", "invalid", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.IPAMFromClusterAllocatorType, true}, + {"valid_CIDR_smaller_than_mask_cloud_allocator", "10.0.0.0/26", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.CloudAllocatorType, false}, + {"invalid_CIDR_smaller_than_mask_other_allocators", "10.0.0.0/26", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.IPAMFromCloudAllocatorType, true}, + {"invalid_serviceCIDR_contains_clusterCIDR", "10.0.0.0/23", "10.0.0.0/21", emptyServiceCIDR, []int{24}, ipam.IPAMFromClusterAllocatorType, true}, + {"invalid_CIDR_mask_size", "10.0.0.0/24,2000::/64", "10.1.0.0/21", emptyServiceCIDR, []int{24, 48}, ipam.IPAMFromClusterAllocatorType, true}, } { t.Run(tc.desc, func(t *testing.T) { clusterCidrs, _ := netutils.ParseCIDRs(strings.Split(tc.clusterCIDR, ",")) diff --git a/staging/src/k8s.io/kube-controller-manager/config/v1alpha1/types.go b/staging/src/k8s.io/kube-controller-manager/config/v1alpha1/types.go index 3d040abdcbe..6f1d449cb0c 100644 --- a/staging/src/k8s.io/kube-controller-manager/config/v1alpha1/types.go +++ b/staging/src/k8s.io/kube-controller-manager/config/v1alpha1/types.go @@ -382,6 +382,10 @@ type NodeIPAMControllerConfiguration struct { SecondaryServiceCIDR string // NodeCIDRMaskSize is the 
mask size for node cidr in cluster. NodeCIDRMaskSize int32 + // NodeCIDRMaskSizeIPv4 is the mask size for node cidr in dual-stack cluster. + NodeCIDRMaskSizeIPv4 int32 + // NodeCIDRMaskSizeIPv6 is the mask size for node cidr in dual-stack cluster. + NodeCIDRMaskSizeIPv6 int32 } // NodeLifecycleControllerConfiguration contains elements describing NodeLifecycleController. diff --git a/test/integration/ipamperf/ipam_test.go b/test/integration/ipamperf/ipam_test.go index 7b9bbd236cc..e0a71358328 100644 --- a/test/integration/ipamperf/ipam_test.go +++ b/test/integration/ipamperf/ipam_test.go @@ -52,7 +52,7 @@ func setupAllocator(apiURL string, config *Config, clusterCIDR, serviceCIDR *net sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour) ipamController, err := nodeipam.NewNodeIpamController( sharedInformer.Core().V1().Nodes(), config.Cloud, clientSet, - []*net.IPNet{clusterCIDR}, serviceCIDR, nil, subnetMaskSize, config.AllocatorType, + []*net.IPNet{clusterCIDR}, serviceCIDR, nil, []int{subnetMaskSize}, config.AllocatorType, ) if err != nil { return nil, shutdownFunc, err