From 313a5c57347026055fdcfefcdd626a7ab3357f84 Mon Sep 17 00:00:00 2001
From: "Khaled Henidak(Kal)"
Date: Mon, 19 Aug 2019 20:53:18 +0000
Subject: [PATCH] phase 2: ipam filter secondary service cidr

---
 api/api-rules/violation_exceptions.list   |  1 +
 cmd/kube-controller-manager/app/core.go   | 26 ++++++++++
 .../app/options/nodeipamcontroller.go     | 21 +++++++--
 pkg/controller/nodeipam/config/types.go   |  2 +
 .../nodeipam/ipam/cidr_allocator.go       |  4 +-
 .../nodeipam/ipam/range_allocator.go      |  9 +++-
 .../nodeipam/ipam/range_allocator_test.go | 47 ++++++++++++-------
 .../nodeipam/node_ipam_controller.go      | 25 +++++-----
 .../nodeipam/node_ipam_controller_test.go | 44 +++++++++--------
 .../config/v1alpha1/types.go              |  2 +
 test/integration/ipamperf/ipam_test.go    |  2 +-
 11 files changed, 128 insertions(+), 55 deletions(-)

diff --git a/api/api-rules/violation_exceptions.list b/api/api-rules/violation_exceptions.list
index 24733bab0b6..c6b9c863a36 100644
--- a/api/api-rules/violation_exceptions.list
+++ b/api/api-rules/violation_exceptions.list
@@ -636,6 +636,7 @@ API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,K
 API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NamespaceControllerConfiguration,ConcurrentNamespaceSyncs
 API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NamespaceControllerConfiguration,NamespaceSyncPeriod
 API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeIPAMControllerConfiguration,NodeCIDRMaskSize
+API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeIPAMControllerConfiguration,SecondaryServiceCIDR
 API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeIPAMControllerConfiguration,ServiceCIDR
 API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeLifecycleControllerConfiguration,EnableTaintManager
 API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeLifecycleControllerConfiguration,LargeClusterSizeThreshold
diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go
index d80c6e5a111..2a5d7ec64c5 100644
--- a/cmd/kube-controller-manager/app/core.go
+++ b/cmd/kube-controller-manager/app/core.go
@@ -83,6 +83,7 @@ func startServiceController(ctx ControllerContext) (http.Handler, bool, error) {
 }
 
 func startNodeIpamController(ctx ControllerContext) (http.Handler, bool, error) {
 	var serviceCIDR *net.IPNet
+	var secondaryServiceCIDR *net.IPNet
 
 	// should we start nodeIPAM
 	if !ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs {
@@ -118,12 +119,37 @@ func startNodeIpamController(ctx ControllerContext) (http.Handler, bool, error)
 		}
 	}
 
+	if len(strings.TrimSpace(ctx.ComponentConfig.NodeIPAMController.SecondaryServiceCIDR)) != 0 {
+		_, secondaryServiceCIDR, err = net.ParseCIDR(ctx.ComponentConfig.NodeIPAMController.SecondaryServiceCIDR)
+		if err != nil {
+			klog.Warningf("Unsuccessful parsing of secondary service CIDR %v: %v", ctx.ComponentConfig.NodeIPAMController.SecondaryServiceCIDR, err)
+		}
+	}
+
+	// the following checks are triggered if both serviceCIDR and secondaryServiceCIDR are provided
+	if serviceCIDR != nil && secondaryServiceCIDR != nil {
+		// should have dual stack flag enabled
+		if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.IPv6DualStack) {
+			return nil, false, fmt.Errorf("secondary service cidr is provided but the IPv6DualStack feature is not enabled")
+		}
+
+		// should be dual stack (from different IPFamilies)
+		dualstackServiceCIDR, err := netutils.IsDualStackCIDRs([]*net.IPNet{serviceCIDR, secondaryServiceCIDR})
+		if err != nil {
+			return nil, false, fmt.Errorf("failed to perform dualstack check on serviceCIDR and secondaryServiceCIDR: %v", err)
+		}
+		if !dualstackServiceCIDR {
+			return nil, false, fmt.Errorf("serviceCIDR and secondaryServiceCIDR are not dualstack (from different IP families)")
+		}
+	}
+
 	nodeIpamController, err := nodeipamcontroller.NewNodeIpamController(
 		ctx.InformerFactory.Core().V1().Nodes(),
 		ctx.Cloud,
 		ctx.ClientBuilder.ClientOrDie("node-controller"),
 		clusterCIDRs,
 		serviceCIDR,
+		secondaryServiceCIDR,
 		int(ctx.ComponentConfig.NodeIPAMController.NodeCIDRMaskSize),
 		ipam.CIDRAllocatorType(ctx.ComponentConfig.KubeCloudShared.CIDRAllocatorType),
 	)
diff --git a/cmd/kube-controller-manager/app/options/nodeipamcontroller.go b/cmd/kube-controller-manager/app/options/nodeipamcontroller.go
index f530b203451..158ce08be79 100644
--- a/cmd/kube-controller-manager/app/options/nodeipamcontroller.go
+++ b/cmd/kube-controller-manager/app/options/nodeipamcontroller.go
@@ -17,6 +17,9 @@ limitations under the License.
 package options
 
 import (
+	"fmt"
+	"strings"
+
 	"github.com/spf13/pflag"
 
 	nodeipamconfig "k8s.io/kubernetes/pkg/controller/nodeipam/config"
@@ -32,7 +35,6 @@ func (o *NodeIPAMControllerOptions) AddFlags(fs *pflag.FlagSet) {
 	if o == nil {
 		return
 	}
-
 	fs.StringVar(&o.ServiceCIDR, "service-cluster-ip-range", o.ServiceCIDR, "CIDR Range for Services in cluster. Requires --allocate-node-cidrs to be true")
 	fs.Int32Var(&o.NodeCIDRMaskSize, "node-cidr-mask-size", o.NodeCIDRMaskSize, "Mask size for node cidr in cluster.")
 }
@@ -43,7 +45,15 @@ func (o *NodeIPAMControllerOptions) ApplyTo(cfg *nodeipamconfig.NodeIPAMControll
 		return nil
 	}
 
-	cfg.ServiceCIDR = o.ServiceCIDR
+	// split the cidrs list and assign primary and secondary
+	serviceCIDRList := strings.Split(o.ServiceCIDR, ",")
+	if len(serviceCIDRList) > 0 {
+		cfg.ServiceCIDR = serviceCIDRList[0]
+	}
+	if len(serviceCIDRList) > 1 {
+		cfg.SecondaryServiceCIDR = serviceCIDRList[1]
+	}
+
 	cfg.NodeCIDRMaskSize = o.NodeCIDRMaskSize
 
 	return nil
@@ -54,7 +64,12 @@ func (o *NodeIPAMControllerOptions) Validate() []error {
 	if o == nil {
 		return nil
 	}
+	errs := make([]error, 0)
+
+	serviceCIDRList := strings.Split(o.ServiceCIDR, ",")
+	if len(serviceCIDRList) > 2 {
+		errs = append(errs, fmt.Errorf("--service-cluster-ip-range cannot contain more than two entries"))
+	}
 
-	errs := []error{}
 	return errs
 }
diff --git a/pkg/controller/nodeipam/config/types.go b/pkg/controller/nodeipam/config/types.go
index 2b6cb01cd60..66094406385 100644
--- a/pkg/controller/nodeipam/config/types.go
+++ b/pkg/controller/nodeipam/config/types.go
@@ -20,6 +20,8 @@ package config
 type NodeIPAMControllerConfiguration struct {
 	// serviceCIDR is CIDR Range for Services in cluster.
 	ServiceCIDR string
+	// secondaryServiceCIDR is CIDR Range for Services in cluster. This is used in dual stack clusters. SecondaryServiceCIDR must be of a different IP family than ServiceCIDR.
+	SecondaryServiceCIDR string
 	// NodeCIDRMaskSize is the mask size for node cidr in cluster.
 	NodeCIDRMaskSize int32
 }
diff --git a/pkg/controller/nodeipam/ipam/cidr_allocator.go b/pkg/controller/nodeipam/ipam/cidr_allocator.go
index 96079a1688f..080d71b9cd5 100644
--- a/pkg/controller/nodeipam/ipam/cidr_allocator.go
+++ b/pkg/controller/nodeipam/ipam/cidr_allocator.go
@@ -89,7 +89,7 @@ type CIDRAllocator interface {
 }
 
 // New creates a new CIDR range allocator.
-func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, allocatorType CIDRAllocatorType, clusterCIDRs []*net.IPNet, serviceCIDR *net.IPNet, nodeCIDRMaskSize int) (CIDRAllocator, error) {
+func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, allocatorType CIDRAllocatorType, clusterCIDRs []*net.IPNet, serviceCIDR *net.IPNet, secondaryServiceCIDR *net.IPNet, nodeCIDRMaskSize int) (CIDRAllocator, error) {
 	nodeList, err := listNodes(kubeClient)
 	if err != nil {
 		return nil, err
@@ -97,7 +97,7 @@ func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInfo
 
 	switch allocatorType {
 	case RangeAllocatorType:
-		return NewCIDRRangeAllocator(kubeClient, nodeInformer, clusterCIDRs, serviceCIDR, nodeCIDRMaskSize, nodeList)
+		return NewCIDRRangeAllocator(kubeClient, nodeInformer, clusterCIDRs, serviceCIDR, secondaryServiceCIDR, nodeCIDRMaskSize, nodeList)
 	case CloudAllocatorType:
 		return NewCloudCIDRAllocator(kubeClient, cloud, nodeInformer)
 	default:
diff --git a/pkg/controller/nodeipam/ipam/range_allocator.go b/pkg/controller/nodeipam/ipam/range_allocator.go
index 4763450a866..0032e36a689 100644
--- a/pkg/controller/nodeipam/ipam/range_allocator.go
+++ b/pkg/controller/nodeipam/ipam/range_allocator.go
@@ -71,7 +71,7 @@ type rangeAllocator struct {
 // Caller must always pass in a list of existing nodes so the new allocator.
 // Caller must ensure that ClusterCIDRs are semantically correct e.g (1 for non DualStack, 2 for DualStack etc..)
 // can initialize its CIDR map. NodeList is only nil in testing.
-func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.NodeInformer, clusterCIDRs []*net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
+func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.NodeInformer, clusterCIDRs []*net.IPNet, serviceCIDR *net.IPNet, secondaryServiceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
 	if client == nil {
 		klog.Fatalf("kubeClient is nil when starting NodeController")
 	}
@@ -110,6 +110,12 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
 		klog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.")
 	}
 
+	if secondaryServiceCIDR != nil {
+		ra.filterOutServiceRange(secondaryServiceCIDR)
+	} else {
+		klog.V(0).Info("No Secondary Service CIDR provided. Skipping filtering out secondary service addresses.")
+	}
+
 	if nodeList != nil {
 		for _, node := range nodeList.Items {
 			if len(node.Spec.PodCIDRs) == 0 {
@@ -295,6 +301,7 @@ func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) {
 	// serviceCIDR) or vice versa (which means that serviceCIDR contains
 	// clusterCIDR).
 	for idx, cidr := range r.clusterCIDRs {
+		// if they don't overlap then ignore the filtering
 		if !cidr.Contains(serviceCIDR.IP.Mask(cidr.Mask)) && !serviceCIDR.Contains(cidr.IP.Mask(serviceCIDR.Mask)) {
 			continue
 		}
diff --git a/pkg/controller/nodeipam/ipam/range_allocator_test.go b/pkg/controller/nodeipam/ipam/range_allocator_test.go
index 8bcf9de6f7d..8b5d1c1a19c 100644
--- a/pkg/controller/nodeipam/ipam/range_allocator_test.go
+++ b/pkg/controller/nodeipam/ipam/range_allocator_test.go
@@ -60,11 +60,12 @@ func getFakeNodeInformer(fakeNodeHandler *testutil.FakeNodeHandler) coreinformer
 }
 
 type testCase struct {
-	description     string
-	fakeNodeHandler *testutil.FakeNodeHandler
-	clusterCIDRs    []*net.IPNet
-	serviceCIDR     *net.IPNet
-	subNetMaskSize  int
+	description          string
+	fakeNodeHandler      *testutil.FakeNodeHandler
+	clusterCIDRs         []*net.IPNet
+	serviceCIDR          *net.IPNet
+	secondaryServiceCIDR *net.IPNet
+	subNetMaskSize       int
 	// key is index of the cidr allocated
 	expectedAllocatedCIDR map[int]string
 	allocatedCIDRs        map[int][]string
@@ -89,8 +90,9 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
 				_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24")
 				return []*net.IPNet{clusterCIDR}
 			}(),
-			serviceCIDR:    nil,
-			subNetMaskSize: 30,
+			serviceCIDR:          nil,
+			secondaryServiceCIDR: nil,
+			subNetMaskSize:       30,
 			expectedAllocatedCIDR: map[int]string{
 				0: "127.123.234.0/30",
 			},
@@ -115,7 +117,8 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
 				_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26")
 				return serviceCIDR
 			}(),
-			subNetMaskSize: 30,
+			secondaryServiceCIDR: nil,
+			subNetMaskSize:       30,
 			// it should return first /30 CIDR after service range
 			expectedAllocatedCIDR: map[int]string{
 				0: "127.123.234.64/30",
@@ -141,7 +144,8 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
 				_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26")
 				return serviceCIDR
 			}(),
-			subNetMaskSize: 30,
+			secondaryServiceCIDR: nil,
+			subNetMaskSize:       30,
 			allocatedCIDRs: map[int][]string{
 				0: {"127.123.234.64/30", "127.123.234.68/30", "127.123.234.72/30", "127.123.234.80/30"},
 			},
@@ -170,6 +174,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
 				_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26")
 				return serviceCIDR
 			}(),
+			secondaryServiceCIDR: nil,
 		},
 		{
 			description: "Dualstack CIDRs v6,v4",
@@ -192,6 +197,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
 				_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26")
 				return serviceCIDR
 			}(),
+			secondaryServiceCIDR: nil,
 		},
 
 		{
@@ -216,13 +222,14 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
 				_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26")
 				return serviceCIDR
 			}(),
+			secondaryServiceCIDR: nil,
 		},
 	}
 
 	// test function
 	testFunc := func(tc testCase) {
 		// Initialize the range allocator.
-		allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDRs, tc.serviceCIDR, tc.subNetMaskSize, nil)
+		allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDRs, tc.serviceCIDR, tc.secondaryServiceCIDR, tc.subNetMaskSize, nil)
 		if err != nil {
 			t.Errorf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err)
 			return
@@ -298,8 +305,9 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
 				_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
 				return []*net.IPNet{clusterCIDR}
 			}(),
-			serviceCIDR:    nil,
-			subNetMaskSize: 30,
+			serviceCIDR:          nil,
+			secondaryServiceCIDR: nil,
+			subNetMaskSize:       30,
 			allocatedCIDRs: map[int][]string{
 				0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
 			},
@@ -308,7 +316,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
 
 	testFunc := func(tc testCase) {
 		// Initialize the range allocator.
-		allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDRs, tc.serviceCIDR, tc.subNetMaskSize, nil)
+		allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDRs, tc.serviceCIDR, tc.secondaryServiceCIDR, tc.subNetMaskSize, nil)
 		if err != nil {
 			t.Logf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err)
 		}
@@ -369,6 +377,7 @@ type releaseTestCase struct {
 	fakeNodeHandler *testutil.FakeNodeHandler
 	clusterCIDRs    []*net.IPNet
 	serviceCIDR     *net.IPNet
+	secondaryServiceCIDR *net.IPNet
 	subNetMaskSize  int
 	expectedAllocatedCIDRFirstRound  map[int]string
 	expectedAllocatedCIDRSecondRound map[int]string
@@ -394,8 +403,9 @@ func TestReleaseCIDRSuccess(t *testing.T) {
 				_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
 				return []*net.IPNet{clusterCIDR}
 			}(),
-			serviceCIDR:    nil,
-			subNetMaskSize: 30,
+			serviceCIDR:          nil,
+			secondaryServiceCIDR: nil,
+			subNetMaskSize:       30,
 			allocatedCIDRs: map[int][]string{
 				0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
 			},
@@ -423,8 +433,9 @@ func TestReleaseCIDRSuccess(t *testing.T) {
 				_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
 				return []*net.IPNet{clusterCIDR}
 			}(),
-			serviceCIDR:    nil,
-			subNetMaskSize: 30,
+			serviceCIDR:          nil,
+			secondaryServiceCIDR: nil,
+			subNetMaskSize:       30,
 			allocatedCIDRs: map[int][]string{
 				0: {"127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
 			},
@@ -442,7 +453,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
 
 	testFunc := func(tc releaseTestCase) {
 		// Initialize the range allocator.
-		allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDRs, tc.serviceCIDR, tc.subNetMaskSize, nil)
+		allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDRs, tc.serviceCIDR, tc.secondaryServiceCIDR, tc.subNetMaskSize, nil)
 		rangeAllocator, ok := allocator.(*rangeAllocator)
 		if !ok {
 			t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
diff --git a/pkg/controller/nodeipam/node_ipam_controller.go b/pkg/controller/nodeipam/node_ipam_controller.go
index 0540490fed7..48dabd15601 100644
--- a/pkg/controller/nodeipam/node_ipam_controller.go
+++ b/pkg/controller/nodeipam/node_ipam_controller.go
@@ -53,10 +53,11 @@ const (
 type Controller struct {
 	allocatorType ipam.CIDRAllocatorType
 
-	cloud        cloudprovider.Interface
-	clusterCIDRs []*net.IPNet
-	serviceCIDR  *net.IPNet
-	kubeClient   clientset.Interface
+	cloud                cloudprovider.Interface
+	clusterCIDRs         []*net.IPNet
+	serviceCIDR          *net.IPNet
+	secondaryServiceCIDR *net.IPNet
+	kubeClient           clientset.Interface
 	// Method for easy mocking in unittest.
 	lookupIP func(host string) ([]net.IP, error)
@@ -79,6 +80,7 @@ func NewNodeIpamController(
 	kubeClient clientset.Interface,
 	clusterCIDRs []*net.IPNet,
 	serviceCIDR *net.IPNet,
+	secondaryServiceCIDR *net.IPNet,
 	nodeCIDRMaskSize int,
 	allocatorType ipam.CIDRAllocatorType) (*Controller, error) {
 
@@ -119,12 +121,13 @@ func NewNodeIpamController(
 	}
 
 	ic := &Controller{
-		cloud:         cloud,
-		kubeClient:    kubeClient,
-		lookupIP:      net.LookupIP,
-		clusterCIDRs:  clusterCIDRs,
-		serviceCIDR:   serviceCIDR,
-		allocatorType: allocatorType,
+		cloud:                cloud,
+		kubeClient:           kubeClient,
+		lookupIP:             net.LookupIP,
+		clusterCIDRs:         clusterCIDRs,
+		serviceCIDR:          serviceCIDR,
+		secondaryServiceCIDR: secondaryServiceCIDR,
+		allocatorType:        allocatorType,
 	}
 
 	// TODO: Abstract this check into a generic controller manager should run method.
@@ -132,7 +135,7 @@ func NewNodeIpamController(
 		startLegacyIPAM(ic, nodeInformer, cloud, kubeClient, clusterCIDRs, serviceCIDR, nodeCIDRMaskSize)
 	} else {
 		var err error
-		ic.cidrAllocator, err = ipam.New(kubeClient, cloud, nodeInformer, ic.allocatorType, clusterCIDRs, ic.serviceCIDR, nodeCIDRMaskSize)
+		ic.cidrAllocator, err = ipam.New(kubeClient, cloud, nodeInformer, ic.allocatorType, clusterCIDRs, ic.serviceCIDR, ic.secondaryServiceCIDR, nodeCIDRMaskSize)
 		if err != nil {
 			return nil, err
 		}
diff --git a/pkg/controller/nodeipam/node_ipam_controller_test.go b/pkg/controller/nodeipam/node_ipam_controller_test.go
index 49f18a11293..50d0b63a192 100644
--- a/pkg/controller/nodeipam/node_ipam_controller_test.go
+++ b/pkg/controller/nodeipam/node_ipam_controller_test.go
@@ -34,7 +34,7 @@ import (
 	netutils "k8s.io/utils/net"
 )
 
-func newTestNodeIpamController(clusterCIDR []*net.IPNet, serviceCIDR *net.IPNet, nodeCIDRMaskSize int, allocatorType ipam.CIDRAllocatorType) (*Controller, error) {
+func newTestNodeIpamController(clusterCIDR []*net.IPNet, serviceCIDR *net.IPNet, secondaryServiceCIDR *net.IPNet, nodeCIDRMaskSize int, allocatorType ipam.CIDRAllocatorType) (*Controller, error) {
 	clientSet := fake.NewSimpleClientset()
 	fakeNodeHandler := &testutil.FakeNodeHandler{
 		Existing: []*v1.Node{
@@ -53,39 +53,45 @@ func newTestNodeIpamController(clusterCIDR []*net.IPNet, serviceCIDR *net.IPNet,
 	fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues())
 	return NewNodeIpamController(
 		fakeNodeInformer, fakeGCE, clientSet,
-		clusterCIDR, serviceCIDR, nodeCIDRMaskSize, allocatorType,
+		clusterCIDR, serviceCIDR, secondaryServiceCIDR, nodeCIDRMaskSize, allocatorType,
 	)
 }
 
 // TestNewNodeIpamControllerWithCIDRMasks tests if the controller can be
 // created with combinations of network CIDRs and masks.
 func TestNewNodeIpamControllerWithCIDRMasks(t *testing.T) {
+	emptyServiceCIDR := ""
 	for _, tc := range []struct {
-		desc          string
-		clusterCIDR   string
-		serviceCIDR   string
-		maskSize      int
-		allocatorType ipam.CIDRAllocatorType
-		wantFatal     bool
+		desc                 string
+		clusterCIDR          string
+		serviceCIDR          string
+		secondaryServiceCIDR string
+		maskSize             int
+		allocatorType        ipam.CIDRAllocatorType
+		wantFatal            bool
 	}{
-		{"valid_range_allocator", "10.0.0.0/21", "10.1.0.0/21", 24, ipam.RangeAllocatorType, false},
-		{"valid_range_allocator_dualstack", "10.0.0.0/21,2000::/10", "10.1.0.0/21", 24, ipam.RangeAllocatorType, false},
-		{"valid_cloud_allocator", "10.0.0.0/21", "10.1.0.0/21", 24, ipam.CloudAllocatorType, false},
-		{"valid_ipam_from_cluster", "10.0.0.0/21", "10.1.0.0/21", 24, ipam.IPAMFromClusterAllocatorType, false},
-		{"valid_ipam_from_cloud", "10.0.0.0/21", "10.1.0.0/21", 24, ipam.IPAMFromCloudAllocatorType, false},
-		{"valid_skip_cluster_CIDR_validation_for_cloud_allocator", "invalid", "10.1.0.0/21", 24, ipam.CloudAllocatorType, false},
-		{"invalid_cluster_CIDR", "invalid", "10.1.0.0/21", 24, ipam.IPAMFromClusterAllocatorType, true},
-		{"valid_CIDR_smaller_than_mask_cloud_allocator", "10.0.0.0/26", "10.1.0.0/21", 24, ipam.CloudAllocatorType, false},
-		{"invalid_CIDR_smaller_than_mask_other_allocators", "10.0.0.0/26", "10.1.0.0/21", 24, ipam.IPAMFromCloudAllocatorType, true},
-		{"invalid_serviceCIDR_contains_clusterCIDR", "10.0.0.0/23", "10.0.0.0/21", 24, ipam.IPAMFromClusterAllocatorType, true},
+		{"valid_range_allocator", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, 24, ipam.RangeAllocatorType, false},
+
+		{"valid_range_allocator_dualstack", "10.0.0.0/21,2000::/10", "10.1.0.0/21", emptyServiceCIDR, 24, ipam.RangeAllocatorType, false},
+		{"valid_range_allocator_dualstack_dualstackservice", "10.0.0.0/21,2000::/10", "10.1.0.0/21", "3000::/10", 24, ipam.RangeAllocatorType, false},
+
+		{"valid_cloud_allocator", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, 24, ipam.CloudAllocatorType, false},
+		{"valid_ipam_from_cluster", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, 24, ipam.IPAMFromClusterAllocatorType, false},
+		{"valid_ipam_from_cloud", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, 24, ipam.IPAMFromCloudAllocatorType, false},
+		{"valid_skip_cluster_CIDR_validation_for_cloud_allocator", "invalid", "10.1.0.0/21", emptyServiceCIDR, 24, ipam.CloudAllocatorType, false},
+		{"invalid_cluster_CIDR", "invalid", "10.1.0.0/21", emptyServiceCIDR, 24, ipam.IPAMFromClusterAllocatorType, true},
+		{"valid_CIDR_smaller_than_mask_cloud_allocator", "10.0.0.0/26", "10.1.0.0/21", emptyServiceCIDR, 24, ipam.CloudAllocatorType, false},
+		{"invalid_CIDR_smaller_than_mask_other_allocators", "10.0.0.0/26", "10.1.0.0/21", emptyServiceCIDR, 24, ipam.IPAMFromCloudAllocatorType, true},
+		{"invalid_serviceCIDR_contains_clusterCIDR", "10.0.0.0/23", "10.0.0.0/21", emptyServiceCIDR, 24, ipam.IPAMFromClusterAllocatorType, true},
 	} {
 		t.Run(tc.desc, func(t *testing.T) {
 			clusterCidrs, _ := netutils.ParseCIDRs(strings.Split(tc.clusterCIDR, ","))
 			_, serviceCIDRIpNet, _ := net.ParseCIDR(tc.serviceCIDR)
+			_, secondaryServiceCIDRIpNet, _ := net.ParseCIDR(tc.secondaryServiceCIDR)
 			if os.Getenv("EXIT_ON_FATAL") == "1" {
 				// This is the subprocess which runs the actual code.
-				newTestNodeIpamController(clusterCidrs, serviceCIDRIpNet, tc.maskSize, tc.allocatorType)
+				newTestNodeIpamController(clusterCidrs, serviceCIDRIpNet, secondaryServiceCIDRIpNet, tc.maskSize, tc.allocatorType)
 				return
 			}
 			// This is the host process that monitors the exit code of the subprocess.
diff --git a/staging/src/k8s.io/kube-controller-manager/config/v1alpha1/types.go b/staging/src/k8s.io/kube-controller-manager/config/v1alpha1/types.go
index db869f34b6d..dee7bb15693 100644
--- a/staging/src/k8s.io/kube-controller-manager/config/v1alpha1/types.go
+++ b/staging/src/k8s.io/kube-controller-manager/config/v1alpha1/types.go
@@ -361,6 +361,8 @@ type NamespaceControllerConfiguration struct {
 type NodeIPAMControllerConfiguration struct {
 	// serviceCIDR is CIDR Range for Services in cluster.
 	ServiceCIDR string
+	// secondaryServiceCIDR is CIDR Range for Services in cluster. This is used in dual stack clusters. SecondaryServiceCIDR must be of a different IP family than ServiceCIDR.
+	SecondaryServiceCIDR string
 	// NodeCIDRMaskSize is the mask size for node cidr in cluster.
 	NodeCIDRMaskSize int32
 }
diff --git a/test/integration/ipamperf/ipam_test.go b/test/integration/ipamperf/ipam_test.go
index 09d69f44aa6..7b9bbd236cc 100644
--- a/test/integration/ipamperf/ipam_test.go
+++ b/test/integration/ipamperf/ipam_test.go
@@ -52,7 +52,7 @@ func setupAllocator(apiURL string, config *Config, clusterCIDR, serviceCIDR *net
 	sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour)
 	ipamController, err := nodeipam.NewNodeIpamController(
 		sharedInformer.Core().V1().Nodes(), config.Cloud, clientSet,
-		[]*net.IPNet{clusterCIDR}, serviceCIDR, subnetMaskSize, config.AllocatorType,
+		[]*net.IPNet{clusterCIDR}, serviceCIDR, nil, subnetMaskSize, config.AllocatorType,
	)
 	if err != nil {
 		return nil, shutdownFunc, err
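
Note (not part of the patch itself): the net effect of the changes above is that --service-cluster-ip-range now accepts up to two comma-separated CIDRs, the first becoming ServiceCIDR and the second SecondaryServiceCIDR, and a two-entry value is only accepted when the CIDRs come from different IP families. The standalone Go sketch below mimics that validation using only the standard library; isDualStackPair here is a hypothetical stand-in for netutils.IsDualStackCIDRs (the real helper also handles longer lists and errors on unparsable input), so read it as a sketch of the rule rather than the actual implementation.

package main

import (
	"fmt"
	"net"
	"strings"
)

// isDualStackPair reports whether exactly one of the two CIDRs is IPv4,
// i.e. the pair spans two IP families (hypothetical stand-in for
// netutils.IsDualStackCIDRs).
func isDualStackPair(a, b *net.IPNet) bool {
	return (a.IP.To4() != nil) != (b.IP.To4() != nil)
}

func main() {
	// A value as it might be passed to --service-cluster-ip-range.
	flagValue := "10.1.0.0/21,3000::/108"

	entries := strings.Split(flagValue, ",")
	if len(entries) > 2 {
		fmt.Println("error: --service-cluster-ip-range cannot contain more than two entries")
		return
	}

	var cidrs []*net.IPNet
	for _, e := range entries {
		_, ipNet, err := net.ParseCIDR(strings.TrimSpace(e))
		if err != nil {
			fmt.Printf("error: unsuccessful parsing of service CIDR %v: %v\n", e, err)
			return
		}
		cidrs = append(cidrs, ipNet)
	}

	// Mirrors the core.go check: a secondary service CIDR is only valid
	// when the pair is dual-stack (one IPv4 range, one IPv6 range).
	if len(cidrs) == 2 && !isDualStackPair(cidrs[0], cidrs[1]) {
		fmt.Println("error: serviceCIDR and secondaryServiceCIDR are not dualstack (from different IP families)")
		return
	}

	fmt.Println("primary service CIDR:", cidrs[0])
	if len(cidrs) == 2 {
		fmt.Println("secondary service CIDR:", cidrs[1])
	}
}

Run as-is it accepts the v4/v6 pair; changing flagValue to "10.1.0.0/21,10.2.0.0/21" trips the same-family rejection, matching the error path added to startNodeIpamController (which additionally requires the IPv6DualStack feature gate to be enabled).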