From 4ff80864e1c6d9b8ec6ddac2110c357436973e0a Mon Sep 17 00:00:00 2001
From: Antonio Ojea
Date: Thu, 25 Aug 2022 07:42:52 +0200
Subject: [PATCH] service cidr controller manager

Controls the lifecycle of the ServiceCIDRs, adding a finalizer and
setting the Ready condition in status when they are created, and
removing the finalizer once it is safe to do so (no orphan
IPAddresses remain).

An IPAddress is orphaned if there is no ServiceCIDR containing it.

Change-Id: Icbe31e1ed8525fa04df3b741c8a817e5f2a49e80
---
 .../app/controllermanager.go                  |   3 +
 cmd/kube-controller-manager/app/networking.go |  38 ++
 .../names/controller_names.go                 |   1 +
 .../servicecidrs/servicecidrs_controller.go   | 566 +++++++++++++++++
 .../servicecidrs_controller_test.go           | 580 ++++++++++++++++++
 .../rbac/bootstrappolicy/controller_policy.go |  10 +
 .../testdata/controller-role-bindings.yaml    |  17 +
 .../testdata/controller-roles.yaml            |  51 ++
 8 files changed, 1266 insertions(+)
 create mode 100644 cmd/kube-controller-manager/app/networking.go
 create mode 100644 pkg/controller/servicecidrs/servicecidrs_controller.go
 create mode 100644 pkg/controller/servicecidrs/servicecidrs_controller_test.go

diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go
index bc0121d4149..c50d702fdc1 100644
--- a/cmd/kube-controller-manager/app/controllermanager.go
+++ b/cmd/kube-controller-manager/app/controllermanager.go
@@ -563,6 +563,9 @@ func NewControllerDescriptors() map[string]*ControllerDescriptor {
 			panic(fmt.Sprintf("alias %q conflicts with a controller name", alias))
 		}
 	}
+	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MultiCIDRServiceAllocator) {
+		register(names.ServiceCIDRController, startServiceCIDRsController)
+	}
 	return controllers
 }
 
diff --git a/cmd/kube-controller-manager/app/networking.go b/cmd/kube-controller-manager/app/networking.go
new file mode 100644
index 00000000000..2c2fa12dd34
--- /dev/null
+++ b/cmd/kube-controller-manager/app/networking.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package app implements a server that runs a set of active
+// components. This includes replication controllers, service endpoints and
+// nodes.
+package app + +import ( + "context" + + "k8s.io/controller-manager/controller" + "k8s.io/kubernetes/pkg/controller/servicecidrs" +) + +func startServiceCIDRsController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) { + go servicecidrs.NewController( + controllerContext.InformerFactory.Networking().V1alpha1().ServiceCIDRs(), + controllerContext.InformerFactory.Networking().V1alpha1().IPAddresses(), + controllerContext.ClientBuilder.ClientOrDie("service-cidrs-controller"), + ).Run(ctx, 5) + // TODO use component config + return nil, true, nil + +} diff --git a/cmd/kube-controller-manager/names/controller_names.go b/cmd/kube-controller-manager/names/controller_names.go index fed6d887a86..3418c41b46d 100644 --- a/cmd/kube-controller-manager/names/controller_names.go +++ b/cmd/kube-controller-manager/names/controller_names.go @@ -82,4 +82,5 @@ const ( ResourceClaimController = "resourceclaim-controller" LegacyServiceAccountTokenCleanerController = "legacy-serviceaccount-token-cleaner-controller" ValidatingAdmissionPolicyStatusController = "validatingadmissionpolicy-status-controller" + ServiceCIDRController = "service-cidr-controller" ) diff --git a/pkg/controller/servicecidrs/servicecidrs_controller.go b/pkg/controller/servicecidrs/servicecidrs_controller.go new file mode 100644 index 00000000000..24191adcf6b --- /dev/null +++ b/pkg/controller/servicecidrs/servicecidrs_controller.go @@ -0,0 +1,566 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package servicecidrs + +import ( + "context" + "encoding/json" + "net/netip" + "sync" + "time" + + v1 "k8s.io/api/core/v1" + networkingapiv1alpha1 "k8s.io/api/networking/v1alpha1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + metav1apply "k8s.io/client-go/applyconfigurations/meta/v1" + networkingapiv1alpha1apply "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + networkinginformers "k8s.io/client-go/informers/networking/v1alpha1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + networkinglisters "k8s.io/client-go/listers/networking/v1alpha1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" + "k8s.io/kubernetes/pkg/util/iptree" +) + +const ( + // maxRetries is the max number of times a service object will be retried before it is dropped out of the queue. + // With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the + // sequence of delays between successive queuings of a service. 
+ // + // 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s + maxRetries = 15 + controllerName = "service-cidr-controller" + + ServiceCIDRProtectionFinalizer = "networking.k8s.io/service-cidr-finalizer" +) + +// NewController returns a new *Controller. +func NewController( + serviceCIDRInformer networkinginformers.ServiceCIDRInformer, + ipAddressInformer networkinginformers.IPAddressInformer, + client clientset.Interface, +) *Controller { + broadcaster := record.NewBroadcaster() + recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerName}) + c := &Controller{ + client: client, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ipaddresses"), + tree: iptree.New[sets.Set[string]](), + workerLoopPeriod: time.Second, + } + + _, _ = serviceCIDRInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.addServiceCIDR, + UpdateFunc: c.updateServiceCIDR, + DeleteFunc: c.deleteServiceCIDR, + }) + c.serviceCIDRLister = serviceCIDRInformer.Lister() + c.serviceCIDRsSynced = serviceCIDRInformer.Informer().HasSynced + + // IPAddresses can only block the deletion of the ServiceCIDR that contains it + _, _ = ipAddressInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.addIPAddress, + DeleteFunc: c.deleteIPAddress, + }) + + c.ipAddressLister = ipAddressInformer.Lister() + c.ipAddressSynced = ipAddressInformer.Informer().HasSynced + + c.eventBroadcaster = broadcaster + c.eventRecorder = recorder + + return c +} + +// Controller manages selector-based service ipAddress. +type Controller struct { + client clientset.Interface + eventBroadcaster record.EventBroadcaster + eventRecorder record.EventRecorder + + serviceCIDRLister networkinglisters.ServiceCIDRLister + serviceCIDRsSynced cache.InformerSynced + + ipAddressLister networkinglisters.IPAddressLister + ipAddressSynced cache.InformerSynced + + queue workqueue.RateLimitingInterface + + // workerLoopPeriod is the time between worker runs. The workers process the queue of service and ipRange changes. + workerLoopPeriod time.Duration + + // tree store the ServiceCIDRs names associated to each + muTree sync.Mutex + tree *iptree.Tree[sets.Set[string]] +} + +// Run will not return until stopCh is closed. 
+func (c *Controller) Run(ctx context.Context, workers int) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + c.eventBroadcaster.StartStructuredLogging(0) + c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.client.CoreV1().Events("")}) + defer c.eventBroadcaster.Shutdown() + + logger := klog.FromContext(ctx) + + logger.Info("Starting", "controller", controllerName) + defer logger.Info("Shutting down", "controller", controllerName) + + if !cache.WaitForNamedCacheSync(controllerName, ctx.Done(), c.serviceCIDRsSynced, c.ipAddressSynced) { + return + } + + for i := 0; i < workers; i++ { + go wait.UntilWithContext(ctx, c.worker, c.workerLoopPeriod) + } + <-ctx.Done() +} + +func (c *Controller) addServiceCIDR(obj interface{}) { + cidr, ok := obj.(*networkingapiv1alpha1.ServiceCIDR) + if !ok { + return + } + c.queue.Add(cidr.Name) + for _, key := range c.overlappingServiceCIDRs(cidr) { + c.queue.Add(key) + } +} + +func (c *Controller) updateServiceCIDR(oldObj, obj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err == nil { + c.queue.Add(key) + } +} + +// deleteServiceCIDR +func (c *Controller) deleteServiceCIDR(obj interface{}) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err == nil { + c.queue.Add(key) + } +} + +// addIPAddress may block a ServiceCIDR deletion +func (c *Controller) addIPAddress(obj interface{}) { + ip, ok := obj.(*networkingapiv1alpha1.IPAddress) + if !ok { + return + } + + for _, cidr := range c.containingServiceCIDRs(ip) { + c.queue.Add(cidr) + } +} + +// deleteIPAddress may unblock a ServiceCIDR deletion +func (c *Controller) deleteIPAddress(obj interface{}) { + ip, ok := obj.(*networkingapiv1alpha1.IPAddress) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + return + } + ip, ok = tombstone.Obj.(*networkingapiv1alpha1.IPAddress) + if !ok { + return + } + } + + for _, cidr := range c.containingServiceCIDRs(ip) { + c.queue.Add(cidr) + } +} + +// overlappingServiceCIDRs, given a ServiceCIDR return the ServiceCIDRs that contain or are contained, +// this is required because adding or removing a CIDR will require to recompute the +// state of each ServiceCIDR to check if can be unblocked on deletion. +func (c *Controller) overlappingServiceCIDRs(cidr *networkingapiv1alpha1.ServiceCIDR) []string { + c.muTree.Lock() + defer c.muTree.Unlock() + + serviceCIDRs := sets.New[string]() + if prefix, err := netip.ParsePrefix(cidr.Spec.IPv4); err == nil { // if is empty err will not be nil + c.tree.WalkPath(prefix, func(k netip.Prefix, v sets.Set[string]) bool { + serviceCIDRs.Insert(v.UnsortedList()...) + return false + }) + c.tree.WalkPrefix(prefix, func(k netip.Prefix, v sets.Set[string]) bool { + serviceCIDRs.Insert(v.UnsortedList()...) + return false + }) + } + if prefix, err := netip.ParsePrefix(cidr.Spec.IPv6); err == nil { // if is empty err will not be nil + c.tree.WalkPath(prefix, func(k netip.Prefix, v sets.Set[string]) bool { + serviceCIDRs.Insert(v.UnsortedList()...) + return false + }) + c.tree.WalkPrefix(prefix, func(k netip.Prefix, v sets.Set[string]) bool { + serviceCIDRs.Insert(v.UnsortedList()...) + return false + }) + } + return serviceCIDRs.UnsortedList() +} + +// containingServiceCIDRs, given an IPAddress return the ServiceCIDRs that contains the IP, +// as it may block or be blocking the deletion of the ServiceCIDRs that contain it. 
+func (c *Controller) containingServiceCIDRs(ip *networkingapiv1alpha1.IPAddress) []string { + // only process IPs managed by the kube-apiserver + managedBy, ok := ip.Labels[networkingapiv1alpha1.LabelManagedBy] + if !ok || managedBy != ipallocator.ControllerName { + return []string{} + } + + address, err := netip.ParseAddr(ip.Name) + if err != nil { + // This should not happen, the IPAddress object validates + // the name is a valid IPAddress + return []string{} + } + + c.muTree.Lock() + defer c.muTree.Unlock() + serviceCIDRs := []string{} + // walk the tree to get all the ServiceCIDRs that contain this IP address + prefixes := c.tree.GetHostIPPrefixMatches(address) + for _, v := range prefixes { + serviceCIDRs = append(serviceCIDRs, v.UnsortedList()...) + } + + return serviceCIDRs +} + +func (c *Controller) worker(ctx context.Context) { + for c.processNext(ctx) { + } +} + +func (c *Controller) processNext(ctx context.Context) bool { + eKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(eKey) + + key := eKey.(string) + err := c.sync(ctx, key) + if err == nil { + c.queue.Forget(key) + return true + } + logger := klog.FromContext(ctx) + if c.queue.NumRequeues(key) < maxRetries { + logger.V(2).Info("Error syncing ServiceCIDR, retrying", "ServiceCIDR", key, "err", err) + c.queue.AddRateLimited(key) + } else { + logger.Info("Dropping ServiceCIDR out of the queue", "ServiceCIDR", key, "err", err) + c.queue.Forget(key) + utilruntime.HandleError(err) + } + return true +} + +// syncCIDRs rebuilds the radix tree based from the informers cache +func (c *Controller) syncCIDRs() error { + cidrList, err := c.serviceCIDRLister.List(labels.Everything()) + if err != nil { + return err + } + + // track the names of the different ServiceCIDRs, there + // can be multiple ServiceCIDRs sharing the same prefixes + // and this is important to determine if a ServiceCIDR can + // be deleted. + tree := iptree.New[sets.Set[string]]() + for _, cidr := range cidrList { + if prefix, err := netip.ParsePrefix(cidr.Spec.IPv4); err == nil { // if is empty err will not be nil + // if the prefix already exist append the new ServiceCIDR name + v, ok := tree.GetPrefix(prefix) + if !ok { + v = sets.Set[string]{} + } + v.Insert(cidr.Name) + tree.InsertPrefix(prefix, v) + } + if prefix, err := netip.ParsePrefix(cidr.Spec.IPv6); err == nil { // if is empty err will not be nil + // if the prefix already exist append the new ServiceCIDR name + v, ok := tree.GetPrefix(prefix) + if !ok { + v = sets.Set[string]{} + } + v.Insert(cidr.Name) + tree.InsertPrefix(prefix, v) + } + } + c.muTree.Lock() + defer c.muTree.Unlock() + c.tree = tree + return nil +} + +func (c *Controller) sync(ctx context.Context, key string) error { + logger := klog.FromContext(ctx) + startTime := time.Now() + defer func() { + logger.V(4).Info("Finished syncing ServiceCIDR)", "ServiceCIDR", key, "elapsed", time.Since(startTime)) + }() + + // TODO(aojea) verify if this present a performance problem + // restore the radix tree from the current state + err := c.syncCIDRs() + if err != nil { + return err + } + + logger.V(4).Info("syncing ServiceCIDR", "ServiceCIDR", key) + cidr, err := c.serviceCIDRLister.Get(key) + if err != nil { + if apierrors.IsNotFound(err) { + logger.V(4).Info("ServiceCIDR no longer exist", "ServiceCIDR", key) + return nil + } + return err + } + + // Deleting .... 
+ if !cidr.GetDeletionTimestamp().IsZero() { + // check if the existing ServiceCIDR can be deleted before removing the finalizer + ok, err := c.canDeleteCIDR(ctx, cidr) + if err != nil { + return err + } + // if there are no IPAddress depending on this ServiceCIDR is safe to remove it + if ok { + return c.removeServiceCIDRFinalizerIfNeeded(ctx, cidr) + } + // update the status to indicate why the ServiceCIDR can not be deleted + svcApplyStatus := networkingapiv1alpha1apply.ServiceCIDRStatus().WithConditions( + metav1apply.Condition(). + WithType(networkingapiv1alpha1.ServiceCIDRConditionReady). + WithStatus(metav1.ConditionFalse). + WithReason(networkingapiv1alpha1.ServiceCIDRReasonTerminating). + WithMessage("There are still IPAddresses referencing the ServiceCIDR, please remove them or create a new ServiceCIDR"). + WithLastTransitionTime(metav1.Now())) + svcApply := networkingapiv1alpha1apply.ServiceCIDR(cidr.Name).WithStatus(svcApplyStatus) + _, err = c.client.NetworkingV1alpha1().ServiceCIDRs().ApplyStatus(ctx, svcApply, metav1.ApplyOptions{FieldManager: controllerName, Force: true}) + return err + } + + // Created or Updated, the ServiceCIDR must have a finalizer. + err = c.addServiceCIDRFinalizerIfNeeded(ctx, cidr) + if err != nil { + return err + } + + // Set Ready condition to True. + svcApplyStatus := networkingapiv1alpha1apply.ServiceCIDRStatus().WithConditions( + metav1apply.Condition(). + WithType(networkingapiv1alpha1.ServiceCIDRConditionReady). + WithStatus(metav1.ConditionTrue). + WithMessage("Kubernetes Service CIDR is ready"). + WithLastTransitionTime(metav1.Now())) + svcApply := networkingapiv1alpha1apply.ServiceCIDR(cidr.Name).WithStatus(svcApplyStatus) + if _, err := c.client.NetworkingV1alpha1().ServiceCIDRs().ApplyStatus(ctx, svcApply, metav1.ApplyOptions{FieldManager: controllerName, Force: true}); err != nil { + logger.Info("error updating default ServiceCIDR status", "error", err) + c.eventRecorder.Eventf(cidr, v1.EventTypeWarning, "KubernetesServiceCIDRError", "The ServiceCIDR Status can not be set to Ready=True") + return err + } + + return nil +} + +// canDeleteCIDR checks that the ServiceCIDR can be safely deleted and not leave orphan IPAddresses +func (c *Controller) canDeleteCIDR(ctx context.Context, cidr *networkingapiv1alpha1.ServiceCIDR) (bool, error) { + // TODO(aojea) Revisit the lock usage and if we need to keep it only for the tree operations + // to avoid holding it during the whole operation. + c.muTree.Lock() + defer c.muTree.Unlock() + logger := klog.FromContext(ctx) + + // Check if there is a subnet that already contains the ServiceCIDR that is going to be deleted. + hasParentV4 := true + hasParentV6 := true + // Walk the tree to find if there is a larger subnet that contains the existing one, + // or there is another ServiceCIDR with the same subnet. + if prefix, err := netip.ParsePrefix(cidr.Spec.IPv4); err == nil { + serviceCIDRs := sets.New[string]() + c.tree.WalkPath(prefix, func(k netip.Prefix, v sets.Set[string]) bool { + serviceCIDRs.Insert(v.UnsortedList()...) + return false + }) + if serviceCIDRs.Len() == 1 && serviceCIDRs.Has(cidr.Name) { + hasParentV4 = false + } + } + if prefix, err := netip.ParsePrefix(cidr.Spec.IPv6); err == nil { + serviceCIDRs := sets.New[string]() + c.tree.WalkPath(prefix, func(k netip.Prefix, v sets.Set[string]) bool { + serviceCIDRs.Insert(v.UnsortedList()...) 
+ return false + }) + if serviceCIDRs.Len() == 1 && serviceCIDRs.Has(cidr.Name) { + hasParentV6 = false + } + } + + // All the existing IP addresses will be contained on the parent ServiceCIDRs, + // it is safe to delete, remove the finalizer. + if hasParentV4 && hasParentV6 { + logger.V(2).Info("Removing finalizer for ServiceCIDR", "ServiceCIDR", cidr.String()) + return true, nil + } + + // TODO: optimize this + // Since current ServiceCIDR does not have another ServiceCIDR containing it, + // verify there are no existing IPAddresses referencing it that will be orphan. + if cidr.Spec.IPv4 != "" && !hasParentV4 { + // get all the IPv4 addresses + ipLabelSelector := labels.Set(map[string]string{ + networkingapiv1alpha1.LabelIPAddressFamily: string(v1.IPv4Protocol), + networkingapiv1alpha1.LabelManagedBy: ipallocator.ControllerName, + }).AsSelectorPreValidated() + ips, err := c.ipAddressLister.List(ipLabelSelector) + if err != nil { + return false, err + } + for _, ip := range ips { + // if the longest prefix match is the ServiceCIDR to be deleted + // and is the only existing one, at least one IPAddress will be + // orphan, block the ServiceCIDR deletion. + address, err := netip.ParseAddr(ip.Name) + if err != nil { + // the IPAddress object validates that the name is a valid IPAddress + logger.Info("[SHOULD NOT HAPPEN] unexpected error parsing IPAddress", "IPAddress", ip.Name, "error", err) + continue + } + // walk the tree to find all ServiceCIDRs containing this IP + prefixes := c.tree.GetHostIPPrefixMatches(address) + if len(prefixes) != 1 { + continue + } + for _, v := range prefixes { + if v.Len() == 1 && v.Has(cidr.Name) { + return false, nil + } + } + } + + if cidr.Spec.IPv6 != "" && !hasParentV6 { + ipLabelSelector := labels.Set(map[string]string{ + networkingapiv1alpha1.LabelIPAddressFamily: string(v1.IPv6Protocol), + networkingapiv1alpha1.LabelManagedBy: ipallocator.ControllerName, + }).AsSelectorPreValidated() + ips, err := c.ipAddressLister.List(ipLabelSelector) + if err != nil { + return false, err + } + for _, ip := range ips { + // if the longest prefix match is the ServiceCIDR to be deleted + // and is the only existing one, at least one IPAddress will be + // orphan, block the ServiceCIDR deletion. + address, err := netip.ParseAddr(ip.Name) + if err != nil { + // the IPAddress object validates that the name is a valid IPAddress + logger.Info("[SHOULD NOT HAPPEN] unexpected error parsing IPAddress", "IPAddress", ip.Name, "error", err) + continue + } + // walk the tree to find all ServiceCIDRs containing this IP + prefixes := c.tree.GetHostIPPrefixMatches(address) + if len(prefixes) != 1 { + continue + } + for _, v := range prefixes { + if v.Len() == 1 && v.Has(cidr.Name) { + return false, nil + } + } + } + } + } + // There are no IPAddresses that depend on the existing ServiceCIDR, so + // it is safe to delete, remove finalizer. 
+ logger.Info("ServiceCIDR no longer have orphan IPs", "ServiceCDIR", cidr.String()) + return true, nil +} + +func (c *Controller) addServiceCIDRFinalizerIfNeeded(ctx context.Context, cidr *networkingapiv1alpha1.ServiceCIDR) error { + for _, f := range cidr.GetFinalizers() { + if f == ServiceCIDRProtectionFinalizer { + return nil + } + } + + patch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "finalizers": []string{ServiceCIDRProtectionFinalizer}, + }, + } + patchBytes, err := json.Marshal(patch) + if err != nil { + return err + } + _, err = c.client.NetworkingV1alpha1().ServiceCIDRs().Patch(ctx, cidr.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + klog.FromContext(ctx).V(4).Info("Added protection finalizer to ServiceCIDR", "ServiceCIDR", cidr.Name) + return nil + +} + +func (c *Controller) removeServiceCIDRFinalizerIfNeeded(ctx context.Context, cidr *networkingapiv1alpha1.ServiceCIDR) error { + found := false + for _, f := range cidr.GetFinalizers() { + if f == ServiceCIDRProtectionFinalizer { + found = true + break + } + } + if !found { + return nil + } + patch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "$deleteFromPrimitiveList/finalizers": []string{ServiceCIDRProtectionFinalizer}, + }, + } + patchBytes, err := json.Marshal(patch) + if err != nil { + return err + } + _, err = c.client.NetworkingV1alpha1().ServiceCIDRs().Patch(ctx, cidr.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + klog.FromContext(ctx).V(4).Info("Removed protection finalizer from ServiceCIDRs", "ServiceCIDR", cidr.Name) + return nil +} diff --git a/pkg/controller/servicecidrs/servicecidrs_controller_test.go b/pkg/controller/servicecidrs/servicecidrs_controller_test.go new file mode 100644 index 00000000000..5a3d9a03355 --- /dev/null +++ b/pkg/controller/servicecidrs/servicecidrs_controller_test.go @@ -0,0 +1,580 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package servicecidrs + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + v1 "k8s.io/api/core/v1" + networkingapiv1alpha1 "k8s.io/api/networking/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + k8stesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/controlplane/controller/defaultservicecidr" + "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" + netutils "k8s.io/utils/net" +) + +type testController struct { + *Controller + servicecidrsStore cache.Store + ipaddressesStore cache.Store +} + +func newController(t *testing.T, cidrs []*networkingapiv1alpha1.ServiceCIDR, ips []*networkingapiv1alpha1.IPAddress) (*fake.Clientset, *testController) { + client := fake.NewSimpleClientset() + + informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) + + serviceCIDRInformer := informerFactory.Networking().V1alpha1().ServiceCIDRs() + cidrStore := serviceCIDRInformer.Informer().GetStore() + for _, obj := range cidrs { + err := cidrStore.Add(obj) + if err != nil { + t.Fatal(err) + } + } + ipAddressInformer := informerFactory.Networking().V1alpha1().IPAddresses() + ipStore := ipAddressInformer.Informer().GetStore() + for _, obj := range ips { + err := ipStore.Add(obj) + if err != nil { + t.Fatal(err) + } + } + controller := NewController( + serviceCIDRInformer, + ipAddressInformer, + client) + + var alwaysReady = func() bool { return true } + controller.serviceCIDRsSynced = alwaysReady + controller.ipAddressSynced = alwaysReady + + return client, &testController{ + controller, + cidrStore, + ipStore, + } +} + +func TestControllerSync(t *testing.T) { + deletingServiceCIDR := makeServiceCIDR("deleting-cidr", "192.168.0.0/24", "2001:db2::/64") + now := metav1.Now() + deletingServiceCIDR.Finalizers = []string{ServiceCIDRProtectionFinalizer} + deletingServiceCIDR.DeletionTimestamp = &now + + testCases := []struct { + name string + cidrs []*networkingapiv1alpha1.ServiceCIDR + ips []*networkingapiv1alpha1.IPAddress + cidrSynced string + actions [][]string // verb and resource and subresource + }{ + { + name: "no existing service CIDRs", + }, + { + name: "default service CIDR must have finalizer", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + }, + cidrSynced: defaultservicecidr.DefaultServiceCIDRName, + actions: [][]string{{"patch", "servicecidrs", ""}, {"patch", "servicecidrs", "status"}}, + }, + { + name: "service CIDR must have finalizer", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR("no-finalizer", "192.168.0.0/24", "2001:db2::/64"), + }, + cidrSynced: "no-finalizer", + actions: [][]string{{"patch", "servicecidrs", ""}, {"patch", "servicecidrs", "status"}}, + }, + { + name: "service CIDR being deleted must remove the finalizer", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + deletingServiceCIDR, + }, + cidrSynced: deletingServiceCIDR.Name, + actions: [][]string{{"patch", "servicecidrs", ""}}, + }, + { + name: "service CIDR being deleted with IPv4 addresses should update the status", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + deletingServiceCIDR, + }, + ips: []*networkingapiv1alpha1.IPAddress{ + makeIPAddress("192.168.0.1"), + }, + cidrSynced: deletingServiceCIDR.Name, + actions: [][]string{{"patch", "servicecidrs", 
"status"}}, + }, + { + name: "service CIDR being deleted and overlapping same range and IPv4 addresses should remove the finalizer", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + deletingServiceCIDR, + makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"), + }, + ips: []*networkingapiv1alpha1.IPAddress{ + makeIPAddress("192.168.0.1"), + }, + cidrSynced: deletingServiceCIDR.Name, + actions: [][]string{{"patch", "servicecidrs", ""}}, + }, + { + name: "service CIDR being deleted and overlapping and IPv4 addresses should remove the finalizer", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + deletingServiceCIDR, + makeServiceCIDR("overlapping", "192.168.0.0/16", "2001:db2::/64"), + }, + ips: []*networkingapiv1alpha1.IPAddress{ + makeIPAddress("192.168.0.1"), + }, + cidrSynced: deletingServiceCIDR.Name, + actions: [][]string{{"patch", "servicecidrs", ""}}, + }, + { + name: "service CIDR being deleted and not overlapping and IPv4 addresses should update the status", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + deletingServiceCIDR, + makeServiceCIDR("overlapping", "192.168.255.0/26", "2001:db2::/64"), + }, + ips: []*networkingapiv1alpha1.IPAddress{ + makeIPAddress("192.168.0.1"), + }, + cidrSynced: deletingServiceCIDR.Name, + actions: [][]string{{"patch", "servicecidrs", "status"}}, + }, + + { + name: "service CIDR being deleted with IPv6 addresses should update the status", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + deletingServiceCIDR, + }, + ips: []*networkingapiv1alpha1.IPAddress{ + makeIPAddress("2001:db2::1"), + }, + cidrSynced: deletingServiceCIDR.Name, + actions: [][]string{{"patch", "servicecidrs", "status"}}, + }, + { + name: "service CIDR being deleted and overlapping same range and IPv6 addresses should remove the finalizer", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + deletingServiceCIDR, + makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"), + }, + ips: []*networkingapiv1alpha1.IPAddress{ + makeIPAddress("2001:db2::1"), + }, + cidrSynced: deletingServiceCIDR.Name, + actions: [][]string{{"patch", "servicecidrs", ""}}, + }, + { + name: "service CIDR being deleted and overlapping and IPv6 addresses should remove the finalizer", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + deletingServiceCIDR, + makeServiceCIDR("overlapping", "192.168.0.0/16", "2001:db2::/48"), + }, + ips: []*networkingapiv1alpha1.IPAddress{ + makeIPAddress("2001:db2::1"), + }, + cidrSynced: deletingServiceCIDR.Name, + actions: [][]string{{"patch", "servicecidrs", ""}}, + }, + { + name: "service CIDR being deleted and not overlapping and IPv6 addresses should update the status", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + deletingServiceCIDR, + makeServiceCIDR("overlapping", "192.168.255.0/26", "2001:db2:a:b::/64"), + }, + ips: []*networkingapiv1alpha1.IPAddress{ + makeIPAddress("2001:db2::1"), + }, + cidrSynced: deletingServiceCIDR.Name, + actions: [][]string{{"patch", "servicecidrs", "status"}}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + client, controller := newController(t, tc.cidrs, tc.ips) + // server side apply does not play well with fake client go + // so we skup the errors and only assert on the actions + // https://github.com/kubernetes/kubernetes/issues/99953 + _ = controller.sync(context.Background(), tc.cidrSynced) + expectAction(t, client.Actions(), tc.actions) + + }) + } +} + +func makeServiceCIDR(name, ipv4, ipv6 string) *networkingapiv1alpha1.ServiceCIDR { + serviceCIDR := &networkingapiv1alpha1.ServiceCIDR{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: networkingapiv1alpha1.ServiceCIDRSpec{ + IPv4: ipv4, + IPv6: ipv6, + }, + } + return serviceCIDR +} + +func makeIPAddress(name string) *networkingapiv1alpha1.IPAddress { + family := string(v1.IPv4Protocol) + if netutils.IsIPv6String(name) { + family = string(v1.IPv6Protocol) + } + return &networkingapiv1alpha1.IPAddress{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + networkingapiv1alpha1.LabelIPAddressFamily: family, + networkingapiv1alpha1.LabelManagedBy: ipallocator.ControllerName, + }, + }, + } +} + +func expectAction(t *testing.T, actions []k8stesting.Action, expected [][]string) { + t.Helper() + if len(actions) != len(expected) { + t.Fatalf("Expected at least %d actions, got %d \ndiff: %v", len(expected), len(actions), cmp.Diff(expected, actions)) + } + + for i, action := range actions { + verb := expected[i][0] + if action.GetVerb() != verb { + t.Errorf("Expected action %d verb to be %s, got %s", i, verb, action.GetVerb()) + } + resource := expected[i][1] + if action.GetResource().Resource != resource { + t.Errorf("Expected action %d resource to be %s, got %s", i, resource, action.GetResource().Resource) + } + subresource := expected[i][2] + if action.GetSubresource() != subresource { + t.Errorf("Expected action %d subresource to be %s, got %s", i, subresource, action.GetSubresource()) + } + } +} + +func TestController_canDeleteCIDR(t *testing.T) { + tests := []struct { + name string + cidrs []*networkingapiv1alpha1.ServiceCIDR + ips []*networkingapiv1alpha1.IPAddress + cidrSynced *networkingapiv1alpha1.ServiceCIDR + want bool + }{ + { + name: "empty", + cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + want: true, + }, + { + name: "CIDR and no IPs", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + }, + cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + want: true, + }, + { + name: "CIDR with IPs", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + }, + ips: []*networkingapiv1alpha1.IPAddress{ + makeIPAddress("192.168.0.24"), + }, + cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + want: false, + }, + { + name: "CIDR without IPs", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + }, + ips: []*networkingapiv1alpha1.IPAddress{ + makeIPAddress("192.168.1.24"), + }, + cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + want: true, + }, + { + name: "CIDR with IPv4 address referencing the subnet address", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + }, + ips: []*networkingapiv1alpha1.IPAddress{ + makeIPAddress("192.168.0.0"), + }, + cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + want: true, + }, + { + name: "CIDR with IPv4 address referencing the broadcast address", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + }, + ips: 
[]*networkingapiv1alpha1.IPAddress{ + makeIPAddress("192.168.0.255"), + }, + cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + want: true, + }, + { + name: "CIDR with IPv6 address referencing the broadcast address", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + }, + ips: []*networkingapiv1alpha1.IPAddress{ + makeIPAddress("2001:0db2::ffff:ffff:ffff:ffff"), + }, + cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + want: false, + }, + { + name: "CIDR with same range overlapping and IPs", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"), + }, + ips: []*networkingapiv1alpha1.IPAddress{ + makeIPAddress("192.168.0.23"), + }, + cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + want: true, + }, + { + name: "CIDR with smaller range overlapping and IPs", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping", "192.168.0.0/26", "2001:db2::/64"), + }, + ips: []*networkingapiv1alpha1.IPAddress{ + makeIPAddress("192.168.0.23"), + }, + cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + want: true, + }, + { + name: "CIDR with smaller range overlapping but IPs orphan", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping", "192.168.0.0/28", "2001:db2::/64"), + }, + ips: []*networkingapiv1alpha1.IPAddress{ + makeIPAddress("192.168.0.23"), + }, + cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + want: false, + }, + { + name: "CIDR with larger range overlapping and IPs", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping", "192.168.0.0/16", "2001:db2::/64"), + }, + ips: []*networkingapiv1alpha1.IPAddress{ + makeIPAddress("192.168.0.23"), + }, + cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + want: true, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + _, controller := newController(t, tc.cidrs, tc.ips) + err := controller.syncCIDRs() + if err != nil { + t.Fatal(err) + } + + got, err := controller.canDeleteCIDR(context.Background(), tc.cidrSynced) + if err != nil { + t.Fatal(err) + } + if got != tc.want { + t.Errorf("Controller.canDeleteCIDR() = %v, want %v", got, tc.want) + } + }) + } +} + +func TestController_ipToCidrs(t *testing.T) { + tests := []struct { + name string + cidrs []*networkingapiv1alpha1.ServiceCIDR + ip *networkingapiv1alpha1.IPAddress + want []string + }{ + { + name: "empty", + ip: makeIPAddress("192.168.0.23"), + want: []string{}, + }, { + name: "one CIDR", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("unrelated", "10.0.0.0/24", ""), + makeServiceCIDR("unrelated2", "10.0.0.0/16", 
""), + }, + ip: makeIPAddress("192.168.0.23"), + want: []string{defaultservicecidr.DefaultServiceCIDRName}, + }, { + name: "two equal CIDR", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/96"), + makeServiceCIDR("unrelated", "10.0.0.0/24", ""), + makeServiceCIDR("unrelated2", "10.0.0.0/16", ""), + }, + ip: makeIPAddress("192.168.0.23"), + want: []string{defaultservicecidr.DefaultServiceCIDRName, "overlapping"}, + }, { + name: "three CIDR - two same and one larger", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping2", "192.168.0.0/26", "2001:db2::/96"), + makeServiceCIDR("unrelated", "10.0.0.0/24", ""), + makeServiceCIDR("unrelated2", "10.0.0.0/16", ""), + }, + ip: makeIPAddress("192.168.0.23"), + want: []string{defaultservicecidr.DefaultServiceCIDRName, "overlapping", "overlapping2"}, + }, { + name: "three CIDR - two same and one larger - IPv4 subnet address", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping2", "192.168.0.0/26", "2001:db2::/96"), + makeServiceCIDR("unrelated", "10.0.0.0/24", ""), + makeServiceCIDR("unrelated2", "10.0.0.0/16", ""), + }, + ip: makeIPAddress("192.168.0.0"), + want: []string{}, + }, { + name: "three CIDR - two same and one larger - IPv4 broadcast address", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping2", "192.168.0.0/26", "2001:db2::/96"), + makeServiceCIDR("unrelated", "10.0.0.0/24", ""), + makeServiceCIDR("unrelated2", "10.0.0.0/16", ""), + }, + ip: makeIPAddress("192.168.0.63"), // broadcast for 192.168.0.0/26 + want: []string{defaultservicecidr.DefaultServiceCIDRName, "overlapping"}, + }, { + name: "three CIDR - two same and one larger - IPv6 subnet address", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping2", "192.168.0.0/26", "2001:db2::/96"), + makeServiceCIDR("unrelated", "10.0.0.0/24", ""), + makeServiceCIDR("unrelated2", "10.0.0.0/16", ""), + }, + ip: makeIPAddress("2001:db2::"), + want: []string{}, + }, { + name: "three CIDR - two same and one larger - IPv6 broadcast address", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping2", "192.168.0.0/26", "2001:db2::/96"), + makeServiceCIDR("unrelated", "10.0.0.0/24", ""), + makeServiceCIDR("unrelated2", "10.0.0.0/16", ""), + }, + ip: makeIPAddress("2001:0db2::ffff:ffff:ffff:ffff"), + want: []string{defaultservicecidr.DefaultServiceCIDRName, "overlapping"}, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, controller := newController(t, tt.cidrs, nil) + err := 
controller.syncCIDRs() + if err != nil { + t.Fatal(err) + } + if got := controller.containingServiceCIDRs(tt.ip); !cmp.Equal(got, tt.want, cmpopts.SortSlices(func(a, b string) bool { return a < b })) { + t.Errorf("Controller.ipToCidrs() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestController_cidrToCidrs(t *testing.T) { + tests := []struct { + name string + cidrs []*networkingapiv1alpha1.ServiceCIDR + cidr *networkingapiv1alpha1.ServiceCIDR + want []string + }{ + { + name: "empty", + cidr: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + want: []string{}, + }, { + name: "one CIDR", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("unrelated", "10.0.0.0/24", ""), + makeServiceCIDR("unrelated2", "10.0.0.0/16", ""), + }, + cidr: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + want: []string{defaultservicecidr.DefaultServiceCIDRName}, + }, { + name: "two equal CIDR", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/96"), + makeServiceCIDR("unrelated", "10.0.0.0/24", ""), + makeServiceCIDR("unrelated2", "10.0.0.0/16", ""), + }, + cidr: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + want: []string{defaultservicecidr.DefaultServiceCIDRName, "overlapping"}, + }, { + name: "three CIDR - two same and one larger", + cidrs: []*networkingapiv1alpha1.ServiceCIDR{ + makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"), + makeServiceCIDR("overlapping2", "192.168.0.0/26", "2001:db2::/96"), + makeServiceCIDR("unrelated", "10.0.0.0/24", ""), + makeServiceCIDR("unrelated2", "10.0.0.0/16", ""), + }, + cidr: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"), + want: []string{defaultservicecidr.DefaultServiceCIDRName, "overlapping", "overlapping2"}, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, controller := newController(t, tt.cidrs, nil) + err := controller.syncCIDRs() + if err != nil { + t.Fatal(err) + } + if got := controller.overlappingServiceCIDRs(tt.cidr); !cmp.Equal(got, tt.want, cmpopts.SortSlices(func(a, b string) bool { return a < b })) { + t.Errorf("Controller.cidrToCidrs() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go index db7d222849a..19bb1e4cf08 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go @@ -369,6 +369,16 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) eventsRule(), }, }) + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "service-cidrs-controller"}, + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch", "patch", "update").Groups(networkingGroup).Resources("servicecidrs").RuleOrDie(), + rbacv1helpers.NewRule("patch", 
"update").Groups(networkingGroup).Resources("servicecidrs/finalizers").RuleOrDie(), + rbacv1helpers.NewRule("patch", "update").Groups(networkingGroup).Resources("servicecidrs/status").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch").Groups(networkingGroup).Resources("ipaddresses").RuleOrDie(), + eventsRule(), + }, + }) addControllerRole(&controllerRoles, &controllerRoleBindings, func() rbacv1.ClusterRole { role := rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "statefulset-controller"}, diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml index 595e95df8ec..4348a04fa35 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml @@ -476,6 +476,23 @@ items: - kind: ServiceAccount name: service-account-controller namespace: kube-system +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:controller:service-cidrs-controller + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:controller:service-cidrs-controller + subjects: + - kind: ServiceAccount + name: service-cidrs-controller + namespace: kube-system - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml index 940519aa083..8cc0685dd13 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml @@ -1373,6 +1373,57 @@ items: - create - patch - update +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:controller:service-cidrs-controller + rules: + - apiGroups: + - networking.k8s.io + resources: + - servicecidrs + verbs: + - get + - list + - patch + - update + - watch + - apiGroups: + - networking.k8s.io + resources: + - servicecidrs/finalizers + verbs: + - patch + - update + - apiGroups: + - networking.k8s.io + resources: + - servicecidrs/status + verbs: + - patch + - update + - apiGroups: + - networking.k8s.io + resources: + - ipaddresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: