mirror of https://github.com/k3s-io/kubernetes.git, synced 2026-01-29 21:29:24 +00:00
node ipam controller for ipv6 dualstack
@@ -46,6 +46,7 @@ go_library(
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
"//pkg/controller/nodeipam/ipam/sync:go_default_library",
"//pkg/controller/util/node:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/taints:go_default_library",
@@ -58,6 +59,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
@@ -69,6 +71,7 @@ go_library(
"//staging/src/k8s.io/legacy-cloud-providers/gce:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/scheme:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/net:go_default_library",
],
)

@@ -33,11 +33,6 @@ import (
cloudprovider "k8s.io/cloud-provider"
)

type nodeAndCIDR struct {
cidr *net.IPNet
nodeName string
}

// CIDRAllocatorType is the type of the allocator to use.
type CIDRAllocatorType string

@@ -94,7 +89,7 @@ type CIDRAllocator interface {
}

// New creates a new CIDR range allocator.
func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, allocatorType CIDRAllocatorType, clusterCIDR, serviceCIDR *net.IPNet, nodeCIDRMaskSize int) (CIDRAllocator, error) {
func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, allocatorType CIDRAllocatorType, clusterCIDRs []*net.IPNet, serviceCIDR *net.IPNet, nodeCIDRMaskSize int) (CIDRAllocator, error) {
nodeList, err := listNodes(kubeClient)
if err != nil {
return nil, err
@@ -102,7 +97,7 @@ func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInfo

switch allocatorType {
case RangeAllocatorType:
return NewCIDRRangeAllocator(kubeClient, nodeInformer, clusterCIDR, serviceCIDR, nodeCIDRMaskSize, nodeList)
return NewCIDRRangeAllocator(kubeClient, nodeInformer, clusterCIDRs, serviceCIDR, nodeCIDRMaskSize, nodeList)
case CloudAllocatorType:
return NewCloudCIDRAllocator(kubeClient, cloud, nodeInformer)
default:
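Editor's note: the hunks above change New() to take a list of cluster CIDRs instead of a single one. A minimal sketch (not part of this commit; the comma-separated flag value below is hypothetical) of how a caller might build that slice:

```go
// Sketch: turn a comma-separated cluster-CIDR string into the []*net.IPNet
// slice that the new New(...) signature expects.
package main

import (
	"fmt"
	"net"
	"strings"
)

func parseClusterCIDRs(flagValue string) ([]*net.IPNet, error) {
	var cidrs []*net.IPNet
	for _, s := range strings.Split(flagValue, ",") {
		_, cidr, err := net.ParseCIDR(strings.TrimSpace(s))
		if err != nil {
			return nil, fmt.Errorf("invalid cluster cidr %q: %v", s, err)
		}
		cidrs = append(cidrs, cidr)
	}
	return cidrs, nil
}

func main() {
	// One IPv4 and one IPv6 range, as a dual-stack configuration would pass them.
	cidrs, err := parseClusterCIDRs("10.244.0.0/16,fd00:10:244::/48")
	if err != nil {
		panic(err)
	}
	for i, c := range cidrs {
		fmt.Printf("clusterCIDRs[%d] = %s\n", i, c)
	}
}
```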
@@ -41,33 +41,38 @@ import (
utilnode "k8s.io/kubernetes/pkg/util/node"
)

type rangeAllocator struct {
client clientset.Interface
cidrs *cidrset.CidrSet
clusterCIDR *net.IPNet
maxCIDRs int
// cidrs are reserved, then node resource is patched with them
// this type holds the reservation info for a node
type nodeReservedCIDRs struct {
allocatedCIDRs []*net.IPNet
nodeName string
}

// nodeLister is able to list/get nodes and is populated by the shared informer passed to
// NewCloudCIDRAllocator.
type rangeAllocator struct {
client clientset.Interface
// cluster cidrs as passed in during controller creation
clusterCIDRs []*net.IPNet
// for each entry in clusterCIDRs we maintain a list of what is used and what is not
cidrSets []*cidrset.CidrSet
// nodeLister is able to list/get nodes and is populated by the shared informer passed to controller
nodeLister corelisters.NodeLister
// nodesSynced returns true if the node shared informer has been synced at least once.
nodesSynced cache.InformerSynced

// Channel that is used to pass updating Nodes with assigned CIDRs to the background
// Channel that is used to pass updating Nodes and their reserved CIDRs to the background
// This increases a throughput of CIDR assignment by not blocking on long operations.
nodeCIDRUpdateChannel chan nodeAndCIDR
nodeCIDRUpdateChannel chan nodeReservedCIDRs
recorder record.EventRecorder

// Keep a set of nodes that are currently being processed to avoid races in CIDR allocation
lock sync.Mutex
nodesInProcessing sets.String
}

// NewCIDRRangeAllocator returns a CIDRAllocator to allocate CIDR for node
// NewCIDRRangeAllocator returns a CIDRAllocator to allocate CIDRs for node (one from each of clusterCIDRs)
// Caller must ensure subNetMaskSize is not less than cluster CIDR mask size.
// Caller must always pass in a list of existing nodes so the new allocator
// Caller must always pass in a list of existing nodes so the new allocator.
// Caller must ensure that ClusterCIDRs are semantically correct e.g (1 for non DualStack, 2 for DualStack etc..)
// can initialize its CIDR map. NodeList is only nil in testing.
func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.NodeInformer, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.NodeInformer, clusterCIDRs []*net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
if client == nil {
klog.Fatalf("kubeClient is nil when starting NodeController")
}
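Editor's note: the doc comment above says the caller must ensure the ClusterCIDRs are "semantically correct" (two CIDRs of different families for dual stack). A minimal sketch of such a check, not part of this commit; it uses k8s.io/utils/net, which this change adds to the BUILD deps:

```go
// Sketch: verify that a two-entry cluster CIDR list is a dual-stack pair
// (one IPv4 plus one IPv6 range).
package main

import (
	"fmt"
	"net"

	utilnet "k8s.io/utils/net"
)

// isDualStackPair reports whether cidrs holds exactly two CIDRs of different IP families.
func isDualStackPair(cidrs []*net.IPNet) bool {
	if len(cidrs) != 2 {
		return false
	}
	return utilnet.IsIPv6CIDR(cidrs[0]) != utilnet.IsIPv6CIDR(cidrs[1])
}

func main() {
	_, v4, _ := net.ParseCIDR("10.244.0.0/16")
	_, v6, _ := net.ParseCIDR("fd00:10:244::/48")
	fmt.Println(isDualStackPair([]*net.IPNet{v4, v6})) // true
	fmt.Println(isDualStackPair([]*net.IPNet{v4, v4})) // false: same family twice
}
```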
@@ -78,17 +83,24 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
klog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})

set, err := cidrset.NewCIDRSet(clusterCIDR, subNetMaskSize)
if err != nil {
return nil, err
// create a cidrSet for each cidr we operate on
// cidrSet are mapped to clusterCIDR by index
cidrSets := make([]*cidrset.CidrSet, len(clusterCIDRs))
for idx, cidr := range clusterCIDRs {
cidrSet, err := cidrset.NewCIDRSet(cidr, subNetMaskSize)
if err != nil {
return nil, err
}
cidrSets[idx] = cidrSet
}

ra := &rangeAllocator{
client: client,
cidrs: set,
clusterCIDR: clusterCIDR,
clusterCIDRs: clusterCIDRs,
cidrSets: cidrSets,
nodeLister: nodeInformer.Lister(),
nodesSynced: nodeInformer.Informer().HasSynced,
nodeCIDRUpdateChannel: make(chan nodeAndCIDR, cidrUpdateQueueSize),
nodeCIDRUpdateChannel: make(chan nodeReservedCIDRs, cidrUpdateQueueSize),
recorder: recorder,
nodesInProcessing: sets.NewString(),
}
@@ -101,16 +113,14 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No

if nodeList != nil {
for _, node := range nodeList.Items {
if node.Spec.PodCIDR == "" {
klog.Infof("Node %v has no CIDR, ignoring", node.Name)
if len(node.Spec.PodCIDRs) == 0 {
klog.V(4).Infof("Node %v has no CIDR, ignoring", node.Name)
continue
} else {
klog.Infof("Node %v has CIDR %s, occupying it in CIDR map",
node.Name, node.Spec.PodCIDR)
}
if err := ra.occupyCIDR(&node); err != nil {
klog.V(4).Infof("Node %v has CIDR %s, occupying it in CIDR map", node.Name, node.Spec.PodCIDR)
if err := ra.occupyCIDRs(&node); err != nil {
// This will happen if:
// 1. We find garbage in the podCIDR field. Retrying is useless.
// 1. We find garbage in the podCIDRs field. Retrying is useless.
// 2. CIDR out of range: This means a node CIDR has changed.
// This error will keep crashing controller-manager.
return nil, err
@@ -121,26 +131,26 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: nodeutil.CreateAddNodeHandler(ra.AllocateOrOccupyCIDR),
UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
// If the PodCIDR is not empty we either:
// - already processed a Node that already had a CIDR after NC restarted
// If the PodCIDRs list is not empty we either:
// - already processed a Node that already had CIDRs after NC restarted
// (cidr is marked as used),
// - already processed a Node successfully and allocated a CIDR for it
// - already processed a Node successfully and allocated CIDRs for it
// (cidr is marked as used),
// - already processed a Node but we did see a "timeout" response and
// request eventually got through in this case we haven't released
// the allocated CIDR (cidr is still marked as used).
// the allocated CIDRs (cidr is still marked as used).
// There's a possible error here:
// - NC sees a new Node and assigns a CIDR X to it,
// - NC sees a new Node and assigns CIDRs X,Y.. to it,
// - Update Node call fails with a timeout,
// - Node is updated by some other component, NC sees an update and
// assigns CIDR Y to the Node,
// - Both CIDR X and CIDR Y are marked as used in the local cache,
// even though Node sees only CIDR Y
// The problem here is that in in-memory cache we see CIDR X as marked,
// assigns CIDRs A,B.. to the Node,
// - Both CIDR X,Y.. and CIDR A,B.. are marked as used in the local cache,
// even though Node sees only CIDR A,B..
// The problem here is that in in-memory cache we see CIDR X,Y.. as marked,
// which prevents it from being assigned to any new node. The cluster
// state is correct.
// Restart of NC fixes the issue.
if newNode.Spec.PodCIDR == "" {
if len(newNode.Spec.PodCIDRs) == 0 {
return ra.AllocateOrOccupyCIDR(newNode)
}
return nil
@@ -176,7 +186,7 @@ func (r *rangeAllocator) worker(stopChan <-chan struct{}) {
klog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
return
}
if err := r.updateCIDRAllocation(workItem); err != nil {
if err := r.updateCIDRsAllocation(workItem); err != nil {
// Requeue the failed node for update again.
r.nodeCIDRUpdateChannel <- workItem
}
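Editor's note: the worker hunk above reads reservations off the channel and requeues an item when the update fails. A simplified, self-contained sketch of that pattern (not this commit's code; updateFn stands in for updateCIDRsAllocation):

```go
// Sketch: worker loop that drains a reservation channel and requeues failures.
package main

import (
	"fmt"
	"net"
)

type nodeReservedCIDRs struct {
	allocatedCIDRs []*net.IPNet
	nodeName       string
}

func worker(work chan nodeReservedCIDRs, stop <-chan struct{}, updateFn func(nodeReservedCIDRs) error) {
	for {
		select {
		case item, ok := <-work:
			if !ok {
				return // channel closed
			}
			if err := updateFn(item); err != nil {
				work <- item // requeue the failed node for another attempt
			}
		case <-stop:
			return
		}
	}
}

func main() {
	work := make(chan nodeReservedCIDRs, 10)
	stop := make(chan struct{})
	done := make(chan struct{})

	_, v4, _ := net.ParseCIDR("10.244.1.0/24")
	work <- nodeReservedCIDRs{nodeName: "node0", allocatedCIDRs: []*net.IPNet{v4}}

	go worker(work, stop, func(item nodeReservedCIDRs) error {
		fmt.Printf("patched %s with %v\n", item.nodeName, item.allocatedCIDRs)
		close(done) // signal that the one queued item was handled
		return nil
	})

	<-done
	close(stop)
}
```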
@@ -202,17 +212,20 @@ func (r *rangeAllocator) removeNodeFromProcessing(nodeName string) {
r.nodesInProcessing.Delete(nodeName)
}

func (r *rangeAllocator) occupyCIDR(node *v1.Node) error {
// marks node.PodCIDRs[...] as used in allocator's tracked cidrSet
func (r *rangeAllocator) occupyCIDRs(node *v1.Node) error {
defer r.removeNodeFromProcessing(node.Name)
if node.Spec.PodCIDR == "" {
if len(node.Spec.PodCIDRs) == 0 {
return nil
}
_, podCIDR, err := net.ParseCIDR(node.Spec.PodCIDR)
if err != nil {
return fmt.Errorf("failed to parse node %s, CIDR %s", node.Name, node.Spec.PodCIDR)
}
if err := r.cidrs.Occupy(podCIDR); err != nil {
return fmt.Errorf("failed to mark cidr as occupied: %v", err)
for idx, cidr := range node.Spec.PodCIDRs {
_, podCIDR, err := net.ParseCIDR(cidr)
if err != nil {
return fmt.Errorf("failed to parse node %s, CIDR %s", node.Name, node.Spec.PodCIDR)
}
if err := r.cidrSets[idx].Occupy(podCIDR); err != nil {
return fmt.Errorf("failed to mark cidr[%v] at idx [%v] as occupied for node: %v: %v", podCIDR, idx, node.Name, err)
}
}
return nil
}
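Editor's note: occupyCIDRs above iterates node.Spec.PodCIDRs by index. A minimal sketch (not from this commit; the CIDR values are examples) of what that field looks like on a dual-stack node; the legacy Spec.PodCIDR is expected to match the first entry:

```go
// Sketch: iterate a dual-stack node's PodCIDRs the way occupyCIDRs does.
package main

import (
	"fmt"
	"net"

	v1 "k8s.io/api/core/v1"
)

func main() {
	node := v1.Node{}
	node.Name = "node0"
	node.Spec.PodCIDR = "10.244.1.0/24"
	node.Spec.PodCIDRs = []string{"10.244.1.0/24", "fd00:10:244:1::/64"}

	for idx, cidr := range node.Spec.PodCIDRs {
		_, podCIDR, err := net.ParseCIDR(cidr)
		if err != nil {
			fmt.Printf("failed to parse node %s, CIDR %s\n", node.Name, cidr)
			return
		}
		// In the allocator, cidrSets[idx].Occupy(podCIDR) would be called here.
		fmt.Printf("would occupy %v in cidrSets[%d]\n", podCIDR, idx)
	}
}
```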
@@ -228,41 +241,53 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
klog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
return nil
}
if node.Spec.PodCIDR != "" {
return r.occupyCIDR(node)

if len(node.Spec.PodCIDRs) > 0 {
return r.occupyCIDRs(node)
}
podCIDR, err := r.cidrs.AllocateNext()
if err != nil {
r.removeNodeFromProcessing(node.Name)
nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRNotAvailable")
return fmt.Errorf("failed to allocate cidr: %v", err)
// allocate and queue the assignment
allocated := nodeReservedCIDRs{
nodeName: node.Name,
allocatedCIDRs: make([]*net.IPNet, len(r.cidrSets)),
}

klog.V(4).Infof("Putting node %s with CIDR %s into the work queue", node.Name, podCIDR)
r.nodeCIDRUpdateChannel <- nodeAndCIDR{
nodeName: node.Name,
cidr: podCIDR,
for idx := range r.cidrSets {
podCIDR, err := r.cidrSets[idx].AllocateNext()
if err != nil {
r.removeNodeFromProcessing(node.Name)
nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRNotAvailable")
return fmt.Errorf("failed to allocate cidr from cluster cidr at idx:%v: %v", idx, err)
}
allocated.allocatedCIDRs[idx] = podCIDR
}

// queue the assignment
klog.V(4).Infof("Putting node %s with CIDR %v into the work queue", node.Name, allocated.allocatedCIDRs)
r.nodeCIDRUpdateChannel <- allocated
return nil
}

// ReleaseCIDR marks node.podCIDRs[...] as unused in our tracked cidrSets
func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
if node == nil || len(node.Spec.PodCIDRs) == 0 {
return nil
}

for idx, cidr := range node.Spec.PodCIDRs {
_, podCIDR, err := net.ParseCIDR(cidr)
if err != nil {
return fmt.Errorf("Failed to parse CIDR %s on Node %v: %v", cidr, node.Name, err)
}

klog.V(4).Infof("release CIDR %s for node:%v", cidr, node.Name)
if err = r.cidrSets[idx].Release(podCIDR); err != nil {
return fmt.Errorf("Error when releasing CIDR %v: %v", cidr, err)
}
}
return nil
}

func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
if node == nil || node.Spec.PodCIDR == "" {
return nil
}
_, podCIDR, err := net.ParseCIDR(node.Spec.PodCIDR)
if err != nil {
return fmt.Errorf("Failed to parse CIDR %s on Node %v: %v", node.Spec.PodCIDR, node.Name, err)
}

klog.V(4).Infof("release CIDR %s", node.Spec.PodCIDR)
if err = r.cidrs.Release(podCIDR); err != nil {
return fmt.Errorf("Error when releasing CIDR %v: %v", node.Spec.PodCIDR, err)
}
return err
}

// Marks all CIDRs with subNetMaskSize that belongs to serviceCIDR as used,
// Marks all CIDRs with subNetMaskSize that belongs to serviceCIDR as used across all cidrs
// so that they won't be assignable.
func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) {
// Checks if service CIDR has a nonempty intersection with cluster
@@ -270,57 +295,87 @@ func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) {
// clusterCIDR's Mask applied (this means that clusterCIDR contains
// serviceCIDR) or vice versa (which means that serviceCIDR contains
// clusterCIDR).
if !r.clusterCIDR.Contains(serviceCIDR.IP.Mask(r.clusterCIDR.Mask)) && !serviceCIDR.Contains(r.clusterCIDR.IP.Mask(serviceCIDR.Mask)) {
return
}
for idx, cidr := range r.clusterCIDRs {
if !cidr.Contains(serviceCIDR.IP.Mask(cidr.Mask)) && !serviceCIDR.Contains(cidr.IP.Mask(serviceCIDR.Mask)) {
continue
}

if err := r.cidrs.Occupy(serviceCIDR); err != nil {
klog.Errorf("Error filtering out service cidr %v: %v", serviceCIDR, err)
// at this point, len(cidrSet) == len(clusterCidr)
if err := r.cidrSets[idx].Occupy(serviceCIDR); err != nil {
klog.Errorf("Error filtering out service cidr out cluster cidr:%v (index:%v) %v: %v", cidr, idx, serviceCIDR, err)
}
}
}
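Editor's note: filterOutServiceRange above now applies the same overlap test per cluster CIDR, skipping families the service range cannot intersect. A minimal stdlib-only sketch of that test (not this commit's code; the CIDR values are examples):

```go
// Sketch: two CIDRs intersect when either one contains the other's base
// address after masking, so the service range only needs to be occupied in
// the cluster CIDRs it actually overlaps.
package main

import (
	"fmt"
	"net"
)

func cidrsOverlap(a, b *net.IPNet) bool {
	return a.Contains(b.IP.Mask(a.Mask)) || b.Contains(a.IP.Mask(b.Mask))
}

func main() {
	_, clusterV4, _ := net.ParseCIDR("10.244.0.0/16")
	_, clusterV6, _ := net.ParseCIDR("fd00:10:244::/48")
	_, serviceCIDR, _ := net.ParseCIDR("10.244.0.0/24")

	fmt.Println(cidrsOverlap(clusterV4, serviceCIDR)) // true: occupy it in this cidrSet
	fmt.Println(cidrsOverlap(clusterV6, serviceCIDR)) // false: skip this cidrSet
}
```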
// updateCIDRAllocation assigns CIDR to Node and sends an update to the API server.
func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
// updateCIDRsAllocation assigns CIDR to Node and sends an update to the API server.
func (r *rangeAllocator) updateCIDRsAllocation(data nodeReservedCIDRs) error {
var err error
var node *v1.Node
defer r.removeNodeFromProcessing(data.nodeName)

podCIDR := data.cidr.String()

cidrsString := cidrsAsString(data.allocatedCIDRs)
node, err = r.nodeLister.Get(data.nodeName)
if err != nil {
klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", data.nodeName, err)
klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDRs: %v", data.nodeName, err)
return err
}

if node.Spec.PodCIDR == podCIDR {
klog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
return nil
}
if node.Spec.PodCIDR != "" {
klog.Errorf("Node %v already has a CIDR allocated %v. Releasing the new one %v.", node.Name, node.Spec.PodCIDR, podCIDR)
if err := r.cidrs.Release(data.cidr); err != nil {
klog.Errorf("Error when releasing CIDR %v", podCIDR)
// if cidr list matches the proposed.
// then we possibly updated this node
// and just failed to ack the success.
if len(node.Spec.PodCIDRs) == len(data.allocatedCIDRs) {
match := true
for idx, cidr := range cidrsString {
if node.Spec.PodCIDRs[idx] != cidr {
match = false
break
}
}
return nil
}
// If we reached here, it means that the node has no CIDR currently assigned. So we set it.
for i := 0; i < cidrUpdateRetries; i++ {
if err = utilnode.PatchNodeCIDR(r.client, types.NodeName(node.Name), podCIDR); err == nil {
klog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
if match {
klog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, data.allocatedCIDRs)
return nil
}
}
klog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)

// node has cidrs, release the reserved
if len(node.Spec.PodCIDRs) != 0 {
klog.Errorf("Node %v already has a CIDR allocated %v. Releasing the new one.", node.Name, node.Spec.PodCIDRs)
for idx, cidr := range data.allocatedCIDRs {
if releaseErr := r.cidrSets[idx].Release(cidr); releaseErr != nil {
klog.Errorf("Error when releasing CIDR idx:%v value: %v err:%v", idx, cidr, releaseErr)
}
}
return nil
}

// If we reached here, it means that the node has no CIDR currently assigned. So we set it.
for i := 0; i < cidrUpdateRetries; i++ {
if err = utilnode.PatchNodeCIDRs(r.client, types.NodeName(node.Name), cidrsString); err == nil {
klog.Infof("Set node %v PodCIDR to %v", node.Name, cidrsString)
return nil
}
}
// failed release back to the pool
klog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, cidrsString, err)
nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
// We accept the fact that we may leak CIDRs here. This is safer than releasing
// them in case when we don't know if request went through.
// NodeController restart will return all falsely allocated CIDRs to the pool.
if !apierrors.IsServerTimeout(err) {
klog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", node.Name, err)
if releaseErr := r.cidrs.Release(data.cidr); releaseErr != nil {
klog.Errorf("Error releasing allocated CIDR for node %v: %v", node.Name, releaseErr)
for idx, cidr := range data.allocatedCIDRs {
if releaseErr := r.cidrSets[idx].Release(cidr); releaseErr != nil {
klog.Errorf("Error releasing allocated CIDR for node %v: %v", node.Name, releaseErr)
}
}
}
return err
}

// converts a slice of cidrs into <c-1>,<c-2>,<c-n>
func cidrsAsString(inCIDRs []*net.IPNet) []string {
outCIDRs := make([]string, len(inCIDRs))
for idx, inCIDR := range inCIDRs {
outCIDRs[idx] = inCIDR.String()
}
return outCIDRs
}
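Editor's note: a small usage sketch for cidrsAsString above (the helper is copied from the hunk; the CIDR values are examples, not from this commit):

```go
// Sketch: the strings produced here are what gets patched into Node.Spec.PodCIDRs.
package main

import (
	"fmt"
	"net"
)

func cidrsAsString(inCIDRs []*net.IPNet) []string {
	outCIDRs := make([]string, len(inCIDRs))
	for idx, inCIDR := range inCIDRs {
		outCIDRs[idx] = inCIDR.String()
	}
	return outCIDRs
}

func main() {
	_, v4, _ := net.ParseCIDR("10.244.1.0/24")
	_, v6, _ := net.ParseCIDR("fd00:10:244:1::/64")
	fmt.Println(cidrsAsString([]*net.IPNet{v4, v6})) // [10.244.1.0/24 fd00:10:244:1::/64]
}
```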
@@ -59,16 +59,20 @@ func getFakeNodeInformer(fakeNodeHandler *testutil.FakeNodeHandler) coreinformer
return fakeNodeInformer
}

type testCase struct {
description string
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDRs []*net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
// key is index of the cidr allocated
expectedAllocatedCIDR map[int]string
allocatedCIDRs map[int][]string
}

func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
testCases := []struct {
description string
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
expectedAllocatedCIDR string
allocatedCIDRs []string
}{
// all tests operate on a single node
testCases := []testCase{
{
description: "When there's no ServiceCIDR return first CIDR in range",
fakeNodeHandler: &testutil.FakeNodeHandler{
@@ -81,13 +85,15 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
clusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24")
return clusterCIDR
return []*net.IPNet{clusterCIDR}
}(),
serviceCIDR: nil,
subNetMaskSize: 30,
expectedAllocatedCIDR: "127.123.234.0/30",
serviceCIDR: nil,
subNetMaskSize: 30,
expectedAllocatedCIDR: map[int]string{
0: "127.123.234.0/30",
},
},
{
description: "Correctly filter out ServiceCIDR",
@@ -101,17 +107,19 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
clusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24")
return clusterCIDR
return []*net.IPNet{clusterCIDR}
}(),
serviceCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/26")
return clusterCIDR
_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26")
return serviceCIDR
}(),
subNetMaskSize: 30,
// it should return first /30 CIDR after service range
expectedAllocatedCIDR: "127.123.234.64/30",
expectedAllocatedCIDR: map[int]string{
0: "127.123.234.64/30",
},
},
{
description: "Correctly ignore already allocated CIDRs",
@@ -125,31 +133,100 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
clusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24")
return clusterCIDR
return []*net.IPNet{clusterCIDR}
}(),
serviceCIDR: func() *net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/26")
return clusterCIDR
_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26")
return serviceCIDR
}(),
subNetMaskSize: 30,
allocatedCIDRs: map[int][]string{
0: {"127.123.234.64/30", "127.123.234.68/30", "127.123.234.72/30", "127.123.234.80/30"},
},
expectedAllocatedCIDR: map[int]string{
0: "127.123.234.76/30",
},
},
{
description: "Dualstack CIDRs v4,v6",
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("127.123.234.0/8")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8")
return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
}(),
serviceCIDR: func() *net.IPNet {
_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26")
return serviceCIDR
}(),
},
{
description: "Dualstack CIDRs v6,v4",
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("127.123.234.0/8")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8")
return []*net.IPNet{clusterCIDRv6, clusterCIDRv4}
}(),
serviceCIDR: func() *net.IPNet {
_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26")
return serviceCIDR
}(),
},

{
description: "Dualstack CIDRs, more than two",
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
},
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("127.123.234.0/8")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8")
_, clusterCIDRv4_2, _ := net.ParseCIDR("10.0.0.0/8")
return []*net.IPNet{clusterCIDRv4, clusterCIDRv6, clusterCIDRv4_2}
}(),
serviceCIDR: func() *net.IPNet {
_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26")
return serviceCIDR
}(),
subNetMaskSize: 30,
allocatedCIDRs: []string{"127.123.234.64/30", "127.123.234.68/30", "127.123.234.72/30", "127.123.234.80/30"},
expectedAllocatedCIDR: "127.123.234.76/30",
},
}

testFunc := func(tc struct {
description string
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
expectedAllocatedCIDR string
allocatedCIDRs []string
}) {
// test function
testFunc := func(tc testCase) {
// Initialize the range allocator.
allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDRs, tc.serviceCIDR, tc.subNetMaskSize, nil)
if err != nil {
t.Errorf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err)
return
}
rangeAllocator, ok := allocator.(*rangeAllocator)
if !ok {
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
@@ -160,50 +237,51 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
go allocator.Run(wait.NeverStop)

// this is a bit of white box testing
for _, allocated := range tc.allocatedCIDRs {
_, cidr, err := net.ParseCIDR(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
// pre allocate the cidrs as per the test
for idx, allocatedList := range tc.allocatedCIDRs {
for _, allocated := range allocatedList {
_, cidr, err := net.ParseCIDR(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
}
if err = rangeAllocator.cidrSets[idx].Occupy(cidr); err != nil {
t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
}
}
if err = rangeAllocator.cidrs.Occupy(cidr); err != nil {
t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
if err := allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err != nil {
t.Errorf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
}
if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
}
}
if err := allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err != nil {
t.Errorf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)

if len(tc.expectedAllocatedCIDR) == 0 {
// nothing further expected
return
}
if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
}
found := false
seenCIDRs := []string{}
for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() {
seenCIDRs = append(seenCIDRs, updatedNode.Spec.PodCIDR)
if updatedNode.Spec.PodCIDR == tc.expectedAllocatedCIDR {
found = true
break
if len(updatedNode.Spec.PodCIDRs) == 0 {
continue // not assigned yet
}
//match
for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR {
if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR {
t.Errorf("%v: Unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs)
break
}
}
}
if !found {
t.Errorf("%v: Unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v",
tc.description, tc.expectedAllocatedCIDR, seenCIDRs)
}
}

// run the test cases
for _, tc := range testCases {
testFunc(tc)
}
}

func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
testCases := []struct {
description string
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
allocatedCIDRs []string
}{
testCases := []testCase{
{
description: "When there's no ServiceCIDR return first CIDR in range",
fakeNodeHandler: &testutil.FakeNodeHandler{
@@ -216,26 +294,24 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
clusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
return clusterCIDR
return []*net.IPNet{clusterCIDR}
}(),
serviceCIDR: nil,
subNetMaskSize: 30,
allocatedCIDRs: []string{"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
allocatedCIDRs: map[int][]string{
0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
},
},
}

testFunc := func(tc struct {
description string
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
allocatedCIDRs []string
}) {
testFunc := func(tc testCase) {
// Initialize the range allocator.
allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDRs, tc.serviceCIDR, tc.subNetMaskSize, nil)
if err != nil {
t.Logf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err)
}
rangeAllocator, ok := allocator.(*rangeAllocator)
if !ok {
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
@@ -246,14 +322,16 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
go allocator.Run(wait.NeverStop)

// this is a bit of white box testing
for _, allocated := range tc.allocatedCIDRs {
_, cidr, err := net.ParseCIDR(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
}
err = rangeAllocator.cidrs.Occupy(cidr)
if err != nil {
t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
for setIdx, allocatedList := range tc.allocatedCIDRs {
for _, allocated := range allocatedList {
_, cidr, err := net.ParseCIDR(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, cidr, err)
}
err = rangeAllocator.cidrSets[setIdx].Occupy(cidr)
if err != nil {
t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, cidr, err)
}
}
}
if err := allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err == nil {
@@ -264,15 +342,21 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
if len(tc.fakeNodeHandler.GetUpdatedNodesCopy()) != 0 {
t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy())
}
seenCIDRs := []string{}
for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() {
if updatedNode.Spec.PodCIDR != "" {
seenCIDRs = append(seenCIDRs, updatedNode.Spec.PodCIDR)
}
if len(tc.expectedAllocatedCIDR) == 0 {
// nothing further expected
return
}
if len(seenCIDRs) != 0 {
t.Errorf("%v: Seen assigned CIDRs when not expected: %v",
tc.description, seenCIDRs)
for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() {
if len(updatedNode.Spec.PodCIDRs) == 0 {
continue // not assigned yet
}
//match
for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR {
if updatedNode.Spec.PodCIDRs[podCIDRIdx] == expectedPodCIDR {
t.Errorf("%v: found cidr %v that should not be allocated on node with CIDRs:%v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs)
break
}
}
}
}
for _, tc := range testCases {
@@ -280,18 +364,20 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
}
}

type releaseTestCase struct {
description string
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDRs []*net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
expectedAllocatedCIDRFirstRound map[int]string
expectedAllocatedCIDRSecondRound map[int]string
allocatedCIDRs map[int][]string
cidrsToRelease [][]string
}

func TestReleaseCIDRSuccess(t *testing.T) {
testCases := []struct {
description string
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
expectedAllocatedCIDRFirstRound string
expectedAllocatedCIDRSecondRound string
allocatedCIDRs []string
cidrsToRelease []string
}{
testCases := []releaseTestCase{
{
description: "Correctly release preallocated CIDR",
fakeNodeHandler: &testutil.FakeNodeHandler{
@@ -304,16 +390,22 @@ func TestReleaseCIDRSuccess(t *testing.T) {
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
clusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
return clusterCIDR
return []*net.IPNet{clusterCIDR}
}(),
serviceCIDR: nil,
subNetMaskSize: 30,
allocatedCIDRs: []string{"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
expectedAllocatedCIDRFirstRound: "",
cidrsToRelease: []string{"127.123.234.4/30"},
expectedAllocatedCIDRSecondRound: "127.123.234.4/30",
serviceCIDR: nil,
subNetMaskSize: 30,
allocatedCIDRs: map[int][]string{
0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
},
expectedAllocatedCIDRFirstRound: nil,
cidrsToRelease: [][]string{
{"127.123.234.4/30"},
},
expectedAllocatedCIDRSecondRound: map[int]string{
0: "127.123.234.4/30",
},
},
{
description: "Correctly recycle CIDR",
@@ -327,32 +419,30 @@ func TestReleaseCIDRSuccess(t *testing.T) {
},
Clientset: fake.NewSimpleClientset(),
},
clusterCIDR: func() *net.IPNet {
clusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28")
return clusterCIDR
return []*net.IPNet{clusterCIDR}
}(),
serviceCIDR: nil,
subNetMaskSize: 30,
allocatedCIDRs: []string{"127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
expectedAllocatedCIDRFirstRound: "127.123.234.0/30",
cidrsToRelease: []string{"127.123.234.0/30"},
expectedAllocatedCIDRSecondRound: "127.123.234.0/30",
serviceCIDR: nil,
subNetMaskSize: 30,
allocatedCIDRs: map[int][]string{
0: {"127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"},
},
expectedAllocatedCIDRFirstRound: map[int]string{
0: "127.123.234.0/30",
},
cidrsToRelease: [][]string{
{"127.123.234.0/30"},
},
expectedAllocatedCIDRSecondRound: map[int]string{
0: "127.123.234.0/30",
},
},
}

testFunc := func(tc struct {
description string
fakeNodeHandler *testutil.FakeNodeHandler
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
subNetMaskSize int
expectedAllocatedCIDRFirstRound string
expectedAllocatedCIDRSecondRound string
allocatedCIDRs []string
cidrsToRelease []string
}) {
testFunc := func(tc releaseTestCase) {
// Initialize the range allocator.
allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDRs, tc.serviceCIDR, tc.subNetMaskSize, nil)
rangeAllocator, ok := allocator.(*rangeAllocator)
if !ok {
t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
@@ -363,18 +453,21 @@ func TestReleaseCIDRSuccess(t *testing.T) {
go allocator.Run(wait.NeverStop)

// this is a bit of white box testing
for _, allocated := range tc.allocatedCIDRs {
_, cidr, err := net.ParseCIDR(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
}
err = rangeAllocator.cidrs.Occupy(cidr)
if err != nil {
t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
for setIdx, allocatedList := range tc.allocatedCIDRs {
for _, allocated := range allocatedList {
_, cidr, err := net.ParseCIDR(allocated)
if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
}
err = rangeAllocator.cidrSets[setIdx].Occupy(cidr)
if err != nil {
t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
}
}
}

err := allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0])
if tc.expectedAllocatedCIDRFirstRound != "" {
if len(tc.expectedAllocatedCIDRFirstRound) != 0 {
if err != nil {
t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
}
@@ -391,20 +484,18 @@ func TestReleaseCIDRSuccess(t *testing.T) {
t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy())
}
}

for _, cidrToRelease := range tc.cidrsToRelease {
nodeToRelease := v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
}
nodeToRelease.Spec.PodCIDR = cidrToRelease
nodeToRelease.Spec.PodCIDRs = cidrToRelease
err = allocator.ReleaseCIDR(&nodeToRelease)
if err != nil {
t.Fatalf("%v: unexpected error in ReleaseCIDR: %v", tc.description, err)
}
}

if err = allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err != nil {
t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
}
@@ -412,20 +503,24 @@ func TestReleaseCIDRSuccess(t *testing.T) {
t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
}

found := false
seenCIDRs := []string{}
if len(tc.expectedAllocatedCIDRSecondRound) == 0 {
// nothing further expected
return
}
for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() {
seenCIDRs = append(seenCIDRs, updatedNode.Spec.PodCIDR)
if updatedNode.Spec.PodCIDR == tc.expectedAllocatedCIDRSecondRound {
found = true
break
if len(updatedNode.Spec.PodCIDRs) == 0 {
continue // not assigned yet
}
//match
for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDRSecondRound {
if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR {
t.Errorf("%v: found cidr %v that should not be allocated on node with CIDRs:%v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs)
break
}
}
}
if !found {
t.Errorf("%v: Unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v",
tc.description, tc.expectedAllocatedCIDRSecondRound, seenCIDRs)
}
}

for _, tc := range testCases {
testFunc(tc)
}