move pkg/util/node to component-helpers/node/util (#105347)

Signed-off-by: Neha Lohia <nehapithadiya444@gmail.com>
Neha Lohia
2021-11-12 21:22:27 +05:30
committed by GitHub
parent 7b9f4f18fe
commit fa1b6765d5
41 changed files with 204 additions and 303 deletions
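For code that consumed these helpers, the visible effect of the move is an import path change; the helper signatures are unchanged. A minimal before/after sketch (the setPodCIDRs wrapper below is illustrative, not part of this commit):

// Before the move:
//   import nodeutil "k8s.io/kubernetes/pkg/util/node"
// After the move:
import (
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
	nodeutil "k8s.io/component-helpers/node/util"
)

// setPodCIDRs is a hypothetical caller; only the import path changes,
// and call sites such as PatchNodeCIDRs keep the same signature.
func setPodCIDRs(client clientset.Interface, nodeName string, cidrs []string) error {
	return nodeutil.PatchNodeCIDRs(client, types.NodeName(nodeName), cidrs)
}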

View File

@@ -34,7 +34,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
nodeutil "k8s.io/kubernetes/pkg/util/node"
nodeutil "k8s.io/component-helpers/node/util"
"k8s.io/legacy-cloud-providers/gce"
"k8s.io/metrics/pkg/client/clientset/versioned/scheme"
)

View File

@@ -43,8 +43,8 @@ import (
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
cloudprovider "k8s.io/cloud-provider"
nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
utilnode "k8s.io/kubernetes/pkg/util/node"
nodeutil "k8s.io/component-helpers/node/util"
controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
utiltaints "k8s.io/kubernetes/pkg/util/taints"
"k8s.io/legacy-cloud-providers/gce"
netutils "k8s.io/utils/net"
@@ -112,21 +112,21 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
}
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-AddFunc: nodeutil.CreateAddNodeHandler(ca.AllocateOrOccupyCIDR),
-UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
+AddFunc: controllerutil.CreateAddNodeHandler(ca.AllocateOrOccupyCIDR),
+UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
if newNode.Spec.PodCIDR == "" {
return ca.AllocateOrOccupyCIDR(newNode)
}
// Even if PodCIDR is assigned, but NetworkUnavailable condition is
// set to true, we need to process the node to set the condition.
networkUnavailableTaint := &v1.Taint{Key: v1.TaintNodeNetworkUnavailable, Effect: v1.TaintEffectNoSchedule}
-_, cond := nodeutil.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable)
+_, cond := controllerutil.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable)
if cond == nil || cond.Status != v1.ConditionFalse || utiltaints.TaintExists(newNode.Spec.Taints, networkUnavailableTaint) {
return ca.AllocateOrOccupyCIDR(newNode)
}
return nil
}),
-DeleteFunc: nodeutil.CreateDeleteNodeHandler(ca.ReleaseCIDR),
+DeleteFunc: controllerutil.CreateDeleteNodeHandler(ca.ReleaseCIDR),
})
klog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName())
@@ -258,11 +258,11 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
cidrStrings, err := ca.cloud.AliasRangesByProviderID(node.Spec.ProviderID)
if err != nil {
-nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
+controllerutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
return fmt.Errorf("failed to get cidr(s) from provider: %v", err)
}
if len(cidrStrings) == 0 {
-nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
+controllerutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
return fmt.Errorf("failed to allocate cidr: Node %v has no CIDRs", node.Name)
}
//Can have at most 2 ips (one for v4 and one for v6)
@@ -290,19 +290,19 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
// See https://github.com/kubernetes/kubernetes/pull/42147#discussion_r103357248
}
for i := 0; i < cidrUpdateRetries; i++ {
-if err = utilnode.PatchNodeCIDRs(ca.client, types.NodeName(node.Name), cidrStrings); err == nil {
+if err = nodeutil.PatchNodeCIDRs(ca.client, types.NodeName(node.Name), cidrStrings); err == nil {
klog.InfoS("Set the node PodCIDRs", "nodeName", node.Name, "cidrStrings", cidrStrings)
break
}
}
}
if err != nil {
-nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed")
+controllerutil.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed")
klog.ErrorS(err, "Failed to update the node PodCIDR after multiple attempts", "nodeName", node.Name, "cidrStrings", cidrStrings)
return err
}
-err = utilnode.SetNodeCondition(ca.client, types.NodeName(node.Name), v1.NodeCondition{
+err = nodeutil.SetNodeCondition(ca.client, types.NodeName(node.Name), v1.NodeCondition{
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionFalse,
Reason: "RouteCreated",

View File

@@ -35,7 +35,7 @@ import (
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
nodesync "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync"
nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
"k8s.io/legacy-cloud-providers/gce"
)
@@ -142,9 +142,9 @@ func (c *Controller) Start(nodeInformer informers.NodeInformer) error {
}
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-AddFunc: nodeutil.CreateAddNodeHandler(c.onAdd),
-UpdateFunc: nodeutil.CreateUpdateNodeHandler(c.onUpdate),
-DeleteFunc: nodeutil.CreateDeleteNodeHandler(c.onDelete),
+AddFunc: controllerutil.CreateAddNodeHandler(c.onAdd),
+UpdateFunc: controllerutil.CreateUpdateNodeHandler(c.onUpdate),
+DeleteFunc: controllerutil.CreateDeleteNodeHandler(c.onDelete),
})
return nil

View File

@@ -36,9 +36,9 @@ import (
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
nodeutil "k8s.io/component-helpers/node/util"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
utilnode "k8s.io/kubernetes/pkg/util/node"
controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
)
// cidrs are reserved, then node resource is patched with them
@@ -135,8 +135,8 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
}
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-AddFunc: nodeutil.CreateAddNodeHandler(ra.AllocateOrOccupyCIDR),
-UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
+AddFunc: controllerutil.CreateAddNodeHandler(ra.AllocateOrOccupyCIDR),
+UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
// If the PodCIDRs list is not empty we either:
// - already processed a Node that already had CIDRs after NC restarted
// (cidr is marked as used),
@@ -161,7 +161,7 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
}
return nil
}),
-DeleteFunc: nodeutil.CreateDeleteNodeHandler(ra.ReleaseCIDR),
+DeleteFunc: controllerutil.CreateDeleteNodeHandler(ra.ReleaseCIDR),
})
return ra, nil
@@ -268,7 +268,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
podCIDR, err := r.cidrSets[idx].AllocateNext()
if err != nil {
r.removeNodeFromProcessing(node.Name)
-nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRNotAvailable")
+controllerutil.RecordNodeStatusChange(r.recorder, node, "CIDRNotAvailable")
return fmt.Errorf("failed to allocate cidr from cluster cidr at idx:%v: %v", idx, err)
}
allocated.allocatedCIDRs[idx] = podCIDR
@@ -370,14 +370,14 @@ func (r *rangeAllocator) updateCIDRsAllocation(data nodeReservedCIDRs) error {
// If we reached here, it means that the node has no CIDR currently assigned. So we set it.
for i := 0; i < cidrUpdateRetries; i++ {
-if err = utilnode.PatchNodeCIDRs(r.client, types.NodeName(node.Name), cidrsString); err == nil {
+if err = nodeutil.PatchNodeCIDRs(r.client, types.NodeName(node.Name), cidrsString); err == nil {
klog.Infof("Set node %v PodCIDR to %v", node.Name, cidrsString)
return nil
}
}
// failed release back to the pool
klog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, cidrsString, err)
-nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
+controllerutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
// We accept the fact that we may leak CIDRs here. This is safer than releasing
// them in case when we don't know if request went through.
// NodeController restart will return all falsely allocated CIDRs to the pool.
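The hunk above is cut off, but the pattern it implements is: retry the patch a bounded number of times, and on persistent failure record the event and deliberately keep the CIDRs reserved, since leaking them is safer than releasing a range the patch may in fact have applied. A condensed sketch of that tail, with names taken from the diff (not the full upstream function body):

var err error
for i := 0; i < cidrUpdateRetries; i++ {
	if err = nodeutil.PatchNodeCIDRs(r.client, types.NodeName(node.Name), cidrsString); err == nil {
		klog.Infof("Set node %v PodCIDR to %v", node.Name, cidrsString)
		return nil
	}
}
// Persistent failure: record it and leave the CIDRs marked as used.
// A controller restart returns any falsely allocated CIDRs to the pool.
klog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, cidrsString, err)
controllerutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
return err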