Remove CLI flag enable-taint-manager

Signed-off-by: kerthcet <kerthcet@gmail.com>
kerthcet 2022-09-14 17:04:08 +08:00 committed by Andrea Tosatto
parent 0e9ad242bd
commit e5c812bbe7
14 changed files with 61 additions and 388 deletions

View File

@@ -537,7 +537,6 @@ API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,N
 API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeIPAMControllerConfiguration,NodeCIDRMaskSizeIPv6
 API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeIPAMControllerConfiguration,SecondaryServiceCIDR
 API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeIPAMControllerConfiguration,ServiceCIDR
-API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeLifecycleControllerConfiguration,EnableTaintManager
 API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeLifecycleControllerConfiguration,LargeClusterSizeThreshold
 API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeLifecycleControllerConfiguration,NodeEvictionRate
 API rule violation: names_match,k8s.io/kube-controller-manager/config/v1alpha1,NodeLifecycleControllerConfiguration,NodeMonitorGracePeriod

View File

@@ -191,7 +191,6 @@ func startNodeLifecycleController(ctx context.Context, controllerContext Control
         controllerContext.ComponentConfig.NodeLifecycleController.SecondaryNodeEvictionRate,
         controllerContext.ComponentConfig.NodeLifecycleController.LargeClusterSizeThreshold,
         controllerContext.ComponentConfig.NodeLifecycleController.UnhealthyZoneThreshold,
-        controllerContext.ComponentConfig.NodeLifecycleController.EnableTaintManager,
     )
     if err != nil {
         return nil, true, err

View File

@@ -44,8 +44,6 @@ func (o *NodeLifecycleControllerOptions) AddFlags(fs *pflag.FlagSet) {
     fs.Float32Var(&o.SecondaryNodeEvictionRate, "secondary-node-eviction-rate", 0.01, "Number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters. This value is implicitly overridden to 0 if the cluster size is smaller than --large-cluster-size-threshold.")
     fs.Int32Var(&o.LargeClusterSizeThreshold, "large-cluster-size-threshold", 50, "Number of nodes from which NodeController treats the cluster as large for the eviction logic purposes. --secondary-node-eviction-rate is implicitly overridden to 0 for clusters this size or smaller.")
     fs.Float32Var(&o.UnhealthyZoneThreshold, "unhealthy-zone-threshold", 0.55, "Fraction of Nodes in a zone which needs to be not Ready (minimum 3) for zone to be treated as unhealthy. ")
-    fs.BoolVar(&o.EnableTaintManager, "enable-taint-manager", o.EnableTaintManager, "If set to true enables NoExecute Taints and will evict all not-tolerating Pod running on Nodes tainted with this kind of Taints.")
-    fs.MarkDeprecated("enable-taint-manager", "This flag is deprecated and it will be removed in 1.27. The taint-manager is enabled by default and will remain implicitly enabled once this flag is removed.")
     fs.MarkDeprecated("pod-eviction-timeout", "This flag is deprecated and it will be removed in 1.27. Once taint manager is enabled, this flag has no effect.")
 }
@@ -55,7 +53,6 @@ func (o *NodeLifecycleControllerOptions) ApplyTo(cfg *nodelifecycleconfig.NodeLi
         return nil
     }
-    cfg.EnableTaintManager = o.EnableTaintManager
     cfg.NodeStartupGracePeriod = o.NodeStartupGracePeriod
     cfg.NodeMonitorGracePeriod = o.NodeMonitorGracePeriod
     cfg.PodEvictionTimeout = o.PodEvictionTimeout

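For context on the MarkDeprecated calls above: a deprecated pflag flag still parses normally but prints a notice when it is used, which is how --pod-eviction-timeout keeps working through its deprecation window. A minimal, self-contained sketch of that behaviour (the default value here is illustrative, not taken from this file):

package main

import (
    "fmt"
    "os"
    "time"

    "github.com/spf13/pflag"
)

func main() {
    fs := pflag.NewFlagSet("kube-controller-manager", pflag.ContinueOnError)
    timeout := fs.Duration("pod-eviction-timeout", 5*time.Minute, "The grace period for deleting pods on failed nodes.")

    // Marking the flag deprecated keeps it functional; pflag prints the message when the flag is set.
    if err := fs.MarkDeprecated("pod-eviction-timeout", "This flag is deprecated and it will be removed in 1.27."); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }

    if err := fs.Parse([]string{"--pod-eviction-timeout=1m"}); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
    fmt.Println(*timeout) // 1m0s, plus a deprecation notice on stderr
}
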
View File

@@ -107,7 +107,6 @@ var args = []string{
     "--enable-dynamic-provisioning=false",
     "--enable-garbage-collector=false",
     "--enable-hostpath-provisioner=true",
-    "--enable-taint-manager=false",
     "--cluster-signing-duration=10h",
     "--flex-volume-plugin-dir=/flex-volume-plugin",
     "--volume-host-cidr-denylist=127.0.0.1/28,feed::/16",
@@ -344,7 +343,6 @@ func TestAddFlags(t *testing.T) {
         },
         NodeLifecycleController: &NodeLifecycleControllerOptions{
             &nodelifecycleconfig.NodeLifecycleControllerConfiguration{
-                EnableTaintManager: false,
                 NodeEvictionRate: 0.2,
                 SecondaryNodeEvictionRate: 0.05,
                 NodeMonitorGracePeriod: metav1.Duration{Duration: 30 * time.Second},
@@ -589,7 +587,6 @@ func TestApplyTo(t *testing.T) {
             NodeCIDRMaskSizeIPv6: 108,
         },
         NodeLifecycleController: nodelifecycleconfig.NodeLifecycleControllerConfiguration{
-            EnableTaintManager: false,
             NodeEvictionRate: 0.2,
             SecondaryNodeEvictionRate: 0.05,
             NodeMonitorGracePeriod: metav1.Duration{Duration: 30 * time.Second},
@@ -1164,7 +1161,6 @@ func TestValidateControllersOptions(t *testing.T) {
         expectErrors: false,
         validate: (&NodeLifecycleControllerOptions{
             &nodelifecycleconfig.NodeLifecycleControllerConfiguration{
-                EnableTaintManager: false,
                 NodeEvictionRate: 0.2,
                 SecondaryNodeEvictionRate: 0.05,
                 NodeMonitorGracePeriod: metav1.Duration{Duration: 30 * time.Second},

View File

@@ -22,9 +22,6 @@ import (
 // NodeLifecycleControllerConfiguration contains elements describing NodeLifecycleController.
 type NodeLifecycleControllerConfiguration struct {
-    // If set to true enables NoExecute Taints and will evict all not-tolerating
-    // Pod running on Nodes tainted with this kind of Taints.
-    EnableTaintManager bool
     // nodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is healthy
     NodeEvictionRate float32
     // secondaryNodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy

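The field removed above is what the flag toggled: NoExecute taints on unhealthy nodes, with the taint manager evicting pods that do not tolerate them. As a self-contained reminder of those API types, a sketch using k8s.io/api/core/v1 (the 300-second toleration is illustrative):

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/utils/pointer"
)

func main() {
    // The node lifecycle controller applies NoExecute taints such as
    // node.kubernetes.io/not-ready to unhealthy nodes.
    taint := v1.Taint{
        Key:    v1.TaintNodeNotReady,
        Effect: v1.TaintEffectNoExecute,
    }

    // A pod with this toleration may stay on such a node for 300 seconds
    // before being evicted; a pod without it is evicted right away.
    toleration := v1.Toleration{
        Key:               v1.TaintNodeNotReady,
        Operator:          v1.TolerationOpExists,
        Effect:            v1.TaintEffectNoExecute,
        TolerationSeconds: pointer.Int64(300),
    }

    fmt.Println(toleration.ToleratesTaint(&taint)) // true
}
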
View File

@@ -21,7 +21,6 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
-    "k8s.io/utils/pointer"
 )
 // RecommendedDefaultNodeLifecycleControllerConfiguration defaults a pointer to a
@@ -44,7 +43,4 @@ func RecommendedDefaultNodeLifecycleControllerConfiguration(obj *kubectrlmgrconf
     if obj.NodeStartupGracePeriod == zero {
         obj.NodeStartupGracePeriod = metav1.Duration{Duration: 60 * time.Second}
     }
-    if obj.EnableTaintManager == nil {
-        obj.EnableTaintManager = pointer.Bool(true)
-    }
 }

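The removed block above is the usual defaulting pattern for optional pointer fields in versioned component configs: nil means "unset", so the default is applied only when the user said nothing. A minimal sketch of the same pattern with a hypothetical field, not the real v1alpha1 type:

package main

import (
    "fmt"

    "k8s.io/utils/pointer"
)

// config is an illustrative stand-in for a versioned configuration struct.
type config struct {
    EnableFeature *bool
}

// setDefaults only fills the field when it was left nil (i.e. unset by the user).
func setDefaults(c *config) {
    if c.EnableFeature == nil {
        c.EnableFeature = pointer.Bool(true)
    }
}

func main() {
    c := &config{}
    setDefaults(c)
    fmt.Println(*c.EnableFeature) // true
}
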
View File

@@ -82,9 +82,6 @@ func Convert_v1_GroupResource_To_v1alpha1_GroupResource(in *v1.GroupResource, ou
 }
 func autoConvert_v1alpha1_NodeLifecycleControllerConfiguration_To_config_NodeLifecycleControllerConfiguration(in *v1alpha1.NodeLifecycleControllerConfiguration, out *config.NodeLifecycleControllerConfiguration, s conversion.Scope) error {
-    if err := v1.Convert_Pointer_bool_To_bool(&in.EnableTaintManager, &out.EnableTaintManager, s); err != nil {
-        return err
-    }
     out.NodeEvictionRate = in.NodeEvictionRate
     out.SecondaryNodeEvictionRate = in.SecondaryNodeEvictionRate
     out.NodeStartupGracePeriod = in.NodeStartupGracePeriod
@@ -96,9 +93,6 @@ func autoConvert_v1alpha1_NodeLifecycleControllerConfiguration_To_config_NodeLif
 }
 func autoConvert_config_NodeLifecycleControllerConfiguration_To_v1alpha1_NodeLifecycleControllerConfiguration(in *config.NodeLifecycleControllerConfiguration, out *v1alpha1.NodeLifecycleControllerConfiguration, s conversion.Scope) error {
-    if err := v1.Convert_bool_To_Pointer_bool(&in.EnableTaintManager, &out.EnableTaintManager, s); err != nil {
-        return err
-    }
     out.NodeEvictionRate = in.NodeEvictionRate
     out.SecondaryNodeEvictionRate = in.SecondaryNodeEvictionRate
     out.NodeStartupGracePeriod = in.NodeStartupGracePeriod

View File

@@ -205,57 +205,6 @@ type podUpdateItem struct {
     name string
 }
-type evictionStatus int
-const (
-    unmarked = iota
-    toBeEvicted
-    evicted
-)
-// nodeEvictionMap stores evictionStatus data for each node.
-type nodeEvictionMap struct {
-    lock sync.Mutex
-    nodeEvictions map[string]evictionStatus
-}
-func newNodeEvictionMap() *nodeEvictionMap {
-    return &nodeEvictionMap{
-        nodeEvictions: make(map[string]evictionStatus),
-    }
-}
-func (n *nodeEvictionMap) registerNode(nodeName string) {
-    n.lock.Lock()
-    defer n.lock.Unlock()
-    n.nodeEvictions[nodeName] = unmarked
-}
-func (n *nodeEvictionMap) unregisterNode(nodeName string) {
-    n.lock.Lock()
-    defer n.lock.Unlock()
-    delete(n.nodeEvictions, nodeName)
-}
-func (n *nodeEvictionMap) setStatus(nodeName string, status evictionStatus) bool {
-    n.lock.Lock()
-    defer n.lock.Unlock()
-    if _, exists := n.nodeEvictions[nodeName]; !exists {
-        return false
-    }
-    n.nodeEvictions[nodeName] = status
-    return true
-}
-func (n *nodeEvictionMap) getStatus(nodeName string) (evictionStatus, bool) {
-    n.lock.Lock()
-    defer n.lock.Unlock()
-    if _, exists := n.nodeEvictions[nodeName]; !exists {
-        return unmarked, false
-    }
-    return n.nodeEvictions[nodeName], true
-}
 // Controller is the controller that manages node's life cycle.
 type Controller struct {
     taintManager *scheduler.NoExecuteTaintManager
@@ -277,10 +226,7 @@ type Controller struct {
     nodeHealthMap *nodeHealthMap
     // evictorLock protects zonePodEvictor and zoneNoExecuteTainter.
     evictorLock sync.Mutex
-    nodeEvictionMap *nodeEvictionMap
-    // workers that evicts pods from unresponsive nodes.
-    zonePodEvictor map[string]*scheduler.RateLimitedTimedQueue
     // workers that are responsible for tainting nodes.
     zoneNoExecuteTainter map[string]*scheduler.RateLimitedTimedQueue
@@ -342,10 +288,6 @@ type Controller struct {
     largeClusterThreshold int32
     unhealthyZoneThreshold float32
-    // if set to true Controller will start TaintManager that will evict Pods from
-    // tainted nodes, if they're not tolerated.
-    runTaintManager bool
     nodeUpdateQueue workqueue.Interface
     podUpdateQueue workqueue.RateLimitingInterface
 }
@@ -366,7 +308,6 @@ func NewNodeLifecycleController(
     secondaryEvictionLimiterQPS float32,
     largeClusterThreshold int32,
     unhealthyZoneThreshold float32,
-    runTaintManager bool,
 ) (*Controller, error) {
     logger := klog.LoggerWithName(klog.FromContext(ctx), "NodeLifecycleController")
     if kubeClient == nil {
@@ -382,14 +323,12 @@ func NewNodeLifecycleController(
         now: metav1.Now,
         knownNodeSet: make(map[string]*v1.Node),
         nodeHealthMap: newNodeHealthMap(),
-        nodeEvictionMap: newNodeEvictionMap(),
         broadcaster: eventBroadcaster,
         recorder: recorder,
         nodeMonitorPeriod: nodeMonitorPeriod,
         nodeStartupGracePeriod: nodeStartupGracePeriod,
         nodeMonitorGracePeriod: nodeMonitorGracePeriod,
         nodeUpdateWorkerSize: scheduler.UpdateWorkerSize,
-        zonePodEvictor: make(map[string]*scheduler.RateLimitedTimedQueue),
         zoneNoExecuteTainter: make(map[string]*scheduler.RateLimitedTimedQueue),
         nodesToRetry: sync.Map{},
         zoneStates: make(map[string]ZoneState),
@@ -398,7 +337,6 @@ func NewNodeLifecycleController(
         secondaryEvictionLimiterQPS: secondaryEvictionLimiterQPS,
         largeClusterThreshold: largeClusterThreshold,
         unhealthyZoneThreshold: unhealthyZoneThreshold,
-        runTaintManager: runTaintManager,
         nodeUpdateQueue: workqueue.NewNamed("node_lifecycle_controller"),
         podUpdateQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "node_lifecycle_controller_pods"),
     }
@@ -477,29 +415,26 @@ func NewNodeLifecycleController(
     nc.podLister = podInformer.Lister()
     nc.nodeLister = nodeInformer.Lister()
-    if nc.runTaintManager {
-        nc.taintManager = scheduler.NewNoExecuteTaintManager(ctx, kubeClient, nc.podLister, nc.nodeLister, nc.getPodsAssignedToNode)
-        nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-            AddFunc: controllerutil.CreateAddNodeHandler(func(node *v1.Node) error {
-                nc.taintManager.NodeUpdated(nil, node)
-                return nil
-            }),
-            UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(oldNode, newNode *v1.Node) error {
-                nc.taintManager.NodeUpdated(oldNode, newNode)
-                return nil
-            }),
-            DeleteFunc: controllerutil.CreateDeleteNodeHandler(func(node *v1.Node) error {
-                nc.taintManager.NodeUpdated(node, nil)
-                return nil
-            }),
-        })
-    }
+    nc.taintManager = scheduler.NewNoExecuteTaintManager(ctx, kubeClient, nc.podLister, nc.nodeLister, nc.getPodsAssignedToNode)
+    nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+        AddFunc: controllerutil.CreateAddNodeHandler(func(node *v1.Node) error {
+            nc.taintManager.NodeUpdated(nil, node)
+            return nil
+        }),
+        UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(oldNode, newNode *v1.Node) error {
+            nc.taintManager.NodeUpdated(oldNode, newNode)
+            return nil
+        }),
+        DeleteFunc: controllerutil.CreateDeleteNodeHandler(func(node *v1.Node) error {
+            nc.taintManager.NodeUpdated(node, nil)
+            return nil
+        }),
+    })
     logger.Info("Controller will reconcile labels")
     nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
         AddFunc: controllerutil.CreateAddNodeHandler(func(node *v1.Node) error {
             nc.nodeUpdateQueue.Add(node.Name)
-            nc.nodeEvictionMap.registerNode(node.Name)
             return nil
         }),
         UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
@@ -508,7 +443,6 @@ func NewNodeLifecycleController(
         }),
         DeleteFunc: controllerutil.CreateDeleteNodeHandler(func(node *v1.Node) error {
             nc.nodesToRetry.Delete(node.Name)
-            nc.nodeEvictionMap.unregisterNode(node.Name)
             return nil
         }),
     })
@@ -549,9 +483,7 @@ func (nc *Controller) Run(ctx context.Context) {
         return
     }
-    if nc.runTaintManager {
-        go nc.taintManager.Run(ctx)
-    }
+    go nc.taintManager.Run(ctx)
     // Start workers to reconcile labels and/or update NoSchedule taint for nodes.
     for i := 0; i < scheduler.UpdateWorkerSize; i++ {
@@ -566,16 +498,9 @@ func (nc *Controller) Run(ctx context.Context) {
         go wait.UntilWithContext(ctx, nc.doPodProcessingWorker, time.Second)
     }
-    if nc.runTaintManager {
-        // Handling taint based evictions. Because we don't want a dedicated logic in TaintManager for NC-originated
-        // taints and we normally don't rate limit evictions caused by taints, we need to rate limit adding taints.
-        go wait.UntilWithContext(ctx, nc.doNoExecuteTaintingPass, scheduler.NodeEvictionPeriod)
-    } else {
-        // Managing eviction of nodes:
-        // When we delete pods off a node, if the node was not empty at the time we then
-        // queue an eviction watcher. If we hit an error, retry deletion.
-        go wait.UntilWithContext(ctx, nc.doEvictionPass, scheduler.NodeEvictionPeriod)
-    }
+    // Handling taint based evictions. Because we don't want a dedicated logic in TaintManager for NC-originated
+    // taints and we normally don't rate limit evictions caused by taints, we need to rate limit adding taints.
+    go wait.UntilWithContext(ctx, nc.doNoExecuteTaintingPass, scheduler.NodeEvictionPeriod)
     // Incorporate the results of node health signal pushed from kubelet to master.
     go wait.UntilWithContext(ctx, func(ctx context.Context) {
@@ -732,73 +657,6 @@ func (nc *Controller) doNoExecuteTaintingPass(ctx context.Context) {
     }
 }
-func (nc *Controller) doEvictionPass(ctx context.Context) {
-    // Extract out the keys of the map in order to not hold
-    // the evictorLock for the entire function and hold it
-    // only when nescessary.
-    var zonePodEvictorKeys []string
-    func() {
-        nc.evictorLock.Lock()
-        defer nc.evictorLock.Unlock()
-        zonePodEvictorKeys = make([]string, 0, len(nc.zonePodEvictor))
-        for k := range nc.zonePodEvictor {
-            zonePodEvictorKeys = append(zonePodEvictorKeys, k)
-        }
-    }()
-    logger := klog.FromContext(ctx)
-    for _, k := range zonePodEvictorKeys {
-        var zonePodEvictionWorker *scheduler.RateLimitedTimedQueue
-        func() {
-            nc.evictorLock.Lock()
-            defer nc.evictorLock.Unlock()
-            // Extracting the value without checking if the key
-            // exists or not is safe to do here since zones do
-            // not get removed, and consequently pod evictors for
-            // these zones also do not get removed, only added.
-            zonePodEvictionWorker = nc.zonePodEvictor[k]
-        }()
-        // Function should return 'false' and a time after which it should be retried, or 'true' if it shouldn't (it succeeded).
-        zonePodEvictionWorker.Try(logger, func(value scheduler.TimedValue) (bool, time.Duration) {
-            node, err := nc.nodeLister.Get(value.Value)
-            if apierrors.IsNotFound(err) {
-                logger.Info("Node no longer present in nodeLister", "node", klog.KRef("", value.Value))
-            } else if err != nil {
-                logger.Info("Failed to get Node from the nodeLister", "node", klog.KRef("", value.Value), "err", err)
-            }
-            nodeUID, _ := value.UID.(string)
-            pods, err := nc.getPodsAssignedToNode(value.Value)
-            if err != nil {
-                utilruntime.HandleError(fmt.Errorf("unable to list pods from node %q: %v", value.Value, err))
-                return false, 0
-            }
-            remaining, err := controllerutil.DeletePods(ctx, nc.kubeClient, pods, nc.recorder, value.Value, nodeUID, nc.daemonSetStore)
-            if err != nil {
-                // We are not setting eviction status here.
-                // New pods will be handled by zonePodEvictor retry
-                // instead of immediate pod eviction.
-                utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err))
-                return false, 0
-            }
-            if !nc.nodeEvictionMap.setStatus(value.Value, evicted) {
-                logger.V(2).Info("Node was unregistered in the meantime - skipping setting status", "node", klog.KRef("", value.Value))
-            }
-            if remaining {
-                logger.Info("Pods awaiting deletion due to Controller eviction")
-            }
-            if node != nil {
-                zone := nodetopology.GetZoneKey(node)
-                evictionsNumber.WithLabelValues(zone).Inc()
-                evictionsTotal.WithLabelValues(zone).Inc()
-            }
-            return true, 0
-        })
-    }
-}
 // monitorNodeHealth verifies node health are constantly updated by kubelet, and
 // if not, post "NodeReady==ConditionUnknown".
 // This function will taint nodes who are not ready or not reachable for a long period of time.
@@ -824,11 +682,7 @@ func (nc *Controller) monitorNodeHealth(ctx context.Context) error {
         controllerutil.RecordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), v1.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in Controller", added[i].Name))
         nc.knownNodeSet[added[i].Name] = added[i]
         nc.addPodEvictorForNewZone(logger, added[i])
-        if nc.runTaintManager {
-            nc.markNodeAsReachable(ctx, added[i])
-        } else {
-            nc.cancelPodEviction(logger, added[i])
-        }
+        nc.markNodeAsReachable(ctx, added[i])
     }
     for i := range deleted {
@@ -845,14 +699,13 @@ func (nc *Controller) monitorNodeHealth(ctx context.Context) error {
         updateNodeHealthDuration.Observe(time.Since(start.Time).Seconds())
     }()
-    var gracePeriod time.Duration
     var observedReadyCondition v1.NodeCondition
     var currentReadyCondition *v1.NodeCondition
     node := nodes[piece].DeepCopy()
     if err := wait.PollImmediate(retrySleepTime, retrySleepTime*scheduler.NodeHealthUpdateRetry, func() (bool, error) {
         var err error
-        gracePeriod, observedReadyCondition, currentReadyCondition, err = nc.tryUpdateNodeHealth(ctx, node)
+        _, observedReadyCondition, currentReadyCondition, err = nc.tryUpdateNodeHealth(ctx, node)
         if err == nil {
             return true, nil
         }
@@ -887,13 +740,7 @@ func (nc *Controller) monitorNodeHealth(ctx context.Context) error {
         }
         return
     }
-    if nc.runTaintManager {
-        nc.processTaintBaseEviction(ctx, node, &observedReadyCondition)
-    } else {
-        if err := nc.processNoTaintBaseEviction(ctx, node, &observedReadyCondition, gracePeriod, pods); err != nil {
-            utilruntime.HandleError(fmt.Errorf("unable to evict all pods from node %v: %v; queuing for retry", node.Name, err))
-        }
-    }
+    nc.processTaintBaseEviction(ctx, node, &observedReadyCondition)
     _, needsRetry := nc.nodesToRetry.Load(node.Name)
     switch {
@@ -960,53 +807,6 @@ func (nc *Controller) processTaintBaseEviction(ctx context.Context, node *v1.Nod
     }
 }
-func (nc *Controller) processNoTaintBaseEviction(ctx context.Context, node *v1.Node, observedReadyCondition *v1.NodeCondition, gracePeriod time.Duration, pods []*v1.Pod) error {
-    decisionTimestamp := nc.now()
-    nodeHealthData := nc.nodeHealthMap.getDeepCopy(node.Name)
-    if nodeHealthData == nil {
-        return fmt.Errorf("health data doesn't exist for node %q", node.Name)
-    }
-    // Check eviction timeout against decisionTimestamp
-    logger := klog.FromContext(ctx)
-    switch observedReadyCondition.Status {
-    case v1.ConditionFalse:
-        if decisionTimestamp.After(nodeHealthData.readyTransitionTimestamp.Add(nc.podEvictionTimeout)) {
-            enqueued, err := nc.evictPods(ctx, node, pods)
-            if err != nil {
-                return err
-            }
-            if enqueued {
-                logger.V(2).Info("Node is NotReady. Adding Pods on Node to eviction queue: decisionTimestamp is later than readyTransitionTimestamp + podEvictionTimeout",
-                    "node", klog.KObj(node),
-                    "decisionTimestamp", decisionTimestamp,
-                    "readyTransitionTimestamp", nodeHealthData.readyTransitionTimestamp,
-                    "podEvictionTimeout", nc.podEvictionTimeout,
-                )
-            }
-        }
-    case v1.ConditionUnknown:
-        if decisionTimestamp.After(nodeHealthData.probeTimestamp.Add(nc.podEvictionTimeout)) {
-            enqueued, err := nc.evictPods(ctx, node, pods)
-            if err != nil {
-                return err
-            }
-            if enqueued {
-                logger.V(2).Info("Node is unresponsive. Adding Pods on Node to eviction queues: decisionTimestamp is later than readyTransitionTimestamp + podEvictionTimeout-gracePeriod",
-                    "node", klog.KObj(node),
-                    "decisionTimestamp", decisionTimestamp,
-                    "readyTransitionTimestamp", nodeHealthData.readyTransitionTimestamp,
-                    "podEvictionTimeoutGracePeriod", nc.podEvictionTimeout-gracePeriod,
-                )
-            }
-        }
-    case v1.ConditionTrue:
-        if nc.cancelPodEviction(logger, node) {
-            logger.V(2).Info("Node is ready again, cancelled pod eviction", "node", klog.KObj(node))
-        }
-    }
-    return nil
-}
 // labelNodeDisruptionExclusion is a label on nodes that controls whether they are
 // excluded from being considered for disruption checks by the node controller.
 const labelNodeDisruptionExclusion = "node.kubernetes.io/exclude-disruption"
@@ -1230,22 +1030,14 @@ func (nc *Controller) handleDisruption(ctx context.Context, zoneToNodeConditions
     if allAreFullyDisrupted {
         logger.Info("Controller detected that all Nodes are not-Ready. Entering master disruption mode")
         for i := range nodes {
-            if nc.runTaintManager {
-                _, err := nc.markNodeAsReachable(ctx, nodes[i])
-                if err != nil {
-                    logger.Error(nil, "Failed to remove taints from Node", "node", klog.KObj(nodes[i]))
-                }
-            } else {
-                nc.cancelPodEviction(logger, nodes[i])
-            }
+            _, err := nc.markNodeAsReachable(ctx, nodes[i])
+            if err != nil {
+                logger.Error(nil, "Failed to remove taints from Node", "node", klog.KObj(nodes[i]))
+            }
         }
         // We stop all evictions.
         for k := range nc.zoneStates {
-            if nc.runTaintManager {
-                nc.zoneNoExecuteTainter[k].SwapLimiter(0)
-            } else {
-                nc.zonePodEvictor[k].SwapLimiter(0)
-            }
+            nc.zoneNoExecuteTainter[k].SwapLimiter(0)
         }
         for k := range nc.zoneStates {
             nc.zoneStates[k] = stateFullDisruption
@@ -1312,7 +1104,7 @@ func (nc *Controller) doPodProcessingWorker(ctx context.Context) {
 // processPod is processing events of assigning pods to nodes. In particular:
 // 1. for NodeReady=true node, taint eviction for this pod will be cancelled
 // 2. for NodeReady=false or unknown node, taint eviction of pod will happen and pod will be marked as not ready
-// 3. if node doesn't exist in cache, it will be skipped and handled later by doEvictionPass
+// 3. if node doesn't exist in cache, it will be skipped.
 func (nc *Controller) processPod(ctx context.Context, podItem podUpdateItem) {
     defer nc.podUpdateQueue.Done(podItem)
     pod, err := nc.podLister.Pods(podItem.namespace).Get(podItem.name)
@@ -1331,12 +1123,11 @@ func (nc *Controller) processPod(ctx context.Context, podItem podUpdateItem) {
     nodeHealth := nc.nodeHealthMap.getDeepCopy(nodeName)
     if nodeHealth == nil {
-        // Node data is not gathered yet or node has beed removed in the meantime.
-        // Pod will be handled by doEvictionPass method.
+        // Node data is not gathered yet or node has been removed in the meantime.
         return
     }
-    node, err := nc.nodeLister.Get(nodeName)
+    _, err = nc.nodeLister.Get(nodeName)
     if err != nil {
         logger.Info("Failed to read node", "node", klog.KRef("", nodeName), "err", err)
         nc.podUpdateQueue.AddRateLimited(podItem)
@@ -1352,16 +1143,6 @@ func (nc *Controller) processPod(ctx context.Context, podItem podUpdateItem) {
     }
     pods := []*v1.Pod{pod}
-    // In taint-based eviction mode, only node updates are processed by NodeLifecycleController.
-    // Pods are processed by TaintManager.
-    if !nc.runTaintManager {
-        if err := nc.processNoTaintBaseEviction(ctx, node, currentReadyCondition, nc.nodeMonitorGracePeriod, pods); err != nil {
-            logger.Info("Unable to process pod eviction from node", "pod", klog.KRef(podItem.namespace, podItem.name), "node", klog.KRef("", nodeName), "err", err)
-            nc.podUpdateQueue.AddRateLimited(podItem)
-            return
-        }
-    }
     if currentReadyCondition.Status != v1.ConditionTrue {
         if err := controllerutil.MarkPodsNotReady(ctx, nc.kubeClient, nc.recorder, pods, nodeName); err != nil {
             logger.Info("Unable to mark pod NotReady on node", "pod", klog.KRef(podItem.namespace, podItem.name), "node", klog.KRef("", nodeName), "err", err)
@@ -1373,27 +1154,13 @@ func (nc *Controller) processPod(ctx context.Context, podItem podUpdateItem) {
 func (nc *Controller) setLimiterInZone(zone string, zoneSize int, state ZoneState) {
     switch state {
     case stateNormal:
-        if nc.runTaintManager {
-            nc.zoneNoExecuteTainter[zone].SwapLimiter(nc.evictionLimiterQPS)
-        } else {
-            nc.zonePodEvictor[zone].SwapLimiter(nc.evictionLimiterQPS)
-        }
+        nc.zoneNoExecuteTainter[zone].SwapLimiter(nc.evictionLimiterQPS)
     case statePartialDisruption:
-        if nc.runTaintManager {
-            nc.zoneNoExecuteTainter[zone].SwapLimiter(
-                nc.enterPartialDisruptionFunc(zoneSize))
-        } else {
-            nc.zonePodEvictor[zone].SwapLimiter(
-                nc.enterPartialDisruptionFunc(zoneSize))
-        }
+        nc.zoneNoExecuteTainter[zone].SwapLimiter(
+            nc.enterPartialDisruptionFunc(zoneSize))
     case stateFullDisruption:
-        if nc.runTaintManager {
-            nc.zoneNoExecuteTainter[zone].SwapLimiter(
-                nc.enterFullDisruptionFunc(zoneSize))
-        } else {
-            nc.zonePodEvictor[zone].SwapLimiter(
-                nc.enterFullDisruptionFunc(zoneSize))
-        }
+        nc.zoneNoExecuteTainter[zone].SwapLimiter(
+            nc.enterFullDisruptionFunc(zoneSize))
     }
 }
@@ -1453,15 +1220,9 @@ func (nc *Controller) addPodEvictorForNewZone(logger klog.Logger, node *v1.Node)
     zone := nodetopology.GetZoneKey(node)
     if _, found := nc.zoneStates[zone]; !found {
         nc.zoneStates[zone] = stateInitial
-        if !nc.runTaintManager {
-            nc.zonePodEvictor[zone] =
-                scheduler.NewRateLimitedTimedQueue(
-                    flowcontrol.NewTokenBucketRateLimiter(nc.evictionLimiterQPS, scheduler.EvictionRateLimiterBurst))
-        } else {
-            nc.zoneNoExecuteTainter[zone] =
-                scheduler.NewRateLimitedTimedQueue(
-                    flowcontrol.NewTokenBucketRateLimiter(nc.evictionLimiterQPS, scheduler.EvictionRateLimiterBurst))
-        }
+        nc.zoneNoExecuteTainter[zone] =
+            scheduler.NewRateLimitedTimedQueue(
+                flowcontrol.NewTokenBucketRateLimiter(nc.evictionLimiterQPS, scheduler.EvictionRateLimiterBurst))
         // Init the metric for the new zone.
         logger.Info("Initializing eviction metric for zone", "zone", zone)
         evictionsNumber.WithLabelValues(zone).Add(0)
@@ -1469,50 +1230,6 @@ func (nc *Controller) addPodEvictorForNewZone(logger klog.Logger, node *v1.Node)
     }
 }
-// cancelPodEviction removes any queued evictions, typically because the node is available again. It
-// returns true if an eviction was queued.
-func (nc *Controller) cancelPodEviction(logger klog.Logger, node *v1.Node) bool {
-    zone := nodetopology.GetZoneKey(node)
-    if !nc.nodeEvictionMap.setStatus(node.Name, unmarked) {
-        logger.V(2).Info("Node was unregistered in the meantime - skipping setting status", "node", klog.KObj(node))
-    }
-    nc.evictorLock.Lock()
-    defer nc.evictorLock.Unlock()
-    wasDeleting := nc.zonePodEvictor[zone].Remove(node.Name)
-    if wasDeleting {
-        logger.V(2).Info("Cancelling pod Eviction on Node", "node", klog.KObj(node))
-        return true
-    }
-    return false
-}
-// evictPods:
-// - adds node to evictor queue if the node is not marked as evicted.
-//   Returns false if the node name was already enqueued.
-// - deletes pods immediately if node is already marked as evicted.
-//   Returns false, because the node wasn't added to the queue.
-func (nc *Controller) evictPods(ctx context.Context, node *v1.Node, pods []*v1.Pod) (bool, error) {
-    status, ok := nc.nodeEvictionMap.getStatus(node.Name)
-    if ok && status == evicted {
-        // Node eviction already happened for this node.
-        // Handling immediate pod deletion.
-        _, err := controllerutil.DeletePods(ctx, nc.kubeClient, pods, nc.recorder, node.Name, string(node.UID), nc.daemonSetStore)
-        if err != nil {
-            return false, fmt.Errorf("unable to delete pods from node %q: %v", node.Name, err)
-        }
-        return false, nil
-    }
-    logger := klog.FromContext(ctx)
-    if !nc.nodeEvictionMap.setStatus(node.Name, toBeEvicted) {
-        logger.V(2).Info("Node was unregistered in the meantime - skipping setting status", "node", klog.KObj(node))
-    }
-    nc.evictorLock.Lock()
-    defer nc.evictorLock.Unlock()
-    return nc.zonePodEvictor[nodetopology.GetZoneKey(node)].Add(node.Name, string(node.UID)), nil
-}
 func (nc *Controller) markNodeForTainting(node *v1.Node, status v1.ConditionStatus) bool {
     nc.evictorLock.Lock()
     defer nc.evictorLock.Unlock()

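The zoneNoExecuteTainter queues that remain above are fed by a client-go token-bucket rate limiter, and handleDisruption pauses tainting by swapping the limiter to 0 QPS. A minimal sketch of how that limiter behaves (QPS and burst values illustrative):

package main

import (
    "fmt"

    "k8s.io/client-go/util/flowcontrol"
)

func main() {
    // A token-bucket limiter with 0.1 QPS and a burst of 1: roughly one
    // permitted action every ten seconds once the initial burst is spent.
    limiter := flowcontrol.NewTokenBucketRateLimiter(0.1, 1)

    fmt.Println(limiter.TryAccept()) // true: the burst token is available
    fmt.Println(limiter.TryAccept()) // false: the next token arrives only after ~10s
}
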
View File

@@ -95,11 +95,10 @@ func (nc *nodeLifecycleController) doEviction(logger klog.Logger, fakeNodeHandle
     defer nc.evictorLock.Unlock()
     zones := testutil.GetZones(fakeNodeHandler)
     for _, zone := range zones {
-        nc.zonePodEvictor[zone].Try(logger, func(value scheduler.TimedValue) (bool, time.Duration) {
+        nc.zoneNoExecuteTainter[zone].Try(logger, func(value scheduler.TimedValue) (bool, time.Duration) {
             uid, _ := value.UID.(string)
             pods, _ := nc.getPodsAssignedToNode(value.Value)
             controllerutil.DeletePods(context.TODO(), fakeNodeHandler, pods, nc.recorder, value.Value, uid, nc.daemonSetStore)
-            _ = nc.nodeEvictionMap.setStatus(value.Value, evicted)
             return true, 0
         })
     }
@@ -157,7 +156,6 @@ func newNodeLifecycleControllerFromClient(
     nodeMonitorGracePeriod time.Duration,
     nodeStartupGracePeriod time.Duration,
     nodeMonitorPeriod time.Duration,
-    useTaints bool,
 ) (*nodeLifecycleController, error) {
     factory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc())
@@ -181,7 +179,6 @@ func newNodeLifecycleControllerFromClient(
         secondaryEvictionLimiterQPS,
         largeClusterThreshold,
         unhealthyZoneThreshold,
-        useTaints,
     )
     if err != nil {
         return nil, err
@@ -378,7 +375,7 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
         },
     },
     secondNodeNewStatus: healthyNodeNewStatus,
-    expectedEvictPods: false,
+    expectedEvictPods: true,
     description: "Node created long time ago, and kubelet posted NotReady for a short period of time.",
 },
 // Pod is ds-managed, and kubelet posted NotReady for a long period of time.
@@ -609,7 +606,7 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
         },
     },
     secondNodeNewStatus: healthyNodeNewStatus,
-    expectedEvictPods: false,
+    expectedEvictPods: true,
     description: "Node created long time ago, node controller posted Unknown for a short period of time.",
 },
 // Node created long time ago, node controller posted Unknown for a long period of time.
@@ -694,7 +691,7 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
         testNodeMonitorGracePeriod,
         testNodeStartupGracePeriod,
         testNodeMonitorPeriod,
-        false)
+    )
     nodeController.now = func() metav1.Time { return fakeNow }
     nodeController.recorder = testutil.NewFakeRecorder()
     nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(item.fakeNodeHandler.Clientset)
@@ -725,8 +722,8 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
     zones := testutil.GetZones(item.fakeNodeHandler)
     logger, _ := ktesting.NewTestContext(t)
     for _, zone := range zones {
-        if _, ok := nodeController.zonePodEvictor[zone]; ok {
-            nodeController.zonePodEvictor[zone].Try(logger, func(value scheduler.TimedValue) (bool, time.Duration) {
+        if _, ok := nodeController.zoneNoExecuteTainter[zone]; ok {
+            nodeController.zoneNoExecuteTainter[zone].Try(logger, func(value scheduler.TimedValue) (bool, time.Duration) {
                 nodeUID, _ := value.UID.(string)
                 pods, err := nodeController.getPodsAssignedToNode(value.Value)
                 if err != nil {
@@ -864,7 +861,7 @@ func TestPodStatusChange(t *testing.T) {
         testNodeMonitorGracePeriod,
         testNodeStartupGracePeriod,
         testNodeMonitorPeriod,
-        false)
+    )
     nodeController.now = func() metav1.Time { return fakeNow }
     nodeController.recorder = testutil.NewFakeRecorder()
     nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(item.fakeNodeHandler.Clientset)
@@ -888,7 +885,7 @@ func TestPodStatusChange(t *testing.T) {
     zones := testutil.GetZones(item.fakeNodeHandler)
    logger, _ := ktesting.NewTestContext(t)
     for _, zone := range zones {
-        nodeController.zonePodEvictor[zone].Try(logger, func(value scheduler.TimedValue) (bool, time.Duration) {
+        nodeController.zoneNoExecuteTainter[zone].Try(logger, func(value scheduler.TimedValue) (bool, time.Duration) {
             nodeUID, _ := value.UID.(string)
             pods, err := nodeController.getPodsAssignedToNode(value.Value)
             if err != nil {
@@ -1427,7 +1424,7 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
     testNodeMonitorGracePeriod,
     testNodeStartupGracePeriod,
     testNodeMonitorPeriod,
-    false)
+)
 nodeController.now = func() metav1.Time { return fakeNow }
 nodeController.recorder = testutil.NewFakeRecorder()
 nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(fakeNodeHandler.Clientset)
@@ -1715,7 +1712,7 @@ func TestMonitorNodeHealthUpdateStatus(t *testing.T) {
     testNodeMonitorGracePeriod,
     testNodeStartupGracePeriod,
     testNodeMonitorPeriod,
-    false)
+)
 nodeController.now = func() metav1.Time { return fakeNow }
 nodeController.recorder = testutil.NewFakeRecorder()
 nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(item.fakeNodeHandler.Clientset)
@@ -2259,7 +2256,7 @@ func TestMonitorNodeHealthUpdateNodeAndPodStatusWithLease(t *testing.T) {
     testNodeMonitorGracePeriod,
     testNodeStartupGracePeriod,
     testNodeMonitorPeriod,
-    false)
+)
 nodeController.now = func() metav1.Time { return fakeNow }
 nodeController.recorder = testutil.NewFakeRecorder()
 nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(item.fakeNodeHandler.Clientset)
@@ -2424,7 +2421,7 @@ func TestMonitorNodeHealthMarkPodsNotReady(t *testing.T) {
     testNodeMonitorGracePeriod,
     testNodeStartupGracePeriod,
     testNodeMonitorPeriod,
-    false)
+)
 nodeController.now = func() metav1.Time { return fakeNow }
 nodeController.recorder = testutil.NewFakeRecorder()
 nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(item.fakeNodeHandler.Clientset)
@@ -2525,8 +2522,7 @@ func TestMonitorNodeHealthMarkPodsNotReadyWithWorkerSize(t *testing.T) {
     testUnhealthyThreshold,
     testNodeMonitorGracePeriod,
     testNodeStartupGracePeriod,
-    testNodeMonitorPeriod,
-    false)
+    testNodeMonitorPeriod)
 nodeController.now = func() metav1.Time { return fakeNow }
 nodeController.recorder = testutil.NewFakeRecorder()
 nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(fakeNodeHandler.Clientset)
@@ -2730,7 +2726,7 @@ func TestMonitorNodeHealthMarkPodsNotReadyRetry(t *testing.T) {
     testNodeMonitorGracePeriod,
     testNodeStartupGracePeriod,
     testNodeMonitorPeriod,
-    false)
+)
 if item.updateReactor != nil {
     item.fakeNodeHandler.Clientset.PrependReactor("update", "pods", item.updateReactor)
 }
@@ -2869,7 +2865,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
     testNodeMonitorGracePeriod,
     testNodeStartupGracePeriod,
     testNodeMonitorPeriod,
-    true)
+)
 nodeController.now = func() metav1.Time { return fakeNow }
 nodeController.recorder = testutil.NewFakeRecorder()
 nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(fakeNodeHandler.Clientset)
@@ -3024,7 +3020,7 @@ func TestApplyNoExecuteTaintsToNodesEnqueueTwice(t *testing.T) {
     testNodeMonitorGracePeriod,
     testNodeStartupGracePeriod,
     testNodeMonitorPeriod,
-    true)
+)
 nodeController.now = func() metav1.Time { return fakeNow }
 nodeController.recorder = testutil.NewFakeRecorder()
 nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(fakeNodeHandler.Clientset)
@@ -3249,7 +3245,7 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
     testNodeMonitorGracePeriod,
     testNodeStartupGracePeriod,
     testNodeMonitorPeriod,
-    true)
+)
 nodeController.now = func() metav1.Time { return fakeNow }
 nodeController.recorder = testutil.NewFakeRecorder()
 nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(fakeNodeHandler.Clientset)
@@ -3354,7 +3350,7 @@ func TestTaintsNodeByCondition(t *testing.T) {
     testNodeMonitorGracePeriod,
     testNodeStartupGracePeriod,
     testNodeMonitorPeriod,
-    true)
+)
 nodeController.now = func() metav1.Time { return fakeNow }
 nodeController.recorder = testutil.NewFakeRecorder()
 nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(fakeNodeHandler.Clientset)
@@ -3557,7 +3553,7 @@ func TestNodeEventGeneration(t *testing.T) {
     testNodeMonitorGracePeriod,
     testNodeStartupGracePeriod,
     testNodeMonitorPeriod,
-    false)
+)
 nodeController.now = func() metav1.Time { return fakeNow }
 fakeRecorder := testutil.NewFakeRecorder()
 nodeController.recorder = fakeRecorder
@@ -3631,7 +3627,7 @@ func TestReconcileNodeLabels(t *testing.T) {
     testNodeMonitorGracePeriod,
     testNodeStartupGracePeriod,
     testNodeMonitorPeriod,
-    true)
+)
 nodeController.now = func() metav1.Time { return fakeNow }
 nodeController.recorder = testutil.NewFakeRecorder()
 nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(fakeNodeHandler.Clientset)
@@ -3775,7 +3771,7 @@ func TestTryUpdateNodeHealth(t *testing.T) {
     testNodeMonitorGracePeriod,
     testNodeStartupGracePeriod,
     testNodeMonitorPeriod,
-    true)
+)
 nodeController.now = func() metav1.Time { return fakeNow }
 nodeController.recorder = testutil.NewFakeRecorder()
 nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(fakeNodeHandler.Clientset)

View File

@@ -52473,13 +52473,6 @@ func schema_k8sio_kube_controller_manager_config_v1alpha1_NodeLifecycleControlle
     Description: "NodeLifecycleControllerConfiguration contains elements describing NodeLifecycleController.",
     Type: []string{"object"},
     Properties: map[string]spec.Schema{
-        "EnableTaintManager": {
-            SchemaProps: spec.SchemaProps{
-                Description: "If set to true enables NoExecute Taints and will evict all not-tolerating Pod running on Nodes tainted with this kind of Taints.",
-                Type: []string{"boolean"},
-                Format: "",
-            },
-        },
         "NodeEvictionRate": {
             SchemaProps: spec.SchemaProps{
                 Description: "nodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is healthy",
@@ -52534,7 +52527,7 @@ func schema_k8sio_kube_controller_manager_config_v1alpha1_NodeLifecycleControlle
         },
     },
 },
-Required: []string{"EnableTaintManager", "NodeEvictionRate", "SecondaryNodeEvictionRate", "NodeStartupGracePeriod", "NodeMonitorGracePeriod", "PodEvictionTimeout", "LargeClusterSizeThreshold", "UnhealthyZoneThreshold"},
+Required: []string{"NodeEvictionRate", "SecondaryNodeEvictionRate", "NodeStartupGracePeriod", "NodeMonitorGracePeriod", "PodEvictionTimeout", "LargeClusterSizeThreshold", "UnhealthyZoneThreshold"},
 },
 },
 Dependencies: []string{

View File

@@ -383,9 +383,6 @@ type NodeIPAMControllerConfiguration struct {
 // NodeLifecycleControllerConfiguration contains elements describing NodeLifecycleController.
 type NodeLifecycleControllerConfiguration struct {
-    // If set to true enables NoExecute Taints and will evict all not-tolerating
-    // Pod running on Nodes tainted with this kind of Taints.
-    EnableTaintManager *bool
     // nodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is healthy
     NodeEvictionRate float32
     // secondaryNodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy

View File

@@ -312,7 +312,7 @@ func (in *KubeControllerManagerConfiguration) DeepCopyInto(out *KubeControllerMa
     out.CronJobController = in.CronJobController
     out.NamespaceController = in.NamespaceController
     out.NodeIPAMController = in.NodeIPAMController
-    in.NodeLifecycleController.DeepCopyInto(&out.NodeLifecycleController)
+    out.NodeLifecycleController = in.NodeLifecycleController
     in.PersistentVolumeBinderController.DeepCopyInto(&out.PersistentVolumeBinderController)
     out.PodGCController = in.PodGCController
     out.ReplicaSetController = in.ReplicaSetController
@@ -378,11 +378,6 @@ func (in *NodeIPAMControllerConfiguration) DeepCopy() *NodeIPAMControllerConfigu
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *NodeLifecycleControllerConfiguration) DeepCopyInto(out *NodeLifecycleControllerConfiguration) {
     *out = *in
-    if in.EnableTaintManager != nil {
-        in, out := &in.EnableTaintManager, &out.EnableTaintManager
-        *out = new(bool)
-        **out = **in
-    }
     out.NodeStartupGracePeriod = in.NodeStartupGracePeriod
     out.NodeMonitorGracePeriod = in.NodeMonitorGracePeriod
     out.PodEvictionTimeout = in.PodEvictionTimeout

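The generated-code change above follows from dropping the struct's only pointer field: a struct of plain value fields is fully copied by assignment, while a *bool field would be aliased unless copied explicitly. A small self-contained illustration with hypothetical types, not the generated ones:

package main

import "fmt"

// withPointer and valueOnly are illustrative stand-ins, not the real config types.
type withPointer struct{ Enable *bool }
type valueOnly struct{ Rate float32 }

func main() {
    b := true
    src := withPointer{Enable: &b}
    shallow := src // copies the pointer, not the bool it points to
    *shallow.Enable = false
    fmt.Println(*src.Enable) // false: the shallow copy aliased the original

    v := valueOnly{Rate: 0.1}
    cp := v // value fields copy fully; no DeepCopyInto needed
    cp.Rate = 0.2
    fmt.Println(v.Rate) // 0.1
}
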
View File

@@ -131,7 +131,6 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) {
     100, // Secondary eviction limiter QPS
     50, // Large cluster threshold
     0.55, // Unhealthy zone threshold
-    true, // Run taint manager
 )
 if err != nil {
     t.Fatalf("Failed to create node controller: %v", err)
@@ -284,7 +283,6 @@ func TestTaintBasedEvictions(t *testing.T) {
     100, // Secondary eviction limiter QPS
     50, // Large cluster threshold
     0.55, // Unhealthy zone threshold
-    true, // Run taint manager
 )
 if err != nil {
     t.Fatalf("Failed to create node controller: %v", err)

View File

@@ -100,7 +100,6 @@ func TestTaintNodeByCondition(t *testing.T) {
     100, // Secondary eviction limiter QPS
     100, // Large cluster threshold
     100, // Unhealthy zone threshold
-    true, // Run taint manager
 )
 if err != nil {
     t.Errorf("Failed to create node controller: %v", err)