Merge pull request #52402 from yastij/nodecontroller-clean-backwards
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Cleaning the node controller of code that only exists to support unsupported kubelet versions.

**What this PR does / why we need it**:

**Which issue this PR fixes**: fixes #52356

**Special notes for your reviewer**:

**Release note**:

```release-note
None
```
This commit is contained in: commit 7810eb8da0
```diff
@@ -309,14 +309,12 @@ func NewNodeController(
 	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
-			nc.maybeDeleteTerminatingPod(obj)
 			pod := obj.(*v1.Pod)
 			if nc.taintManager != nil {
 				nc.taintManager.PodUpdated(nil, pod)
 			}
 		},
 		UpdateFunc: func(prev, obj interface{}) {
-			nc.maybeDeleteTerminatingPod(obj)
 			prevPod := prev.(*v1.Pod)
 			newPod := obj.(*v1.Pod)
 			if nc.taintManager != nil {
```
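For orientation, here is a minimal, self-contained sketch of the informer event-handler pattern used in the hunk above: handlers are registered through `cache.ResourceEventHandlerFuncs` from client-go and receive untyped objects that are asserted back to `*v1.Pod`. The factory wiring and the `onPodUpdated` callback are illustrative stand-ins, not code from this PR.

```go
package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// registerPodHandlers wires Add/Update callbacks onto a shared pod informer,
// mirroring the AddEventHandler call in the hunk above. onPodUpdated is an
// illustrative stand-in for the controller's taint-manager notification.
func registerPodHandlers(client kubernetes.Interface, onPodUpdated func(old, cur *v1.Pod)) cache.SharedIndexInformer {
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	podInformer := factory.Core().V1().Pods().Informer()

	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			// Informer callbacks receive untyped objects; assert back to *v1.Pod.
			pod := obj.(*v1.Pod)
			onPodUpdated(nil, pod) // an add is treated as an update with no previous state
		},
		UpdateFunc: func(prev, obj interface{}) {
			prevPod := prev.(*v1.Pod)
			newPod := obj.(*v1.Pod)
			onPodUpdated(prevPod, newPod)
		},
	})
	return podInformer
}
```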
```diff
@@ -1196,55 +1194,3 @@ func (nc *Controller) ComputeZoneState(nodeReadyConditions []*v1.NodeCondition)
 		return notReadyNodes, stateNormal
 	}
 }
-
-// maybeDeleteTerminatingPod non-gracefully deletes pods that are terminating
-// that should not be gracefully terminated.
-func (nc *Controller) maybeDeleteTerminatingPod(obj interface{}) {
-	pod, ok := obj.(*v1.Pod)
-	if !ok {
-		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
-		if !ok {
-			glog.Errorf("Couldn't get object from tombstone %#v", obj)
-			return
-		}
-		pod, ok = tombstone.Obj.(*v1.Pod)
-		if !ok {
-			glog.Errorf("Tombstone contained object that is not a Pod %#v", obj)
-			return
-		}
-	}
-
-	// consider only terminating pods
-	if pod.DeletionTimestamp == nil {
-		return
-	}
-
-	node, err := nc.nodeLister.Get(pod.Spec.NodeName)
-	// if there is no such node, do nothing and let the podGC clean it up.
-	if apierrors.IsNotFound(err) {
-		return
-	}
-	if err != nil {
-		// this can only happen if the Store.KeyFunc has a problem creating
-		// a key for the pod. If it happens once, it will happen again so
-		// don't bother requeuing the pod.
-		utilruntime.HandleError(err)
-		return
-	}
-
-	// delete terminating pods that have been scheduled on
-	// nodes that do not support graceful termination
-	// TODO(mikedanese): this can be removed when we no longer
-	// guarantee backwards compatibility of master API to kubelets with
-	// versions less than 1.1.0
-	v, err := utilversion.ParseSemantic(node.Status.NodeInfo.KubeletVersion)
-	if err != nil {
-		glog.V(0).Infof("Couldn't parse version %q of node: %v", node.Status.NodeInfo.KubeletVersion, err)
-		utilruntime.HandleError(nc.forcefullyDeletePod(pod))
-		return
-	}
-	if v.LessThan(gracefulDeletionVersion) {
-		utilruntime.HandleError(nc.forcefullyDeletePod(pod))
-		return
-	}
-}
```
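The removed function gates its force-delete path on the kubelet version parsed from `node.Status.NodeInfo.KubeletVersion`. Below is a minimal, standalone sketch of that version gate using the semantic-version helpers from `k8s.io/apimachinery/pkg/util/version`; the `supportsGracefulDeletion` helper and the hard-coded 1.1.0 threshold (taken from the TODO comment above) are illustrative assumptions, not part of this PR.

```go
package main

import (
	"fmt"

	utilversion "k8s.io/apimachinery/pkg/util/version"
)

// gracefulDeletionVersion mirrors the threshold referenced in the removed code:
// kubelets older than 1.1.0 did not support graceful pod termination.
var gracefulDeletionVersion = utilversion.MustParseSemantic("1.1.0")

// supportsGracefulDeletion reports whether a kubelet at the given reported
// version can gracefully terminate pods. Unparseable versions are treated as
// "no", matching the conservative behaviour of the removed code.
func supportsGracefulDeletion(kubeletVersion string) bool {
	v, err := utilversion.ParseSemantic(kubeletVersion)
	if err != nil {
		return false
	}
	return !v.LessThan(gracefulDeletionVersion)
}

func main() {
	for _, kv := range []string{"v1.0.6", "v1.2.0", "not-a-version"} {
		fmt.Printf("%-13s graceful deletion supported: %v\n", kv, supportsGracefulDeletion(kv))
	}
}
```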
```diff
@@ -1726,9 +1726,6 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
 			CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
 		},
 		Status: v1.NodeStatus{
-			NodeInfo: v1.NodeSystemInfo{
-				KubeletVersion: "v1.2.0",
-			},
 			Conditions: []v1.NodeCondition{
 				{
 					Type: v1.NodeReady,
```
```diff
@@ -1752,9 +1749,6 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
 		},
 		timeToPass: 1 * time.Minute,
 		newNodeStatus: v1.NodeStatus{
-			NodeInfo: v1.NodeSystemInfo{
-				KubeletVersion: "v1.2.0",
-			},
 			Conditions: []v1.NodeCondition{
 				{
 					Type: v1.NodeReady,
```
```diff
@@ -152,13 +152,6 @@ func ForcefullyDeleteNode(kubeClient clientset.Interface, nodeName string) error
 // MarkAllPodsNotReady updates ready status of all pods running on
 // given node from master return true if success
 func MarkAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error {
-	// Don't set pods to NotReady if the kubelet is running a version that
-	// doesn't understand how to correct readiness.
-	// TODO: Remove this check when we no longer guarantee backward compatibility
-	// with node versions < 1.2.0.
-	if NodeRunningOutdatedKubelet(node) {
-		return nil
-	}
 	nodeName := node.Name
 	glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
 	opts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()}
```
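`MarkAllPodsNotReady` (context lines above) selects the pods to update by building a field selector on the pod's host field. A minimal sketch of that listing pattern with client-go follows; the literal `spec.nodeName` field name and the `listPodsOnNode` helper are assumptions for illustration, and the `List` signature shown is the context-taking one from recent client-go releases.

```go
package example

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
)

// listPodsOnNode lists every pod (in all namespaces) scheduled onto nodeName,
// using the same field-selector construction as MarkAllPodsNotReady.
func listPodsOnNode(ctx context.Context, client kubernetes.Interface, nodeName string) (*v1.PodList, error) {
	opts := metav1.ListOptions{
		// api.PodHostField in the diff refers to the pod's spec.nodeName field.
		FieldSelector: fields.OneTermEqualSelector("spec.nodeName", nodeName).String(),
	}
	pods, err := client.CoreV1().Pods(metav1.NamespaceAll).List(ctx, opts)
	if err != nil {
		return nil, fmt.Errorf("listing pods on node %q: %v", nodeName, err)
	}
	return pods, nil
}
```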
```diff
@@ -193,23 +186,6 @@ func MarkAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error {
 	return fmt.Errorf("%v", strings.Join(errMsg, "; "))
 }
-
-// NodeRunningOutdatedKubelet returns true if the kubeletVersion reported
-// in the nodeInfo of the given node is "outdated", meaning < 1.2.0.
-// Older versions were inflexible and modifying pod.Status directly through
-// the apiserver would result in unexpected outcomes.
-func NodeRunningOutdatedKubelet(node *v1.Node) bool {
-	v, err := utilversion.ParseSemantic(node.Status.NodeInfo.KubeletVersion)
-	if err != nil {
-		glog.Errorf("couldn't parse version %q of node %v", node.Status.NodeInfo.KubeletVersion, err)
-		return true
-	}
-	if v.LessThan(podStatusReconciliationVersion) {
-		glog.Infof("Node %v running kubelet at (%v) which is less than the minimum version that allows nodecontroller to mark pods NotReady (%v).", node.Name, v, podStatusReconciliationVersion)
-		return true
-	}
-	return false
-}
 
 // NodeExistsInCloudProvider returns true if the node exists in the
 // cloud provider.
 func NodeExistsInCloudProvider(cloud cloudprovider.Interface, nodeName types.NodeName) (bool, error) {
```