Mirror of https://github.com/k3s-io/kubernetes.git
Clean node controller of code that ensured backward compatibility with unsupported kubelet versions
This commit is contained in:
parent c4a1d928e7
commit c446aa0e63
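Every deletion below hinges on the same pattern: parse the kubelet version reported in node.Status.NodeInfo.KubeletVersion and compare it against a minimum (gracefulDeletionVersion, i.e. versions below 1.1.0, and podStatusReconciliationVersion, i.e. versions below 1.2.0, per the removed comments). A minimal standalone sketch of that gate; the k8s.io/apimachinery/pkg/util/version import path is an assumption, as older trees vendor the same utilities under a different path:

package main

import (
	"fmt"

	utilversion "k8s.io/apimachinery/pkg/util/version"
)

func main() {
	// 1.2.0 mirrors the podStatusReconciliationVersion threshold named in the
	// removed code; the same gate with 1.1.0 mirrors gracefulDeletionVersion.
	minVersion := utilversion.MustParseSemantic("1.2.0")
	for _, raw := range []string{"v1.1.0", "v1.2.0", "not-a-version"} {
		v, err := utilversion.ParseSemantic(raw) // tolerates a leading "v"
		if err != nil {
			// The removed helpers treated unparseable versions as outdated (fail safe).
			fmt.Printf("%s: unparseable, treated as outdated\n", raw)
			continue
		}
		fmt.Printf("%s: outdated=%v\n", raw, v.LessThan(minVersion))
	}
}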
@@ -309,14 +309,12 @@ func NewNodeController(
 	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
-			nc.maybeDeleteTerminatingPod(obj)
 			pod := obj.(*v1.Pod)
 			if nc.taintManager != nil {
 				nc.taintManager.PodUpdated(nil, pod)
 			}
 		},
 		UpdateFunc: func(prev, obj interface{}) {
-			nc.maybeDeleteTerminatingPod(obj)
 			prevPod := prev.(*v1.Pod)
 			newPod := obj.(*v1.Pod)
 			if nc.taintManager != nil {
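For reference, the pod informer registration that survives this hunk, assembled from the context lines above. The hunk is truncated inside UpdateFunc, so the PodUpdated(prevPod, newPod) call below is an assumption based on the surrounding context, not part of the shown diff:

podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
	AddFunc: func(obj interface{}) {
		pod := obj.(*v1.Pod)
		if nc.taintManager != nil {
			nc.taintManager.PodUpdated(nil, pod) // nil previous pod signals an add
		}
	},
	UpdateFunc: func(prev, obj interface{}) {
		prevPod := prev.(*v1.Pod)
		newPod := obj.(*v1.Pod)
		if nc.taintManager != nil {
			nc.taintManager.PodUpdated(prevPod, newPod) // assumption: hunk cuts off here
		}
	},
})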
@@ -1196,55 +1194,3 @@ func (nc *Controller) ComputeZoneState(nodeReadyConditions []*v1.NodeCondition)
 		return notReadyNodes, stateNormal
 	}
 }
-
-// maybeDeleteTerminatingPod non-gracefully deletes pods that are terminating
-// that should not be gracefully terminated.
-func (nc *Controller) maybeDeleteTerminatingPod(obj interface{}) {
-	pod, ok := obj.(*v1.Pod)
-	if !ok {
-		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
-		if !ok {
-			glog.Errorf("Couldn't get object from tombstone %#v", obj)
-			return
-		}
-		pod, ok = tombstone.Obj.(*v1.Pod)
-		if !ok {
-			glog.Errorf("Tombstone contained object that is not a Pod %#v", obj)
-			return
-		}
-	}
-
-	// consider only terminating pods
-	if pod.DeletionTimestamp == nil {
-		return
-	}
-
-	node, err := nc.nodeLister.Get(pod.Spec.NodeName)
-	// if there is no such node, do nothing and let the podGC clean it up.
-	if apierrors.IsNotFound(err) {
-		return
-	}
-	if err != nil {
-		// this can only happen if the Store.KeyFunc has a problem creating
-		// a key for the pod. If it happens once, it will happen again so
-		// don't bother requeuing the pod.
-		utilruntime.HandleError(err)
-		return
-	}
-
-	// delete terminating pods that have been scheduled on
-	// nodes that do not support graceful termination
-	// TODO(mikedanese): this can be removed when we no longer
-	// guarantee backwards compatibility of master API to kubelets with
-	// versions less than 1.1.0
-	v, err := utilversion.ParseSemantic(node.Status.NodeInfo.KubeletVersion)
-	if err != nil {
-		glog.V(0).Infof("Couldn't parse version %q of node: %v", node.Status.NodeInfo.KubeletVersion, err)
-		utilruntime.HandleError(nc.forcefullyDeletePod(pod))
-		return
-	}
-	if v.LessThan(gracefulDeletionVersion) {
-		utilruntime.HandleError(nc.forcefullyDeletePod(pod))
-		return
-	}
-}
@@ -1726,9 +1726,6 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
 			CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
 		},
 		Status: v1.NodeStatus{
-			NodeInfo: v1.NodeSystemInfo{
-				KubeletVersion: "v1.2.0",
-			},
 			Conditions: []v1.NodeCondition{
 				{
 					Type: v1.NodeReady,
@@ -1752,9 +1749,6 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
 		},
 		timeToPass: 1 * time.Minute,
 		newNodeStatus: v1.NodeStatus{
-			NodeInfo: v1.NodeSystemInfo{
-				KubeletVersion: "v1.2.0",
-			},
 			Conditions: []v1.NodeCondition{
 				{
 					Type: v1.NodeReady,
@@ -152,13 +152,6 @@ func ForcefullyDeleteNode(kubeClient clientset.Interface, nodeName string) error
 // MarkAllPodsNotReady updates ready status of all pods running on
 // given node from master return true if success
 func MarkAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error {
-	// Don't set pods to NotReady if the kubelet is running a version that
-	// doesn't understand how to correct readiness.
-	// TODO: Remove this check when we no longer guarantee backward compatibility
-	// with node versions < 1.2.0.
-	if NodeRunningOutdatedKubelet(node) {
-		return nil
-	}
 	nodeName := node.Name
 	glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
 	opts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()}
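With the outdated-kubelet check gone, MarkAllPodsNotReady proceeds for every node. The context line above shows it selecting the node's pods with a field selector on api.PodHostField ("spec.nodeName"). A minimal sketch of the same listing, assuming a modern client-go clientset (the context-taking List signature is newer than this tree, which calls List(opts) directly):

package util

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
)

// podsOnNode counts every pod bound to nodeName, the same selection
// MarkAllPodsNotReady iterates over.
func podsOnNode(c kubernetes.Interface, nodeName string) (int, error) {
	opts := metav1.ListOptions{
		// "spec.nodeName" is the value behind api.PodHostField.
		FieldSelector: fields.OneTermEqualSelector("spec.nodeName", nodeName).String(),
	}
	pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), opts)
	if err != nil {
		return 0, err
	}
	return len(pods.Items), nil
}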
@@ -193,23 +186,6 @@ func MarkAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error {
 	return fmt.Errorf("%v", strings.Join(errMsg, "; "))
 }
 
-// NodeRunningOutdatedKubelet returns true if the kubeletVersion reported
-// in the nodeInfo of the given node is "outdated", meaning < 1.2.0.
-// Older versions were inflexible and modifying pod.Status directly through
-// the apiserver would result in unexpected outcomes.
-func NodeRunningOutdatedKubelet(node *v1.Node) bool {
-	v, err := utilversion.ParseSemantic(node.Status.NodeInfo.KubeletVersion)
-	if err != nil {
-		glog.Errorf("couldn't parse version %q of node %v", node.Status.NodeInfo.KubeletVersion, err)
-		return true
-	}
-	if v.LessThan(podStatusReconciliationVersion) {
-		glog.Infof("Node %v running kubelet at (%v) which is less than the minimum version that allows nodecontroller to mark pods NotReady (%v).", node.Name, v, podStatusReconciliationVersion)
-		return true
-	}
-	return false
-}
-
 // NodeExistsInCloudProvider returns true if the node exists in the
 // cloud provider.
 func NodeExistsInCloudProvider(cloud cloudprovider.Interface, nodeName types.NodeName) (bool, error) {