Moving deletion behavior from the NodeController (NC) into PodGC

This should be a no-op: we are only moving functionality around, and thanks to #35476 the PodGC controller should always run anyway.
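The net effect is that two cleanup cases, terminating pods that were never scheduled and pods bound to nodes that no longer exist, are now handled only by the PodGC controller. Below is a small stand-alone sketch of those two checks; the types and names (pod, unscheduledTerminating, orphaned) are hypothetical and for illustration only, not the controller's actual API.

package main

import "fmt"

// Minimal stand-ins for the fields PodGC looks at (hypothetical types, illustration only).
type pod struct {
	name        string
	nodeName    string
	terminating bool // stands in for a non-nil DeletionTimestamp
}

// unscheduledTerminating mirrors the gcUnscheduledTerminating check in the diff below:
// the pod is terminating and was never assigned to a node.
func unscheduledTerminating(p pod) bool {
	return p.terminating && p.nodeName == ""
}

// orphaned mirrors the gcOrphaned check: the pod is bound to a node that no longer exists.
func orphaned(p pod, nodeExists func(string) bool) bool {
	return p.nodeName != "" && !nodeExists(p.nodeName)
}

func main() {
	nodes := map[string]bool{"node-a": true}
	exists := func(name string) bool { return nodes[name] }

	pods := []pod{
		{name: "p1", terminating: true},                     // cleaned up by gcUnscheduledTerminating
		{name: "p2", nodeName: "nonexistant"},               // cleaned up by gcOrphaned
		{name: "p3", nodeName: "node-a", terminating: true}, // left alone by both checks
	}
	for _, p := range pods {
		fmt.Printf("%s: unscheduledTerminating=%v orphaned=%v\n",
			p.name, unscheduledTerminating(p), orphaned(p, exists))
	}
}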
@@ -135,12 +135,6 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
 		return
 	}
-
-	// delete terminating pods that have not yet been scheduled
-	if len(pod.Spec.NodeName) == 0 {
-		utilruntime.HandleError(nc.forcefullyDeletePod(pod))
-		return
-	}
 
 	nodeObj, found, err := nc.nodeStore.Store.GetByKey(pod.Spec.NodeName)
 	if err != nil {
 		// this can only happen if the Store.KeyFunc has a problem creating
@@ -150,11 +144,8 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
 		return
 	}
 
-	// delete terminating pods that have been scheduled on
-	// nonexistent nodes
+	// if there is no such node, do nothing and let the podGC clean it up.
 	if !found {
-		glog.Warningf("Unable to find Node: %v, deleting all assigned Pods.", pod.Spec.NodeName)
-		utilruntime.HandleError(nc.forcefullyDeletePod(pod))
 		return
 	}
 
@@ -1723,14 +1723,14 @@ func TestCheckPod(t *testing.T) {
 				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
 				Spec: api.PodSpec{NodeName: ""},
 			},
-			prune: true,
+			prune: false,
 		},
 		{
 			pod: api.Pod{
 				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
 				Spec: api.PodSpec{NodeName: "nonexistant"},
 			},
-			prune: true,
+			prune: false,
 		},
 	}
 
@@ -125,6 +125,7 @@ func (gcc *PodGCController) gc() {
 		gcc.gcTerminated(pods)
 	}
 	gcc.gcOrphaned(pods)
+	gcc.gcUnscheduledTerminating(pods)
 }
 
 func isPodTerminated(pod *api.Pod) bool {
@@ -168,7 +169,7 @@ func (gcc *PodGCController) gcTerminated(pods []*api.Pod) {
 	wait.Wait()
 }
 
-// cleanupOrphanedPods deletes pods that are bound to nodes that don't exist.
+// gcOrphaned deletes pods that are bound to nodes that don't exist.
 func (gcc *PodGCController) gcOrphaned(pods []*api.Pod) {
 	glog.V(4).Infof("GC'ing orphaned")
 
@@ -183,7 +184,29 @@ func (gcc *PodGCController) gcOrphaned(pods []*api.Pod) {
 		if err := gcc.deletePod(pod.Namespace, pod.Name); err != nil {
 			utilruntime.HandleError(err)
 		} else {
-			glog.V(4).Infof("Forced deletion of oprhaned Pod %s succeeded", pod.Name)
+			glog.V(4).Infof("Forced deletion of orphaned Pod %s succeeded", pod.Name)
 		}
 	}
 }
+
+// gcUnscheduledTerminating deletes pods that are terminating and haven't been scheduled to a particular node.
+func (gcc *PodGCController) gcUnscheduledTerminating(pods []*api.Pod) {
+	glog.V(4).Infof("GC'ing unscheduled pods which are terminating.")
+
+	for _, pod := range pods {
+		if pod.DeletionTimestamp == nil {
+			continue
+		}
+
+		if len(pod.Spec.NodeName) > 0 {
+			continue
+		}
+
+		glog.V(2).Infof("Found unscheduled terminating Pod %v not assigned to any Node. Deleting.", pod.Name)
+		if err := gcc.deletePod(pod.Namespace, pod.Name); err != nil {
+			utilruntime.HandleError(err)
+		} else {
+			glog.V(4).Infof("Forced deletion of unscheduled terminating Pod %s succeeded", pod.Name)
+		}
+	}
+}
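The NodeController previously force-deleted these pods itself via forcefullyDeletePod; since #35476 the PodGC controller is expected to always run, so the checks can live solely in gcOrphaned and gcUnscheduledTerminating without changing behavior. Both paths delete through gcc.deletePod, which is not part of this diff; as the log lines indicate, it performs a forced deletion.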