refactor maybeDeleteTerminatingPod
parent 10211f4df2 · commit 6d77f53af4
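This refactor drops the nodeStore and forcefulDeletePodFunc parameters from maybeDeleteTerminatingPod: the method now reads both dependencies off its NodeController receiver (nc.nodeStore.Store and nc.forcefullyDeletePod), so the event-handler call sites and the test pass only the object being examined.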
@@ -139,7 +139,7 @@ func forcefullyDeleteNode(kubeClient clientset.Interface, nodeName string, force
 
 // maybeDeleteTerminatingPod non-gracefully deletes pods that are terminating
 // that should not be gracefully terminated.
-func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}, nodeStore cache.Store, forcefulDeletePodFunc func(*api.Pod) error) {
+func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
 	pod, ok := obj.(*api.Pod)
 	if !ok {
 		return
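For orientation, the change implies that both collaborators already exist as fields on NodeController. A minimal sketch of the relevant fields, with types assumed from the nc.nodeStore.Store access and from the TestCheckPod hunk below (the real struct has many more fields):

	// Sketch only; field types are inferred from usage in this diff,
	// not copied from the source file.
	type NodeController struct {
		// ... other fields elided ...
		nodeStore           cache.StoreToNodeLister // read via nc.nodeStore.Store.GetByKey
		forcefullyDeletePod func(*api.Pod) error    // swapped for a stub in TestCheckPod
	}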
@@ -152,11 +152,11 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}, nodeStore c
 
 	// delete terminating pods that have not yet been scheduled
 	if len(pod.Spec.NodeName) == 0 {
-		utilruntime.HandleError(forcefulDeletePodFunc(pod))
+		utilruntime.HandleError(nc.forcefullyDeletePod(pod))
 		return
 	}
 
-	nodeObj, found, err := nodeStore.GetByKey(pod.Spec.NodeName)
+	nodeObj, found, err := nc.nodeStore.Store.GetByKey(pod.Spec.NodeName)
 	if err != nil {
 		// this can only happen if the Store.KeyFunc has a problem creating
 		// a key for the pod. If it happens once, it will happen again so
@@ -169,7 +169,7 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}, nodeStore c
 	// nonexistent nodes
 	if !found {
 		glog.Warningf("Unable to find Node: %v, deleting all assigned Pods.", pod.Spec.NodeName)
-		utilruntime.HandleError(forcefulDeletePodFunc(pod))
+		utilruntime.HandleError(nc.forcefullyDeletePod(pod))
 		return
 	}
 
@@ -182,11 +182,11 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}, nodeStore c
 	v, err := version.Parse(node.Status.NodeInfo.KubeletVersion)
 	if err != nil {
 		glog.V(0).Infof("couldn't parse verions %q of minion: %v", node.Status.NodeInfo.KubeletVersion, err)
-		utilruntime.HandleError(forcefulDeletePodFunc(pod))
+		utilruntime.HandleError(nc.forcefullyDeletePod(pod))
 		return
 	}
 	if gracefulDeletionVersion.GT(v) {
-		utilruntime.HandleError(forcefulDeletePodFunc(pod))
+		utilruntime.HandleError(nc.forcefullyDeletePod(pod))
 		return
 	}
 }
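The branch above is a compatibility gate: if the kubelet's reported version cannot be parsed, or is older than gracefulDeletionVersion, graceful termination is not trusted and the pod is force-deleted immediately. The refactor leaves that logic intact; only the source of the delete function changes.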
@@ -222,10 +222,10 @@ func NewNodeController(
 		controller.NoResyncPeriodFunc(),
 		framework.ResourceEventHandlerFuncs{
 			AddFunc: func(obj interface{}) {
-				nc.maybeDeleteTerminatingPod(obj, nc.nodeStore.Store, nc.forcefullyDeletePod)
+				nc.maybeDeleteTerminatingPod(obj)
 			},
 			UpdateFunc: func(_, obj interface{}) {
-				nc.maybeDeleteTerminatingPod(obj, nc.nodeStore.Store, nc.forcefullyDeletePod)
+				nc.maybeDeleteTerminatingPod(obj)
 			},
 		},
 	// We don't need to build a index for podStore here actually, but build one for consistency.
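Because the handler closures already capture nc, dropping the extra arguments removes the duplicated nc.nodeStore.Store / nc.forcefullyDeletePod plumbing at both registration sites without changing behavior.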
@@ -1288,12 +1288,12 @@ func TestCheckPod(t *testing.T) {
 
 	for i, tc := range tcs {
 		var deleteCalls int
-		forcefullyDeletePodsFunc := func(_ *api.Pod) error {
+		nc.forcefullyDeletePod = func(_ *api.Pod) error {
 			deleteCalls++
 			return nil
 		}
 
-		nc.maybeDeleteTerminatingPod(&tc.pod, nc.nodeStore.Store, forcefullyDeletePodsFunc)
+		nc.maybeDeleteTerminatingPod(&tc.pod)
 
 		if tc.prune && deleteCalls != 1 {
 			t.Errorf("[%v] expected number of delete calls to be 1 but got %v", i, deleteCalls)
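The test works because the delete action is now a function-typed field that can be overwritten in place. A self-contained, hypothetical illustration of the same pattern (all names here are invented for illustration, not taken from the commit):

	package main

	import (
		"fmt"
		"log"
	)

	// controller mirrors, in miniature, the shape this commit gives
	// NodeController: the delete action is stored as a field.
	type controller struct {
		deletePod func(name string) error
	}

	func (c *controller) prune(name string) {
		if err := c.deletePod(name); err != nil {
			log.Printf("delete %s: %v", name, err)
		}
	}

	func main() {
		// A test swaps the field for a counting stub, exactly as
		// TestCheckPod does with nc.forcefullyDeletePod above.
		var calls int
		c := &controller{deletePod: func(string) error { calls++; return nil }}
		c.prune("pod-a")
		fmt.Println("delete calls:", calls) // delete calls: 1
	}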