Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-29 22:46:12 +00:00)
Merge pull request #10651 from yujuhong/orphaned_poddirs
Kubelet: do not remove pod directory if any container is still running
Commit: 35d05093f7
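Before this change, cleanupOrphanedPodDirs kept a pod's on-disk directory only if the pod was still in the desired (bound) set. A pod that had been deleted from the apiserver while its containers were still running on the node would therefore have its directory, and everything mounted beneath it, removed out from under the running containers. The fix widens the keep-set to the union of desired pods and pods the container runtime still reports as running. A minimal, self-contained sketch of that rule follows; podIsActive and the map-based sets are illustrative stand-ins, not kubelet API.

	// Sketch of the orphan-cleanup rule introduced by this PR. The helper
	// and set representation are illustrative, not the kubelet's real code.
	package main

	import "fmt"

	// podIsActive reports whether a pod directory found on disk should be
	// kept: the pod is either still desired (bound to this node) or still
	// has running containers according to the container runtime.
	func podIsActive(uid string, desiredUIDs, runningUIDs map[string]bool) bool {
		return desiredUIDs[uid] || runningUIDs[uid]
	}

	func main() {
		desired := map[string]bool{"uid-a": true}
		running := map[string]bool{"uid-b": true} // deleted from apiserver, containers still up
		for _, uid := range []string{"uid-a", "uid-b", "uid-c"} {
			if podIsActive(uid, desired, running) {
				fmt.Printf("keep pod dir %s\n", uid)
			} else {
				fmt.Printf("remove pod dir %s\n", uid) // only uid-c is orphaned
			}
		}
	}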
@@ -1334,18 +1334,24 @@ func getDesiredVolumes(pods []*api.Pod) map[string]api.Volume {
 	return desiredVolumes
 }
 
-func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*api.Pod) error {
-	desired := util.NewStringSet()
+// cleanupOrphanedPodDirs removes a pod directory if the pod is not in the
+// desired set of pods and there is no running containers in the pod.
+func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*api.Pod, runningPods []*kubecontainer.Pod) error {
+	active := util.NewStringSet()
 	for _, pod := range pods {
-		desired.Insert(string(pod.UID))
+		active.Insert(string(pod.UID))
 	}
+	for _, pod := range runningPods {
+		active.Insert(string(pod.ID))
+	}
+
 	found, err := kl.listPodsFromDisk()
 	if err != nil {
 		return err
 	}
 	errlist := []error{}
 	for i := range found {
-		if !desired.Has(string(found[i])) {
+		if !active.Has(string(found[i])) {
 			glog.V(3).Infof("Orphaned pod %q found, removing", found[i])
 			if err := os.RemoveAll(kl.getPodDir(found[i])); err != nil {
 				errlist = append(errlist, err)
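The hunk above calls kl.listPodsFromDisk(), whose implementation is not shown in this diff. A plausible sketch, assuming the kubelet keeps one directory per pod, named by the pod's UID, under a single pods root such as /var/lib/kubelet/pods (the path and helper name here are assumptions):

	// Hypothetical sketch of listing pod UIDs from disk, assuming one
	// directory per pod named by the pod's UID under podsRoot. The real
	// listPodsFromDisk is not shown in this diff and may differ.
	package main

	import (
		"fmt"
		"os"
	)

	func listPodUIDsFromDisk(podsRoot string) ([]string, error) {
		entries, err := os.ReadDir(podsRoot)
		if err != nil {
			return nil, err
		}
		uids := []string{}
		for _, e := range entries {
			if e.IsDir() {
				uids = append(uids, e.Name()) // directory name doubles as the UID
			}
		}
		return uids, nil
	}

	func main() {
		uids, err := listPodUIDsFromDisk("/var/lib/kubelet/pods")
		if err != nil {
			fmt.Println("error:", err)
			return
		}
		fmt.Println("pods found on disk:", uids)
	}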
@@ -1574,7 +1580,7 @@ func (kl *Kubelet) HandlePodCleanups() error {
 	// Note that we pass all pods (including terminated pods) to the function,
 	// so that we don't remove directories associated with terminated but not yet
 	// deleted pods.
-	err = kl.cleanupOrphanedPodDirs(allPods)
+	err = kl.cleanupOrphanedPodDirs(allPods, runningPods)
 	if err != nil {
 		glog.Errorf("Failed cleaning up orphaned pod directories: %v", err)
 		return err
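HandlePodCleanups already has runningPods in scope (it queries the container runtime earlier in the function), so the call site only needs to thread it through. Note also the errlist pattern at the end of the first hunk: removal failures are collected rather than aborting on the first error, and aggregated into a single error outside the lines shown. A self-contained sketch of that pattern using the standard library's errors.Join (the kubelet uses its own aggregation helper, which does not appear in this diff):

	// Sketch of the accumulate-then-aggregate error pattern used by
	// cleanupOrphanedPodDirs. errors.Join stands in for whatever
	// aggregation happens outside the hunk shown above.
	package main

	import (
		"errors"
		"fmt"
		"os"
	)

	func removeAllDirs(dirs []string) error {
		errlist := []error{}
		for _, d := range dirs {
			if err := os.RemoveAll(d); err != nil {
				errlist = append(errlist, err) // keep going; report everything at the end
			}
		}
		return errors.Join(errlist...) // nil when errlist is empty
	}

	func main() {
		if err := removeAllDirs([]string{os.TempDir() + "/orphan-a", os.TempDir() + "/orphan-b"}); err != nil {
			fmt.Println("cleanup failed:", err)
		}
	}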
@@ -3243,6 +3243,22 @@ func TestDeletePodDirsForDeletedPods(t *testing.T) {
 	}
 }
 
+func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*api.Pod, podsToCheck []*api.Pod, shouldExist bool) {
+	kl := testKubelet.kubelet
+
+	kl.podManager.SetPods(pods)
+	kl.HandlePodSyncs(pods)
+	kl.HandlePodCleanups()
+	for i, pod := range podsToCheck {
+		exist := dirExists(kl.getPodDir(pod.UID))
+		if shouldExist && !exist {
+			t.Errorf("expected directory to exist for pod %d", i)
+		} else if !shouldExist && exist {
+			t.Errorf("expected directory to be removed for pod %d", i)
+		}
+	}
+}
+
 func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
 	testKubelet := newTestKubelet(t)
 	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
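The new syncAndVerifyPodDir helper folds the SetPods / HandlePodSyncs / HandlePodCleanups sequence and the directory-existence assertions into a single call, so each test states only which pods the kubelet should see and which pod directories should survive. The next hunk rewrites the existing terminated-pods test in these terms and adds a test for the running-container case.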
@@ -3273,22 +3289,45 @@ func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
 		},
 	}
 
-	kl.podManager.SetPods(pods)
-	// Sync to create pod directories.
-	kl.HandlePodSyncs(pods)
-	for i := range pods {
-		if !dirExists(kl.getPodDir(pods[i].UID)) {
-			t.Errorf("expected directory to exist for pod %d", i)
-		}
-	}
+	syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
 	// Pod 1 failed, and pod 2 succeeded. None of the pod directories should be
 	// deleted.
 	kl.statusManager.SetPodStatus(pods[1], api.PodStatus{Phase: api.PodFailed})
 	kl.statusManager.SetPodStatus(pods[2], api.PodStatus{Phase: api.PodSucceeded})
-	kl.HandlePodCleanups()
-	for i := range pods {
-		if !dirExists(kl.getPodDir(pods[i].UID)) {
-			t.Errorf("expected directory to exist for pod %d", i)
-		}
-	}
+	syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
+}
+
+func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
+	testKubelet := newTestKubelet(t)
+	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
+	testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
+	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
+	runningPod := &kubecontainer.Pod{
+		ID:        "12345678",
+		Name:      "pod1",
+		Namespace: "ns",
+	}
+	apiPod := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			UID:       runningPod.ID,
+			Name:      runningPod.Name,
+			Namespace: runningPod.Namespace,
+		},
+	}
+	// Sync once to create pod directory; confirm that the pod directory has
+	// already been created.
+	pods := []*api.Pod{apiPod}
+	syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, true)
+
+	// Pretend the pod is deleted from apiserver, but is still active on the node.
+	// The pod directory should not be removed.
+	pods = []*api.Pod{}
+	testKubelet.fakeRuntime.PodList = []*kubecontainer.Pod{runningPod}
+	syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, true)
+
+	// The pod is deleted and also not active on the node. The pod directory
+	// should be removed.
+	pods = []*api.Pod{}
+	testKubelet.fakeRuntime.PodList = []*kubecontainer.Pod{}
+	syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, false)
 }
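The new test walks one pod through three states: present in the apiserver and running (its directory must exist), deleted from the apiserver but still reported by the runtime's PodList (its directory must survive, which is exactly the bug this PR fixes), and gone from both (its directory must finally be removed).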