diff --git a/pkg/kubelet/container/runtime.go b/pkg/kubelet/container/runtime.go index 56aeaf8136e..bf2af98620d 100644 --- a/pkg/kubelet/container/runtime.go +++ b/pkg/kubelet/container/runtime.go @@ -470,6 +470,9 @@ type VolumeInfo struct { // Whether the volume permission is set to read-only or not // This value is passed from volume.spec ReadOnly bool + // Inner volume spec name, which is the PV name if used, otherwise + // it is the same as the outer volume spec name. + InnerVolumeSpecName string } type VolumeMap map[string]VolumeInfo diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 2712448cf81..d8b0bcd2aa0 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -235,7 +235,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h hostPath, cleanupAction, err = mounter.PrepareSafeSubpath(mountutil.Subpath{ VolumeMountIndex: i, Path: hostPath, - VolumeName: mount.Name, + VolumeName: vol.InnerVolumeSpecName, VolumePath: volumePath, PodDir: podDir, ContainerName: container.Name, diff --git a/pkg/kubelet/volumemanager/volume_manager.go b/pkg/kubelet/volumemanager/volume_manager.go index 1402eb2a474..5d5ad90ab41 100644 --- a/pkg/kubelet/volumemanager/volume_manager.go +++ b/pkg/kubelet/volumemanager/volume_manager.go @@ -255,9 +255,10 @@ func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) co podVolumes := make(container.VolumeMap) for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) { podVolumes[mountedVolume.OuterVolumeSpecName] = container.VolumeInfo{ - Mounter: mountedVolume.Mounter, - BlockVolumeMapper: mountedVolume.BlockVolumeMapper, - ReadOnly: mountedVolume.VolumeSpec.ReadOnly, + Mounter: mountedVolume.Mounter, + BlockVolumeMapper: mountedVolume.BlockVolumeMapper, + ReadOnly: mountedVolume.VolumeSpec.ReadOnly, + InnerVolumeSpecName: mountedVolume.InnerVolumeSpecName, } } return podVolumes diff --git 
a/pkg/util/mount/mount_linux.go b/pkg/util/mount/mount_linux.go index 0758b0df8ca..0b54b3fd36a 100644 --- a/pkg/util/mount/mount_linux.go +++ b/pkg/util/mount/mount_linux.go @@ -815,9 +815,10 @@ func (mounter *Mounter) CleanSubPaths(podDir string, volumeName string) error { // This implementation is shared between Linux and NsEnterMounter func doCleanSubPaths(mounter Interface, podDir string, volumeName string) error { - glog.V(4).Infof("Cleaning up subpath mounts for %s", podDir) // scan /var/lib/kubelet/pods/<uid>/volume-subpaths/<volume>/* subPathDir := filepath.Join(podDir, containerSubPathDirectoryName, volumeName) + glog.V(4).Infof("Cleaning up subpath mounts for %s", subPathDir) + containerDirs, err := ioutil.ReadDir(subPathDir) if err != nil { if os.IsNotExist(err) { diff --git a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index f985055fcdd..7130d840437 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -672,7 +672,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc( // Remove all bind-mounts for subPaths podDir := path.Join(podsDir, string(volumeToUnmount.PodUID)) - if err := mounter.CleanSubPaths(podDir, volumeToUnmount.OuterVolumeSpecName); err != nil { + if err := mounter.CleanSubPaths(podDir, volumeToUnmount.InnerVolumeSpecName); err != nil { return volumeToUnmount.GenerateError("error cleaning subPath mounts", err) } diff --git a/test/e2e/storage/subpath.go b/test/e2e/storage/subpath.go index 34432498cb8..33e20814009 100644 --- a/test/e2e/storage/subpath.go +++ b/test/e2e/storage/subpath.go @@ -60,7 +60,7 @@ var initVolSources = 
map[string]func() volSource{ "hostPath": initHostpath, "hostPathSymlink": initHostpathSymlink, "emptyDir": initEmptydir, - "gcePD": initGCEPD, + "gcePDPVC": initGCEPD, "gcePDPartitioned": initGCEPDPartition, "nfs": initNFS, "nfsPVC": initNFSPVC, @@ -307,6 +307,17 @@ var _ = utils.SIGDescribe("Subpath", func() { testPodContainerRestart(f, pod, filePathInVolume, filePathInSubpath) }) + + It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() { + testSubpathReconstruction(f, pod, false) + }) + + It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() { + if curVolType == "hostPath" || curVolType == "hostPathSymlink" { + framework.Skipf("%s volume type does not support reconstruction, skipping", curVolType) + } + testSubpathReconstruction(f, pod, true) + }) }) } @@ -549,6 +560,85 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod, fileInVolume, Expect(strings.TrimSpace(out)).To(BeEquivalentTo("test-after")) } +func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) { + // This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption() + + // Change to busybox + pod.Spec.Containers[0].Image = "busybox" + pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"} + pod.Spec.Containers[1].Image = "busybox" + pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"} + + By(fmt.Sprintf("Creating pod %s", pod.Name)) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + Expect(err).ToNot(HaveOccurred()) + + err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, time.Minute) + Expect(err).ToNot(HaveOccurred()) + + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + + nodeIP, err := framework.GetHostExternalAddress(f.ClientSet, pod) + Expect(err).NotTo(HaveOccurred()) + nodeIP = 
nodeIP + ":22" + + By("Expecting the volume mount to be found.") + result, err := framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", pod.UID), nodeIP, framework.TestContext.Provider) + framework.LogSSHResult(result) + Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.") + Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) + + By("Expecting the subpath volume mount to be found.") + result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep volume-subpaths | grep %s", pod.UID), nodeIP, framework.TestContext.Provider) + framework.LogSSHResult(result) + Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.") + Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code)) + + By("Stopping the kubelet.") + utils.KubeletCommand(utils.KStop, f.ClientSet, pod) + defer func() { + if err != nil { + utils.KubeletCommand(utils.KStart, f.ClientSet, pod) + } + }() + + By(fmt.Sprintf("Deleting Pod %q", pod.Name)) + if forceDelete { + err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0)) + } else { + err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{}) + } + Expect(err).NotTo(HaveOccurred()) + + By("Starting the kubelet and waiting for pod to delete.") + utils.KubeletCommand(utils.KStart, f.ClientSet, pod) + err = f.WaitForPodTerminated(pod.Name, "") + if !apierrors.IsNotFound(err) && err != nil { + Expect(err).NotTo(HaveOccurred(), "Expected pod to terminate.") + } + + if forceDelete { + // With forceDelete, since pods are immediately deleted from API server, there is no way to be sure when volumes are torn down + // so wait some time to finish + time.Sleep(30 * time.Second) + } + + By("Expecting the volume mount not to be found.") + result, err = framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", pod.UID), nodeIP, framework.TestContext.Provider) + 
framework.LogSSHResult(result) + Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.") + Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).") + framework.Logf("Volume unmounted on node %s", pod.Spec.NodeName) + + By("Expecting the subpath volume mount not to be found.") + result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep volume-subpaths | grep %s", pod.UID), nodeIP, framework.TestContext.Provider) + framework.LogSSHResult(result) + Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.") + Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).") + framework.Logf("Subpath volume unmounted on node %s", pod.Spec.NodeName) +} + func podContainerExec(pod *v1.Pod, containerIndex int, bashExec string) (string, error) { return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", "/bin/sh", "-c", bashExec) }