mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-28 14:07:14 +00:00)
Use inner volume name instead of outer volume name for subpath directory
This commit is contained in:
parent 67be0a90f4
commit 91c557f504
@@ -470,6 +470,9 @@ type VolumeInfo struct {
 	// Whether the volume permission is set to read-only or not
 	// This value is passed from volume.spec
 	ReadOnly bool
+	// Inner volume spec name, which is the PV name if used, otherwise
+	// it is the same as the outer volume spec name.
+	InnerVolumeSpecName string
 }
 
 type VolumeMap map[string]VolumeInfo
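For context on the new field: a pod volume has two names. The outer volume spec name is the one declared in the pod spec; the inner volume spec name is, per the comment above, the PV name when a PV is used and otherwise identical to the outer name. A minimal illustrative sketch of that distinction (the struct is trimmed down and the volume/PV names are hypothetical, not taken from this commit):

package main

import "fmt"

// Trimmed-down stand-in for the kubelet's VolumeInfo, carrying only the
// fields this commit touches; it is an illustration, not the real type.
type VolumeInfo struct {
	ReadOnly            bool
	InnerVolumeSpecName string
}

func main() {
	// Keyed by the outer volume spec name, as GetMountedVolumesForPod does.
	podVolumes := map[string]VolumeInfo{
		// "data" is backed by a PV, so the inner name is the PV name
		// (both names here are hypothetical).
		"data": {InnerVolumeSpecName: "pvc-6f0c0e21-0000-0000-0000-000000000000"},
		// "scratch" is an in-line emptyDir, so inner and outer names match.
		"scratch": {InnerVolumeSpecName: "scratch"},
	}
	for outer, info := range podVolumes {
		fmt.Printf("outer=%q inner=%q readOnly=%v\n", outer, info.InnerVolumeSpecName, info.ReadOnly)
	}
}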
@@ -235,7 +235,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
 			hostPath, cleanupAction, err = mounter.PrepareSafeSubpath(mountutil.Subpath{
 				VolumeMountIndex: i,
 				Path: hostPath,
-				VolumeName: mount.Name,
+				VolumeName: vol.InnerVolumeSpecName,
 				VolumePath: volumePath,
 				PodDir: podDir,
 				ContainerName: container.Name,
@@ -255,9 +255,10 @@ func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) co
 	podVolumes := make(container.VolumeMap)
 	for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
 		podVolumes[mountedVolume.OuterVolumeSpecName] = container.VolumeInfo{
 			Mounter: mountedVolume.Mounter,
 			BlockVolumeMapper: mountedVolume.BlockVolumeMapper,
 			ReadOnly: mountedVolume.VolumeSpec.ReadOnly,
+			InnerVolumeSpecName: mountedVolume.InnerVolumeSpecName,
 		}
 	}
 	return podVolumes
@@ -815,9 +815,10 @@ func (mounter *Mounter) CleanSubPaths(podDir string, volumeName string) error {
 
 // This implementation is shared between Linux and NsEnterMounter
 func doCleanSubPaths(mounter Interface, podDir string, volumeName string) error {
-	glog.V(4).Infof("Cleaning up subpath mounts for %s", podDir)
 	// scan /var/lib/kubelet/pods/<uid>/volume-subpaths/<volume>/*
 	subPathDir := filepath.Join(podDir, containerSubPathDirectoryName, volumeName)
+	glog.V(4).Infof("Cleaning up subpath mounts for %s", subPathDir)
+
 	containerDirs, err := ioutil.ReadDir(subPathDir)
 	if err != nil {
 		if os.IsNotExist(err) {
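Both the setup side (PrepareSafeSubpath in makeMounts, above) and the cleanup side (CleanSubPaths / doCleanSubPaths, here) derive the per-volume subpath directory from a volume name, so they have to agree on which name is used. A minimal, self-contained sketch of that path construction, assuming the directory component is "volume-subpaths" as the in-diff comment indicates; the pod directory and volume names are hypothetical:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Assumed value, per the "/var/lib/kubelet/pods/<uid>/volume-subpaths/<volume>/*"
	// comment in the diff; the real constant is defined alongside doCleanSubPaths.
	const containerSubPathDirectoryName = "volume-subpaths"

	// Hypothetical pod directory and volume names, for illustration only.
	podDir := "/var/lib/kubelet/pods/8f2f4a2e-1111-2222-3333-444455556666"
	innerName := "pvc-6f0c0e21" // PV name of a PVC-backed volume
	outerName := "data"         // pod-level volume name

	// If setup and cleanup keyed the directory by different names, the two
	// joins below would point at different paths, and bind mounts created
	// under the setup path would never be visited during cleanup.
	setupDir := filepath.Join(podDir, containerSubPathDirectoryName, innerName)
	cleanupDir := filepath.Join(podDir, containerSubPathDirectoryName, outerName)
	fmt.Println("setup:  ", setupDir)
	fmt.Println("cleanup:", cleanupDir)
	fmt.Println("match:  ", setupDir == cleanupDir)
}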
@@ -672,7 +672,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc(
 
 	// Remove all bind-mounts for subPaths
 	podDir := path.Join(podsDir, string(volumeToUnmount.PodUID))
-	if err := mounter.CleanSubPaths(podDir, volumeToUnmount.OuterVolumeSpecName); err != nil {
+	if err := mounter.CleanSubPaths(podDir, volumeToUnmount.InnerVolumeSpecName); err != nil {
 		return volumeToUnmount.GenerateError("error cleaning subPath mounts", err)
 	}
 
@@ -23,6 +23,7 @@ import (
 
 	"k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -60,7 +61,7 @@ var initVolSources = map[string]func() volSource{
 	"hostPath": initHostpath,
 	"hostPathSymlink": initHostpathSymlink,
 	"emptyDir": initEmptydir,
-	"gcePD": initGCEPD,
+	"gcePDPVC": initGCEPD,
 	"gcePDPartitioned": initGCEPDPartition,
 	"nfs": initNFS,
 	"nfsPVC": initNFSPVC,
@ -307,6 +308,17 @@ var _ = utils.SIGDescribe("Subpath", func() {
|
|||||||
|
|
||||||
testPodContainerRestart(f, pod, filePathInVolume, filePathInSubpath)
|
testPodContainerRestart(f, pod, filePathInVolume, filePathInSubpath)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() {
|
||||||
|
testSubpathReconstruction(f, pod, false)
|
||||||
|
})
|
||||||
|
|
||||||
|
It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() {
|
||||||
|
if curVolType == "hostPath" || curVolType == "hostPathSymlink" {
|
||||||
|
framework.Skipf("%s volume type does not support reconstruction, skipping", curVolType)
|
||||||
|
}
|
||||||
|
testSubpathReconstruction(f, pod, true)
|
||||||
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -549,6 +561,85 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod, fileInVolume,
 	Expect(strings.TrimSpace(out)).To(BeEquivalentTo("test-after"))
 }
 
+func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) {
+	// This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption()
+
+	// Change to busybox
+	pod.Spec.Containers[0].Image = "busybox"
+	pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
+	pod.Spec.Containers[1].Image = "busybox"
+	pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
+
+	By(fmt.Sprintf("Creating pod %s", pod.Name))
+	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+	Expect(err).ToNot(HaveOccurred())
+
+	err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, time.Minute)
+	Expect(err).ToNot(HaveOccurred())
+
+	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
+	Expect(err).ToNot(HaveOccurred())
+
+	nodeIP, err := framework.GetHostExternalAddress(f.ClientSet, pod)
+	Expect(err).NotTo(HaveOccurred())
+	nodeIP = nodeIP + ":22"
+
+	By("Expecting the volume mount to be found.")
+	result, err := framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", pod.UID), nodeIP, framework.TestContext.Provider)
+	framework.LogSSHResult(result)
+	Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
+	Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
+
+	By("Expecting the subpath volume mount to be found.")
+	result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep volume-subpaths | grep %s", pod.UID), nodeIP, framework.TestContext.Provider)
+	framework.LogSSHResult(result)
+	Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
+	Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
+
+	By("Stopping the kubelet.")
+	utils.KubeletCommand(utils.KStop, f.ClientSet, pod)
+	defer func() {
+		if err != nil {
+			utils.KubeletCommand(utils.KStart, f.ClientSet, pod)
+		}
+	}()
+
+	By(fmt.Sprintf("Deleting Pod %q", pod.Name))
+	if forceDelete {
+		err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
+	} else {
+		err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{})
+	}
+	Expect(err).NotTo(HaveOccurred())
+
+	By("Starting the kubelet and waiting for pod to delete.")
+	utils.KubeletCommand(utils.KStart, f.ClientSet, pod)
+	err = f.WaitForPodTerminated(pod.Name, "")
+	if !apierrs.IsNotFound(err) && err != nil {
+		Expect(err).NotTo(HaveOccurred(), "Expected pod to terminate.")
+	}
+
+	if forceDelete {
+		// With forceDelete, since pods are immediately deleted from API server, there is no way to be sure when volumes are torn down
+		// so wait some time to finish
+		time.Sleep(30 * time.Second)
+	}
+
+	By("Expecting the volume mount not to be found.")
+	result, err = framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", pod.UID), nodeIP, framework.TestContext.Provider)
+	framework.LogSSHResult(result)
+	Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
+	Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
+	framework.Logf("Volume unmounted on node %s", pod.Spec.NodeName)
+
+	By("Expecting the subpath volume mount not to be found.")
+	result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep volume-subpaths | grep %s", pod.UID), nodeIP, framework.TestContext.Provider)
+	framework.LogSSHResult(result)
+	Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
+	Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
+	framework.Logf("Subpath volume unmounted on node %s", pod.Spec.NodeName)
+}
+
 func podContainerExec(pod *v1.Pod, containerIndex int, bashExec string) (string, error) {
 	return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", "/bin/sh", "-c", bashExec)
 }
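The new testSubpathReconstruction case asserts over SSH that, once the pod is deleted and the kubelet restarted, no mount entries referencing the pod UID remain, first for the regular volume mounts and then for the volume-subpaths bind mounts. A rough local analogue of that mountinfo check, with a hypothetical pod UID, is sketched below; it scans /proc/self/mountinfo the same way the test's grep pipeline does:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	// Hypothetical pod UID; the e2e test uses pod.UID from the API object.
	podUID := "8f2f4a2e-1111-2222-3333-444455556666"

	f, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		fmt.Fprintln(os.Stderr, "cannot read mountinfo:", err)
		return
	}
	defer f.Close()

	// Count entries that mention both "volume-subpaths" and the pod UID,
	// mirroring: cat /proc/self/mountinfo | grep volume-subpaths | grep <uid>
	leftovers := 0
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, "volume-subpaths") && strings.Contains(line, podUID) {
			leftovers++
		}
	}
	// After cleanup the test expects zero such entries.
	fmt.Printf("leftover subpath bind mounts: %d\n", leftovers)
}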