Merge pull request #81163 from jsafrane/skip-unused-volumes

Skip unused volumes in VolumeManager
Authored by Kubernetes Prow Robot on 2019-08-16 17:02:36 -07:00, committed via GitHub (commit e319abf274).
13 changed files with 559 additions and 92 deletions
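For orientation: the situation this PR addresses is a pod that declares a volume in spec.volumes which no container mounts or maps; with this change the kubelet's VolumeManager skips such volumes instead of setting them up. An illustrative Go sketch (the names "app", "used" and "unused" are made up, not part of the diff):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	podSpec := v1.PodSpec{
		Containers: []v1.Container{{
			Name:         "app",
			VolumeMounts: []v1.VolumeMount{{Name: "used", MountPath: "/data"}},
		}},
		Volumes: []v1.Volume{
			{Name: "used"},   // referenced by a volumeMount: still mounted
			{Name: "unused"}, // referenced by no container: now skipped by the VolumeManager
		},
	}
	fmt.Printf("declared volumes: %d, referenced by containers: %d\n",
		len(podSpec.Volumes), len(podSpec.Containers[0].VolumeMounts))
}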


@ -118,6 +118,7 @@ import (
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csi"
"k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
utilexec "k8s.io/utils/exec"
"k8s.io/utils/integer"
)
@ -819,7 +820,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.getPodsDir(),
kubeDeps.Recorder,
experimentalCheckNodeCapabilitiesBeforeMount,
keepTerminatedPodVolumes)
keepTerminatedPodVolumes,
volumepathhandler.NewBlockVolumePathHandler())
klet.reasonCache = NewReasonCache()
klet.workQueue = queue.NewBasicWorkQueue(klet.clock)
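The net effect of this call-site change (together with the NewVolumeManager signature change further down) is dependency injection: NewVolumeManager no longer builds its own BlockVolumePathHandler, so the kubelet hands in the real implementation while the test files pass a fake. A rough, non-compilable sketch of both call shapes with the unchanged arguments elided (the assignment target and package alias are assumptions, not shown in this diff):

	// kubelet (production): real handler from pkg/volume/util/volumepathhandler
	klet.volumeManager = volumemanager.NewVolumeManager(
		/* ... existing arguments ... */
		keepTerminatedPodVolumes,
		volumepathhandler.NewBlockVolumePathHandler())

	// tests: fake handler from the volume testing helpers
	vm := volumemanager.NewVolumeManager(
		/* ... existing arguments ... */
		false, /* keepTerminatedPodVolumes */
		volumetest.NewBlockVolumePathHandler())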


@ -326,7 +326,8 @@ func newTestKubeletWithImageList(
kubelet.getPodsDir(),
kubelet.recorder,
false, /* experimentalCheckNodeCapabilitiesBeforeMount*/
false /* keepTerminatedPodVolumes */)
false, /* keepTerminatedPodVolumes */
volumetest.NewBlockVolumePathHandler())
kubelet.pluginManager = pluginmanager.NewPluginManager(
kubelet.getPluginsRegistrationDir(), /* sockDir */


@ -21,7 +21,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
@ -37,6 +37,21 @@ func TestListVolumesForPod(t *testing.T) {
kubelet := testKubelet.kubelet
pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
Containers: []v1.Container{
{
Name: "container1",
VolumeMounts: []v1.VolumeMount{
{
Name: "vol1",
MountPath: "/mnt/vol1",
},
{
Name: "vol2",
MountPath: "/mnt/vol2",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "vol1",
@ -74,7 +89,6 @@ func TestListVolumesForPod(t *testing.T) {
outerVolumeSpecName2 := "vol2"
assert.NotNil(t, volumesToReturn[outerVolumeSpecName2], "key %s", outerVolumeSpecName2)
}
func TestPodVolumesExist(t *testing.T) {
@ -89,6 +103,17 @@ func TestPodVolumesExist(t *testing.T) {
UID: "pod1uid",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container1",
VolumeMounts: []v1.VolumeMount{
{
Name: "vol1",
MountPath: "/mnt/vol1",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "vol1",
@ -107,6 +132,17 @@ func TestPodVolumesExist(t *testing.T) {
UID: "pod2uid",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container2",
VolumeMounts: []v1.VolumeMount{
{
Name: "vol2",
MountPath: "/mnt/vol2",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "vol2",
@ -125,6 +161,17 @@ func TestPodVolumesExist(t *testing.T) {
UID: "pod3uid",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container3",
VolumeMounts: []v1.VolumeMount{
{
Name: "vol3",
MountPath: "/mnt/vol3",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "vol3",
@ -160,6 +207,17 @@ func TestVolumeAttachAndMountControllerDisabled(t *testing.T) {
kubelet := testKubelet.kubelet
pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
Containers: []v1.Container{
{
Name: "container1",
VolumeMounts: []v1.VolumeMount{
{
Name: "vol1",
MountPath: "/mnt/vol1",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "vol1",
@ -204,6 +262,17 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
kubelet := testKubelet.kubelet
pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
Containers: []v1.Container{
{
Name: "container1",
VolumeMounts: []v1.VolumeMount{
{
Name: "vol1",
MountPath: "/mnt/vol1",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "vol1",
@ -290,6 +359,17 @@ func TestVolumeAttachAndMountControllerEnabled(t *testing.T) {
})
pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
Containers: []v1.Container{
{
Name: "container1",
VolumeMounts: []v1.VolumeMount{
{
Name: "vol1",
MountPath: "/mnt/vol1",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "vol1",
@ -356,6 +436,17 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {
})
pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
Containers: []v1.Container{
{
Name: "container1",
VolumeMounts: []v1.VolumeMount{
{
Name: "vol1",
MountPath: "/mnt/vol1",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "vol1",


@ -23,7 +23,7 @@ import (
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
@ -108,7 +108,8 @@ func TestRunOnce(t *testing.T) {
kb.getPodsDir(),
kb.recorder,
false, /* experimentalCheckNodeCapabilitiesBeforeMount */
false /* keepTerminatedPodVolumes */)
false, /* keepTerminatedPodVolumes */
volumetest.NewBlockVolumePathHandler())
// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
volumeStatsAggPeriod := time.Second * 10


@ -45,6 +45,7 @@ go_test(
srcs = ["volume_manager_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/features:go_default_library",
"//pkg/kubelet/config:go_default_library",
"//pkg/kubelet/configmap:go_default_library",
"//pkg/kubelet/container/testing:go_default_library",
@ -62,10 +63,12 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
],
)


@ -296,12 +296,18 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(
}
allVolumesAdded := true
mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers)
mounts, devices := util.GetPodVolumeNames(pod)
// Process volume spec for each volume defined in pod
for _, podVolume := range pod.Spec.Volumes {
if !mounts.Has(podVolume.Name) && !devices.Has(podVolume.Name) {
// Volume is not used in the pod, ignore it.
klog.V(4).Infof("Skipping unused volume %q for pod %q", podVolume.Name, format.Pod(pod))
continue
}
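The check above is the heart of the PR. A minimal self-contained sketch of the same filtering, assuming GetPodVolumeNames behaves as in the pkg/volume/util hunk further down (the simplified helper here ignores the BlockVolume feature-gate check the real one performs):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
)

// podVolumeNames is a simplified stand-in for util.GetPodVolumeNames.
func podVolumeNames(pod *v1.Pod) (mounts, devices sets.String) {
	mounts, devices = sets.NewString(), sets.NewString()
	for _, containers := range [][]v1.Container{pod.Spec.InitContainers, pod.Spec.Containers} {
		for _, c := range containers {
			for _, m := range c.VolumeMounts {
				mounts.Insert(m.Name)
			}
			for _, d := range c.VolumeDevices {
				devices.Insert(d.Name)
			}
		}
	}
	return mounts, devices
}

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:         "app",
				VolumeMounts: []v1.VolumeMount{{Name: "vol1", MountPath: "/mnt/vol1"}},
			}},
			Volumes: []v1.Volume{{Name: "vol1"}, {Name: "vol2"}},
		},
	}
	mounts, devices := podVolumeNames(pod)
	for _, podVolume := range pod.Spec.Volumes {
		if !mounts.Has(podVolume.Name) && !devices.Has(podVolume.Name) {
			fmt.Printf("skipping unused volume %q\n", podVolume.Name) // vol2
			continue
		}
		fmt.Printf("processing volume %q\n", podVolume.Name) // vol1
	}
}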
pvc, volumeSpec, volumeGidValue, err :=
dswp.createVolumeSpec(podVolume, pod.Name, pod.Namespace, mountsMap, devicesMap)
dswp.createVolumeSpec(podVolume, pod.Name, pod.Namespace, mounts, devices)
if err != nil {
klog.Errorf(
"Error processing volume %q for pod %q: %v",
@ -481,11 +487,11 @@ func (dswp *desiredStateOfWorldPopulator) deleteProcessedPod(
delete(dswp.pods.processedPods, podName)
}
// createVolumeSpec creates and returns a mutatable volume.Spec object for the
// createVolumeSpec creates and returns a mutable volume.Spec object for the
// specified volume. It dereference any PVC to get PV objects, if needed.
// Returns an error if unable to obtain the volume at this time.
func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
podVolume v1.Volume, podName string, podNamespace string, mountsMap map[string]bool, devicesMap map[string]bool) (*v1.PersistentVolumeClaim, *volume.Spec, string, error) {
podVolume v1.Volume, podName string, podNamespace string, mounts, devices sets.String) (*v1.PersistentVolumeClaim, *volume.Spec, string, error) {
if pvcSource :=
podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
klog.V(5).Infof(
@ -538,14 +544,14 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
return nil, nil, "", err
}
// Error if a container has volumeMounts but the volumeMode of PVC isn't Filesystem
if mountsMap[podVolume.Name] && volumeMode != v1.PersistentVolumeFilesystem {
if mounts.Has(podVolume.Name) && volumeMode != v1.PersistentVolumeFilesystem {
return nil, nil, "", fmt.Errorf(
"volume %s has volumeMode %s, but is specified in volumeMounts",
podVolume.Name,
volumeMode)
}
// Error if a container has volumeDevices but the volumeMode of PVC isn't Block
if devicesMap[podVolume.Name] && volumeMode != v1.PersistentVolumeBlock {
if devices.Has(podVolume.Name) && volumeMode != v1.PersistentVolumeBlock {
return nil, nil, "", fmt.Errorf(
"volume %s has volumeMode %s, but is specified in volumeDevices",
podVolume.Name,
@ -628,28 +634,6 @@ func (dswp *desiredStateOfWorldPopulator) getPVSpec(
return volume.NewSpecFromPersistentVolume(pv, pvcReadOnly), volumeGidValue, nil
}
func (dswp *desiredStateOfWorldPopulator) makeVolumeMap(containers []v1.Container) (map[string]bool, map[string]bool) {
volumeDevicesMap := make(map[string]bool)
volumeMountsMap := make(map[string]bool)
for _, container := range containers {
if container.VolumeMounts != nil {
for _, mount := range container.VolumeMounts {
volumeMountsMap[mount.Name] = true
}
}
// TODO: remove feature gate check after no longer needed
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) &&
container.VolumeDevices != nil {
for _, device := range container.VolumeDevices {
volumeDevicesMap[device.Name] = true
}
}
}
return volumeMountsMap, volumeDevicesMap
}
func getPVVolumeGidAnnotationValue(pv *v1.PersistentVolume) string {
if volumeGid, ok := pv.Annotations[util.VolumeGidAnnotationKey]; ok {
return volumeGid


@ -22,7 +22,7 @@ import (
"fmt"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -410,7 +410,7 @@ func TestCreateVolumeSpec_Valid_File_VolumeMounts(t *testing.T) {
pod := createPodWithVolume("dswp-test-pod", "dswp-test-volume-name", "file-bound", containers)
fakePodManager.AddPod(pod)
mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers)
mountsMap, devicesMap := util.GetPodVolumeNames(pod)
_, volumeSpec, _, err :=
dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap)
@ -459,7 +459,7 @@ func TestCreateVolumeSpec_Valid_Block_VolumeDevices(t *testing.T) {
pod := createPodWithVolume("dswp-test-pod", "dswp-test-volume-name", "block-bound", containers)
fakePodManager.AddPod(pod)
mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers)
mountsMap, devicesMap := util.GetPodVolumeNames(pod)
_, volumeSpec, _, err :=
dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap)
@ -508,7 +508,7 @@ func TestCreateVolumeSpec_Invalid_File_VolumeDevices(t *testing.T) {
pod := createPodWithVolume("dswp-test-pod", "dswp-test-volume-name", "file-bound", containers)
fakePodManager.AddPod(pod)
mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers)
mountsMap, devicesMap := util.GetPodVolumeNames(pod)
_, volumeSpec, _, err :=
dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap)
@ -557,7 +557,7 @@ func TestCreateVolumeSpec_Invalid_Block_VolumeMounts(t *testing.T) {
pod := createPodWithVolume("dswp-test-pod", "dswp-test-volume-name", "block-bound", containers)
fakePodManager.AddPod(pod)
mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers)
mountsMap, devicesMap := util.GetPodVolumeNames(pod)
_, volumeSpec, _, err :=
dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap)


@ -157,7 +157,8 @@ func NewVolumeManager(
kubeletPodsDir string,
recorder record.EventRecorder,
checkNodeCapabilitiesBeforeMount bool,
keepTerminatedPodVolumes bool) VolumeManager {
keepTerminatedPodVolumes bool,
blockVolumePathHandler volumepathhandler.BlockVolumePathHandler) VolumeManager {
vm := &volumeManager{
kubeClient: kubeClient,
@ -169,7 +170,7 @@ func NewVolumeManager(
volumePluginMgr,
recorder,
checkNodeCapabilitiesBeforeMount,
volumepathhandler.NewBlockVolumePathHandler())),
blockVolumePathHandler)),
}
vm.desiredStateOfWorldPopulator = populator.NewDesiredStateOfWorldPopulator(
@ -435,13 +436,8 @@ func filterUnmountedVolumes(mountedVolumes sets.String, expectedVolumes []string
// getExpectedVolumes returns a list of volumes that must be mounted in order to
// consider the volume setup step for this pod satisfied.
func getExpectedVolumes(pod *v1.Pod) []string {
expectedVolumes := []string{}
for _, podVolume := range pod.Spec.Volumes {
expectedVolumes = append(expectedVolumes, podVolume.Name)
}
return expectedVolumes
mounts, devices := util.GetPodVolumeNames(pod)
return mounts.Union(devices).UnsortedList()
}
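For reference, a tiny sketch of the set arithmetic now used here: the union of mounted and mapped volume names is what pod setup is measured against, so a volume that appears only in spec.volumes no longer blocks WaitForAttachAndMount (volume names are illustrative):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	mounts := sets.NewString("vol1")  // names referenced by volumeMounts
	devices := sets.NewString("vol2") // names referenced by volumeDevices
	// "vol3" declared only in spec.volumes is absent from both sets.
	expected := mounts.Union(devices).UnsortedList()
	fmt.Println(expected) // [vol1 vol2], in unspecified order
}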
// getExtraSupplementalGid returns the value of an extra supplemental GID as


@ -27,10 +27,13 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
utiltesting "k8s.io/client-go/util/testing"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/configmap"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
@ -51,45 +54,103 @@ const (
)
func TestGetMountedVolumesForPodAndGetVolumesInUse(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
cpm := podtest.NewMockCheckpointManager()
podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), secret.NewFakeManager(), configmap.NewFakeManager(), cpm)
node, pod, pv, claim := createObjects()
kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)
manager := newTestVolumeManager(tmpDir, podManager, kubeClient)
stopCh := runVolumeManager(manager)
defer close(stopCh)
podManager.SetPods([]*v1.Pod{pod})
// Fake node status update
go simulateVolumeInUseUpdate(
v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name),
stopCh,
manager)
err = manager.WaitForAttachAndMount(pod)
if err != nil {
t.Errorf("Expected success: %v", err)
tests := []struct {
name string
pvMode, podMode v1.PersistentVolumeMode
disableBlockFeature bool
expectMount bool
expectError bool
}{
{
name: "filesystem volume",
pvMode: v1.PersistentVolumeFilesystem,
podMode: v1.PersistentVolumeFilesystem,
expectMount: true,
expectError: false,
},
{
name: "block volume",
pvMode: v1.PersistentVolumeBlock,
podMode: v1.PersistentVolumeBlock,
expectMount: true,
expectError: false,
},
{
name: "block volume with block feature off",
pvMode: v1.PersistentVolumeBlock,
podMode: v1.PersistentVolumeBlock,
disableBlockFeature: true,
expectMount: false,
expectError: false,
},
{
name: "mismatched volume",
pvMode: v1.PersistentVolumeBlock,
podMode: v1.PersistentVolumeFilesystem,
expectMount: false,
expectError: true,
},
}
expectedMounted := pod.Spec.Volumes[0].Name
actualMounted := manager.GetMountedVolumesForPod(types.UniquePodName(pod.ObjectMeta.UID))
if _, ok := actualMounted[expectedMounted]; !ok || (len(actualMounted) != 1) {
t.Errorf("Expected %v to be mounted to pod but got %v", expectedMounted, actualMounted)
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if test.disableBlockFeature {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, false)()
}
expectedInUse := []v1.UniqueVolumeName{v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name)}
actualInUse := manager.GetVolumesInUse()
if !reflect.DeepEqual(expectedInUse, actualInUse) {
t.Errorf("Expected %v to be in use but got %v", expectedInUse, actualInUse)
tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
cpm := podtest.NewMockCheckpointManager()
podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), secret.NewFakeManager(), configmap.NewFakeManager(), cpm)
node, pod, pv, claim := createObjects(test.pvMode, test.podMode)
kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)
manager := newTestVolumeManager(tmpDir, podManager, kubeClient)
stopCh := runVolumeManager(manager)
defer close(stopCh)
podManager.SetPods([]*v1.Pod{pod})
// Fake node status update
go simulateVolumeInUseUpdate(
v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name),
stopCh,
manager)
err = manager.WaitForAttachAndMount(pod)
if err != nil && !test.expectError {
t.Errorf("Expected success: %v", err)
}
if err == nil && test.expectError {
t.Errorf("Expected error, got none")
}
expectedMounted := pod.Spec.Volumes[0].Name
actualMounted := manager.GetMountedVolumesForPod(types.UniquePodName(pod.ObjectMeta.UID))
if test.expectMount {
if _, ok := actualMounted[expectedMounted]; !ok || (len(actualMounted) != 1) {
t.Errorf("Expected %v to be mounted to pod but got %v", expectedMounted, actualMounted)
}
} else {
if _, ok := actualMounted[expectedMounted]; ok || (len(actualMounted) != 0) {
t.Errorf("Expected %v not to be mounted to pod but got %v", expectedMounted, actualMounted)
}
}
expectedInUse := []v1.UniqueVolumeName{}
if test.expectMount {
expectedInUse = []v1.UniqueVolumeName{v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name)}
}
actualInUse := manager.GetVolumesInUse()
if !reflect.DeepEqual(expectedInUse, actualInUse) {
t.Errorf("Expected %v to be in use but got %v", expectedInUse, actualInUse)
}
})
}
}
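One detail worth noting in the subtests above is the feature-gate handling: SetFeatureGateDuringTest flips the gate and returns a restore function, which is why the call is immediately deferred with a trailing (). A minimal sketch of that pattern (package name is illustrative):

package example

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

func TestWithBlockVolumeDisabled(t *testing.T) {
	// Disable the gate for this test only; the returned function restores the
	// previous value when the test finishes.
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, false)()

	// ... code under test observes the BlockVolume feature gate as disabled ...
}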
@ -102,7 +163,7 @@ func TestInitialPendingVolumesForPodAndGetVolumesInUse(t *testing.T) {
cpm := podtest.NewMockCheckpointManager()
podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), secret.NewFakeManager(), configmap.NewFakeManager(), cpm)
node, pod, pv, claim := createObjects()
node, pod, pv, claim := createObjects(v1.PersistentVolumeFilesystem, v1.PersistentVolumeFilesystem)
claim.Status = v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimPending,
}
@ -148,7 +209,7 @@ func TestGetExtraSupplementalGroupsForPod(t *testing.T) {
cpm := podtest.NewMockCheckpointManager()
podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), secret.NewFakeManager(), configmap.NewFakeManager(), cpm)
node, pod, _, claim := createObjects()
node, pod, _, claim := createObjects(v1.PersistentVolumeFilesystem, v1.PersistentVolumeFilesystem)
existingGid := pod.Spec.SecurityContext.SupplementalGroups[0]
@ -230,7 +291,7 @@ func newTestVolumeManager(tmpDir string, podManager kubepod.Manager, kubeClient
// TODO (#51147) inject mock prober
plugMgr.InitPlugins([]volume.VolumePlugin{plug}, nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, kubeClient, nil))
statusManager := status.NewManager(kubeClient, podManager, &statustest.FakePodDeletionSafetyProvider{})
fakePathHandler := volumetest.NewBlockVolumePathHandler()
vm := NewVolumeManager(
true,
testHostname,
@ -244,14 +305,15 @@ func newTestVolumeManager(tmpDir string, podManager kubepod.Manager, kubeClient
"",
fakeRecorder,
false, /* experimentalCheckNodeCapabilitiesBeforeMount */
false /* keepTerminatedPodVolumes */)
false, /* keepTerminatedPodVolumes */
fakePathHandler)
return vm
}
// createObjects returns objects for making a fake clientset. The pv is
// already attached to the node and bound to the claim used by the pod.
func createObjects() (*v1.Node, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
func createObjects(pvMode, podMode v1.PersistentVolumeMode) (*v1.Node, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testHostname},
Status: v1.NodeStatus{
@ -269,6 +331,11 @@ func createObjects() (*v1.Node, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVol
UID: "1234",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container1",
},
},
Volumes: []v1.Volume{
{
Name: "vol1",
@ -284,7 +351,24 @@ func createObjects() (*v1.Node, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVol
},
},
}
fs := v1.PersistentVolumeFilesystem
switch podMode {
case v1.PersistentVolumeBlock:
pod.Spec.Containers[0].VolumeDevices = []v1.VolumeDevice{
{
Name: "vol1",
DevicePath: "/dev/vol1",
},
}
case v1.PersistentVolumeFilesystem:
pod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
{
Name: "vol1",
MountPath: "/mnt/vol1",
},
}
default:
// The volume is not mounted nor mapped
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
@ -298,7 +382,7 @@ func createObjects() (*v1.Node, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVol
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
VolumeMode: &fs,
VolumeMode: &pvMode,
},
}
claim := &v1.PersistentVolumeClaim{
@ -308,6 +392,7 @@ func createObjects() (*v1.Node, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVol
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
VolumeMode: &pvMode,
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,


@ -548,3 +548,31 @@ func IsLocalEphemeralVolume(volume v1.Volume) bool {
(volume.EmptyDir != nil && volume.EmptyDir.Medium != v1.StorageMediumMemory) ||
volume.ConfigMap != nil || volume.DownwardAPI != nil
}
// GetPodVolumeNames returns names of volumes that are used in a pod,
// either as filesystem mount or raw block device.
func GetPodVolumeNames(pod *v1.Pod) (mounts sets.String, devices sets.String) {
mounts = sets.NewString()
devices = sets.NewString()
addContainerVolumes(pod.Spec.Containers, mounts, devices)
addContainerVolumes(pod.Spec.InitContainers, mounts, devices)
return
}
func addContainerVolumes(containers []v1.Container, mounts, devices sets.String) {
for _, container := range containers {
if container.VolumeMounts != nil {
for _, mount := range container.VolumeMounts {
mounts.Insert(mount.Name)
}
}
// TODO: remove feature gate check after no longer needed
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) &&
container.VolumeDevices != nil {
for _, device := range container.VolumeDevices {
devices.Insert(device.Name)
}
}
}
}
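A short usage sketch of the new helper (assuming it lives in the k8s.io/kubernetes/pkg/volume/util package, as the populator's util.GetPodVolumeNames call suggests); note that volumeDevices are only collected while the BlockVolume feature gate is enabled:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{{
				Name:         "init",
				VolumeMounts: []v1.VolumeMount{{Name: "config", MountPath: "/etc/config"}},
			}},
			Containers: []v1.Container{{
				Name:          "app",
				VolumeMounts:  []v1.VolumeMount{{Name: "data", MountPath: "/data"}},
				VolumeDevices: []v1.VolumeDevice{{Name: "raw", DevicePath: "/dev/xvda"}},
			}},
		},
	}
	mounts, devices := volumeutil.GetPodVolumeNames(pod)
	fmt.Println(mounts.List())  // [config data]
	fmt.Println(devices.List()) // [raw], or [] if the BlockVolume gate is disabled
}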


@ -24,6 +24,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
_ "k8s.io/kubernetes/pkg/apis/core/install"
"reflect"
@ -658,3 +659,180 @@ func TestMakeAbsolutePath(t *testing.T) {
}
}
}
func TestGetPodVolumeNames(t *testing.T) {
tests := []struct {
name string
pod *v1.Pod
expectedMounts sets.String
expectedDevices sets.String
}{
{
name: "empty pod",
pod: &v1.Pod{
Spec: v1.PodSpec{},
},
expectedMounts: sets.NewString(),
expectedDevices: sets.NewString(),
},
{
name: "pod with volumes",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container",
VolumeMounts: []v1.VolumeMount{
{
Name: "vol1",
},
{
Name: "vol2",
},
},
VolumeDevices: []v1.VolumeDevice{
{
Name: "vol3",
},
{
Name: "vol4",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "vol1",
},
{
Name: "vol2",
},
{
Name: "vol3",
},
{
Name: "vol4",
},
},
},
},
expectedMounts: sets.NewString("vol1", "vol2"),
expectedDevices: sets.NewString("vol3", "vol4"),
},
{
name: "pod with init containers",
pod: &v1.Pod{
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: "initContainer",
VolumeMounts: []v1.VolumeMount{
{
Name: "vol1",
},
{
Name: "vol2",
},
},
VolumeDevices: []v1.VolumeDevice{
{
Name: "vol3",
},
{
Name: "vol4",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "vol1",
},
{
Name: "vol2",
},
{
Name: "vol3",
},
{
Name: "vol4",
},
},
},
},
expectedMounts: sets.NewString("vol1", "vol2"),
expectedDevices: sets.NewString("vol3", "vol4"),
},
{
name: "pod with multiple containers",
pod: &v1.Pod{
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: "initContainer1",
VolumeMounts: []v1.VolumeMount{
{
Name: "vol1",
},
},
},
{
Name: "initContainer2",
VolumeDevices: []v1.VolumeDevice{
{
Name: "vol2",
},
},
},
},
Containers: []v1.Container{
{
Name: "container1",
VolumeMounts: []v1.VolumeMount{
{
Name: "vol3",
},
},
},
{
Name: "container2",
VolumeDevices: []v1.VolumeDevice{
{
Name: "vol4",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "vol1",
},
{
Name: "vol2",
},
{
Name: "vol3",
},
{
Name: "vol4",
},
},
},
},
expectedMounts: sets.NewString("vol1", "vol3"),
expectedDevices: sets.NewString("vol2", "vol4"),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
mounts, devices := GetPodVolumeNames(test.pod)
if !mounts.Equal(test.expectedMounts) {
t.Errorf("Expected mounts: %q, got %q", mounts.List(), test.expectedMounts.List())
}
if !devices.Equal(test.expectedDevices) {
t.Errorf("Expected devices: %q, got %q", devices.List(), test.expectedDevices.List())
}
})
}
}


@ -18,8 +18,10 @@ package testsuites
import (
"fmt"
"strings"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
@ -31,6 +33,7 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
@ -252,7 +255,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
}()
ginkgo.By("Waiting for pod to fail")
ginkgo.By("Waiting for the pod to fail")
// Wait for an event that the pod is invalid.
eventSelector := fields.Set{
"involvedObject.kind": "Pod",
@ -279,6 +282,54 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
framework.ExpectEqual(p.Status.Phase, v1.PodPending)
})
ginkgo.It("should not mount / map unused volumes in a pod", func() {
if pattern.VolMode == v1.PersistentVolumeBlock {
skipBlockTest(driver)
}
init()
l.genericVolumeTestResource = *createGenericVolumeTestResource(driver, l.config, pattern)
defer cleanup()
ginkgo.By("Creating pod")
var err error
pod := framework.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil)
for i := range pod.Spec.Containers {
pod.Spec.Containers[i].VolumeDevices = nil
pod.Spec.Containers[i].VolumeMounts = nil
}
// Run the pod
pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod)
framework.ExpectNoError(err)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
}()
err = e2epod.WaitForPodNameRunningInNamespace(l.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err)
// Reload the pod to get its node
pod, err = l.cs.CoreV1().Pods(l.ns.Name).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
ginkgo.By("Listing mounted volumes in the pod")
volumePaths, devicePaths, err := utils.ListPodVolumePluginDirectory(l.cs, pod)
framework.ExpectNoError(err)
driverInfo := driver.GetDriverInfo()
volumePlugin := driverInfo.InTreePluginName
if len(volumePlugin) == 0 {
// TODO: check if it's a CSI volume first
volumePlugin = "kubernetes.io/csi"
}
ginkgo.By(fmt.Sprintf("Checking that volume plugin %s is not used in pod directory", volumePlugin))
safeVolumePlugin := strings.ReplaceAll(volumePlugin, "/", "~")
for _, path := range volumePaths {
gomega.Expect(path).NotTo(gomega.ContainSubstring(safeVolumePlugin), fmt.Sprintf("no %s volume should be mounted into pod directory", volumePlugin))
}
for _, path := range devicePaths {
gomega.Expect(path).NotTo(gomega.ContainSubstring(safeVolumePlugin), fmt.Sprintf("no %s volume should be symlinked into pod directory", volumePlugin))
}
})
}
func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode,


@ -634,3 +634,50 @@ func CheckWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string,
VerifyExecInPodSucceed(pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
VerifyExecInPodSucceed(pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s bs=%d count=1", encoded, pathForVolMode, len))
}
// ListPodVolumePluginDirectory returns all volumes in /var/lib/kubelet/pods/<pod UID>/volumes/* and
// /var/lib/kubelet/pods/<pod UID>/volumeDevices/*
// Sample output:
// /var/lib/kubelet/pods/a4717a30-000a-4081-a7a8-f51adf280036/volumes/kubernetes.io~secret/default-token-rphdt
// /var/lib/kubelet/pods/4475b7a3-4a55-4716-9119-fd0053d9d4a6/volumeDevices/kubernetes.io~aws-ebs/pvc-5f9f80f5-c90b-4586-9966-83f91711e1c0
func ListPodVolumePluginDirectory(c clientset.Interface, pod *v1.Pod) (mounts []string, devices []string, err error) {
mountPath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumes")
devicePath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumeDevices")
nodeIP, err := framework.GetHostAddress(c, pod)
if err != nil {
return nil, nil, fmt.Errorf("error getting IP address of node %s: %s", pod.Spec.NodeName, err)
}
nodeIP = nodeIP + ":22"
mounts, err = listPodDirectory(nodeIP, mountPath)
if err != nil {
return nil, nil, err
}
devices, err = listPodDirectory(nodeIP, devicePath)
if err != nil {
return nil, nil, err
}
return mounts, devices, nil
}
func listPodDirectory(hostAddress string, path string) ([]string, error) {
// Check the directory exists
res, err := e2essh.SSH("test -d "+path, hostAddress, framework.TestContext.Provider)
e2essh.LogResult(res)
if res.Code != 0 {
// The directory does not exist
return nil, nil
}
// Inside /var/lib/kubelet/pods/<pod>/volumes, look for <volume_plugin>/<volume-name>, hence depth 2
res, err = e2essh.SSH("find "+path+" -mindepth 2 -maxdepth 2", hostAddress, framework.TestContext.Provider)
e2essh.LogResult(res)
if err != nil {
return nil, fmt.Errorf("error checking directory %s on node %s: %s", path, hostAddress, err)
}
if res.Code != 0 {
return nil, fmt.Errorf("error checking directory %s on node %s: exit code %d", path, hostAddress, res.Code)
}
return strings.Split(res.Stdout, "\n"), nil
}
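For completeness, a hedged sketch of how the volume-mode e2e test above consumes this helper; the wrapper name expectPluginNotUsed is made up, while the functions it calls all appear in this diff:

package testsuites

import (
	"strings"

	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

// expectPluginNotUsed asserts that no volume of the given plugin is mounted or
// mapped into the pod's kubelet directories on the node.
func expectPluginNotUsed(cs clientset.Interface, pod *v1.Pod, volumePlugin string) {
	volumePaths, devicePaths, err := utils.ListPodVolumePluginDirectory(cs, pod)
	framework.ExpectNoError(err)
	// On disk the plugin name uses "~" instead of "/", e.g. "kubernetes.io~csi".
	safeVolumePlugin := strings.ReplaceAll(volumePlugin, "/", "~")
	for _, path := range append(volumePaths, devicePaths...) {
		gomega.Expect(path).NotTo(gomega.ContainSubstring(safeVolumePlugin))
	}
}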