Add test for unused volumes
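
The new test creates a pod that references a PVC in pod.Spec.Volumes but
strips all VolumeMounts and VolumeDevices from its containers, then SSHes
to the node and checks /var/lib/kubelet/pods/&lt;pod UID&gt;/volumes and
.../volumeDevices to verify that kubelet neither mounted nor mapped the
unused volume. A new helper, utils.ListPodVolumePluginDirectory, lists the
contents of both directories.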

Jan Safranek 2019-08-13 17:11:19 +02:00
parent 5b69362ff0
commit 2c79ffe274
2 changed files with 99 additions and 1 deletion

test/e2e/storage/testsuites/volumemode.go

@@ -18,8 +18,10 @@ package testsuites
import (
	"fmt"
	"strings"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
@@ -31,6 +33,7 @@ import (
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

const (
@@ -249,7 +252,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
			framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
		}()
-		ginkgo.By("Waiting for pod to fail")
+		ginkgo.By("Waiting for the pod to fail")
		// Wait for an event that the pod is invalid.
		eventSelector := fields.Set{
			"involvedObject.kind": "Pod",
@@ -276,6 +279,54 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
		framework.ExpectEqual(p.Status.Phase, v1.PodPending)
	})

	ginkgo.It("should not mount / map unused volumes in a pod", func() {
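		// The pod below references a PVC in pod.Spec.Volumes, but no container
		// mounts or maps it; kubelet must keep the volume out of the pod
		// directory on the node.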
		if pattern.VolMode == v1.PersistentVolumeBlock {
			skipBlockTest(driver)
		}

		init()
		l.genericVolumeTestResource = *createGenericVolumeTestResource(driver, l.config, pattern)
		defer cleanup()

		ginkgo.By("Creating pod")
		var err error
		pod := framework.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc}, nil, false, "", false, false, framework.SELinuxLabel, nil)
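		// MakeSecPod wires the PVC into every container; strip those references
		// so the volume stays in pod.Spec.Volumes yet remains unused.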
		for i := range pod.Spec.Containers {
			pod.Spec.Containers[i].VolumeDevices = nil
			pod.Spec.Containers[i].VolumeMounts = nil
		}

		// Run the pod
		pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(pod)
		framework.ExpectNoError(err)
		defer func() {
			framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
		}()
		err = e2epod.WaitForPodNameRunningInNamespace(l.cs, pod.Name, pod.Namespace)
		framework.ExpectNoError(err)

		// Reload the pod to get its node
		pod, err = l.cs.CoreV1().Pods(l.ns.Name).Get(pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
ginkgo.By("Listing mounted volumes in the pod")
volumePaths, devicePaths, err := utils.ListPodVolumePluginDirectory(l.cs, pod)
framework.ExpectNoError(err)
driverInfo := driver.GetDriverInfo()
volumePlugin := driverInfo.InTreePluginName
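		// Drivers with no in-tree plugin name are assumed to be CSI; on the
		// node, all CSI volumes live under the kubernetes.io/csi plugin directory.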
		if len(volumePlugin) == 0 {
			// TODO: check if it's a CSI volume first
			volumePlugin = "kubernetes.io/csi"
		}

		ginkgo.By(fmt.Sprintf("Checking that volume plugin %s is not used in pod directory", volumePlugin))
		safeVolumePlugin := strings.ReplaceAll(volumePlugin, "/", "~")
		for _, path := range volumePaths {
			gomega.Expect(path).NotTo(gomega.ContainSubstring(safeVolumePlugin), fmt.Sprintf("no %s volume should be mounted into pod directory", volumePlugin))
		}
		for _, path := range devicePaths {
			gomega.Expect(path).NotTo(gomega.ContainSubstring(safeVolumePlugin), fmt.Sprintf("no %s volume should be symlinked into pod directory", volumePlugin))
		}
	})
}

func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode,

test/e2e/storage/utils/utils.go

@@ -634,3 +634,50 @@ func CheckWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string,
	VerifyExecInPodSucceed(pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
	VerifyExecInPodSucceed(pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s bs=%d count=1", encoded, pathForVolMode, len))
}

// ListPodVolumePluginDirectory returns all volumes in /var/lib/kubelet/pods/<pod UID>/volumes/* and
// /var/lib/kubelet/pods/<pod UID>/volumeDevices/*
// Sample output:
// /var/lib/kubelet/pods/a4717a30-000a-4081-a7a8-f51adf280036/volumes/kubernetes.io~secret/default-token-rphdt
// /var/lib/kubelet/pods/4475b7a3-4a55-4716-9119-fd0053d9d4a6/volumeDevices/kubernetes.io~aws-ebs/pvc-5f9f80f5-c90b-4586-9966-83f91711e1c0
func ListPodVolumePluginDirectory(c clientset.Interface, pod *v1.Pod) (mounts []string, devices []string, err error) {
	mountPath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumes")
	devicePath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumeDevices")

	nodeIP, err := framework.GetHostAddress(c, pod)
	if err != nil {
		return nil, nil, fmt.Errorf("error getting IP address of node %s: %s", pod.Spec.NodeName, err)
	}
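	// e2essh.SSH expects the node address in host:port form.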
	nodeIP = nodeIP + ":22"

	mounts, err = listPodDirectory(nodeIP, mountPath)
	if err != nil {
		return nil, nil, err
	}
	devices, err = listPodDirectory(nodeIP, devicePath)
	if err != nil {
		return nil, nil, err
	}
	return mounts, devices, nil
}

func listPodDirectory(hostAddress string, path string) ([]string, error) {
	// Check that the directory exists
	res, err := e2essh.SSH("test -d "+path, hostAddress, framework.TestContext.Provider)
	e2essh.LogResult(res)
	if err != nil {
		// Do not mistake an SSH failure for a missing directory.
		return nil, fmt.Errorf("error checking directory %s on node %s: %s", path, hostAddress, err)
	}
	if res.Code != 0 {
		// The directory does not exist
		return nil, nil
	}

	// Inside /var/lib/kubelet/pods/<pod>/volumes, look for <volume_plugin>/<volume-name>, hence depth 2
	res, err = e2essh.SSH("find "+path+" -mindepth 2 -maxdepth 2", hostAddress, framework.TestContext.Provider)
	e2essh.LogResult(res)
	if err != nil {
		return nil, fmt.Errorf("error checking directory %s on node %s: %s", path, hostAddress, err)
	}
	if res.Code != 0 {
		return nil, fmt.Errorf("error checking directory %s on node %s: exit code %d", path, hostAddress, res.Code)
	}
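	// find terminates its output with a newline, so the split may yield a
	// trailing empty string; callers only substring-match, so it is harmless.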
	return strings.Split(res.Stdout, "\n"), nil
}