Merge pull request #83108 from jsafrane/hostexec-volumemode

Use pod + nsenter instead of SSH in block volume tests
Kubernetes Prow Robot 2019-10-04 02:46:29 -07:00 committed by GitHub
commit ec05944b13
3 changed files with 50 additions and 52 deletions
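
The point of the change: the old helper SSH-ed into the node on port 22, so the test only ran on providers with SSH access and a configured key. A privileged hostexec pod that enters the host's mount namespace with nsenter works on any cluster that can schedule such a pod. A minimal sketch of the call pattern the test adopts, using only identifiers visible in this diff and assuming f is the suite's *framework.Framework and node is the *v1.Node under inspection:

	hostExec := utils.NewHostExec(f) // launches a privileged hostexec pod per node, on demand
	defer hostExec.Cleanup()         // deletes the hostexec pods when the test finishes

	// Run a command on the node's host (not inside the pod's own mount namespace)
	// and capture its stdout.
	out, err := hostExec.IssueCommandWithResult("ls /var/lib/kubelet/pods", node)
	framework.ExpectNoError(err)
	framework.Logf("pod directories on node %s: %s", node.Name, out)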


@@ -18,6 +18,7 @@ package testsuites
 import (
 	"fmt"
+	"path/filepath"
 	"strings"
 
 	"github.com/onsi/ginkgo"
@@ -321,8 +322,6 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 		})
 
 		ginkgo.It("should not mount / map unused volumes in a pod", func() {
-			framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
-			framework.SkipUnlessSSHKeyPresent()
 			if pattern.VolMode == v1.PersistentVolumeBlock {
 				skipTestIfBlockNotSupported(driver)
 			}
@@ -351,10 +350,16 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 			// Reload the pod to get its node
 			pod, err = l.cs.CoreV1().Pods(l.ns.Name).Get(pod.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)
+			framework.ExpectNotEqual(pod.Spec.NodeName, "", "pod should be scheduled to a node")
+			node, err := l.cs.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
+			framework.ExpectNoError(err)
 
 			ginkgo.By("Listing mounted volumes in the pod")
-			volumePaths, devicePaths, err := utils.ListPodVolumePluginDirectory(l.cs, pod)
+			hostExec := utils.NewHostExec(f)
+			defer hostExec.Cleanup()
+			volumePaths, devicePaths, err := listPodVolumePluginDirectory(hostExec, pod, node)
 			framework.ExpectNoError(err)
 
 			driverInfo := driver.GetDriverInfo()
 			volumePlugin := driverInfo.InTreePluginName
 			if len(volumePlugin) == 0 {
@@ -426,3 +431,42 @@ func swapVolumeMode(podTemplate *v1.Pod) *v1.Pod {
 	}
 	return pod
 }
+
+// listPodVolumePluginDirectory returns all volumes in /var/lib/kubelet/pods/<pod UID>/volumes/* and
+// /var/lib/kubelet/pods/<pod UID>/volumeDevices/*
+// Sample output:
+//   /var/lib/kubelet/pods/a4717a30-000a-4081-a7a8-f51adf280036/volumes/kubernetes.io~secret/default-token-rphdt
+//   /var/lib/kubelet/pods/4475b7a3-4a55-4716-9119-fd0053d9d4a6/volumeDevices/kubernetes.io~aws-ebs/pvc-5f9f80f5-c90b-4586-9966-83f91711e1c0
+func listPodVolumePluginDirectory(h utils.HostExec, pod *v1.Pod, node *v1.Node) (mounts []string, devices []string, err error) {
+	mountPath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumes")
+	devicePath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumeDevices")
+
+	mounts, err = listPodDirectory(h, mountPath, node)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	devices, err = listPodDirectory(h, devicePath, node)
+	if err != nil {
+		return nil, nil, err
+	}
+	return mounts, devices, nil
+}
+
+func listPodDirectory(h utils.HostExec, path string, node *v1.Node) ([]string, error) {
+	// Return no error if the directory does not exist (e.g. there are no block volumes used)
+	_, err := h.IssueCommandWithResult("test ! -d "+path, node)
+	if err == nil {
+		// The directory does not exist
+		return nil, nil
+	}
+	// The directory either exists or a real error happened (e.g. "access denied").
+	// Ignore the error, "find" will hit the error again and we report it there.
+
+	// Inside /var/lib/kubelet/pods/<pod>/volumes, look for <volume_plugin>/<volume-name>, hence depth 2
+	cmd := fmt.Sprintf("find %s -mindepth 2 -maxdepth 2", path)
+	out, err := h.IssueCommandWithResult(cmd, node)
+	if err != nil {
+		return nil, fmt.Errorf("error checking directory %s on node %s: %s", path, node.Name, err)
+	}
+	return strings.Split(out, "\n"), nil
+}
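
One subtlety in listPodDirectory above: the existence probe is inverted. IssueCommandWithResult fails whenever the remote command exits non-zero, so a successful "test ! -d <path>" (err == nil) means the directory is absent and the helper returns "no volumes" without failing the test; anything else falls through to "find", which reproduces any real error so it is reported once, with context. A standalone sketch of the shell semantics this relies on, runnable locally with os/exec rather than through HostExec (illustrative only, not part of the PR):

	package main

	import (
		"fmt"
		"os/exec"
	)

	func main() {
		// "test ! -d PATH" exits 0 exactly when PATH is not a directory,
		// which is the condition the helper treats as "nothing to list".
		for _, path := range []string{"/definitely/missing", "/tmp"} {
			err := exec.Command("sh", "-c", "test ! -d "+path).Run()
			fmt.Printf("test ! -d %s -> err == nil? %v\n", path, err == nil)
		}
	}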


@@ -19,7 +19,7 @@ package utils
 import (
 	"fmt"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
@@ -51,7 +51,8 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
 	f := h.Framework
 	cs := f.ClientSet
 	ns := f.Namespace
-	hostExecPod := e2epod.NewExecPodSpec(ns.Name, fmt.Sprintf("hostexec-%s", node), true)
+	hostExecPod := e2epod.NewExecPodSpec(ns.Name, "", true)
+	hostExecPod.GenerateName = fmt.Sprintf("hostexec-%s-", node)
 	hostExecPod.Spec.NodeName = node
 	hostExecPod.Spec.Volumes = []v1.Volume{
 		{
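
Two notes on this hunk. Switching to GenerateName gives each pod name a random suffix, so a new hostexec pod for a node no longer collides with an old one that is still terminating in the same namespace. And the hunk ends just as hostExecPod.Spec.Volumes is being populated; the volume defined there (unchanged by this PR, so not shown) is assumed to bind-mount the host's / into the pod, which is what nsenter needs. The execution side is also outside this hunk; a sketch of its assumed shape, where cmd is the caller's command string:

	// Assumed, not shown in this diff: the pod is privileged and mounts the
	// host's / at /rootfs, so nsenter can join the host's mount namespace via
	// the host PID 1's /proc entry before running the command.
	args := []string{
		"nsenter", "--mount=/rootfs/proc/1/ns/mnt", // enter the host mount namespace
		"--", "sh", "-c", cmd,                      // then run the caller's command there
	}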


@@ -633,50 +633,3 @@ func CheckWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string,
 	VerifyExecInPodSucceed(pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
 	VerifyExecInPodSucceed(pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s bs=%d count=1", encoded, pathForVolMode, len))
 }
-
-// ListPodVolumePluginDirectory returns all volumes in /var/lib/kubelet/pods/<pod UID>/volumes/* and
-// /var/lib/kubelet/pods/<pod UID>/volumeDevices/*
-// Sample output:
-//   /var/lib/kubelet/pods/a4717a30-000a-4081-a7a8-f51adf280036/volumes/kubernetes.io~secret/default-token-rphdt
-//   /var/lib/kubelet/pods/4475b7a3-4a55-4716-9119-fd0053d9d4a6/volumeDevices/kubernetes.io~aws-ebs/pvc-5f9f80f5-c90b-4586-9966-83f91711e1c0
-func ListPodVolumePluginDirectory(c clientset.Interface, pod *v1.Pod) (mounts []string, devices []string, err error) {
-	mountPath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumes")
-	devicePath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumeDevices")
-	nodeIP, err := framework.GetHostAddress(c, pod)
-	if err != nil {
-		return nil, nil, fmt.Errorf("error getting IP address of node %s: %s", pod.Spec.NodeName, err)
-	}
-	nodeIP = nodeIP + ":22"
-
-	mounts, err = listPodDirectory(nodeIP, mountPath)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	devices, err = listPodDirectory(nodeIP, devicePath)
-	if err != nil {
-		return nil, nil, err
-	}
-	return mounts, devices, nil
-}
-
-func listPodDirectory(hostAddress string, path string) ([]string, error) {
-	// Check the directory exists
-	res, err := e2essh.SSH("test -d "+path, hostAddress, framework.TestContext.Provider)
-	e2essh.LogResult(res)
-	if res.Code != 0 {
-		// The directory does not exist
-		return nil, nil
-	}
-
-	// Inside /var/lib/kubelet/pods/<pod>/volumes, look for <volume_plugin>/<volume-name>, hence depth 2
-	res, err = e2essh.SSH("find "+path+" -mindepth 2 -maxdepth 2", hostAddress, framework.TestContext.Provider)
-	e2essh.LogResult(res)
-	if err != nil {
-		return nil, fmt.Errorf("error checking directory %s on node %s: %s", path, hostAddress, err)
-	}
-	if res.Code != 0 {
-		return nil, fmt.Errorf("error checking directory %s on node %s: exit code %d", path, hostAddress, res.Code)
-	}
-	return strings.Split(res.Stdout, "\n"), nil
-}
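
For contrast, the preconditions the deleted SSH path required and the pod-based path does not, assembled from helpers visible in this diff (illustrative only, not a suggested addition):

	// Everything below had to hold before the old ListPodVolumePluginDirectory could run:
	framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...) // provider exposes node SSH
	framework.SkipUnlessSSHKeyPresent()                           // an SSH key is configured
	nodeIP, err := framework.GetHostAddress(c, pod)               // node has a reachable address
	framework.ExpectNoError(err)
	res, err := e2essh.SSH("true", nodeIP+":22", framework.TestContext.Provider)
	e2essh.LogResult(res)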