use HostExec and sets.String

Yecheng Fu 2019-10-23 21:02:18 +08:00
parent 25c5ad52fd
commit 8def74f394
2 changed files with 41 additions and 17 deletions
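In short, the commit drops the direct SSH calls (e2essh) used to list mount points and routes them through the e2e HostExec utility, and it switches the mount-point bookkeeping from []string to sets.String so the leak check becomes a set difference. A minimal standalone sketch of that comparison pattern with k8s.io/apimachinery's sets package (the mount-point paths are made up for illustration):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Global mount points cached before the disruptive test (hypothetical paths).
	before := sets.NewString("/var/lib/kubelet/plugins/a", "/var/lib/kubelet/plugins/b")
	// Global mount points found on the same node after the test.
	after := sets.NewString("/var/lib/kubelet/plugins/a", "/var/lib/kubelet/plugins/c")

	leaked := after.Difference(before)  // present after but not before
	missing := before.Difference(after) // present before but not after
	fmt.Println("leaked:", leaked.List(), "missing:", missing.List())
}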

@@ -25,8 +25,10 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
@@ -88,6 +90,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
config *PerTestConfig
driverCleanup func()
hostExec utils.HostExec
resource *genericVolumeTestResource
roVolSource *v1.VolumeSource
pod *v1.Pod
@@ -117,6 +120,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, driver.GetDriverInfo().InTreePluginName)
testVolumeSizeRange := s.getTestSuiteInfo().supportedSizeRange
l.resource = createGenericVolumeTestResource(driver, l.config, pattern, testVolumeSizeRange)
l.hostExec = utils.NewHostExec(f)
// Setup subPath test dependent resource
volType := pattern.VolType
@@ -175,6 +179,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
l.driverCleanup = nil
}
if l.hostExec != nil {
l.hostExec.Cleanup()
}
validateMigrationVolumeOpCounts(f.ClientSet, driver.GetDriverInfo().InTreePluginName, l.intreeOps, l.migratedOps)
}
@@ -333,7 +341,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
init()
defer cleanup()
testSubpathReconstruction(f, l.pod, false)
testSubpathReconstruction(f, l.hostExec, l.pod, false)
})
ginkgo.It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func() {
@@ -345,7 +353,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
framework.Skipf("%s volume type does not support reconstruction, skipping", l.resource.volType)
}
testSubpathReconstruction(f, l.pod, true)
testSubpathReconstruction(f, l.hostExec, l.pod, true)
})
ginkgo.It("should support readOnly directory specified in the volumeMount", func() {
@@ -878,12 +886,17 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
framework.ExpectNoError(err, "while waiting for container to stabilize")
}
func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) {
func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec, pod *v1.Pod, forceDelete bool) {
// This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption()
// Disruptive tests run serially, so we can cache all volume global mount
// points and verify after the test that we do not leak any global mount point.
mountPoints := utils.FindVolumeGlobalMountPoints(f.ClientSet, pod)
nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err, "while listing scheduable nodes")
globalMountPointsByNode := make(map[string]sets.String, len(nodeList.Items))
for _, node := range nodeList.Items {
globalMountPointsByNode[node.Name] = utils.FindVolumeGlobalMountPoints(hostExec, &node)
}
// Change to busybox
pod.Spec.Containers[0].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
@@ -898,7 +911,7 @@ func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete
ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
removeUnusedContainers(pod)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err, "while creating pod")
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)
@@ -907,10 +920,24 @@ func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "while getting pod")
var podNode *v1.Node
for i := range nodeList.Items {
if nodeList.Items[i].Name == pod.Spec.NodeName {
podNode = &nodeList.Items[i]
}
}
framework.ExpectNotEqual(podNode, nil, "pod node should exist in schedulable nodes")
utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)
mountPointsAfter := utils.FindVolumeGlobalMountPoints(f.ClientSet, pod)
gomega.Expect(mountPointsAfter).To(gomega.ConsistOf(mountPoints), "Global mount points leaked. Before: %v, After: %v.", mountPoints, mountPointsAfter)
if podNode != nil {
mountPoints := globalMountPointsByNode[podNode.Name]
mountPointsAfter := utils.FindVolumeGlobalMountPoints(hostExec, podNode)
s1 := mountPointsAfter.Difference(mountPoints)
s2 := mountPoints.Difference(mountPointsAfter)
gomega.Expect(s1).To(gomega.BeEmpty(), "global mount points leaked: %v", s1)
gomega.Expect(s2).To(gomega.BeEmpty(), "global mount points not found: %v", s2)
}
}
func formatVolume(f *framework.Framework, pod *v1.Pod) {
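Condensed, the reconstruction test now snapshots global mount points per node up front and re-checks only the node that hosted the pod afterwards. A hypothetical helper sketching that flow with the same framework calls used above (checkNoLeakedGlobalMounts and the disrupt callback are illustrative, not part of the change):

package testsuites

import (
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

// checkNoLeakedGlobalMounts snapshots global mount points on every schedulable
// node, runs the disruptive step, then re-checks the node that hosted the pod.
func checkNoLeakedGlobalMounts(f *framework.Framework, hostExec utils.HostExec, disrupt func() *v1.Node) {
	nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
	framework.ExpectNoError(err, "while listing schedulable nodes")

	// Cache mount points per node before the disruptive phase.
	before := make(map[string]sets.String, len(nodeList.Items))
	for i := range nodeList.Items {
		node := &nodeList.Items[i]
		before[node.Name] = utils.FindVolumeGlobalMountPoints(hostExec, node)
	}

	podNode := disrupt() // create the pod, force-delete it, return the node it ran on

	after := utils.FindVolumeGlobalMountPoints(hostExec, podNode)
	gomega.Expect(after.Difference(before[podNode.Name])).To(gomega.BeEmpty(), "global mount points leaked")
	gomega.Expect(before[podNode.Name].Difference(after)).To(gomega.BeEmpty(), "global mount points not found")
}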

@@ -32,6 +32,7 @@ import (
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
@@ -650,13 +651,12 @@ func CheckWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string,
}
// findMountPoints returns all mount points on the given node under the specified directory.
func findMountPoints(nodeIP string, dir string) []string {
result, err := e2essh.SSH(fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$'`, dir), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
func findMountPoints(hostExec HostExec, node *v1.Node, dir string) []string {
result, err := hostExec.IssueCommandWithResult(fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$' || true`, dir), node)
framework.ExpectNoError(err, "Encountered HostExec error.")
var mountPoints []string
if err != nil {
for _, line := range strings.Split(result.Stdout, "\n") {
for _, line := range strings.Split(result, "\n") {
if line == "" {
continue
}
@@ -667,9 +667,6 @@ func findMountPoints(nodeIP string, dir string) []string {
}
// FindVolumeGlobalMountPoints returns all volume global mount points on the given node.
func FindVolumeGlobalMountPoints(c clientset.Interface, pod *v1.Pod) []string {
nodeIP, err := framework.GetHostAddress(c, pod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
return findMountPoints(nodeIP, "/var/lib/kubelet/plugins")
func FindVolumeGlobalMountPoints(hostExec HostExec, node *v1.Node) sets.String {
return sets.NewString(findMountPoints(hostExec, node, "/var/lib/kubelet/plugins")...)
}
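For reference, a caller now obtains a HostExec from the framework and is responsible for cleaning it up, and FindVolumeGlobalMountPoints returns a sets.String per node. A small hypothetical example of the new call pattern (logGlobalMountPoints is illustrative only):

package testsuites

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

// logGlobalMountPoints lists the volume global mount points on every ready node.
func logGlobalMountPoints(f *framework.Framework) {
	hostExec := utils.NewHostExec(f)
	defer hostExec.Cleanup() // release whatever helper resources HostExec created

	nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
	framework.ExpectNoError(err, "while listing schedulable nodes")

	for i := range nodeList.Items {
		node := &nodeList.Items[i]
		mountPoints := utils.FindVolumeGlobalMountPoints(hostExec, node)
		framework.Logf("node %q has %d global mount points under /var/lib/kubelet/plugins", node.Name, mountPoints.Len())
	}
}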