simplify global mount points check

Yecheng Fu 2019-10-23 17:29:00 +08:00
parent b536395c07
commit 25c5ad52fd
4 changed files with 53 additions and 155 deletions

View File

@@ -36,6 +36,7 @@ import (
 	"time"

 	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 )

 var (
@@ -157,24 +158,24 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 	}

 	cleanup := func() {
-		// if l.pod != nil {
-		// 	ginkgo.By("Deleting pod")
-		// 	err := e2epod.DeletePodWithWait(f.ClientSet, l.pod)
-		// 	framework.ExpectNoError(err, "while deleting pod")
-		// 	l.pod = nil
-		// }
-		// if l.resource != nil {
-		// 	l.resource.cleanupResource()
-		// 	l.resource = nil
-		// }
-		// if l.driverCleanup != nil {
-		// 	l.driverCleanup()
-		// 	l.driverCleanup = nil
-		// }
-		// validateMigrationVolumeOpCounts(f.ClientSet, driver.GetDriverInfo().InTreePluginName, l.intreeOps, l.migratedOps)
+		if l.pod != nil {
+			ginkgo.By("Deleting pod")
+			err := e2epod.DeletePodWithWait(f.ClientSet, l.pod)
+			framework.ExpectNoError(err, "while deleting pod")
+			l.pod = nil
+		}
+		if l.resource != nil {
+			l.resource.cleanupResource()
+			l.resource = nil
+		}
+		if l.driverCleanup != nil {
+			l.driverCleanup()
+			l.driverCleanup = nil
+		}
+		validateMigrationVolumeOpCounts(f.ClientSet, driver.GetDriverInfo().InTreePluginName, l.intreeOps, l.migratedOps)
 	}

 	ginkgo.It("should support non-existent path", func() {
@@ -880,6 +881,10 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
 func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) {
 	// This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption()

+	// Disruptive tests run serially, so we can cache all volume global mount
+	// points and verify after the test that we do not leak any global mount point.
+	mountPoints := utils.FindVolumeGlobalMountPoints(f.ClientSet, pod)
+
 	// Change to busybox
 	pod.Spec.Containers[0].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
 	pod.Spec.Containers[0].Command = volume.GenerateScriptCmd("sleep 100000")
@@ -902,7 +907,10 @@ func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete
 	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "while getting pod")

-	utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true, true)
+	utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)
+
+	mountPointsAfter := utils.FindVolumeGlobalMountPoints(f.ClientSet, pod)
+	gomega.Expect(mountPointsAfter).To(gomega.ConsistOf(mountPoints), "Global mount points leaked. Before: %v, After: %v.", mountPoints, mountPointsAfter)
 }

 func formatVolume(f *framework.Framework, pod *v1.Pod) {
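
Side note: the reconstruction check above relies on gomega.ConsistOf, which compares the before and after snapshots as unordered sets, so it fails on leaked as well as vanished mount points. Below is a minimal standalone Go sketch of the leak direction of that comparison; the leaked helper and the sample paths are hypothetical illustrations, not part of the commit.

package main

import (
	"fmt"
	"sort"
)

// leaked returns mount points present in "after" but not in "before".
// gomega.ConsistOf additionally flags entries that disappeared; this
// sketch shows only the leak direction of the same set comparison.
func leaked(before, after []string) []string {
	seen := make(map[string]bool, len(before))
	for _, m := range before {
		seen[m] = true
	}
	var extra []string
	for _, m := range after {
		if !seen[m] {
			extra = append(extra, m)
		}
	}
	sort.Strings(extra)
	return extra
}

func main() {
	before := []string{"/var/lib/kubelet/plugins/kubernetes.io/local-volume/mounts/local-wdx8b"}
	after := append(append([]string{}, before...), "/var/lib/kubelet/plugins/example-leak")
	fmt.Println(leaked(before, after)) // [/var/lib/kubelet/plugins/example-leak]
}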

View File

@@ -1,6 +1,6 @@
 package(default_visibility = ["//visibility:public"])

-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+load("@io_bazel_rules_go//go:def.bzl", "go_library")

 go_library(
     name = "go_default_library",
@@ -13,7 +13,6 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/test/e2e/storage/utils",
     deps = [
-        "//pkg/util/mount:go_default_library",
         "//staging/src/k8s.io/api/apps/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/api/rbac/v1:go_default_library",
@@ -48,10 +47,3 @@ filegroup(
     srcs = [":package-srcs"],
     tags = ["automanaged"],
 )
-
-go_test(
-    name = "go_default_test",
-    srcs = ["utils_test.go"],
-    embed = [":go_default_library"],
-    deps = ["//vendor/github.com/onsi/gomega:go_default_library"],
-)

View File

@ -20,9 +20,7 @@ import (
"crypto/sha256" "crypto/sha256"
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"io/ioutil"
"math/rand" "math/rand"
"os"
"path/filepath" "path/filepath"
"strings" "strings"
"time" "time"
@@ -36,7 +34,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -237,48 +234,10 @@ func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Frame
 	framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, path)
 }

-// findGlobalVolumeMountPaths finds all global volume mount paths for given pod from the host mount information.
-// This function assumes:
-// 1) pod volume mount paths exists in /var/lib/kubelet/pods/<pod-uid>/volumes/
-// 2) global volume mount paths exists in /var/lib/kubelet/plugins/
-func findGlobalVolumeMountPaths(mountInfo string, podUID string) ([]string, error) {
-	tmpfile, err := ioutil.TempFile("", "mountinfo")
-	if err != nil {
-		return nil, err
-	}
-	defer os.Remove(tmpfile.Name()) // clean up
-	err = ioutil.WriteFile(tmpfile.Name(), []byte(mountInfo), 0644)
-	if err != nil {
-		return nil, err
-	}
-	podVolumeMountBase := fmt.Sprintf("/var/lib/kubelet/pods/%s/volumes/", podUID)
-	globalVolumeMountBase := "/var/lib/kubelet/plugins"
-	mis, err := mount.ParseMountInfo(tmpfile.Name())
-	if err != nil {
-		return nil, err
-	}
-	globalVolumeMountPaths := []string{}
-	for _, mi := range mis {
-		if mount.PathWithinBase(mi.MountPoint, podVolumeMountBase) {
-			refs, err := mount.SearchMountPoints(mi.MountPoint, tmpfile.Name())
-			if err != nil {
-				return nil, err
-			}
-			for _, ref := range refs {
-				if mount.PathWithinBase(ref, globalVolumeMountBase) {
-					globalVolumeMountPaths = append(globalVolumeMountPaths, ref)
-				}
-			}
-		}
-	}
-	return globalVolumeMountPaths, nil
-}
-
 // TestVolumeUnmountsFromDeletedPodWithForceOption tests that a volume unmounts if the client pod was deleted while the kubelet was down.
 // forceDelete is true indicating whether the pod is forcefully deleted.
 // checkSubpath is true indicating whether the subpath should be checked.
-// checkGlobalMount is true indicating whether the global mount should be checked.
-func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool, checkGlobalMount bool) {
+func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) {
 	nodeIP, err := framework.GetHostAddress(c, clientPod)
 	framework.ExpectNoError(err)
 	nodeIP = nodeIP + ":22"
@@ -297,24 +256,6 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
 		gomega.Expect(result.Code).To(gomega.BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
 	}

-	var globalVolumeMountPaths []string
-	if checkGlobalMount {
-		// Find global mount path and verify it will be unmounted later.
-		// We don't verify it must exist because:
-		// 1) not all volume types have global mount path, e.g. local filesystem volume with directory source
-		// 2) volume types which failed to mount global mount path will fail in other test
-		ginkgo.By("Find the volume global mount paths")
-		result, err = e2essh.SSH("cat /proc/self/mountinfo", nodeIP, framework.TestContext.Provider)
-		framework.ExpectNoError(err, "Encountered SSH error.")
-		globalVolumeMountPaths, err = findGlobalVolumeMountPaths(result.Stdout, string(clientPod.UID))
-		framework.ExpectNoError(err, fmt.Sprintf("Failed to get global volume mount paths: %v", err))
-		if len(globalVolumeMountPaths) > 0 {
-			framework.Logf("Volume global mount paths found at %v", globalVolumeMountPaths)
-		} else {
-			framework.Logf("No volume global mount paths found")
-		}
-	}
-
 	// This command is to make sure kubelet is started after test finishes no matter it fails or not.
 	defer func() {
 		KubeletCommand(KStart, c, clientPod)
@@ -358,28 +299,16 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
 		gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
 		framework.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
 	}

-	if checkGlobalMount && len(globalVolumeMountPaths) > 0 {
-		globalMountPathCmd := fmt.Sprintf("ls %s | grep '.'", strings.Join(globalVolumeMountPaths, " "))
-		if isSudoPresent(nodeIP, framework.TestContext.Provider) {
-			globalMountPathCmd = fmt.Sprintf("sudo sh -c \"%s\"", globalMountPathCmd)
-		}
-		ginkgo.By("Expecting the volume global mount path not to be found.")
-		result, err = e2essh.SSH(globalMountPathCmd, nodeIP, framework.TestContext.Provider)
-		e2essh.LogResult(result)
-		framework.ExpectNoError(err, "Encountered SSH error.")
-		gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty.")
-	}
 }

 // TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
 func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
-	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false, false)
+	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false)
 }

 // TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down.
 func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
-	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false, false)
+	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false)
 }

 // TestVolumeUnmapsFromDeletedPodWithForceOption tests that a volume unmaps if the client pod was deleted while the kubelet was down.
@@ -719,3 +648,28 @@ func CheckWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string,
 	VerifyExecInPodSucceed(pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
 	VerifyExecInPodSucceed(pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s bs=%d count=1", encoded, pathForVolMode, len))
 }

+// findMountPoints returns all mount points on the given node under the specified directory.
+func findMountPoints(nodeIP string, dir string) []string {
+	result, err := e2essh.SSH(fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$'`, dir), nodeIP, framework.TestContext.Provider)
+	e2essh.LogResult(result)
+	framework.ExpectNoError(err, "Encountered SSH error.")
+	var mountPoints []string
+	if err == nil {
+		for _, line := range strings.Split(result.Stdout, "\n") {
+			if line == "" {
+				continue
+			}
+			mountPoints = append(mountPoints, strings.TrimSuffix(line, " is a mountpoint"))
+		}
+	}
+	return mountPoints
+}
+
+// FindVolumeGlobalMountPoints returns all volume global mount points on the node of the given pod.
+func FindVolumeGlobalMountPoints(c clientset.Interface, pod *v1.Pod) []string {
+	nodeIP, err := framework.GetHostAddress(c, pod)
+	framework.ExpectNoError(err)
+	nodeIP = nodeIP + ":22"
+	return findMountPoints(nodeIP, "/var/lib/kubelet/plugins")
+}

View File

@ -1,56 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"testing"
"github.com/onsi/gomega"
)
func TestFindGlobalVolumeMountPaths(t *testing.T) {
tests := []struct {
name string
mountInfo string
podUID string
expected []string
}{
{
name: "pod uses local filesystem pv with block source",
mountInfo: `1045 245 0:385 / /var/lib/kubelet/pods/ff5e9fa2-7111-486d-854c-848bcc6b3819/volumes/kubernetes.io~secret/default-token-djlt2 rw,relatime shared:199 - tmpfs tmpfs rw
1047 245 7:6 / /var/lib/kubelet/plugins/kubernetes.io/local-volume/mounts/local-wdx8b rw,relatime shared:200 - ext4 /dev/loop6 rw,data=ordered
1048 245 7:6 / /var/lib/kubelet/pods/ff5e9fa2-7111-486d-854c-848bcc6b3819/volumes/kubernetes.io~local-volume/local-wdx8b rw,relatime shared:200 - ext4 /dev/loop6 rw,data=ordered
1054 245 7:6 /provisioning-9823 /var/lib/kubelet/pods/ff5e9fa2-7111-486d-854c-848bcc6b3819/volume-subpaths/local-wdx8b/test-container-subpath-local-preprovisionedpv-d72p/0 rw,relatime shared:200 - ext4 /dev/loop6 rw,data=ordered
`,
podUID: "ff5e9fa2-7111-486d-854c-848bcc6b3819",
expected: []string{
"/var/lib/kubelet/plugins/kubernetes.io/local-volume/mounts/local-wdx8b",
},
},
}
g := gomega.NewWithT(t)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mountPaths, err := findGlobalVolumeMountPaths(tt.mountInfo, tt.podUID)
if err != nil {
t.Fatal(err)
}
g.Expect(mountPaths).To(gomega.ConsistOf(tt.expected))
})
}
}