mirror of https://github.com/k3s-io/kubernetes.git
commit 8def74f394
parent 25c5ad52fd

    use HostExec and sets.String
--- a/test/e2e/storage/testsuites/subpath.go
+++ b/test/e2e/storage/testsuites/subpath.go
@@ -25,8 +25,10 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
@@ -88,6 +90,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		config        *PerTestConfig
 		driverCleanup func()
 
+		hostExec    utils.HostExec
 		resource    *genericVolumeTestResource
 		roVolSource *v1.VolumeSource
 		pod         *v1.Pod
@@ -117,6 +120,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, driver.GetDriverInfo().InTreePluginName)
 		testVolumeSizeRange := s.getTestSuiteInfo().supportedSizeRange
 		l.resource = createGenericVolumeTestResource(driver, l.config, pattern, testVolumeSizeRange)
+		l.hostExec = utils.NewHostExec(f)
 
 		// Setup subPath test dependent resource
 		volType := pattern.VolType
@@ -175,6 +179,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 			l.driverCleanup = nil
 		}
 
+		if l.hostExec != nil {
+			l.hostExec.Cleanup()
+		}
+
 		validateMigrationVolumeOpCounts(f.ClientSet, driver.GetDriverInfo().InTreePluginName, l.intreeOps, l.migratedOps)
 	}
 
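For reference, the lifecycle wired in above works roughly as sketched below (a minimal sketch, assuming the utils.HostExec interface from the e2e storage utils package, whose NewHostExec, IssueCommandWithResult, and Cleanup all appear in this diff; listMounts is a hypothetical helper for illustration):

```go
import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

// listMounts shows the pattern: create one HostExec for the suite, run
// host commands on a node through it, and always call Cleanup, which
// tears down the per-node helper pods HostExec creates on demand.
func listMounts(f *framework.Framework, node *v1.Node) {
	hostExec := utils.NewHostExec(f)
	defer hostExec.Cleanup()

	// A failing remote command is reported back through err.
	out, err := hostExec.IssueCommandWithResult("mount", node)
	framework.ExpectNoError(err, "while listing mounts on node %q", node.Name)
	framework.Logf("mounts on %q:\n%s", node.Name, out)
}
```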
@@ -333,7 +341,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		init()
 		defer cleanup()
 
-		testSubpathReconstruction(f, l.pod, false)
+		testSubpathReconstruction(f, l.hostExec, l.pod, false)
 	})
 
 	ginkgo.It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func() {
@@ -345,7 +353,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 			framework.Skipf("%s volume type does not support reconstruction, skipping", l.resource.volType)
 		}
 
-		testSubpathReconstruction(f, l.pod, true)
+		testSubpathReconstruction(f, l.hostExec, l.pod, true)
 	})
 
 	ginkgo.It("should support readOnly directory specified in the volumeMount", func() {
@@ -878,12 +886,17 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
 	framework.ExpectNoError(err, "while waiting for container to stabilize")
 }
 
-func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) {
+func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec, pod *v1.Pod, forceDelete bool) {
 	// This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption()
 
 	// Disruptive tests run serially, we can cache all volume global mount
 	// points and verify after the test that we do not leak any global mount point.
-	mountPoints := utils.FindVolumeGlobalMountPoints(f.ClientSet, pod)
+	nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+	framework.ExpectNoError(err, "while listing schedulable nodes")
+	globalMountPointsByNode := make(map[string]sets.String, len(nodeList.Items))
+	for _, node := range nodeList.Items {
+		globalMountPointsByNode[node.Name] = utils.FindVolumeGlobalMountPoints(hostExec, &node)
+	}
 
 	// Change to busybox
 	pod.Spec.Containers[0].Image = volume.GetTestImage(imageutils.GetE2EImage(imageutils.BusyBox))
@@ -898,7 +911,7 @@ func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete
 
 	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
 	removeUnusedContainers(pod)
-	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 	framework.ExpectNoError(err, "while creating pod")
 
 	err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)
@@ -907,10 +920,24 @@ func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete
 	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "while getting pod")
 
+	var podNode *v1.Node
+	for i := range nodeList.Items {
+		if nodeList.Items[i].Name == pod.Spec.NodeName {
+			podNode = &nodeList.Items[i]
+		}
+	}
+	framework.ExpectNotEqual(podNode, nil, "pod node should exist in schedulable nodes")
+
 	utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)
 
-	mountPointsAfter := utils.FindVolumeGlobalMountPoints(f.ClientSet, pod)
-	gomega.Expect(mountPointsAfter).To(gomega.ConsistOf(mountPoints), "Global mount points leaked. Before: %v, After: %v.", mountPoints, mountPointsAfter)
+	if podNode != nil {
+		mountPoints := globalMountPointsByNode[podNode.Name]
+		mountPointsAfter := utils.FindVolumeGlobalMountPoints(hostExec, podNode)
+		s1 := mountPointsAfter.Difference(mountPoints)
+		s2 := mountPoints.Difference(mountPointsAfter)
+		gomega.Expect(s1).To(gomega.BeEmpty(), "global mount points leaked: %v", s1)
+		gomega.Expect(s2).To(gomega.BeEmpty(), "global mount points not found: %v", s2)
+	}
 }
 
 func formatVolume(f *framework.Framework, pod *v1.Pod) {
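The leak check now reduces to set difference in both directions rather than gomega.ConsistOf. A minimal standalone sketch of that semantics, using only k8s.io/apimachinery/pkg/util/sets (the mount-point paths are made up for illustration):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Global mount points cached before the test and observed after it.
	before := sets.NewString("/var/lib/kubelet/plugins/a", "/var/lib/kubelet/plugins/b")
	after := sets.NewString("/var/lib/kubelet/plugins/b", "/var/lib/kubelet/plugins/c")

	leaked := after.Difference(before)  // in after, not in before
	missing := before.Difference(after) // in before, not in after

	fmt.Println("leaked:", leaked.List())   // leaked: [/var/lib/kubelet/plugins/c]
	fmt.Println("missing:", missing.List()) // missing: [/var/lib/kubelet/plugins/a]
}
```

Reporting the two differences separately makes a failure point at exactly which direction went wrong, which a single ConsistOf assertion did not.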
--- a/test/e2e/storage/utils/utils.go
+++ b/test/e2e/storage/utils/utils.go
@@ -32,6 +32,7 @@ import (
 	rbacv1 "k8s.io/api/rbac/v1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -650,13 +651,12 @@ func CheckWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string,
 }
 
 // findMountPoints returns all mount points on given node under specified directory.
-func findMountPoints(nodeIP string, dir string) []string {
-	result, err := e2essh.SSH(fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$'`, dir), nodeIP, framework.TestContext.Provider)
-	e2essh.LogResult(result)
-	framework.ExpectNoError(err, "Encountered SSH error.")
+func findMountPoints(hostExec HostExec, node *v1.Node, dir string) []string {
+	result, err := hostExec.IssueCommandWithResult(fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$' || true`, dir), node)
+	framework.ExpectNoError(err, "Encountered HostExec error.")
 	var mountPoints []string
 	if err != nil {
-		for _, line := range strings.Split(result.Stdout, "\n") {
+		for _, line := range strings.Split(result, "\n") {
 			if line == "" {
 				continue
 			}
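One behavioral difference worth spelling out: e2essh.SSH reported a failing remote command via result.Code rather than err, while IssueCommandWithResult appears to surface a non-zero exit status as an error. Since grep exits 1 when nothing matches, the pipeline gains a trailing `|| true`. A sketch isolating that detail (mountPointsCmd is a hypothetical helper, not part of this commit):

```go
import "fmt"

// mountPointsCmd builds the shell pipeline used by findMountPoints.
// grep exits 1 when no line matches, which would come back from
// IssueCommandWithResult as an error, so `|| true` turns "no mount
// points under dir" into an empty result instead of a test failure.
func mountPointsCmd(dir string) string {
	return fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$' || true`, dir)
}
```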
@@ -667,9 +667,6 @@ func findMountPoints(nodeIP string, dir string) []string {
 }
 
 // FindVolumeGlobalMountPoints returns all volume global mount points on the node of given pod.
-func FindVolumeGlobalMountPoints(c clientset.Interface, pod *v1.Pod) []string {
-	nodeIP, err := framework.GetHostAddress(c, pod)
-	framework.ExpectNoError(err)
-	nodeIP = nodeIP + ":22"
-	return findMountPoints(nodeIP, "/var/lib/kubelet/plugins")
+func FindVolumeGlobalMountPoints(hostExec HostExec, node *v1.Node) sets.String {
+	return sets.NewString(findMountPoints(hostExec, node, "/var/lib/kubelet/plugins")...)
 }
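Callers now pass a HostExec and a *v1.Node instead of a clientset and pod, and get a sets.String back, which composes naturally into a before/after check. A hedged caller-side sketch (checkNoLeaks is a hypothetical helper mirroring the pattern in testSubpathReconstruction above):

```go
import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// checkNoLeaks caches a node's global mount points, runs fn, and then
// verifies the set is unchanged, reporting each direction separately.
func checkNoLeaks(hostExec HostExec, node *v1.Node, fn func()) {
	before := FindVolumeGlobalMountPoints(hostExec, node)
	fn()
	after := FindVolumeGlobalMountPoints(hostExec, node)
	if leaked := after.Difference(before); leaked.Len() != 0 {
		framework.Failf("global mount points leaked on %q: %v", node.Name, leaked.List())
	}
	if missing := before.Difference(after); missing.Len() != 0 {
		framework.Failf("global mount points disappeared on %q: %v", node.Name, missing.List())
	}
}
```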