Merge pull request #113853 from jsafrane/add-volumepath

Fix subpath disruptive tests
Kubernetes Prow Robot 2022-11-15 07:56:52 -08:00 committed by GitHub
commit d34bdeea41
8 changed files with 43 additions and 40 deletions

View File

@@ -28,6 +28,11 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
)
+ const (
+ VolumeMountPathTemplate = "/mnt/volume%d"
+ VolumeMountPath1 = "/mnt/volume1"
+ )
// Config is a struct containing all arguments for creating a pod.
// SELinux testing requires to pass HostIPC and HostPID as boolean arguments.
type Config struct {
@@ -222,10 +227,11 @@ func setVolumes(podSpec *v1.PodSpec, pvcs []*v1.PersistentVolumeClaim, inlineVol
volumeIndex := 0
for _, pvclaim := range pvcs {
volumename := fmt.Sprintf("volume%v", volumeIndex+1)
+ volumeMountPath := fmt.Sprintf(VolumeMountPathTemplate, volumeIndex+1)
if pvclaim.Spec.VolumeMode != nil && *pvclaim.Spec.VolumeMode == v1.PersistentVolumeBlock {
- volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: "/mnt/" + volumename})
+ volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: volumeMountPath})
} else {
- volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
+ volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: volumeMountPath})
}
volumes[volumeIndex] = v1.Volume{
Name: volumename,
@@ -240,8 +246,9 @@ func setVolumes(podSpec *v1.PodSpec, pvcs []*v1.PersistentVolumeClaim, inlineVol
}
for _, src := range inlineVolumeSources {
volumename := fmt.Sprintf("volume%v", volumeIndex+1)
+ volumeMountPath := fmt.Sprintf(VolumeMountPathTemplate, volumeIndex+1)
// In-line volumes can be only filesystem, not block.
- volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
+ volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: volumeMountPath})
volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: *src}
volumeIndex++
}
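The hunks above replace the hard-coded "/mnt/" + volumename paths with paths formatted from the new VolumeMountPathTemplate constant, so callers can reconstruct the exact path for any volume index. As a rough, illustrative sketch (not part of the patch; the helper name is hypothetical), deriving the path for the n-th volume looks like this:

package example

import "fmt"

// VolumeMountPathTemplate mirrors the constant added to the e2epod package in this PR.
const VolumeMountPathTemplate = "/mnt/volume%d"

// nthVolumeMountPath returns the mount (or device) path for the 1-based volume
// index, matching the "volume1", "volume2", ... naming used in setVolumes.
func nthVolumeMountPath(index int) string {
	return fmt.Sprintf(VolumeMountPathTemplate, index)
}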

View File

@@ -659,7 +659,6 @@ func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string
func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {
if exiterr, ok := err.(uexec.CodeExitError); ok {
exitCode := exiterr.ExitStatus()
framework.ExpectNoError(err,

View File

@@ -80,7 +80,7 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() {
func(t disruptiveTest) {
ginkgo.It(t.testItStmt, func() {
ginkgo.By("Executing Spec")
- t.runTest(c, f, clientPod)
+ t.runTest(c, f, clientPod, e2epod.VolumeMountPath1)
})
}(test)
}

View File

@@ -42,7 +42,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
)
- type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod)
+ type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string)
type disruptiveTest struct {
testItStmt string
runTest testBody
@@ -272,7 +272,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
func(t disruptiveTest) {
ginkgo.It(t.testItStmt, func() {
ginkgo.By("Executing Spec")
- t.runTest(c, f, clientPod)
+ t.runTest(c, f, clientPod, e2epod.VolumeMountPath1)
})
}(test)
}
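Both disruptive test files now thread the mount path through the widened testBody signature instead of letting the storage helpers assume /mnt/volume1. A minimal sketch of a table entry under the new signature (illustrative only; the entry text and wiring are assumptions, while the helper and constant names come from this diff):

// Illustrative only: a table entry wired to the widened testBody signature.
// Imports assumed: v1 "k8s.io/api/core/v1", clientset "k8s.io/client-go/kubernetes",
// "k8s.io/kubernetes/test/e2e/framework", utils "k8s.io/kubernetes/test/e2e/storage/utils".
var exampleTest = disruptiveTest{
	testItStmt: "Should test that a file written before kubelet restart is readable after restart.",
	runTest: func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
		// volumePath is e2epod.VolumeMountPath1 when the Ginkgo loop above invokes runTest.
		utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod, volumePath)
	},
}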

View File

@@ -132,7 +132,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
}
- type singlePodTestBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod)
+ type singlePodTestBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, mountPath string)
type singlePodTest struct {
testItStmt string
runTestFile singlePodTestBody
@@ -185,10 +185,10 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
framework.ExpectNoError(err, "While creating pods for kubelet restart test")
if pattern.VolMode == v1.PersistentVolumeBlock && t.runTestBlock != nil {
- t.runTestBlock(l.cs, l.config.Framework, l.pod)
+ t.runTestBlock(l.cs, l.config.Framework, l.pod, e2epod.VolumeMountPath1)
}
if pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil {
- t.runTestFile(l.cs, l.config.Framework, l.pod)
+ t.runTestFile(l.cs, l.config.Framework, l.pod, e2epod.VolumeMountPath1)
}
})
}
@@ -204,27 +204,27 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
{
testItStmt: "Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux][Feature:SELinuxMountReadWriteOncePod].",
runTestFile: func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) {
- storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, false, false, pod2)
+ storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, false, false, pod2, e2epod.VolumeMountPath1)
},
},
{
testItStmt: "Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux][Feature:SELinuxMountReadWriteOncePod].",
runTestFile: func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) {
- storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, true, false, pod2)
+ storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, true, false, pod2, e2epod.VolumeMountPath1)
},
},
{
testItStmt: "Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux][Feature:SELinuxMountReadWriteOncePod].",
changeSELinuxContexts: true,
runTestFile: func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) {
- storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, false, false, pod2)
+ storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, false, false, pod2, e2epod.VolumeMountPath1)
},
},
{
testItStmt: "Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux][Feature:SELinuxMountReadWriteOncePod].",
changeSELinuxContexts: true,
runTestFile: func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) {
- storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, true, false, pod2)
+ storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, true, false, pod2, e2epod.VolumeMountPath1)
},
},
}

View File

@@ -1002,7 +1002,7 @@ func testSubpathReconstruction(f *framework.Framework, hostExec storageutils.Hos
}
framework.ExpectNotEqual(podNode, nil, "pod node should exist in schedulable nodes")
- storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true, nil)
+ storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true, nil, volumePath)
if podNode != nil {
mountPoints := globalMountPointsByNode[podNode.Name]

View File

@@ -95,46 +95,44 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
}
// TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
- func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
- path := "/mnt/volume1"
+ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
byteLen := 64
seed := time.Now().UTC().UnixNano()
ginkgo.By("Writing to the volume.")
- CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
+ CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
ginkgo.By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)
ginkgo.By("Testing that written file is accessible.")
- CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
+ CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
- framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, path)
+ framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, volumePath)
}
// TestKubeletRestartsAndRestoresMap tests that a volume mapped to a pod remains mapped after a kubelet restarts
- func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
- path := "/mnt/volume1"
+ func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
byteLen := 64
seed := time.Now().UTC().UnixNano()
ginkgo.By("Writing to the volume.")
- CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, path, byteLen, seed)
+ CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)
ginkgo.By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)
ginkgo.By("Testing that written pv is accessible.")
- CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, path, byteLen, seed)
+ CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)
- framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, path)
+ framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, volumePath)
}
// TestVolumeUnmountsFromDeletedPodWithForceOption tests that a volume unmounts if the client pod was deleted while the kubelet was down.
// forceDelete is true indicating whether the pod is forcefully deleted.
// checkSubpath is true indicating whether the subpath should be checked.
// If secondPod is set, it is started when kubelet is down to check that the volume is usable while the old pod is being deleted and the new pod is starting.
- func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool, secondPod *v1.Pod) {
+ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool, secondPod *v1.Pod, volumePath string) {
nodeIP, err := getHostAddress(c, clientPod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
@@ -154,10 +152,9 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
}
ginkgo.By("Writing to the volume.")
- path := "/mnt/volume1"
byteLen := 64
seed := time.Now().UTC().UnixNano()
- CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
+ CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
// This command is to make sure kubelet is started after test finishes no matter it fails or not.
defer func() {
@@ -209,7 +206,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
ginkgo.By("Testing that written file is accessible in the second pod.")
- CheckReadFromPath(f, secondPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
+ CheckReadFromPath(f, secondPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
err = c.CoreV1().Pods(secondPod.Namespace).Delete(context.TODO(), secondPod.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "when deleting the second pod")
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, secondPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
@@ -235,18 +232,18 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
}
// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
- func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
- TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false, nil)
+ func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
+ TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false, nil, volumePath)
}
// TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down.
- func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
- TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false, nil)
+ func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
+ TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false, nil, volumePath)
}
// TestVolumeUnmapsFromDeletedPodWithForceOption tests that a volume unmaps if the client pod was deleted while the kubelet was down.
// forceDelete is true indicating whether the pod is forcefully deleted.
- func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool) {
+ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, devicePath string) {
nodeIP, err := getHostAddress(c, clientPod)
framework.ExpectNoError(err, "Failed to get nodeIP.")
nodeIP = nodeIP + ":22"
@@ -317,13 +314,13 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
}
// TestVolumeUnmapsFromDeletedPod tests that a volume unmaps if the client pod was deleted while the kubelet was down.
- func TestVolumeUnmapsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
- TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, false)
+ func TestVolumeUnmapsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
+ TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, false, devicePath)
}
// TestVolumeUnmapsFromForceDeletedPod tests that a volume unmaps if the client pod was forcefully deleted while the kubelet was down.
- func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
- TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, true)
+ func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
+ TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, true, devicePath)
}
// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
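With the hard-coded path := "/mnt/volume1" lines removed, every helper in this file now receives the mount or device path from its caller, so tests that mount volumes somewhere other than /mnt/volume1 can still be verified. A hedged sketch of how a caller might derive that argument, written as if it lived in the same utils package (the wrapper function is hypothetical; the helpers and the e2epod constant are the ones shown in this PR):

// Illustrative only: compute the path once and pass it to both the filesystem
// and block variants of the unmount/unmap helpers.
func checkVolumeCleanup(c clientset.Interface, f *framework.Framework, fsPod, blockPod *v1.Pod) {
	path := fmt.Sprintf(e2epod.VolumeMountPathTemplate, 1) // resolves to "/mnt/volume1"
	TestVolumeUnmountsFromDeletedPod(c, f, fsPod, path)    // filesystem volume: mount path
	TestVolumeUnmapsFromDeletedPod(c, f, blockPod, path)   // block volume: device path
}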

View File

@@ -159,7 +159,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func()
*/
ginkgo.It("should test that a file written to the vsphere volume mount before kubelet restart can be read after restart [Disruptive]", func() {
e2eskipper.SkipUnlessSSHKeyPresent()
- utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod)
+ utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod, e2epod.VolumeMountPath1)
})
/*
@@ -175,7 +175,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func()
*/
ginkgo.It("should test that a vsphere volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() {
e2eskipper.SkipUnlessSSHKeyPresent()
- utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod)
+ utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod, e2epod.VolumeMountPath1)
})
/*