test/e2e: Reduce need to use local kubectl
Use the exec API (a POST to the apiserver) instead of running a local kubectl binary: PodExec() now calls ExecCommandInContainerWithFullOutput() instead of RunKubectl(). PodExec() takes an additional framework argument, which is passed down through the call chain. VerifyExecInPodFail casts the error to a different exit-code type, because the original cast panics when used with the new call method.
parent 5dc87d2919
commit 2fac3f2c20
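
In short, the exec helper stops shelling out to a locally installed kubectl and instead execs through the framework's API client (a POST to the pod's exec subresource). The before/after below is condensed from the diff that follows:

	// Before: runs a local kubectl binary.
	func PodExec(pod *v1.Pod, bashExec string) (string, error) {
		return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--", "/bin/sh", "-c", bashExec)
	}

	// After: execs via the framework's API-based helper; callers must now
	// supply the framework, so the extra argument is threaded down the chain.
	func PodExec(f *framework.Framework, pod *v1.Pod, bashExec string) (string, error) {
		stdout, _, err := f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", bashExec)
		return stdout, err
	}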
@@ -91,7 +91,7 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
			}

			// Must match content of test/images/volumes-tester/nfs/index.html
-			volume.TestVolumeClient(c, config, nil, "" /* fsType */, tests)
+			volume.TestVolumeClient(f, c, config, nil, "" /* fsType */, tests)
		})
	})

@@ -114,7 +114,7 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
				},
			}
			// Must match content of test/images/volume-tester/nfs/index.html
-			volume.TestVolumeClient(c, config, nil, "" /* fsType */, tests)
+			volume.TestVolumeClient(f, c, config, nil, "" /* fsType */, tests)
		})
	})

@@ -147,7 +147,7 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
					ExpectedContent: "Hello from GlusterFS!",
				},
			}
-			volume.TestVolumeClient(c, config, nil, "" /* fsType */, tests)
+			volume.TestVolumeClient(f, c, config, nil, "" /* fsType */, tests)
		})
	})
})

@@ -528,7 +528,7 @@ func runVolumeTesterPod(client clientset.Interface, config TestConfig, podSuffix
	return clientPod, nil
}

-func testVolumeContent(client clientset.Interface, pod *v1.Pod, fsGroup *int64, fsType string, tests []Test) {
+func testVolumeContent(f *framework.Framework, client clientset.Interface, pod *v1.Pod, fsGroup *int64, fsType string, tests []Test) {
	ginkgo.By("Checking that text file contents are perfect.")
	for i, test := range tests {
		if test.Mode == v1.PersistentVolumeBlock {

@@ -539,7 +539,7 @@ func testVolumeContent(client clientset.Interface, pod *v1.Pod, fsGroup *int64,
			framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)

			// Check that it's a real block device
-			utils.CheckVolumeModeOfPath(pod, test.Mode, deviceName)
+			utils.CheckVolumeModeOfPath(f, pod, test.Mode, deviceName)
		} else {
			// Filesystem: check content
			fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)

@@ -549,7 +549,7 @@ func testVolumeContent(client clientset.Interface, pod *v1.Pod, fsGroup *int64,

			// Check that a directory has been mounted
			dirName := filepath.Dir(fileName)
-			utils.CheckVolumeModeOfPath(pod, test.Mode, dirName)
+			utils.CheckVolumeModeOfPath(f, pod, test.Mode, dirName)

			if !framework.NodeOSDistroIs("windows") {
				// Filesystem: check fsgroup

@@ -574,20 +574,20 @@ func testVolumeContent(client clientset.Interface, pod *v1.Pod, fsGroup *int64,
// and check that the pod sees expected data, e.g. from the server pod.
// Multiple Tests can be specified to mount multiple volumes to a single
// pod.
-func TestVolumeClient(client clientset.Interface, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
+func TestVolumeClient(f *framework.Framework, client clientset.Interface, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
	clientPod, err := runVolumeTesterPod(client, config, "client", false, fsGroup, tests)
	if err != nil {
		framework.Failf("Failed to create client pod: %v", err)

	}
	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, clientPod))
-	testVolumeContent(client, clientPod, fsGroup, fsType, tests)
+	testVolumeContent(f, client, clientPod, fsGroup, fsType, tests)
}

// InjectContent inserts index.html with given content into given volume. It does so by
// starting and auxiliary pod which writes the file there.
// The volume must be writable.
-func InjectContent(client clientset.Interface, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
+func InjectContent(f *framework.Framework, client clientset.Interface, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
	privileged := true
	if framework.NodeOSDistroIs("windows") {
		privileged = false

@@ -621,7 +621,7 @@ func InjectContent(client clientset.Interface, config TestConfig, fsGroup *int64

	// Check that the data have been really written in this pod.
	// This tests non-persistent volume types
-	testVolumeContent(client, injectorPod, fsGroup, fsType, tests)
+	testVolumeContent(f, client, injectorPod, fsGroup, fsType, tests)
}

// CreateGCEVolume creates PersistentVolumeSource for GCEVolume.

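Call sites change mechanically: every helper on the path to PodExec gains a leading *framework.Framework parameter. For illustration only, a minimal hypothetical test using the updated fixture looks like the sketch below; config and tests stand in for setup elided from the surrounding suite:

	var _ = ginkgo.Describe("[sig-storage] example", func() {
		f := framework.NewDefaultFramework("volume-example") // hypothetical suite name
		ginkgo.It("serves expected content", func() {
			// f travels TestVolumeClient -> testVolumeContent -> utils helpers,
			// so the exec happens through the apiserver, not a local kubectl.
			volume.TestVolumeClient(f, f.ClientSet, config, nil, "" /* fsType */, tests)
		})
	})
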
@@ -62,7 +62,7 @@ func testFlexVolume(driver string, cs clientset.Interface, config volume.TestCon
			ExpectedContent: "Hello from flexvolume!",
		},
	}
-	volume.TestVolumeClient(cs, config, nil, "" /* fsType */, tests)
+	volume.TestVolumeClient(f, cs, config, nil, "" /* fsType */, tests)

	volume.TestCleanup(f, config)
}

@@ -215,7 +215,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
			writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)

			ginkgo.By("Writing in pod1")
-			podRWCmdExec(pod1, writeCmd)
+			podRWCmdExec(f, pod1, writeCmd)
		})

		ginkgo.AfterEach(func() {

@@ -226,28 +226,28 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
		ginkgo.It("should be able to mount volume and read from pod1", func() {
			ginkgo.By("Reading in pod1")
			// testFileContent was written in BeforeEach
-			testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVolType)
+			testReadFileContent(f, volumeDir, testFile, testFileContent, pod1, testVolType)
		})

		ginkgo.It("should be able to mount volume and write from pod1", func() {
			// testFileContent was written in BeforeEach
-			testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVolType)
+			testReadFileContent(f, volumeDir, testFile, testFileContent, pod1, testVolType)

			ginkgo.By("Writing in pod1")
			writeCmd := createWriteCmd(volumeDir, testFile, testVol.ltr.Path /*writeTestFileContent*/, testVolType)
-			podRWCmdExec(pod1, writeCmd)
+			podRWCmdExec(f, pod1, writeCmd)
		})
	})

	ginkgo.Context("Two pods mounting a local volume at the same time", func() {
		ginkgo.It("should be able to write from pod1 and read from pod2", func() {
-			twoPodsReadWriteTest(config, testVol)
+			twoPodsReadWriteTest(f, config, testVol)
		})
	})

	ginkgo.Context("Two pods mounting a local volume one after the other", func() {
		ginkgo.It("should be able to write from pod1 and read from pod2", func() {
-			twoPodsReadWriteSerialTest(config, testVol)
+			twoPodsReadWriteSerialTest(f, config, testVol)
		})
	})

@@ -703,7 +703,7 @@ func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeTyp
// The tests below are run against multiple mount point types

// Test two pods at the same time, write from pod1, and read from pod2
-func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
+func twoPodsReadWriteTest(f *framework.Framework, config *localTestConfig, testVol *localTestVolume) {
	ginkgo.By("Creating pod1 to write to the PV")
	pod1, pod1Err := createLocalPod(config, testVol, nil)
	framework.ExpectNoError(pod1Err)

@@ -712,10 +712,10 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
	writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)

	ginkgo.By("Writing in pod1")
-	podRWCmdExec(pod1, writeCmd)
+	podRWCmdExec(f, pod1, writeCmd)

	// testFileContent was written after creating pod1
-	testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)
+	testReadFileContent(f, volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)

	ginkgo.By("Creating pod2 to read from the PV")
	pod2, pod2Err := createLocalPod(config, testVol, nil)

@@ -723,15 +723,15 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
	verifyLocalPod(config, testVol, pod2, config.node0.Name)

	// testFileContent was written after creating pod1
-	testReadFileContent(volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)
+	testReadFileContent(f, volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)

	writeCmd = createWriteCmd(volumeDir, testFile, testVol.ltr.Path /*writeTestFileContent*/, testVol.localVolumeType)

	ginkgo.By("Writing in pod2")
-	podRWCmdExec(pod2, writeCmd)
+	podRWCmdExec(f, pod2, writeCmd)

	ginkgo.By("Reading in pod1")
-	testReadFileContent(volumeDir, testFile, testVol.ltr.Path, pod1, testVol.localVolumeType)
+	testReadFileContent(f, volumeDir, testFile, testVol.ltr.Path, pod1, testVol.localVolumeType)

	ginkgo.By("Deleting pod1")
	e2epod.DeletePodOrFail(config.client, config.ns, pod1.Name)

@@ -740,7 +740,7 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
}

// Test two pods one after other, write from pod1, and read from pod2
-func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolume) {
+func twoPodsReadWriteSerialTest(f *framework.Framework, config *localTestConfig, testVol *localTestVolume) {
	ginkgo.By("Creating pod1")
	pod1, pod1Err := createLocalPod(config, testVol, nil)
	framework.ExpectNoError(pod1Err)

@@ -749,10 +749,10 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolum
	writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)

	ginkgo.By("Writing in pod1")
-	podRWCmdExec(pod1, writeCmd)
+	podRWCmdExec(f, pod1, writeCmd)

	// testFileContent was written after creating pod1
-	testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)
+	testReadFileContent(f, volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)

	ginkgo.By("Deleting pod1")
	e2epod.DeletePodOrFail(config.client, config.ns, pod1.Name)

@@ -763,7 +763,7 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolum
	verifyLocalPod(config, testVol, pod2, config.node0.Name)

	ginkgo.By("Reading in pod2")
-	testReadFileContent(volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)
+	testReadFileContent(f, volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)

	ginkgo.By("Deleting pod2")
	e2epod.DeletePodOrFail(config.client, config.ns, pod2.Name)

@@ -1015,16 +1015,16 @@ func createReadCmd(testFileDir string, testFile string, volumeType localVolumeTy
}

// Read testFile and evaluate whether it contains the testFileContent
-func testReadFileContent(testFileDir string, testFile string, testFileContent string, pod *v1.Pod, volumeType localVolumeType) {
+func testReadFileContent(f *framework.Framework, testFileDir string, testFile string, testFileContent string, pod *v1.Pod, volumeType localVolumeType) {
	readCmd := createReadCmd(testFileDir, testFile, volumeType)
-	readOut := podRWCmdExec(pod, readCmd)
+	readOut := podRWCmdExec(f, pod, readCmd)
	gomega.Expect(readOut).To(gomega.ContainSubstring(testFileContent))
}

// Execute a read or write command in a pod.
// Fail on error
-func podRWCmdExec(pod *v1.Pod, cmd string) string {
-	out, err := utils.PodExec(pod, cmd)
+func podRWCmdExec(f *framework.Framework, pod *v1.Pod, cmd string) string {
+	out, err := utils.PodExec(f, pod, cmd)
	framework.Logf("podRWCmdExec out: %q err: %v", out, err)
	framework.ExpectNoError(err)
	return out

@@ -118,7 +118,7 @@ func (p *ephemeralTestSuite) defineTests(driver TestDriver, pattern testpatterns

		l.testCase.ReadOnly = true
		l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} {
-			storageutils.VerifyExecInPodSucceed(pod, "mount | grep /mnt/test | grep ro,")
+			storageutils.VerifyExecInPodSucceed(f, pod, "mount | grep /mnt/test | grep ro,")
			return nil
		}
		l.testCase.TestEphemeral()

@@ -130,7 +130,7 @@ func (p *ephemeralTestSuite) defineTests(driver TestDriver, pattern testpatterns

		l.testCase.ReadOnly = false
		l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} {
-			storageutils.VerifyExecInPodSucceed(pod, "mount | grep /mnt/test | grep rw,")
+			storageutils.VerifyExecInPodSucceed(f, pod, "mount | grep /mnt/test | grep rw,")
			return nil
		}
		l.testCase.TestEphemeral()

@@ -159,8 +159,8 @@ func (p *ephemeralTestSuite) defineTests(driver TestDriver, pattern testpatterns
	// visible in the other.
	if !readOnly && !shared {
		ginkgo.By("writing data in one pod and checking for it in the second")
-		storageutils.VerifyExecInPodSucceed(pod, "touch /mnt/test-0/hello-world")
-		storageutils.VerifyExecInPodSucceed(pod2, "[ ! -f /mnt/test-0/hello-world ]")
+		storageutils.VerifyExecInPodSucceed(f, pod, "touch /mnt/test-0/hello-world")
+		storageutils.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
	}

	defer StopPod(f.ClientSet, pod2)

@@ -365,18 +365,18 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n
		index := i + 1
		path := fmt.Sprintf("/mnt/volume%d", index)
		ginkgo.By(fmt.Sprintf("Checking if the volume%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
-		utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, path)
+		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)

		if readSeedBase > 0 {
			ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
-			utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, readSeedBase+int64(i))
+			utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, readSeedBase+int64(i))
		}

		ginkgo.By(fmt.Sprintf("Checking if write to the volume%d works properly", index))
-		utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))
+		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))

		ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
-		utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))
+		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))
	}

	pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})

@@ -452,22 +452,22 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
	for i, pod := range pods {
		index := i + 1
		ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
-		utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, path)
+		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)

		if i != 0 {
			ginkgo.By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d write works properly", index, index-1))
			// For 1st pod, no one has written data yet, so pass the read check
-			utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+			utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
		}

		// Update the seed and check if write/read works properly
		seed = time.Now().UTC().UnixNano()

		ginkgo.By(fmt.Sprintf("Checking if write to the volume in pod%d works properly", index))
-		utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)

		ginkgo.By(fmt.Sprintf("Checking if read from the volume in pod%d works properly", index))
-		utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
	}

	// Delete the last pod and remove from slice of pods

@@ -483,7 +483,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
		index := i + 1
		// index of pod and index of pvc match, because pods are created above way
		ginkgo.By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
-		utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, "/mnt/volume1")
+		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, "/mnt/volume1")

		if i == 0 {
			// This time there should be data that last pod wrote, for 1st pod

@@ -491,15 +491,15 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
		} else {
			ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that pod%d write works properly", index, index-1))
		}
-		utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)

		// Update the seed and check if write/read works properly
		seed = time.Now().UTC().UnixNano()

		ginkgo.By(fmt.Sprintf("Rechecking if write to the volume in pod%d works properly", index))
-		utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)

		ginkgo.By(fmt.Sprintf("Rechecking if read from the volume in pod%d works properly", index))
-		utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
	}
}

@@ -243,19 +243,19 @@ func makePodSpec(config volume.TestConfig, initCmd string, volsrc v1.VolumeSourc
}

// Write `fsize` bytes to `fpath` in the pod, using dd and the `ddInput` file.
-func writeToFile(pod *v1.Pod, fpath, ddInput string, fsize int64) error {
+func writeToFile(f *framework.Framework, pod *v1.Pod, fpath, ddInput string, fsize int64) error {
	ginkgo.By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath))
	loopCnt := fsize / testpatterns.MinFileSize
	writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, ddInput, testpatterns.MinFileSize, fpath)
-	_, err := utils.PodExec(pod, writeCmd)
+	_, err := utils.PodExec(f, pod, writeCmd)

	return err
}

// Verify that the test file is the expected size and contains the expected content.
-func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) error {
+func verifyFile(f *framework.Framework, pod *v1.Pod, fpath string, expectSize int64, ddInput string) error {
	ginkgo.By("verifying file size")
-	rtnstr, err := utils.PodExec(pod, fmt.Sprintf("stat -c %%s %s", fpath))
+	rtnstr, err := utils.PodExec(f, pod, fmt.Sprintf("stat -c %%s %s", fpath))
	if err != nil || rtnstr == "" {
		return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err)
	}

@@ -268,7 +268,7 @@ func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) err
	}

	ginkgo.By("verifying file hash")
-	rtnstr, err = utils.PodExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
+	rtnstr, err = utils.PodExec(f, pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
	if err != nil {
		return fmt.Errorf("unable to test file hash via `md5sum %s`: %v", fpath, err)
	}

@@ -287,9 +287,9 @@ func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) err
}

// Delete `fpath` to save some disk space on host. Delete errors are logged but ignored.
-func deleteFile(pod *v1.Pod, fpath string) {
+func deleteFile(f *framework.Framework, pod *v1.Pod, fpath string) {
	ginkgo.By(fmt.Sprintf("deleting test file %s...", fpath))
-	_, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath))
+	_, err := utils.PodExec(f, pod, fmt.Sprintf("rm -f %s", fpath))
	if err != nil {
		// keep going, the test dir will be deleted when the volume is unmounted
		framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err)

@@ -320,7 +320,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume.
		return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err)
	}
	defer func() {
-		deleteFile(clientPod, ddInput)
+		deleteFile(f, clientPod, ddInput)
		ginkgo.By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
		e := e2epod.DeletePodWithWait(cs, clientPod)
		if e != nil {

@@ -347,12 +347,12 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume.
		}
		fpath := filepath.Join(mountPath, fmt.Sprintf("%s-%d", file, fsize))
		defer func() {
-			deleteFile(clientPod, fpath)
+			deleteFile(f, clientPod, fpath)
		}()
-		if err = writeToFile(clientPod, fpath, ddInput, fsize); err != nil {
+		if err = writeToFile(f, clientPod, fpath, ddInput, fsize); err != nil {
			return err
		}
-		if err = verifyFile(clientPod, fpath, fsize, ddInput); err != nil {
+		if err = verifyFile(f, clientPod, fpath, fsize, ddInput); err != nil {
			return err
		}
	}

@@ -178,9 +178,9 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
		// local), plugin skips setting fsGroup if volume is already mounted
		// and we don't have reliable way to detect volumes are unmounted or
		// not before starting the second pod.
-		volume.InjectContent(f.ClientSet, config, fsGroup, pattern.FsType, tests)
+		volume.InjectContent(f, f.ClientSet, config, fsGroup, pattern.FsType, tests)
		if driver.GetDriverInfo().Capabilities[CapPersistence] {
-			volume.TestVolumeClient(f.ClientSet, config, fsGroup, pattern.FsType, tests)
+			volume.TestVolumeClient(f, f.ClientSet, config, fsGroup, pattern.FsType, tests)
		} else {
			ginkgo.By("Skipping persistence check for non-persistent volume")
		}

@@ -35,6 +35,7 @@ import (
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
+	clientexec "k8s.io/client-go/util/exec"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

@@ -62,14 +63,15 @@ const (
	podSecurityPolicyPrivilegedClusterRoleName = "e2e-test-privileged-psp"
)

-// PodExec wraps RunKubectl to execute a bash cmd in target pod
-func PodExec(pod *v1.Pod, bashExec string) (string, error) {
-	return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--", "/bin/sh", "-c", bashExec)
+// PodExec runs f.ExecCommandInContainerWithFullOutput to execute a shell cmd in target pod
+func PodExec(f *framework.Framework, pod *v1.Pod, bashExec string) (string, error) {
+	stdout, _, err := f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", bashExec)
+	return stdout, err
}

// VerifyExecInPodSucceed verifies bash cmd in target pod succeed
-func VerifyExecInPodSucceed(pod *v1.Pod, bashExec string) {
-	_, err := PodExec(pod, bashExec)
+func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, bashExec string) {
+	_, err := PodExec(f, pod, bashExec)
	if err != nil {
		if err, ok := err.(uexec.CodeExitError); ok {
			exitCode := err.ExitStatus()

@@ -85,10 +87,10 @@ func VerifyExecInPodSucceed(pod *v1.Pod, bashExec string) {
}

// VerifyExecInPodFail verifies bash cmd in target pod fail with certain exit code
-func VerifyExecInPodFail(pod *v1.Pod, bashExec string, exitCode int) {
-	_, err := PodExec(pod, bashExec)
+func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, bashExec string, exitCode int) {
+	_, err := PodExec(f, pod, bashExec)
	if err != nil {
-		if err, ok := err.(uexec.CodeExitError); ok {
+		if err, ok := err.(clientexec.ExitError); ok {
			actualExitCode := err.ExitStatus()
			framework.ExpectEqual(actualExitCode, exitCode,
				"%q should fail with exit code %d, but failed with exit code %d and error message %q",

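The error handling is the one non-mechanical change here. Errors returned by the API-based exec path implement clientexec.ExitError (k8s.io/client-go/util/exec) rather than uexec.CodeExitError (k8s.io/utils/exec); per the commit message, keeping the old concrete-type cast panics under the new call method, so VerifyExecInPodFail now asserts against the client-go interface. Condensed from the hunk above (trailing message arguments elided, as in the diff):

	_, err := PodExec(f, pod, bashExec)
	if err != nil {
		// clientexec.ExitError is implemented by errors coming back over the
		// apiserver exec stream; ExitStatus() is the remote command's exit code.
		if exitErr, ok := err.(clientexec.ExitError); ok {
			framework.ExpectEqual(exitErr.ExitStatus(), exitCode,
				"%q should fail with exit code %d, but failed with exit code %d and error message %q",
				/* remaining arguments as in the source */)
		}
	}
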
@@ -236,13 +238,13 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
	seed := time.Now().UTC().UnixNano()

	ginkgo.By("Writing to the volume.")
-	CheckWriteToPath(clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)
+	CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)

	ginkgo.By("Restarting kubelet")
	KubeletCommand(KRestart, c, clientPod)

	ginkgo.By("Testing that written file is accessible.")
-	CheckReadFromPath(clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)
+	CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)

	framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, path)
}

@@ -254,13 +256,13 @@ func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Frame
	seed := time.Now().UTC().UnixNano()

	ginkgo.By("Writing to the volume.")
-	CheckWriteToPath(clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)
+	CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)

	ginkgo.By("Restarting kubelet")
	KubeletCommand(KRestart, c, clientPod)

	ginkgo.By("Testing that written pv is accessible.")
-	CheckReadFromPath(clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)
+	CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)

	framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, path)
}

@@ -594,46 +596,46 @@ func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
}

// CheckVolumeModeOfPath check mode of volume
-func CheckVolumeModeOfPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
+func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
	if volMode == v1.PersistentVolumeBlock {
		// Check if block exists
-		VerifyExecInPodSucceed(pod, fmt.Sprintf("test -b %s", path))
+		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -b %s", path))

		// Double check that it's not directory
-		VerifyExecInPodFail(pod, fmt.Sprintf("test -d %s", path), 1)
+		VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", path), 1)
	} else {
		// Check if directory exists
-		VerifyExecInPodSucceed(pod, fmt.Sprintf("test -d %s", path))
+		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path))

		// Double check that it's not block
-		VerifyExecInPodFail(pod, fmt.Sprintf("test -b %s", path), 1)
+		VerifyExecInPodFail(f, pod, fmt.Sprintf("test -b %s", path), 1)
	}
}

// CheckReadWriteToPath check that path can be read and written
-func CheckReadWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
+func CheckReadWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
	if volMode == v1.PersistentVolumeBlock {
		// random -> file1
-		VerifyExecInPodSucceed(pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
+		VerifyExecInPodSucceed(f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
		// file1 -> dev (write to dev)
-		VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
+		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
		// dev -> file2 (read from dev)
-		VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
+		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
		// file1 == file2 (check contents)
-		VerifyExecInPodSucceed(pod, "diff /tmp/file1 /tmp/file2")
+		VerifyExecInPodSucceed(f, pod, "diff /tmp/file1 /tmp/file2")
		// Clean up temp files
-		VerifyExecInPodSucceed(pod, "rm -f /tmp/file1 /tmp/file2")
+		VerifyExecInPodSucceed(f, pod, "rm -f /tmp/file1 /tmp/file2")

		// Check that writing file to block volume fails
-		VerifyExecInPodFail(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
+		VerifyExecInPodFail(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
	} else {
		// text -> file1 (write to file)
-		VerifyExecInPodSucceed(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
+		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
		// grep file1 (read from file and check contents)
-		VerifyExecInPodSucceed(pod, fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path))
+		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path))

		// Check that writing to directory as block volume fails
-		VerifyExecInPodFail(pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
+		VerifyExecInPodFail(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
	}
}

@@ -651,7 +653,7 @@ func genBinDataFromSeed(len int, seed int64) []byte {
}

// CheckReadFromPath validate that file can be properly read.
-func CheckReadFromPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
+func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
	var pathForVolMode string
	if volMode == v1.PersistentVolumeBlock {
		pathForVolMode = path

@@ -661,12 +663,12 @@ func CheckReadFromPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string

	sum := sha256.Sum256(genBinDataFromSeed(len, seed))

-	VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum", pathForVolMode, len))
-	VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, len, sum))
+	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum", pathForVolMode, len))
+	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, len, sum))
}

// CheckWriteToPath that file can be properly written.
-func CheckWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
+func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
	var pathForVolMode string
	if volMode == v1.PersistentVolumeBlock {
		pathForVolMode = path

@@ -676,8 +678,8 @@ func CheckWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string,

	encoded := base64.StdEncoding.EncodeToString(genBinDataFromSeed(len, seed))

-	VerifyExecInPodSucceed(pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
-	VerifyExecInPodSucceed(pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s bs=%d count=1", encoded, pathForVolMode, len))
+	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
+	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s bs=%d count=1", encoded, pathForVolMode, len))
}

// findMountPoints returns all mount points on given node under specified directory.

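CheckWriteToPath and CheckReadFromPath work as a deterministic pair: both regenerate the same pseudo-random payload from (len, seed) via genBinDataFromSeed, so the verifier never needs the writer's bytes, only the seed. The writer pipes base64-encoded data through dd into the path; the reader hashes the path's first len bytes inside the pod and greps for a sha256 digest computed locally. A standalone sketch of that idea, assuming genBinDataFromSeed seeds math/rand (an assumption; the helper's body is not shown in this diff):

	package main

	import (
		"crypto/sha256"
		"fmt"
		"math/rand"
	)

	// binData mimics a genBinDataFromSeed-style generator: identical (n, seed)
	// inputs always produce identical bytes.
	func binData(n int, seed int64) []byte {
		b := make([]byte, n)
		rand.New(rand.NewSource(seed)).Read(b)
		return b
	}

	func main() {
		// Only this hex digest needs to reach the pod; CheckReadFromPath ships
		// it via the "grep -Fq" in its verification command.
		sum := sha256.Sum256(binData(1024, 42))
		fmt.Printf("%x\n", sum)
	}
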
@@ -108,7 +108,7 @@ var _ = utils.SIGDescribe("Volumes", func() {
					ExpectedContent: "this is the second file",
				},
			}
-			volume.TestVolumeClient(cs, config, nil, "" /* fsType */, tests)
+			volume.TestVolumeClient(f, cs, config, nil, "" /* fsType */, tests)
		})
	})
})

@@ -99,10 +99,10 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
	framework.ExpectNoError(err)

	ginkgo.By("Checking if PV exists as expected volume mode")
-	utils.CheckVolumeModeOfPath(t.pod, block, devicePath)
+	utils.CheckVolumeModeOfPath(f, t.pod, block, devicePath)

	ginkgo.By("Checking if read/write to PV works properly")
-	utils.CheckReadWriteToPath(t.pod, block, devicePath)
+	utils.CheckReadWriteToPath(f, t.pod, block, devicePath)
}

// Test waits for the downgrade to complete, and then verifies that a pod can no

@@ -112,7 +112,7 @@ func (t *VolumeModeDowngradeTest) Test(f *framework.Framework, done <-chan struc
	<-done

	ginkgo.By("Verifying that nothing exists at the device path in the pod")
-	utils.VerifyExecInPodFail(t.pod, fmt.Sprintf("test -e %s", devicePath), 1)
+	utils.VerifyExecInPodFail(f, t.pod, fmt.Sprintf("test -e %s", devicePath), 1)
}

// Teardown cleans up any remaining resources.