Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #84042 from okartau/avoid-local-kubectl

test/e2e: Reduce need to use local kubectl

Commit 6ff3b68d72
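The change, in one sentence: storage test helpers that used to take a bare clientset.Interface and shell out to a locally installed kubectl binary now take the test's *framework.Framework and exec through the API server's exec subresource. The pivotal before/after, copied from the utils hunk later in this diff:

    // Before: every exec spawned a local kubectl process.
    func PodExec(pod *v1.Pod, bashExec string) (string, error) {
        return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--", "/bin/sh", "-c", bashExec)
    }

    // After: exec in the pod's first container over the framework's client-go
    // connection; no kubectl binary is required on the test runner.
    func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, error) {
        stdout, _, err := f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
        return stdout, err
    }

The rest of the diff threads the extra *framework.Framework parameter through every helper that ultimately reaches PodExec.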
@@ -91,7 +91,7 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
 			}

 			// Must match content of test/images/volumes-tester/nfs/index.html
-			volume.TestVolumeClient(c, config, nil, "" /* fsType */, tests)
+			volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
 		})
 	})

@@ -114,7 +114,7 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
 				},
 			}
 			// Must match content of test/images/volume-tester/nfs/index.html
-			volume.TestVolumeClient(c, config, nil, "" /* fsType */, tests)
+			volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
 		})
 	})

@@ -147,7 +147,7 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
 					ExpectedContent: "Hello from GlusterFS!",
 				},
 			}
-			volume.TestVolumeClient(c, config, nil, "" /* fsType */, tests)
+			volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
 		})
 	})
 })
@@ -528,7 +528,7 @@ func runVolumeTesterPod(client clientset.Interface, config TestConfig, podSuffix
 	return clientPod, nil
 }

-func testVolumeContent(client clientset.Interface, pod *v1.Pod, fsGroup *int64, fsType string, tests []Test) {
+func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsType string, tests []Test) {
 	ginkgo.By("Checking that text file contents are perfect.")
 	for i, test := range tests {
 		if test.Mode == v1.PersistentVolumeBlock {
@@ -539,7 +539,7 @@ func testVolumeContent(client clientset.Interface, pod *v1.Pod, fsGroup *int64,
 			framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)

 			// Check that it's a real block device
-			utils.CheckVolumeModeOfPath(pod, test.Mode, deviceName)
+			utils.CheckVolumeModeOfPath(f, pod, test.Mode, deviceName)
 		} else {
 			// Filesystem: check content
 			fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
@@ -549,7 +549,7 @@ func testVolumeContent(client clientset.Interface, pod *v1.Pod, fsGroup *int64,

 			// Check that a directory has been mounted
 			dirName := filepath.Dir(fileName)
-			utils.CheckVolumeModeOfPath(pod, test.Mode, dirName)
+			utils.CheckVolumeModeOfPath(f, pod, test.Mode, dirName)

 			if !framework.NodeOSDistroIs("windows") {
 				// Filesystem: check fsgroup
@@ -574,32 +574,32 @@ func testVolumeContent(client clientset.Interface, pod *v1.Pod, fsGroup *int64,
 // and check that the pod sees expected data, e.g. from the server pod.
 // Multiple Tests can be specified to mount multiple volumes to a single
 // pod.
-func TestVolumeClient(client clientset.Interface, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
-	clientPod, err := runVolumeTesterPod(client, config, "client", false, fsGroup, tests)
+func TestVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
+	clientPod, err := runVolumeTesterPod(f.ClientSet, config, "client", false, fsGroup, tests)
 	if err != nil {
 		framework.Failf("Failed to create client pod: %v", err)

 	}
-	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, clientPod))
-	testVolumeContent(client, clientPod, fsGroup, fsType, tests)
+	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, clientPod))
+	testVolumeContent(f, clientPod, fsGroup, fsType, tests)
 }

 // InjectContent inserts index.html with given content into given volume. It does so by
 // starting and auxiliary pod which writes the file there.
 // The volume must be writable.
-func InjectContent(client clientset.Interface, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
+func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
 	privileged := true
 	if framework.NodeOSDistroIs("windows") {
 		privileged = false
 	}
-	injectorPod, err := runVolumeTesterPod(client, config, "injector", privileged, fsGroup, tests)
+	injectorPod, err := runVolumeTesterPod(f.ClientSet, config, "injector", privileged, fsGroup, tests)
 	if err != nil {
 		framework.Failf("Failed to create injector pod: %v", err)
 		return
 	}
 	defer func() {
-		e2epod.DeletePodOrFail(client, injectorPod.Namespace, injectorPod.Name)
-		e2epod.WaitForPodToDisappear(client, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
+		e2epod.DeletePodOrFail(f.ClientSet, injectorPod.Namespace, injectorPod.Name)
+		e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
 	}()

 	ginkgo.By("Writing text file contents in the container.")
@@ -621,7 +621,7 @@ func InjectContent(client clientset.Interface, config TestConfig, fsGroup *int64

 	// Check that the data have been really written in this pod.
 	// This tests non-persistent volume types
-	testVolumeContent(client, injectorPod, fsGroup, fsType, tests)
+	testVolumeContent(f, injectorPod, fsGroup, fsType, tests)
 }

 // CreateGCEVolume creates PersistentVolumeSource for GCEVolume.
@@ -49,7 +49,7 @@ const (

 // testFlexVolume tests that a client pod using a given flexvolume driver
 // successfully mounts it and runs
-func testFlexVolume(driver string, cs clientset.Interface, config volume.TestConfig, f *framework.Framework) {
+func testFlexVolume(driver string, config volume.TestConfig, f *framework.Framework) {
 	tests := []volume.Test{
 		{
 			Volume: v1.VolumeSource{
@@ -62,7 +62,7 @@ func testFlexVolume(driver string, cs clientset.Interface, config volume.TestCon
 			ExpectedContent: "Hello from flexvolume!",
 		},
 	}
-	volume.TestVolumeClient(cs, config, nil, "" /* fsType */, tests)
+	volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)

 	volume.TestCleanup(f, config)
 }
@@ -190,7 +190,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
 		ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
 		installFlex(cs, node, "k8s", driverInstallAs, path.Join(driverDir, driver))

-		testFlexVolume(driverInstallAs, cs, config, f)
+		testFlexVolume(driverInstallAs, config, f)

 		ginkgo.By("waiting for flex client pod to terminate")
 		if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
@@ -210,7 +210,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
 		ginkgo.By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs))
 		installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver))

-		testFlexVolume(driverInstallAs, cs, config, f)
+		testFlexVolume(driverInstallAs, config, f)

 		ginkgo.By("waiting for flex client pod to terminate")
 		if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
@@ -215,7 +215,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 			writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)

 			ginkgo.By("Writing in pod1")
-			podRWCmdExec(pod1, writeCmd)
+			podRWCmdExec(f, pod1, writeCmd)
 		})

 		ginkgo.AfterEach(func() {
@@ -226,28 +226,28 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 		ginkgo.It("should be able to mount volume and read from pod1", func() {
 			ginkgo.By("Reading in pod1")
 			// testFileContent was written in BeforeEach
-			testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVolType)
+			testReadFileContent(f, volumeDir, testFile, testFileContent, pod1, testVolType)
 		})

 		ginkgo.It("should be able to mount volume and write from pod1", func() {
 			// testFileContent was written in BeforeEach
-			testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVolType)
+			testReadFileContent(f, volumeDir, testFile, testFileContent, pod1, testVolType)

 			ginkgo.By("Writing in pod1")
 			writeCmd := createWriteCmd(volumeDir, testFile, testVol.ltr.Path /*writeTestFileContent*/, testVolType)
-			podRWCmdExec(pod1, writeCmd)
+			podRWCmdExec(f, pod1, writeCmd)
 		})
 	})

 	ginkgo.Context("Two pods mounting a local volume at the same time", func() {
 		ginkgo.It("should be able to write from pod1 and read from pod2", func() {
-			twoPodsReadWriteTest(config, testVol)
+			twoPodsReadWriteTest(f, config, testVol)
 		})
 	})

 	ginkgo.Context("Two pods mounting a local volume one after the other", func() {
 		ginkgo.It("should be able to write from pod1 and read from pod2", func() {
-			twoPodsReadWriteSerialTest(config, testVol)
+			twoPodsReadWriteSerialTest(f, config, testVol)
 		})
 	})

@@ -703,7 +703,7 @@ func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeTyp
 // The tests below are run against multiple mount point types

 // Test two pods at the same time, write from pod1, and read from pod2
-func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
+func twoPodsReadWriteTest(f *framework.Framework, config *localTestConfig, testVol *localTestVolume) {
 	ginkgo.By("Creating pod1 to write to the PV")
 	pod1, pod1Err := createLocalPod(config, testVol, nil)
 	framework.ExpectNoError(pod1Err)
@@ -712,10 +712,10 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
 	writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)

 	ginkgo.By("Writing in pod1")
-	podRWCmdExec(pod1, writeCmd)
+	podRWCmdExec(f, pod1, writeCmd)

 	// testFileContent was written after creating pod1
-	testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)
+	testReadFileContent(f, volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)

 	ginkgo.By("Creating pod2 to read from the PV")
 	pod2, pod2Err := createLocalPod(config, testVol, nil)
@@ -723,15 +723,15 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
 	verifyLocalPod(config, testVol, pod2, config.node0.Name)

 	// testFileContent was written after creating pod1
-	testReadFileContent(volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)
+	testReadFileContent(f, volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)

 	writeCmd = createWriteCmd(volumeDir, testFile, testVol.ltr.Path /*writeTestFileContent*/, testVol.localVolumeType)

 	ginkgo.By("Writing in pod2")
-	podRWCmdExec(pod2, writeCmd)
+	podRWCmdExec(f, pod2, writeCmd)

 	ginkgo.By("Reading in pod1")
-	testReadFileContent(volumeDir, testFile, testVol.ltr.Path, pod1, testVol.localVolumeType)
+	testReadFileContent(f, volumeDir, testFile, testVol.ltr.Path, pod1, testVol.localVolumeType)

 	ginkgo.By("Deleting pod1")
 	e2epod.DeletePodOrFail(config.client, config.ns, pod1.Name)
@@ -740,7 +740,7 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
 }

 // Test two pods one after other, write from pod1, and read from pod2
-func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolume) {
+func twoPodsReadWriteSerialTest(f *framework.Framework, config *localTestConfig, testVol *localTestVolume) {
 	ginkgo.By("Creating pod1")
 	pod1, pod1Err := createLocalPod(config, testVol, nil)
 	framework.ExpectNoError(pod1Err)
@@ -749,10 +749,10 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolum
 	writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)

 	ginkgo.By("Writing in pod1")
-	podRWCmdExec(pod1, writeCmd)
+	podRWCmdExec(f, pod1, writeCmd)

 	// testFileContent was written after creating pod1
-	testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)
+	testReadFileContent(f, volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)

 	ginkgo.By("Deleting pod1")
 	e2epod.DeletePodOrFail(config.client, config.ns, pod1.Name)
@@ -763,7 +763,7 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolum
 	verifyLocalPod(config, testVol, pod2, config.node0.Name)

 	ginkgo.By("Reading in pod2")
-	testReadFileContent(volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)
+	testReadFileContent(f, volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)

 	ginkgo.By("Deleting pod2")
 	e2epod.DeletePodOrFail(config.client, config.ns, pod2.Name)
@@ -1015,16 +1015,16 @@ func createReadCmd(testFileDir string, testFile string, volumeType localVolumeTy
 }

 // Read testFile and evaluate whether it contains the testFileContent
-func testReadFileContent(testFileDir string, testFile string, testFileContent string, pod *v1.Pod, volumeType localVolumeType) {
+func testReadFileContent(f *framework.Framework, testFileDir string, testFile string, testFileContent string, pod *v1.Pod, volumeType localVolumeType) {
 	readCmd := createReadCmd(testFileDir, testFile, volumeType)
-	readOut := podRWCmdExec(pod, readCmd)
+	readOut := podRWCmdExec(f, pod, readCmd)
 	gomega.Expect(readOut).To(gomega.ContainSubstring(testFileContent))
 }

 // Execute a read or write command in a pod.
 // Fail on error
-func podRWCmdExec(pod *v1.Pod, cmd string) string {
-	out, err := utils.PodExec(pod, cmd)
+func podRWCmdExec(f *framework.Framework, pod *v1.Pod, cmd string) string {
+	out, err := utils.PodExec(f, pod, cmd)
 	framework.Logf("podRWCmdExec out: %q err: %v", out, err)
 	framework.ExpectNoError(err)
 	return out
@@ -118,7 +118,7 @@ func (p *ephemeralTestSuite) defineTests(driver TestDriver, pattern testpatterns

 	l.testCase.ReadOnly = true
 	l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} {
-		storageutils.VerifyExecInPodSucceed(pod, "mount | grep /mnt/test | grep ro,")
+		storageutils.VerifyExecInPodSucceed(f, pod, "mount | grep /mnt/test | grep ro,")
 		return nil
 	}
 	l.testCase.TestEphemeral()
@@ -130,7 +130,7 @@ func (p *ephemeralTestSuite) defineTests(driver TestDriver, pattern testpatterns

 	l.testCase.ReadOnly = false
 	l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} {
-		storageutils.VerifyExecInPodSucceed(pod, "mount | grep /mnt/test | grep rw,")
+		storageutils.VerifyExecInPodSucceed(f, pod, "mount | grep /mnt/test | grep rw,")
 		return nil
 	}
 	l.testCase.TestEphemeral()
@@ -159,8 +159,8 @@ func (p *ephemeralTestSuite) defineTests(driver TestDriver, pattern testpatterns
 	// visible in the other.
 	if !readOnly && !shared {
 		ginkgo.By("writing data in one pod and checking for it in the second")
-		storageutils.VerifyExecInPodSucceed(pod, "touch /mnt/test-0/hello-world")
-		storageutils.VerifyExecInPodSucceed(pod2, "[ ! -f /mnt/test-0/hello-world ]")
+		storageutils.VerifyExecInPodSucceed(f, pod, "touch /mnt/test-0/hello-world")
+		storageutils.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
 	}

 	defer StopPod(f.ClientSet, pod2)
@@ -365,18 +365,18 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n
 		index := i + 1
 		path := fmt.Sprintf("/mnt/volume%d", index)
 		ginkgo.By(fmt.Sprintf("Checking if the volume%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
-		utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, path)
+		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)

 		if readSeedBase > 0 {
 			ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
-			utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, readSeedBase+int64(i))
+			utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, readSeedBase+int64(i))
 		}

 		ginkgo.By(fmt.Sprintf("Checking if write to the volume%d works properly", index))
-		utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))
+		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))

 		ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
-		utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))
+		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))
 	}

 	pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
@@ -452,22 +452,22 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 	for i, pod := range pods {
 		index := i + 1
 		ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
-		utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, path)
+		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)

 		if i != 0 {
 			ginkgo.By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d write works properly", index, index-1))
 			// For 1st pod, no one has written data yet, so pass the read check
-			utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+			utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
 		}

 		// Update the seed and check if write/read works properly
 		seed = time.Now().UTC().UnixNano()

 		ginkgo.By(fmt.Sprintf("Checking if write to the volume in pod%d works properly", index))
-		utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)

 		ginkgo.By(fmt.Sprintf("Checking if read from the volume in pod%d works properly", index))
-		utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
 	}

 	// Delete the last pod and remove from slice of pods
@@ -483,7 +483,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		index := i + 1
 		// index of pod and index of pvc match, because pods are created above way
 		ginkgo.By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
-		utils.CheckVolumeModeOfPath(pod, *pvc.Spec.VolumeMode, "/mnt/volume1")
+		utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, "/mnt/volume1")

 		if i == 0 {
 			// This time there should be data that last pod wrote, for 1st pod
@@ -491,15 +491,15 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		} else {
 			ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that pod%d write works properly", index, index-1))
 		}
-		utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)

 		// Update the seed and check if write/read works properly
 		seed = time.Now().UTC().UnixNano()

 		ginkgo.By(fmt.Sprintf("Rechecking if write to the volume in pod%d works properly", index))
-		utils.CheckWriteToPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)

 		ginkgo.By(fmt.Sprintf("Rechecking if read from the volume in pod%d works properly", index))
-		utils.CheckReadFromPath(pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
 	}
 }
@@ -243,19 +243,19 @@ func makePodSpec(config volume.TestConfig, initCmd string, volsrc v1.VolumeSourc
 }

 // Write `fsize` bytes to `fpath` in the pod, using dd and the `ddInput` file.
-func writeToFile(pod *v1.Pod, fpath, ddInput string, fsize int64) error {
+func writeToFile(f *framework.Framework, pod *v1.Pod, fpath, ddInput string, fsize int64) error {
 	ginkgo.By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath))
 	loopCnt := fsize / testpatterns.MinFileSize
 	writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, ddInput, testpatterns.MinFileSize, fpath)
-	_, err := utils.PodExec(pod, writeCmd)
+	_, err := utils.PodExec(f, pod, writeCmd)

 	return err
 }

 // Verify that the test file is the expected size and contains the expected content.
-func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) error {
+func verifyFile(f *framework.Framework, pod *v1.Pod, fpath string, expectSize int64, ddInput string) error {
 	ginkgo.By("verifying file size")
-	rtnstr, err := utils.PodExec(pod, fmt.Sprintf("stat -c %%s %s", fpath))
+	rtnstr, err := utils.PodExec(f, pod, fmt.Sprintf("stat -c %%s %s", fpath))
 	if err != nil || rtnstr == "" {
 		return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err)
 	}
@@ -268,7 +268,7 @@ func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) err
 	}

 	ginkgo.By("verifying file hash")
-	rtnstr, err = utils.PodExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
+	rtnstr, err = utils.PodExec(f, pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
 	if err != nil {
 		return fmt.Errorf("unable to test file hash via `md5sum %s`: %v", fpath, err)
 	}
@@ -287,9 +287,9 @@ func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) err
 }

 // Delete `fpath` to save some disk space on host. Delete errors are logged but ignored.
-func deleteFile(pod *v1.Pod, fpath string) {
+func deleteFile(f *framework.Framework, pod *v1.Pod, fpath string) {
 	ginkgo.By(fmt.Sprintf("deleting test file %s...", fpath))
-	_, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath))
+	_, err := utils.PodExec(f, pod, fmt.Sprintf("rm -f %s", fpath))
 	if err != nil {
 		// keep going, the test dir will be deleted when the volume is unmounted
 		framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err)
@@ -320,7 +320,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume.
 		return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err)
 	}
 	defer func() {
-		deleteFile(clientPod, ddInput)
+		deleteFile(f, clientPod, ddInput)
 		ginkgo.By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
 		e := e2epod.DeletePodWithWait(cs, clientPod)
 		if e != nil {
@@ -347,12 +347,12 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume.
 		}
 		fpath := filepath.Join(mountPath, fmt.Sprintf("%s-%d", file, fsize))
 		defer func() {
-			deleteFile(clientPod, fpath)
+			deleteFile(f, clientPod, fpath)
 		}()
-		if err = writeToFile(clientPod, fpath, ddInput, fsize); err != nil {
+		if err = writeToFile(f, clientPod, fpath, ddInput, fsize); err != nil {
 			return err
 		}
-		if err = verifyFile(clientPod, fpath, fsize, ddInput); err != nil {
+		if err = verifyFile(f, clientPod, fpath, fsize, ddInput); err != nil {
 			return err
 		}
 	}
@@ -178,9 +178,9 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 	// local), plugin skips setting fsGroup if volume is already mounted
 	// and we don't have reliable way to detect volumes are unmounted or
 	// not before starting the second pod.
-	volume.InjectContent(f.ClientSet, config, fsGroup, pattern.FsType, tests)
+	volume.InjectContent(f, config, fsGroup, pattern.FsType, tests)
 	if driver.GetDriverInfo().Capabilities[CapPersistence] {
-		volume.TestVolumeClient(f.ClientSet, config, fsGroup, pattern.FsType, tests)
+		volume.TestVolumeClient(f, config, fsGroup, pattern.FsType, tests)
 	} else {
 		ginkgo.By("Skipping persistence check for non-persistent volume")
 	}
@@ -35,6 +35,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
+	clientexec "k8s.io/client-go/util/exec"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -62,44 +63,45 @@ const (
 	podSecurityPolicyPrivilegedClusterRoleName = "e2e-test-privileged-psp"
 )

-// PodExec wraps RunKubectl to execute a bash cmd in target pod
-func PodExec(pod *v1.Pod, bashExec string) (string, error) {
-	return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--", "/bin/sh", "-c", bashExec)
+// PodExec runs f.ExecCommandInContainerWithFullOutput to execute a shell cmd in target pod
+func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, error) {
+	stdout, _, err := f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
+	return stdout, err
 }

-// VerifyExecInPodSucceed verifies bash cmd in target pod succeed
-func VerifyExecInPodSucceed(pod *v1.Pod, bashExec string) {
-	_, err := PodExec(pod, bashExec)
+// VerifyExecInPodSucceed verifies shell cmd in target pod succeed
+func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
+	_, err := PodExec(f, pod, shExec)
 	if err != nil {
 		if err, ok := err.(uexec.CodeExitError); ok {
 			exitCode := err.ExitStatus()
 			framework.ExpectNoError(err,
 				"%q should succeed, but failed with exit code %d and error message %q",
-				bashExec, exitCode, err)
+				shExec, exitCode, err)
 		} else {
 			framework.ExpectNoError(err,
 				"%q should succeed, but failed with error message %q",
-				bashExec, err)
+				shExec, err)
 		}
 	}
 }

-// VerifyExecInPodFail verifies bash cmd in target pod fail with certain exit code
-func VerifyExecInPodFail(pod *v1.Pod, bashExec string, exitCode int) {
-	_, err := PodExec(pod, bashExec)
+// VerifyExecInPodFail verifies shell cmd in target pod fail with certain exit code
+func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
+	_, err := PodExec(f, pod, shExec)
 	if err != nil {
-		if err, ok := err.(uexec.CodeExitError); ok {
+		if err, ok := err.(clientexec.ExitError); ok {
 			actualExitCode := err.ExitStatus()
 			framework.ExpectEqual(actualExitCode, exitCode,
 				"%q should fail with exit code %d, but failed with exit code %d and error message %q",
-				bashExec, exitCode, actualExitCode, err)
+				shExec, exitCode, actualExitCode, err)
 		} else {
 			framework.ExpectNoError(err,
 				"%q should fail with exit code %d, but failed with error message %q",
-				bashExec, exitCode, err)
+				shExec, exitCode, err)
 		}
 	}
-	framework.ExpectError(err, "%q should fail with exit code %d, but exit without error", bashExec, exitCode)
+	framework.ExpectError(err, "%q should fail with exit code %d, but exit without error", shExec, exitCode)
 }

 func isSudoPresent(nodeIP string, provider string) bool {
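A side effect worth noting, visible in VerifyExecInPodFail above: a failed kubectl subprocess surfaced as uexec.CodeExitError, whereas client-go's remote executor returns a clientexec.ExitError, hence the new import and the changed type assertion. A minimal sketch of recovering the exit code under the new scheme (the helper name exitCodeOf is illustrative, not part of this change):

    // exitCodeOf returns the remote command's exit code when err came from
    // client-go's remote exec; ok is false for transport and other errors.
    func exitCodeOf(err error) (code int, ok bool) {
        if exitErr, isExit := err.(clientexec.ExitError); isExit && exitErr.Exited() {
            return exitErr.ExitStatus(), true
        }
        return 0, false
    }

Note that VerifyExecInPodSucceed still asserts on uexec.CodeExitError in this commit; only the Fail variant switched to clientexec.ExitError.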
@@ -236,13 +238,13 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
 	seed := time.Now().UTC().UnixNano()

 	ginkgo.By("Writing to the volume.")
-	CheckWriteToPath(clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)
+	CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)

 	ginkgo.By("Restarting kubelet")
 	KubeletCommand(KRestart, c, clientPod)

 	ginkgo.By("Testing that written file is accessible.")
-	CheckReadFromPath(clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)
+	CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)

 	framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, path)
 }
@@ -254,13 +256,13 @@ func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Frame
 	seed := time.Now().UTC().UnixNano()

 	ginkgo.By("Writing to the volume.")
-	CheckWriteToPath(clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)
+	CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)

 	ginkgo.By("Restarting kubelet")
 	KubeletCommand(KRestart, c, clientPod)

 	ginkgo.By("Testing that written pv is accessible.")
-	CheckReadFromPath(clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)
+	CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)

 	framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, path)
 }
@@ -594,46 +596,46 @@ func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
 }

 // CheckVolumeModeOfPath check mode of volume
-func CheckVolumeModeOfPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
+func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
 	if volMode == v1.PersistentVolumeBlock {
 		// Check if block exists
-		VerifyExecInPodSucceed(pod, fmt.Sprintf("test -b %s", path))
+		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -b %s", path))

 		// Double check that it's not directory
-		VerifyExecInPodFail(pod, fmt.Sprintf("test -d %s", path), 1)
+		VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", path), 1)
 	} else {
 		// Check if directory exists
-		VerifyExecInPodSucceed(pod, fmt.Sprintf("test -d %s", path))
+		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path))

 		// Double check that it's not block
-		VerifyExecInPodFail(pod, fmt.Sprintf("test -b %s", path), 1)
+		VerifyExecInPodFail(f, pod, fmt.Sprintf("test -b %s", path), 1)
 	}
 }

 // CheckReadWriteToPath check that path can be read and written
-func CheckReadWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
+func CheckReadWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
 	if volMode == v1.PersistentVolumeBlock {
 		// random -> file1
-		VerifyExecInPodSucceed(pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
+		VerifyExecInPodSucceed(f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
 		// file1 -> dev (write to dev)
-		VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
+		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
 		// dev -> file2 (read from dev)
-		VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
+		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
 		// file1 == file2 (check contents)
-		VerifyExecInPodSucceed(pod, "diff /tmp/file1 /tmp/file2")
+		VerifyExecInPodSucceed(f, pod, "diff /tmp/file1 /tmp/file2")
 		// Clean up temp files
-		VerifyExecInPodSucceed(pod, "rm -f /tmp/file1 /tmp/file2")
+		VerifyExecInPodSucceed(f, pod, "rm -f /tmp/file1 /tmp/file2")

 		// Check that writing file to block volume fails
-		VerifyExecInPodFail(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
+		VerifyExecInPodFail(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
 	} else {
 		// text -> file1 (write to file)
-		VerifyExecInPodSucceed(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
+		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
 		// grep file1 (read from file and check contents)
-		VerifyExecInPodSucceed(pod, fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path))
+		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path))

 		// Check that writing to directory as block volume fails
-		VerifyExecInPodFail(pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
+		VerifyExecInPodFail(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
 	}
 }

@@ -651,7 +653,7 @@ func genBinDataFromSeed(len int, seed int64) []byte {
 }

 // CheckReadFromPath validate that file can be properly read.
-func CheckReadFromPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
+func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
 	var pathForVolMode string
 	if volMode == v1.PersistentVolumeBlock {
 		pathForVolMode = path
@@ -661,12 +663,12 @@ func CheckReadFromPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string

 	sum := sha256.Sum256(genBinDataFromSeed(len, seed))

-	VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum", pathForVolMode, len))
-	VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, len, sum))
+	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum", pathForVolMode, len))
+	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, len, sum))
 }

 // CheckWriteToPath that file can be properly written.
-func CheckWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
+func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
 	var pathForVolMode string
 	if volMode == v1.PersistentVolumeBlock {
 		pathForVolMode = path
@@ -676,8 +678,8 @@ func CheckWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string,

 	encoded := base64.StdEncoding.EncodeToString(genBinDataFromSeed(len, seed))

-	VerifyExecInPodSucceed(pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
-	VerifyExecInPodSucceed(pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s bs=%d count=1", encoded, pathForVolMode, len))
+	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
+	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s bs=%d count=1", encoded, pathForVolMode, len))
 }

 // findMountPoints returns all mount points on given node under specified directory.
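CheckWriteToPath and CheckReadFromPath cooperate through the (len, seed) pair: both sides regenerate the identical pseudo-random block, so the writer streams it into the volume via base64 and dd while the reader verifies it by recomputing a sha256 inside the pod, and no bulk data ever leaves the container. A sketch of the idea; genBinDataFromSeed is reconstructed here as a plausible implementation, since its body is not part of this diff:

    package main

    import (
        "crypto/sha256"
        "fmt"
        "math/rand"
    )

    // genBinDataFromSeed returns length bytes of deterministic pseudo-random
    // data: the same (length, seed) pair always yields the same bytes.
    func genBinDataFromSeed(length int, seed int64) []byte {
        data := make([]byte, length)
        rand.New(rand.NewSource(seed)).Read(data)
        return data
    }

    func main() {
        // The writer pipes these bytes in with `echo <base64> | base64 -d | dd`;
        // the reader checks them with `dd if=<path> | sha256sum | grep -Fq <sum>`.
        sum := sha256.Sum256(genBinDataFromSeed(1024, 42))
        fmt.Printf("expected sha256: %x\n", sum)
    }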
@@ -108,7 +108,7 @@ var _ = utils.SIGDescribe("Volumes", func() {
 				ExpectedContent: "this is the second file",
 			},
 		}
-		volume.TestVolumeClient(cs, config, nil, "" /* fsType */, tests)
+		volume.TestVolumeClient(f, config, nil, "" /* fsType */, tests)
 		})
 	})
 })
@@ -99,10 +99,10 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
 	framework.ExpectNoError(err)

 	ginkgo.By("Checking if PV exists as expected volume mode")
-	utils.CheckVolumeModeOfPath(t.pod, block, devicePath)
+	utils.CheckVolumeModeOfPath(f, t.pod, block, devicePath)

 	ginkgo.By("Checking if read/write to PV works properly")
-	utils.CheckReadWriteToPath(t.pod, block, devicePath)
+	utils.CheckReadWriteToPath(f, t.pod, block, devicePath)
 }

 // Test waits for the downgrade to complete, and then verifies that a pod can no
@@ -112,7 +112,7 @@ func (t *VolumeModeDowngradeTest) Test(f *framework.Framework, done <-chan struc
 	<-done

 	ginkgo.By("Verifying that nothing exists at the device path in the pod")
-	utils.VerifyExecInPodFail(t.pod, fmt.Sprintf("test -e %s", devicePath), 1)
+	utils.VerifyExecInPodFail(f, t.pod, fmt.Sprintf("test -e %s", devicePath), 1)
 }

 // Teardown cleans up any remaining resources.