Move some exec helper functions from framework/volume to framework/pod

carlory 2025-01-14 16:06:59 +08:00
parent ccd2b4e8a7
commit 8b4eae24ab
13 changed files with 153 additions and 165 deletions
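For callers, the change is mechanical: the exec helper moves packages, gains a context parameter, and takes the pod name instead of a *v1.Pod. A minimal sketch of the new call shape, using only signatures that appear in this diff (execInPod itself is a hypothetical wrapper, not part of the commit):

package sketch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// execInPod runs a shell command in the pod via the framework helper and
// returns stdout, stderr, and any exec error.
func execInPod(ctx context.Context, f *framework.Framework, pod *v1.Pod, cmd string) (string, string, error) {
	// Pre-move equivalent, removed by this commit:
	//   e2evolume.PodExec(f, pod, cmd)
	return e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, cmd)
}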

View File

@@ -19,6 +19,7 @@ package pod
import (
"bytes"
"context"
"errors"
"io"
"net/url"
"strings"
@@ -28,6 +29,7 @@ import (
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
clientexec "k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
"github.com/onsi/gomega"
@@ -141,6 +143,43 @@ func ExecShellInPodWithFullOutput(ctx context.Context, f *framework.Framework, p
return execCommandInPodWithFullOutput(ctx, f, podName, "/bin/sh", "-c", cmd)
}
// VerifyExecInPodSucceed verifies that the shell cmd in the target pod succeeds
func VerifyExecInPodSucceed(ctx context.Context, f *framework.Framework, pod *v1.Pod, shExec string) {
stdout, stderr, err := ExecShellInPodWithFullOutput(ctx, f, pod.Name, shExec)
if err != nil {
var exitError clientexec.CodeExitError
if errors.As(err, &exitError) {
exitCode := exitError.ExitStatus()
framework.ExpectNoError(err,
"%q should succeed, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, exitError, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should succeed, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, err, stdout, stderr)
}
}
}
// VerifyExecInPodFail verifies that the shell cmd in the target pod fails with the given exit code
func VerifyExecInPodFail(ctx context.Context, f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
stdout, stderr, err := ExecShellInPodWithFullOutput(ctx, f, pod.Name, shExec)
if err != nil {
var exitError clientexec.CodeExitError
if errors.As(err, &exitError) {
actualExitCode := exitError.ExitStatus()
gomega.Expect(actualExitCode).To(gomega.Equal(exitCode),
"%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, actualExitCode, exitError, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should fail with exit code %d, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, err, stdout, stderr)
}
}
gomega.Expect(err).To(gomega.HaveOccurred(), "%q should fail with exit code %d, but exited without error", shExec, exitCode)
}
func execute(ctx context.Context, method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {
exec, err := remotecommand.NewSPDYExecutor(config, method, url)
if err != nil {

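The moved helpers are typically used in pairs: assert that one shell probe succeeds while its complement fails with a known exit code. A minimal usage sketch mirroring how CheckVolumeModeOfPath (later in this diff) uses them; checkBlockDevice is illustrative, not part of the commit:

package sketch

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// checkBlockDevice asserts that path is a block device and not a directory.
func checkBlockDevice(ctx context.Context, f *framework.Framework, pod *v1.Pod, path string) {
	// `test -b` exits 0 only for a block device.
	e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("test -b %s", path))
	// `test -d` must fail with exit code 1: the path is not a directory.
	e2epod.VerifyExecInPodFail(ctx, f, pod, fmt.Sprintf("test -d %s", path), 1)
}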
View File

@@ -53,14 +53,12 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
uexec "k8s.io/utils/exec"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
@@ -179,18 +177,18 @@ func NewNFSServerWithNodeName(ctx context.Context, cs clientset.Interface, names
// Restart the passed-in nfs-server by issuing a `rpc.nfsd 1` command in the
// pod's (only) container. This command changes the number of nfs server threads from
// (presumably) zero back to 1, and therefore allows nfs to open connections again.
func RestartNFSServer(f *framework.Framework, serverPod *v1.Pod) {
func RestartNFSServer(ctx context.Context, f *framework.Framework, serverPod *v1.Pod) {
const startcmd = "rpc.nfsd 1"
_, _, err := PodExec(f, serverPod, startcmd)
_, _, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, serverPod.Name, startcmd)
framework.ExpectNoError(err)
}
// Stop the passed-in nfs-server by issuing a `rpc.nfsd 0` command in the
// pod's (only) container. This command changes the number of nfs server threads to 0,
// thus closing all open nfs connections.
func StopNFSServer(f *framework.Framework, serverPod *v1.Pod) {
func StopNFSServer(ctx context.Context, f *framework.Framework, serverPod *v1.Pod) {
const stopcmd = "rpc.nfsd 0 && for i in $(seq 200); do rpcinfo -p | grep -q nfs || break; sleep 1; done"
_, _, err := PodExec(f, serverPod, stopcmd)
_, _, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, serverPod.Name, stopcmd)
framework.ExpectNoError(err)
}
@@ -501,7 +499,7 @@ func runVolumeTesterPod(ctx context.Context, client clientset.Interface, timeout
return clientPod, nil
}
func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string, fsGroup *int64, fsType string, tests []Test) {
func testVolumeContent(ctx context.Context, f *framework.Framework, pod *v1.Pod, containerName string, fsGroup *int64, fsType string, tests []Test) {
ginkgo.By("Checking that text file contents are perfect.")
for i, test := range tests {
if test.Mode == v1.PersistentVolumeBlock {
@@ -512,7 +510,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string
framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)
// Check that it's a real block device
CheckVolumeModeOfPath(f, pod, test.Mode, deviceName)
CheckVolumeModeOfPath(ctx, f, pod, test.Mode, deviceName)
} else {
// Filesystem: check content
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
@@ -522,7 +520,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string
// Check that a directory has been mounted
dirName := filepath.Dir(fileName)
CheckVolumeModeOfPath(f, pod, test.Mode, dirName)
CheckVolumeModeOfPath(ctx, f, pod, test.Mode, dirName)
if !framework.NodeOSDistroIs("windows") {
// Filesystem: check fsgroup
@@ -576,7 +574,7 @@ func testVolumeClient(ctx context.Context, f *framework.Framework, config TestCo
framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, clientPod.Name, clientPod.Namespace, timeouts.PodDelete))
}()
testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)
testVolumeContent(ctx, f, clientPod, "", fsGroup, fsType, tests)
ginkgo.By("Repeating the test on an ephemeral container (if enabled)")
ec := &v1.EphemeralContainer{
@@ -587,7 +585,7 @@ func testVolumeClient(ctx context.Context, f *framework.Framework, config TestCo
err = e2epod.NewPodClient(f).AddEphemeralContainerSync(ctx, clientPod, ec, timeouts.PodStart)
// The API server will return NotFound for the subresource when the feature is disabled
framework.ExpectNoError(err, "failed to add ephemeral container for re-test")
testVolumeContent(f, clientPod, ec.Name, fsGroup, fsType, tests)
testVolumeContent(ctx, f, clientPod, ec.Name, fsGroup, fsType, tests)
}
// InjectContent inserts index.html with given content into given volume. It does so by
@@ -630,7 +628,7 @@ func InjectContent(ctx context.Context, f *framework.Framework, config TestConfi
// Check that the data has really been written in this pod.
// This tests non-persistent volume types
testVolumeContent(f, injectorPod, "", fsGroup, fsType, tests)
testVolumeContent(ctx, f, injectorPod, "", fsGroup, fsType, tests)
}
// generateWriteCmd is used by generateWriteBlockCmd and generateWriteFileCmd
@@ -665,64 +663,18 @@ func generateWriteFileCmd(content, fullPath string) []string {
}
// CheckVolumeModeOfPath checks the volume mode of the given path
func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
func CheckVolumeModeOfPath(ctx context.Context, f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// Check if block exists
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -b %s", path))
e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("test -b %s", path))
// Double check that it's not directory
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", path), 1)
e2epod.VerifyExecInPodFail(ctx, f, pod, fmt.Sprintf("test -d %s", path), 1)
} else {
// Check if directory exists
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path))
e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("test -d %s", path))
// Double check that it's not block
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -b %s", path), 1)
e2epod.VerifyExecInPodFail(ctx, f, pod, fmt.Sprintf("test -b %s", path), 1)
}
}
// PodExec runs f.ExecCommandInContainerWithFullOutput to execute a shell cmd in target pod
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be a dependency issue.
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
return e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
}
// VerifyExecInPodSucceed verifies that the shell cmd in the target pod succeeds
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be a dependency issue.
func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {
if exiterr, ok := err.(uexec.CodeExitError); ok {
exitCode := exiterr.ExitStatus()
framework.ExpectNoError(err,
"%q should succeed, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, exiterr, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should succeed, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, err, stdout, stderr)
}
}
}
// VerifyExecInPodFail verifies that the shell cmd in the target pod fails with the given exit code
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be a dependency issue.
func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {
if exiterr, ok := err.(clientexec.ExitError); ok {
actualExitCode := exiterr.ExitStatus()
gomega.Expect(actualExitCode).To(gomega.Equal(exitCode),
"%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, actualExitCode, exiterr, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should fail with exit code %d, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, err, stdout, stderr)
}
}
gomega.Expect(err).To(gomega.HaveOccurred(), "%q should fail with exit code %d, but exited without error", shExec, exitCode)
}

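Note that the move also modernizes the error handling: the removed copies type-asserted directly (err.(uexec.CodeExitError) in VerifyExecInPodSucceed, clientexec.ExitError in VerifyExecInPodFail), while the new copies match clientexec.CodeExitError via errors.As, which also sees through wrapped errors. A standalone sketch of the new pattern (the simulated error value is illustrative):

package main

import (
	"errors"
	"fmt"

	clientexec "k8s.io/client-go/util/exec"
)

func main() {
	// Simulate what a remote exec might return, wrapped once for context.
	var err error = fmt.Errorf("running probe: %w",
		clientexec.CodeExitError{Err: errors.New("command terminated"), Code: 2})

	// errors.As unwraps the chain; a plain type assertion on err would miss
	// the wrapped CodeExitError.
	var exitErr clientexec.CodeExitError
	if errors.As(err, &exitErr) {
		fmt.Println("exit code:", exitErr.ExitStatus()) // exit code: 2
	}
}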
View File

@@ -421,7 +421,7 @@ var _ = SIGDescribe("kubelet", func() {
pod = createPodUsingNfs(ctx, f, c, ns, nfsIP, t.podCmd)
ginkgo.By("Stop the NFS server")
e2evolume.StopNFSServer(f, nfsServerPod)
e2evolume.StopNFSServer(ctx, f, nfsServerPod)
ginkgo.By("Delete the pod mounted to the NFS volume -- expect failure")
err := e2epod.DeletePodWithWait(ctx, c, pod)
@@ -432,7 +432,7 @@ var _ = SIGDescribe("kubelet", func() {
checkPodCleanup(ctx, c, pod, false)
ginkgo.By("Restart the nfs server")
e2evolume.RestartNFSServer(f, nfsServerPod)
e2evolume.RestartNFSServer(ctx, f, nfsServerPod)
ginkgo.By("Verify that the deleted client pod is now cleaned up")
checkPodCleanup(ctx, c, pod, true)

View File

@@ -28,7 +28,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils"
admissionapi "k8s.io/pod-security-admission/api"
)
@@ -166,27 +165,27 @@ func waitUtilFSGroupInPod(ctx context.Context, m *mockDriverSetup, modified bool
// Create the subdirectory to ensure that fsGroup propagates
createDirectory := fmt.Sprintf("mkdir %s", dirName)
_, _, err = e2evolume.PodExec(m.f, pod, createDirectory)
_, _, err = e2epod.ExecShellInPodWithFullOutput(ctx, m.f, pod.Name, createDirectory)
framework.ExpectNoError(err, "failed: creating the directory: %s", err)
// Inject the contents onto the mount
createFile := fmt.Sprintf("echo '%s' > '%s'; sync", "filecontents", fileName)
_, _, err = e2evolume.PodExec(m.f, pod, createFile)
_, _, err = e2epod.ExecShellInPodWithFullOutput(ctx, m.f, pod.Name, createFile)
framework.ExpectNoError(err, "failed: writing the contents: %s", err)
// Delete the created file. This step is mandatory, as the mock driver
// won't clean up the contents automatically.
defer func() {
deleteDir := fmt.Sprintf("rm -fr %s", dirName)
_, _, err = e2evolume.PodExec(m.f, pod, deleteDir)
_, _, err = e2epod.ExecShellInPodWithFullOutput(ctx, m.f, pod.Name, deleteDir)
framework.ExpectNoError(err, "failed: deleting the directory: %s", err)
}()
// Ensure that the fsGroup matches what we expect
if modified {
utils.VerifyFSGroupInPod(m.f, fileName, strconv.FormatInt(*fsGroup, 10), pod)
utils.VerifyFSGroupInPod(ctx, m.f, fileName, strconv.FormatInt(*fsGroup, 10), pod)
} else {
utils.VerifyFSGroupInPod(m.f, fileName, "root", pod)
utils.VerifyFSGroupInPod(ctx, m.f, fileName, "root", pod)
}
})

View File

@@ -28,7 +28,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@@ -270,7 +269,7 @@ var _ = utils.SIGDescribe("HostPathType Character Device", framework.WithSlow(),
targetCharDev = path.Join(hostBaseDir, "achardev")
ginkgo.By("Create a character device for further testing")
cmd := fmt.Sprintf("mknod %s c 89 1", path.Join(mountBaseDir, "achardev"))
stdout, stderr, err := e2evolume.PodExec(f, basePod, cmd)
stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, basePod.Name, cmd)
framework.ExpectNoError(err, "command: %q, stdout: %s\nstderr: %s", cmd, stdout, stderr)
})
@@ -340,7 +339,7 @@ var _ = utils.SIGDescribe("HostPathType Block Device", framework.WithSlow(), fun
targetBlockDev = path.Join(hostBaseDir, "ablkdev")
ginkgo.By("Create a block device for further testing")
cmd := fmt.Sprintf("mknod %s b 89 1", path.Join(mountBaseDir, "ablkdev"))
stdout, stderr, err := e2evolume.PodExec(f, basePod, cmd)
stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, basePod.Name, cmd)
framework.ExpectNoError(err, "command %q: stdout: %s\nstderr: %s", cmd, stdout, stderr)
})

View File

@@ -48,7 +48,6 @@ import (
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@@ -232,7 +231,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local", func() {
writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
ginkgo.By("Writing in pod1")
podRWCmdExec(f, pod1, writeCmd)
podRWCmdExec(ctx, f, pod1, writeCmd)
})
ginkgo.AfterEach(func(ctx context.Context) {
@@ -243,16 +242,16 @@ var _ = utils.SIGDescribe("PersistentVolumes-local", func() {
ginkgo.It("should be able to mount volume and read from pod1", func(ctx context.Context) {
ginkgo.By("Reading in pod1")
// testFileContent was written in BeforeEach
testReadFileContent(f, volumeDir, testFile, testFileContent, pod1, testVolType)
testReadFileContent(ctx, f, volumeDir, testFile, testFileContent, pod1, testVolType)
})
ginkgo.It("should be able to mount volume and write from pod1", func(ctx context.Context) {
// testFileContent was written in BeforeEach
testReadFileContent(f, volumeDir, testFile, testFileContent, pod1, testVolType)
testReadFileContent(ctx, f, volumeDir, testFile, testFileContent, pod1, testVolType)
ginkgo.By("Writing in pod1")
writeCmd := createWriteCmd(volumeDir, testFile, testVol.ltr.Path /*writeTestFileContent*/, testVolType)
podRWCmdExec(f, pod1, writeCmd)
podRWCmdExec(ctx, f, pod1, writeCmd)
})
})
@@ -685,10 +684,10 @@ func twoPodsReadWriteTest(ctx context.Context, f *framework.Framework, config *l
writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
ginkgo.By("Writing in pod1")
podRWCmdExec(f, pod1, writeCmd)
podRWCmdExec(ctx, f, pod1, writeCmd)
// testFileContent was written after creating pod1
testReadFileContent(f, volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)
testReadFileContent(ctx, f, volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)
ginkgo.By("Creating pod2 to read from the PV")
pod2, pod2Err := createLocalPod(ctx, config, testVol, nil)
@@ -696,15 +695,15 @@ func twoPodsReadWriteTest(ctx context.Context, f *framework.Framework, config *l
verifyLocalPod(ctx, config, testVol, pod2, config.randomNode.Name)
// testFileContent was written after creating pod1
testReadFileContent(f, volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)
testReadFileContent(ctx, f, volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)
writeCmd = createWriteCmd(volumeDir, testFile, testVol.ltr.Path /*writeTestFileContent*/, testVol.localVolumeType)
ginkgo.By("Writing in pod2")
podRWCmdExec(f, pod2, writeCmd)
podRWCmdExec(ctx, f, pod2, writeCmd)
ginkgo.By("Reading in pod1")
testReadFileContent(f, volumeDir, testFile, testVol.ltr.Path, pod1, testVol.localVolumeType)
testReadFileContent(ctx, f, volumeDir, testFile, testVol.ltr.Path, pod1, testVol.localVolumeType)
ginkgo.By("Deleting pod1")
e2epod.DeletePodOrFail(ctx, config.client, config.ns, pod1.Name)
@@ -722,10 +721,10 @@ func twoPodsReadWriteSerialTest(ctx context.Context, f *framework.Framework, con
writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
ginkgo.By("Writing in pod1")
podRWCmdExec(f, pod1, writeCmd)
podRWCmdExec(ctx, f, pod1, writeCmd)
// testFileContent was written after creating pod1
testReadFileContent(f, volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)
testReadFileContent(ctx, f, volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)
ginkgo.By("Deleting pod1")
e2epod.DeletePodOrFail(ctx, config.client, config.ns, pod1.Name)
@@ -736,7 +735,7 @@ func twoPodsReadWriteSerialTest(ctx context.Context, f *framework.Framework, con
verifyLocalPod(ctx, config, testVol, pod2, config.randomNode.Name)
ginkgo.By("Reading in pod2")
testReadFileContent(f, volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)
testReadFileContent(ctx, f, volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)
ginkgo.By("Deleting pod2")
e2epod.DeletePodOrFail(ctx, config.client, config.ns, pod2.Name)
@@ -1022,16 +1021,16 @@ func createReadCmd(testFileDir string, testFile string, volumeType localVolumeTy
}
// Read testFile and evaluate whether it contains the testFileContent
func testReadFileContent(f *framework.Framework, testFileDir string, testFile string, testFileContent string, pod *v1.Pod, volumeType localVolumeType) {
func testReadFileContent(ctx context.Context, f *framework.Framework, testFileDir string, testFile string, testFileContent string, pod *v1.Pod, volumeType localVolumeType) {
readCmd := createReadCmd(testFileDir, testFile, volumeType)
readOut := podRWCmdExec(f, pod, readCmd)
readOut := podRWCmdExec(ctx, f, pod, readCmd)
gomega.Expect(readOut).To(gomega.ContainSubstring(testFileContent))
}
// Execute a read or write command in a pod.
// Fail on error
func podRWCmdExec(f *framework.Framework, pod *v1.Pod, cmd string) string {
stdout, stderr, err := e2evolume.PodExec(f, pod, cmd)
func podRWCmdExec(ctx context.Context, f *framework.Framework, pod *v1.Pod, cmd string) string {
stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, cmd)
framework.Logf("podRWCmdExec cmd: %q, out: %q, stderr: %q, err: %v", cmd, stdout, stderr, err)
framework.ExpectNoError(err)
return stdout

View File

@@ -194,7 +194,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
// attempt to create a dummy file and expect it not to be created
command = "ls /mnt/test* && (touch /mnt/test-0/hello-world || true) && [ ! -f /mnt/test-0/hello-world ]"
}
e2evolume.VerifyExecInPodSucceed(f, pod, command)
e2epod.VerifyExecInPodSucceed(ctx, f, pod, command)
return nil
}
l.testCase.TestEphemeral(ctx)
@@ -214,7 +214,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
if pattern.VolMode == v1.PersistentVolumeBlock {
command = "if ! [ -b /mnt/test-0 ]; then echo /mnt/test-0 is not a block device; exit 1; fi"
}
e2evolume.VerifyExecInPodSucceed(f, pod, command)
e2epod.VerifyExecInPodSucceed(ctx, f, pod, command)
return nil
}
l.testCase.TestEphemeral(ctx)
@@ -308,8 +308,8 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
// visible in the other.
if pattern.VolMode != v1.PersistentVolumeBlock && !readOnly && !shared {
ginkgo.By("writing data in one pod and checking the second does not see it (it should get its own volume)")
e2evolume.VerifyExecInPodSucceed(f, pod, "touch /mnt/test-0/hello-world")
e2evolume.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
e2epod.VerifyExecInPodSucceed(ctx, f, pod, "touch /mnt/test-0/hello-world")
e2epod.VerifyExecInPodSucceed(ctx, f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
}
// TestEphemeral expects the pod to be fully deleted

View File

@@ -250,12 +250,12 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageframework.TestD
// Change the ownership of files in the initial pod.
if test.changedRootDirFileOwnership != 0 {
ginkgo.By(fmt.Sprintf("Changing the root directory file ownership to %s", strconv.Itoa(test.changedRootDirFileOwnership)))
storageutils.ChangeFilePathGidInPod(f, rootDirFilePath, strconv.Itoa(test.changedRootDirFileOwnership), pod)
storageutils.ChangeFilePathGidInPod(ctx, f, rootDirFilePath, strconv.Itoa(test.changedRootDirFileOwnership), pod)
}
if test.changedSubDirFileOwnership != 0 {
ginkgo.By(fmt.Sprintf("Changing the sub-directory file ownership to %s", strconv.Itoa(test.changedSubDirFileOwnership)))
storageutils.ChangeFilePathGidInPod(f, subDirFilePath, strconv.Itoa(test.changedSubDirFileOwnership), pod)
storageutils.ChangeFilePathGidInPod(ctx, f, subDirFilePath, strconv.Itoa(test.changedSubDirFileOwnership), pod)
}
ginkgo.By(fmt.Sprintf("Deleting Pod %s/%s", pod.Namespace, pod.Name))
@@ -281,24 +281,24 @@ func createPodAndVerifyContentGid(ctx context.Context, f *framework.Framework, p
ginkgo.By(fmt.Sprintf("Creating a sub-directory and file, and verifying their ownership is %s", podFsGroup))
cmd := fmt.Sprintf("touch %s", rootDirFilePath)
var err error
_, _, err = e2evolume.PodExec(f, pod, cmd)
_, _, err = e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, cmd)
framework.ExpectNoError(err)
storageutils.VerifyFilePathGidInPod(f, rootDirFilePath, podFsGroup, pod)
storageutils.VerifyFilePathGidInPod(ctx, f, rootDirFilePath, podFsGroup, pod)
cmd = fmt.Sprintf("mkdir %s", subdir)
_, _, err = e2evolume.PodExec(f, pod, cmd)
_, _, err = e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, cmd)
framework.ExpectNoError(err)
cmd = fmt.Sprintf("touch %s", subDirFilePath)
_, _, err = e2evolume.PodExec(f, pod, cmd)
_, _, err = e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, cmd)
framework.ExpectNoError(err)
storageutils.VerifyFilePathGidInPod(f, subDirFilePath, podFsGroup, pod)
storageutils.VerifyFilePathGidInPod(ctx, f, subDirFilePath, podFsGroup, pod)
return pod
}
// Verify existing contents of the volume
ginkgo.By(fmt.Sprintf("Verifying the ownership of root directory file is %s", expectedRootDirFileOwnership))
storageutils.VerifyFilePathGidInPod(f, rootDirFilePath, expectedRootDirFileOwnership, pod)
storageutils.VerifyFilePathGidInPod(ctx, f, rootDirFilePath, expectedRootDirFileOwnership, pod)
ginkgo.By(fmt.Sprintf("Verifying the ownership of sub directory file is %s", expectedSubDirFileOwnership))
storageutils.VerifyFilePathGidInPod(f, subDirFilePath, expectedSubDirFileOwnership, pod)
storageutils.VerifyFilePathGidInPod(ctx, f, subDirFilePath, expectedSubDirFileOwnership, pod)
return pod
}

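The GID checks used here (VerifyFilePathGidInPod, and VerifyFSGroupInPod in the utils file further down) work by running `ls -l` in the pod and picking the group field out of the output with strings.Fields(stdout)[3]. A self-contained sketch of that parsing (groupOf is illustrative, not part of the commit):

package main

import (
	"fmt"
	"strings"
)

// groupOf extracts the group owner from one line of `ls -l` output.
// ls -l layout: mode, link count, owner, group, size, date..., name.
func groupOf(lsLine string) (string, error) {
	fields := strings.Fields(lsLine)
	if len(fields) < 4 {
		return "", fmt.Errorf("unexpected ls -l output: %q", lsLine)
	}
	return fields[3], nil
}

func main() {
	group, err := groupOf("-rw-r--r-- 1 root 1000 12 Jan 14 16:06 /mnt/volume1/file1.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(group) // 1000
}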
View File

@@ -499,18 +499,18 @@ func testAccessMultipleVolumes(ctx context.Context, f *framework.Framework, cs c
index := i + 1
path := fmt.Sprintf("/mnt/volume%d", index)
ginkgo.By(fmt.Sprintf("Checking if the volume%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
e2evolume.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)
e2evolume.CheckVolumeModeOfPath(ctx, f, pod, *pvc.Spec.VolumeMode, path)
if readSeedBase > 0 {
ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
storageutils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, readSeedBase+int64(i))
storageutils.CheckReadFromPath(ctx, f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, readSeedBase+int64(i))
}
ginkgo.By(fmt.Sprintf("Checking if write to the volume%d works properly", index))
storageutils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, writeSeedBase+int64(i))
storageutils.CheckWriteToPath(ctx, f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, writeSeedBase+int64(i))
ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
storageutils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, writeSeedBase+int64(i))
storageutils.CheckReadFromPath(ctx, f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, writeSeedBase+int64(i))
}
pod, err = cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
@@ -599,7 +599,7 @@ func TestConcurrentAccessToSingleVolume(ctx context.Context, f *framework.Framew
framework.Failf("Number of pods shouldn't be less than 1, but got %d", len(pods))
}
// byteLen should be the size of a sector to enable direct I/O
byteLen = storageutils.GetSectorSize(f, pods[0], path)
byteLen = storageutils.GetSectorSize(ctx, f, pods[0], path)
directIO = true
}
@@ -607,7 +607,7 @@ func TestConcurrentAccessToSingleVolume(ctx context.Context, f *framework.Framew
for i, pod := range pods {
index := i + 1
ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
e2evolume.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)
e2evolume.CheckVolumeModeOfPath(ctx, f, pod, *pvc.Spec.VolumeMode, path)
if readOnly {
ginkgo.By("Skipping volume content checks, volume is read-only")
@@ -617,17 +617,17 @@ func TestConcurrentAccessToSingleVolume(ctx context.Context, f *framework.Framew
if i != 0 {
ginkgo.By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d write works properly", index, index-1))
// For 1st pod, no one has written data yet, so pass the read check
storageutils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
storageutils.CheckReadFromPath(ctx, f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
}
// Update the seed and check if write/read works properly
seed = time.Now().UTC().UnixNano()
ginkgo.By(fmt.Sprintf("Checking if write to the volume in pod%d works properly", index))
storageutils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
storageutils.CheckWriteToPath(ctx, f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
ginkgo.By(fmt.Sprintf("Checking if read from the volume in pod%d works properly", index))
storageutils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
storageutils.CheckReadFromPath(ctx, f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
}
if len(pods) < 2 {
@@ -643,7 +643,7 @@ func TestConcurrentAccessToSingleVolume(ctx context.Context, f *framework.Framew
index := i + 1
// index of pod and index of pvc match, because pods are created in the order above
ginkgo.By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
e2evolume.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, "/mnt/volume1")
e2evolume.CheckVolumeModeOfPath(ctx, f, pod, *pvc.Spec.VolumeMode, "/mnt/volume1")
if readOnly {
ginkgo.By("Skipping volume content checks, volume is read-only")
@@ -656,16 +656,16 @@ func TestConcurrentAccessToSingleVolume(ctx context.Context, f *framework.Framew
} else {
ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that pod%d write works properly", index, index-1))
}
storageutils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
storageutils.CheckReadFromPath(ctx, f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
// Update the seed and check if write/read works properly
seed = time.Now().UTC().UnixNano()
ginkgo.By(fmt.Sprintf("Rechecking if write to the volume in pod%d works properly", index))
storageutils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
storageutils.CheckWriteToPath(ctx, f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
ginkgo.By(fmt.Sprintf("Rechecking if read from the volume in pod%d works properly", index))
storageutils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
storageutils.CheckReadFromPath(ctx, f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
}
}

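The GetSectorSize call above exists because dd's direct I/O (used by CheckReadFromPath and CheckWriteToPath in the utils file below) must transfer whole sectors on block devices; the test sidesteps alignment by setting byteLen to exactly one sector. A hypothetical general-purpose rounding helper, for illustration only:

package sketch

// alignForDirectIO rounds n up to the next multiple of sectorSize so a
// dd transfer with iflag=direct/oflag=direct stays sector-aligned.
// Illustrative; the suite avoids the problem by using one full sector.
func alignForDirectIO(n, sectorSize int) int {
	if rem := n % sectorSize; rem != 0 {
		return n + sectorSize - rem
	}
	return n
}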
View File

@@ -243,11 +243,11 @@ func makePodSpec(config e2evolume.TestConfig, initCmd string, volsrc v1.VolumeSo
}
// Write `fsize` bytes to `fpath` in the pod, using dd and the `ddInput` file.
func writeToFile(f *framework.Framework, pod *v1.Pod, fpath, ddInput string, fsize int64) error {
func writeToFile(ctx context.Context, f *framework.Framework, pod *v1.Pod, fpath, ddInput string, fsize int64) error {
ginkgo.By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath))
loopCnt := fsize / storageframework.MinFileSize
writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, ddInput, storageframework.MinFileSize, fpath)
stdout, stderr, err := e2evolume.PodExec(f, pod, writeCmd)
stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, writeCmd)
if err != nil {
return fmt.Errorf("error writing to volume using %q: %s\nstdout: %s\nstderr: %s", writeCmd, err, stdout, stderr)
}
@@ -255,9 +255,9 @@ func writeToFile(f *framework.Framework, pod *v1.Pod, fpath, ddInput string, fsi
}
// Verify that the test file is the expected size and contains the expected content.
func verifyFile(f *framework.Framework, pod *v1.Pod, fpath string, expectSize int64, ddInput string) error {
func verifyFile(ctx context.Context, f *framework.Framework, pod *v1.Pod, fpath string, expectSize int64, ddInput string) error {
ginkgo.By("verifying file size")
rtnstr, stderr, err := e2evolume.PodExec(f, pod, fmt.Sprintf("stat -c %%s %s", fpath))
rtnstr, stderr, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, fmt.Sprintf("stat -c %%s %s", fpath))
if err != nil || rtnstr == "" {
return fmt.Errorf("unable to get file size via `stat %s`: %v\nstdout: %s\nstderr: %s", fpath, err, rtnstr, stderr)
}
@@ -270,14 +270,14 @@ func verifyFile(f *framework.Framework, pod *v1.Pod, fpath string, expectSize in
}
ginkgo.By("verifying file hash")
rtnstr, stderr, err = e2evolume.PodExec(f, pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
rtnstr, stderr, err = e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
if err != nil {
return fmt.Errorf("unable to test file hash via `md5sum %s`: %v\nstdout: %s\nstderr: %s", fpath, err, rtnstr, stderr)
}
actualHash := strings.TrimSuffix(rtnstr, "\n")
expectedHash, ok := md5hashes[expectSize]
if !ok {
return fmt.Errorf("File hash is unknown for file size %d. Was a new file size added to the test suite?",
return fmt.Errorf("file hash is unknown for file size %d. Was a new file size added to the test suite?",
expectSize)
}
if actualHash != expectedHash {
@@ -289,9 +289,9 @@ func verifyFile(f *framework.Framework, pod *v1.Pod, fpath string, expectSize in
}
// Delete `fpath` to save some disk space on host. Delete errors are logged but ignored.
func deleteFile(f *framework.Framework, pod *v1.Pod, fpath string) {
func deleteFile(ctx context.Context, f *framework.Framework, pod *v1.Pod, fpath string) {
ginkgo.By(fmt.Sprintf("deleting test file %s...", fpath))
stdout, stderr, err := e2evolume.PodExec(f, pod, fmt.Sprintf("rm -f %s", fpath))
stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, fmt.Sprintf("rm -f %s", fpath))
if err != nil {
// keep going, the test dir will be deleted when the volume is unmounted
framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test\nstdout: %s\nstderr: %s", fpath, err, stdout, stderr)
@@ -323,7 +323,7 @@ func testVolumeIO(ctx context.Context, f *framework.Framework, cs clientset.Inte
return fmt.Errorf("failed to create client pod %q: %w", clientPod.Name, err)
}
ginkgo.DeferCleanup(func(ctx context.Context) {
deleteFile(f, clientPod, ddInput)
deleteFile(ctx, f, clientPod, ddInput)
ginkgo.By(fmt.Sprintf("deleting client pod %q...", clientPod.Name))
e := e2epod.DeletePodWithWait(ctx, cs, clientPod)
if e != nil {
@@ -350,12 +350,12 @@ func testVolumeIO(ctx context.Context, f *framework.Framework, cs clientset.Inte
}
fpath := filepath.Join(mountPath, fmt.Sprintf("%s-%d", file, fsize))
defer func() {
deleteFile(f, clientPod, fpath)
deleteFile(ctx, f, clientPod, fpath)
}()
if err = writeToFile(f, clientPod, fpath, ddInput, fsize); err != nil {
if err = writeToFile(ctx, f, clientPod, fpath, ddInput, fsize); err != nil {
return err
}
if err = verifyFile(f, clientPod, fpath, fsize, ddInput); err != nil {
if err = verifyFile(ctx, f, clientPod, fpath, fsize, ddInput); err != nil {
return err
}
}

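verifyFile compares the pod-side `md5sum <file> | cut -d' ' -f1` output against a table of precomputed hashes keyed by file size. The same digest can be reproduced locally like this (a minimal sketch, not the suite's hash table):

package main

import (
	"crypto/md5"
	"fmt"
)

// hexMD5 produces the same hex digest that `md5sum` prints for the same
// bytes, so an expected value can be precomputed per deterministic file.
func hexMD5(data []byte) string {
	return fmt.Sprintf("%x", md5.Sum(data))
}

func main() {
	fmt.Println(hexMD5([]byte("hello\n"))) // b1946ac92492d2347c6235b4d2611184
}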
View File

@@ -64,9 +64,9 @@ const (
)
// VerifyFSGroupInPod verifies that the passed in filePath contains the expectedFSGroup
func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string, pod *v1.Pod) {
func VerifyFSGroupInPod(ctx context.Context, f *framework.Framework, filePath, expectedFSGroup string, pod *v1.Pod) {
cmd := fmt.Sprintf("ls -l %s", filePath)
stdout, stderr, err := e2evolume.PodExec(f, pod, cmd)
stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, cmd)
framework.ExpectNoError(err)
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
fsGroupResult := strings.Fields(stdout)[3]
@@ -91,7 +91,7 @@ func TestKubeletRestartsAndRestoresMount(ctx context.Context, c clientset.Interf
seed := time.Now().UTC().UnixNano()
ginkgo.By("Writing to the volume.")
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
CheckWriteToPath(ctx, f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
ginkgo.By("Restarting kubelet")
KubeletCommand(ctx, KRestart, c, clientPod)
@@ -100,7 +100,7 @@ func TestKubeletRestartsAndRestoresMount(ctx context.Context, c clientset.Interf
time.Sleep(20 * time.Second)
ginkgo.By("Testing that written file is accessible.")
CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
CheckReadFromPath(ctx, f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, volumePath)
}
@@ -111,7 +111,7 @@ func TestKubeletRestartsAndRestoresMap(ctx context.Context, c clientset.Interfac
seed := time.Now().UTC().UnixNano()
ginkgo.By("Writing to the volume.")
CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)
CheckWriteToPath(ctx, f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)
ginkgo.By("Restarting kubelet")
KubeletCommand(ctx, KRestart, c, clientPod)
@@ -120,7 +120,7 @@ func TestKubeletRestartsAndRestoresMap(ctx context.Context, c clientset.Interfac
time.Sleep(20 * time.Second)
ginkgo.By("Testing that written pv is accessible.")
CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)
CheckReadFromPath(ctx, f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)
framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, volumePath)
}
@@ -151,7 +151,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(ctx context.Context, c clie
ginkgo.By("Writing to the volume.")
byteLen := 64
seed := time.Now().UTC().UnixNano()
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
CheckWriteToPath(ctx, f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
// This command makes sure kubelet is restarted after the test finishes, whether it fails or not.
ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod)
@@ -201,7 +201,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(ctx context.Context, c clie
gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
ginkgo.By("Testing that written file is accessible in the second pod.")
CheckReadFromPath(f, secondPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
CheckReadFromPath(ctx, f, secondPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
err = c.CoreV1().Pods(secondPod.Namespace).Delete(context.TODO(), secondPod.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "when deleting the second pod")
err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, secondPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
@@ -446,28 +446,28 @@ func isSudoPresent(ctx context.Context, nodeIP string, provider string) bool {
}
// CheckReadWriteToPath checks that the path can be read and written
func CheckReadWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
func CheckReadWriteToPath(ctx context.Context, f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// random -> file1
e2evolume.VerifyExecInPodSucceed(f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
e2epod.VerifyExecInPodSucceed(ctx, f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
// file1 -> dev (write to dev)
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
// dev -> file2 (read from dev)
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
// file1 == file2 (check contents)
e2evolume.VerifyExecInPodSucceed(f, pod, "diff /tmp/file1 /tmp/file2")
e2epod.VerifyExecInPodSucceed(ctx, f, pod, "diff /tmp/file1 /tmp/file2")
// Clean up temp files
e2evolume.VerifyExecInPodSucceed(f, pod, "rm -f /tmp/file1 /tmp/file2")
e2epod.VerifyExecInPodSucceed(ctx, f, pod, "rm -f /tmp/file1 /tmp/file2")
// Check that writing file to block volume fails
e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
e2epod.VerifyExecInPodFail(ctx, f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
} else {
// text -> file1 (write to file)
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
// grep file1 (read from file and check contents)
e2evolume.VerifyExecInPodSucceed(f, pod, readFile("Hello world.", path))
e2epod.VerifyExecInPodSucceed(ctx, f, pod, readFile("Hello world.", path))
// Check that writing to directory as block volume fails
e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
e2epod.VerifyExecInPodFail(ctx, f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
}
}
@@ -497,7 +497,7 @@ func genBinDataFromSeed(len int, seed int64) []byte {
// directIO to function correctly, is to read whole sector(s) for Block-mode
// PVCs (normally a sector is 512 bytes), or memory pages for files (commonly
// 4096 bytes).
func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, directIO bool, path string, len int, seed int64) {
func CheckReadFromPath(ctx context.Context, f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, directIO bool, path string, len int, seed int64) {
var pathForVolMode string
var iflag string
@@ -513,8 +513,8 @@ func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persisten
sum := sha256.Sum256(genBinDataFromSeed(len, seed))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", pathForVolMode, iflag, len))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, iflag, len, sum))
e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", pathForVolMode, iflag, len))
e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, iflag, len, sum))
}
// CheckWriteToPath checks that the file can be properly written.
@@ -522,7 +522,7 @@ func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persisten
// Note: nocache does not work with (default) BusyBox Pods. To read without
// caching, enable directIO with CheckReadFromPath and check the hints about
// the len requirements.
func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, nocache bool, path string, len int, seed int64) {
func CheckWriteToPath(ctx context.Context, f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, nocache bool, path string, len int, seed int64) {
var pathForVolMode string
var oflag string
@@ -538,13 +538,13 @@ func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persistent
encoded := base64.StdEncoding.EncodeToString(genBinDataFromSeed(len, seed))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len))
e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len))
}
// GetSectorSize returns the sector size of the device.
func GetSectorSize(f *framework.Framework, pod *v1.Pod, device string) int {
stdout, _, err := e2evolume.PodExec(f, pod, fmt.Sprintf("blockdev --getss %s", device))
func GetSectorSize(ctx context.Context, f *framework.Framework, pod *v1.Pod, device string) int {
stdout, _, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, fmt.Sprintf("blockdev --getss %s", device))
framework.ExpectNoError(err, "Failed to get sector size of %s", device)
ss, err := strconv.Atoi(stdout)
framework.ExpectNoError(err, "Sector size returned by blockdev command isn't integer value.")
@@ -723,9 +723,9 @@ func WaitForGVRFinalizer(ctx context.Context, c dynamic.Interface, gvr schema.Gr
}
// VerifyFilePathGidInPod verifies the expected GID of the target filepath
func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string, pod *v1.Pod) {
func VerifyFilePathGidInPod(ctx context.Context, f *framework.Framework, filePath, expectedGid string, pod *v1.Pod) {
cmd := fmt.Sprintf("ls -l %s", filePath)
stdout, stderr, err := e2evolume.PodExec(f, pod, cmd)
stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, cmd)
framework.ExpectNoError(err)
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
ll := strings.Fields(stdout)
@@ -734,11 +734,11 @@ func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string
}
// ChangeFilePathGidInPod changes the GID of the target filepath.
func ChangeFilePathGidInPod(f *framework.Framework, filePath, targetGid string, pod *v1.Pod) {
func ChangeFilePathGidInPod(ctx context.Context, f *framework.Framework, filePath, targetGid string, pod *v1.Pod) {
cmd := fmt.Sprintf("chgrp %s %s", targetGid, filePath)
_, _, err := e2evolume.PodExec(f, pod, cmd)
_, _, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, cmd)
framework.ExpectNoError(err)
VerifyFilePathGidInPod(f, filePath, targetGid, pod)
VerifyFilePathGidInPod(ctx, f, filePath, targetGid, pod)
}
// DeleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"

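CheckWriteToPath and CheckReadFromPath round-trip deterministic bytes through the pod: the write side ships base64 text that the pod decodes and dd's onto the volume, and the read side greps the pod's sha256sum output for a locally computed digest. A standalone sketch of the command construction (genBinData stands in for the suite's genBinDataFromSeed, whose body is not shown in this hunk, so its shape is assumed):

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"math/rand"
)

// genBinData yields deterministic pseudo-random bytes so writer and reader
// can derive identical expected content from (n, seed) alone. Assumed shape.
func genBinData(n int, seed int64) []byte {
	b := make([]byte, n)
	rand.New(rand.NewSource(seed)).Read(b)
	return b
}

func main() {
	data := genBinData(512, 42)

	// Write side: base64 text decoded in the pod and dd'd to the volume.
	encoded := base64.StdEncoding.EncodeToString(data)
	writeCmd := fmt.Sprintf("echo %s | base64 -d | dd of=/mnt/volume1 bs=%d count=1", encoded, len(data))

	// Read side: grep the pod's sha256sum output for the expected digest.
	sum := sha256.Sum256(data)
	readCmd := fmt.Sprintf("dd if=/mnt/volume1 bs=%d count=1 | sha256sum | grep -Fq %x", len(data), sum)

	fmt.Println(readCmd)
	_ = writeCmd // in the suite, these strings go through e2epod.VerifyExecInPodSucceed
}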
View File

@@ -105,10 +105,10 @@ func (t *VolumeModeDowngradeTest) Setup(ctx context.Context, f *framework.Framew
framework.ExpectNoError(err)
ginkgo.By("Checking if PV exists as expected volume mode")
e2evolume.CheckVolumeModeOfPath(f, t.pod, block, devicePath)
e2evolume.CheckVolumeModeOfPath(ctx, f, t.pod, block, devicePath)
ginkgo.By("Checking if read/write to PV works properly")
storageutils.CheckReadWriteToPath(f, t.pod, block, devicePath)
storageutils.CheckReadWriteToPath(ctx, f, t.pod, block, devicePath)
}
// Test waits for the downgrade to complete, and then verifies that a pod can no
@@ -118,7 +118,7 @@ func (t *VolumeModeDowngradeTest) Test(ctx context.Context, f *framework.Framewo
<-done
ginkgo.By("Verifying that nothing exists at the device path in the pod")
e2evolume.VerifyExecInPodFail(f, t.pod, fmt.Sprintf("test -e %s", devicePath), 1)
e2epod.VerifyExecInPodFail(ctx, f, t.pod, fmt.Sprintf("test -e %s", devicePath), 1)
}
// Teardown cleans up any remaining resources.

View File

@@ -231,7 +231,7 @@ var _ = SIGDescribe("MirrorPod", func() {
}
ginkgo.By("Stopping the NFS server")
e2evolume.StopNFSServer(f, nfsServerPod)
e2evolume.StopNFSServer(ctx, f, nfsServerPod)
ginkgo.By(fmt.Sprintf("Deleting the static nfs test pod: %s", staticPodName))
err = deleteStaticPod(podPath, staticPodName, ns)
@@ -243,7 +243,7 @@ var _ = SIGDescribe("MirrorPod", func() {
}, 5*time.Minute, 10*time.Second).Should(gomega.BeTrueBecause("pod volume should exist while nfs server is stopped"))
ginkgo.By("Start the NFS server")
e2evolume.RestartNFSServer(f, nfsServerPod)
e2evolume.RestartNFSServer(ctx, f, nfsServerPod)
ginkgo.By("Waiting for the pod volume to deleted after the NFS server is started")
gomega.Eventually(func() bool {