Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #96330 from jingxu97/nov/snapshot
Modify storage snapshottable test for Windows
This commit is contained in: commit 2e55fa82df
@@ -466,7 +466,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsTy
 } else {
 // Filesystem: check content
 fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
-commands := generateReadFileCmd(fileName)
+commands := GenerateReadFileCmd(fileName)
 _, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
 framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
 
@@ -608,9 +608,9 @@ func generateWriteBlockCmd(content, fullPath string) []string {
 return generateWriteCmd(content, fullPath)
 }
 
-// generateReadFileCmd generates the corresponding command lines to read from a file with the given file path.
+// GenerateReadFileCmd generates the corresponding command lines to read from a file with the given file path.
 // Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
-func generateReadFileCmd(fullPath string) []string {
+func GenerateReadFileCmd(fullPath string) []string {
 var commands []string
 if !framework.NodeOSDistroIs("windows") {
 commands = []string{"cat", fullPath}
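The hunk above is cut off before the helper's Windows branch. For context, here is a self-contained sketch of what the exported helper plausibly looks like after this change; the Linux "cat" branch is taken from the hunk, while the powershell form of the Windows read command and the stubbed OS check are assumptions made so the sketch compiles on its own.

package main

import "fmt"

// nodeOSDistroIs stands in for framework.NodeOSDistroIs so this sketch is
// self-contained; in the e2e framework it reflects the --node-os-distro flag.
func nodeOSDistroIs(distro string) bool { return distro == "windows" }

// GenerateReadFileCmd mirrors the exported helper in the diff above: it builds
// the command used to read a file inside the test pod. The Windows branch
// shown here (powershell "type") is an assumption, not copied from the diff.
func GenerateReadFileCmd(fullPath string) []string {
	var commands []string
	if !nodeOSDistroIs("windows") {
		commands = []string{"cat", fullPath}
	} else {
		commands = []string{"powershell", "/c", "type " + fullPath}
	}
	return commands
}

func main() {
	fmt.Println(GenerateReadFileCmd("/mnt/test/data"))
}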
@@ -41,7 +41,7 @@ func InitDisruptiveTestSuite() TestSuite {
 return &disruptiveTestSuite{
 tsInfo: TestSuiteInfo{
 Name: "disruptive",
-FeatureTag: "[Disruptive]",
+FeatureTag: "[Disruptive][LinuxOnly]",
 TestPatterns: []testpatterns.TestPattern{
 // FSVolMode is already covered in subpath testsuite
 testpatterns.DefaultFsInlineVolume,
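Adding [LinuxOnly] to FeatureTag keeps the disruptive suite out of Windows runs: the tag becomes part of the generated Ginkgo spec text, which CI jobs filter with a skip regex such as --ginkgo.skip='\[LinuxOnly\]'. A minimal sketch of that filtering idea follows; the spec-name format and the test name are illustrative placeholders, not the framework's exact strings.

package main

import (
	"fmt"
	"regexp"
)

// specText is an illustrative stand-in for how the e2e framework folds a
// suite's FeatureTag into the Ginkgo spec description; only the bracketed-tag
// convention matters here.
func specText(suiteName, featureTag, testName string) string {
	return fmt.Sprintf("[Testpattern: Inline-volume (default fs)] %s%s %s", suiteName, featureTag, testName)
}

func main() {
	name := specText("disruptive", "[Disruptive][LinuxOnly]", "example disruptive test name")
	// Windows jobs typically skip anything matching this pattern.
	skip := regexp.MustCompile(`\[LinuxOnly\]`)
	fmt.Println(name)
	fmt.Println("skipped on Windows jobs:", skip.MatchString(name)) // true once the tag is added
}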
@@ -166,6 +166,7 @@ func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpattern
 InlineVolumeSources: inlineSources,
 SeLinuxLabel: e2epv.SELinuxLabel,
 NodeSelection: l.config.ClientNodeSelection,
+ImageID: getTestImage(),
 }
 l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, &podConfig, framework.PodStartTimeout)
 framework.ExpectNoError(err, "While creating pods for kubelet restart test")
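Setting ImageID: getTestImage() lets the pod config pick an image that exists for the node's OS instead of hard-coding a Linux-only one. The helper itself is not shown in this diff; the sketch below only approximates it, and the signature, image names, and tags are assumptions.

package main

import "fmt"

// nodeOSDistroIs stands in for framework.NodeOSDistroIs so the sketch is
// self-contained.
func nodeOSDistroIs(distro string) bool { return distro == "windows" }

// getTestImage approximates the helper referenced in the hunk above: return a
// Windows-capable e2e image on Windows nodes and busybox otherwise. The
// concrete image references here are assumptions, not taken from this diff.
func getTestImage() string {
	if nodeOSDistroIs("windows") {
		// agnhost is built for both Linux and Windows, so it is a common
		// choice for cross-OS e2e pods.
		return "k8s.gcr.io/e2e-test-images/agnhost:2.21"
	}
	return "docker.io/library/busybox:1.29"
}

func main() {
	fmt.Println(getTestImage())
}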
@@ -48,6 +48,9 @@ const snapshotGroup = "snapshot.storage.k8s.io"
 // snapshot CRD api version
 const snapshotAPIVersion = "snapshot.storage.k8s.io/v1beta1"
 
+// data file name
+const datapath = "/mnt/test/data"
+
 var (
 // SnapshotGVR is GroupVersionResource for volumesnapshots
 SnapshotGVR = schema.GroupVersionResource{Group: snapshotGroup, Version: "v1beta1", Resource: "volumesnapshots"}
@@ -151,13 +154,12 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
 
 ginkgo.By("starting a pod to use the claim")
 originalMntTestData = fmt.Sprintf("hello from %s namespace", pvc.GetNamespace())
-command := fmt.Sprintf("echo '%s' > /mnt/test/data", originalMntTestData)
+command := fmt.Sprintf("echo '%s' > %s", originalMntTestData, datapath)
 
 RunInPodWithVolume(cs, pvc.Namespace, pvc.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
 
 err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
 framework.ExpectNoError(err)
 
 ginkgo.By("checking the claim")
 // Get new copy of the claim
 pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
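With the shared datapath constant, the command that seeds the volume, the later modify-write, and the read-back check all refer to the same file by construction. A small self-contained illustration of how the write command is composed; the namespace string is a placeholder, since in the test it comes from pvc.GetNamespace().

package main

import "fmt"

// datapath mirrors the constant introduced earlier in this change.
const datapath = "/mnt/test/data"

func main() {
	// Placeholder namespace for illustration only.
	originalMntTestData := fmt.Sprintf("hello from %s namespace", "snapshotting-1234")

	// Both the seed-write and the later modify-write target datapath, so the
	// read-back after restoring the snapshot inspects the same file.
	command := fmt.Sprintf("echo '%s' > %s", originalMntTestData, datapath)
	fmt.Println(command) // echo 'hello from snapshotting-1234 namespace' > /mnt/test/data
}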
@@ -239,7 +241,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
 
 ginkgo.By("modifying the data in the source PVC")
 
-command := fmt.Sprintf("echo '%s' > /mnt/test/data", modifiedMntTestData)
+command := fmt.Sprintf("echo '%s' > %s", modifiedMntTestData, datapath)
 RunInPodWithVolume(cs, pvc.Namespace, pvc.Name, "pvc-snapshottable-data-tester", command, config.ClientNodeSelection)
 
 ginkgo.By("creating a pvc from the snapshot")
@@ -274,11 +276,9 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
 StopPod(cs, restoredPod)
 })
 framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(cs, restoredPod.Name, restoredPod.Namespace))
-command = "cat /mnt/test/data"
-actualData, stderr, err := utils.PodExec(f, restoredPod, command)
-framework.ExpectNoError(err, "command %q: stdout: %s\nstderr: %s", command, actualData, stderr)
-framework.ExpectEqual(actualData, originalMntTestData)
+commands := e2evolume.GenerateReadFileCmd(datapath)
+_, err = framework.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute)
+framework.ExpectNoError(err)
 
 ginkgo.By("should delete the VolumeSnapshotContent according to its deletion policy")
 err = DeleteAndWaitSnapshot(dc, vs.GetNamespace(), vs.GetName(), framework.Poll, framework.SnapshotDeleteTimeout)
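The restored-volume check now builds the read command with GenerateReadFileCmd and polls for the expected content via framework.LookForStringInPodExec, instead of running utils.PodExec with a Linux-only "cat" and comparing stdout exactly; that makes the check work on Windows nodes and tolerant of transient exec failures. Below is a minimal sketch of the look-for-string-with-timeout pattern; it illustrates the idea only and is not the framework's implementation, which roughly retries "kubectl exec" and searches its output.

package main

import (
	"errors"
	"fmt"
	"strings"
	"time"
)

// lookForString polls fn until its output contains expected or the timeout
// expires. It mirrors the shape of framework.LookForStringInPodExec but is
// only an illustration.
func lookForString(expected string, timeout time.Duration, fn func() string) (string, error) {
	deadline := time.Now().Add(timeout)
	for {
		out := fn()
		if strings.Contains(out, expected) {
			return out, nil
		}
		if time.Now().After(deadline) {
			return out, errors.New("timed out waiting for expected string: " + expected)
		}
		time.Sleep(2 * time.Second) // poll interval chosen arbitrarily for the sketch
	}
}

func main() {
	// In the test, fn would run the pod exec command built by
	// GenerateReadFileCmd (cat on Linux, a powershell read on Windows).
	out, err := lookForString("hello from snapshotting-1234 namespace", 10*time.Second, func() string {
		return "hello from snapshotting-1234 namespace\n"
	})
	fmt.Printf("%q %v\n", out, err)
}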