storage e2e: refactor snapshottable
During PR review it was pointed out that the branches for ephemeral vs. persistent volumes make the test harder to read. Therefore all code that depends on those if checks is moved into two different versions of the test, one that runs for ephemeral volumes and one for persistent volumes, with skip statements at the beginning.
parent 5462d97e62
commit c0bdf14942
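The resulting structure is roughly the sketch below. This is a simplified illustration of the skip-based split, assuming the surrounding suite context visible in the diff (the `pattern` variable, the shared `init()` helper, and the ginkgo/e2eskipper/storageframework packages); it is not the literal code of the commit.

ginkgo.It("... (ephemeral)", func() {
	// Run only for generic ephemeral volumes; skip otherwise.
	if pattern.VolType != storageframework.GenericEphemeralVolume {
		e2eskipper.Skipf("volume type %q is not ephemeral", pattern.VolType)
	}
	init()
	// Ephemeral flow: take the snapshot while the pod is still running and
	// skip content validation, because data flushing is not guaranteed.
})

ginkgo.It("... (persistent)", func() {
	// Run only for persistent volumes; skip the ephemeral case.
	if pattern.VolType == storageframework.GenericEphemeralVolume {
		e2eskipper.Skipf("volume type %q is ephemeral", pattern.VolType)
	}
	init()
	// Persistent flow: stop the pod, wait until the node no longer lists the
	// volume in node.Status.VolumesInUse, then snapshot, restore, and validate.
})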
@@ -29,7 +29,6 @@ import (
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-helpers/storage/ephemeral"
@@ -119,6 +118,8 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
dc dynamic.Interface
pvc *v1.PersistentVolumeClaim
sc *storagev1.StorageClass
volumeResource *storageframework.VolumeResource
pod *v1.Pod
claimSize string
originalMntTestData string
)
@@ -134,110 +135,21 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
config, driverCleanup = driver.PrepareTest(f)
cleanupSteps = append(cleanupSteps, driverCleanup)

var volumeResource *storageframework.VolumeResource
cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(volumeResource.CleanupResource())
})
volumeResource = storageframework.CreateVolumeResource(dDriver, config, pattern, s.GetTestSuiteInfo().SupportedSizeRange)

pvcName := ""
pvcNamespace := f.Namespace.Name

ginkgo.By("[init] starting a pod to use the claim")
originalMntTestData = fmt.Sprintf("hello from %s namespace", pvcNamespace)
originalMntTestData = fmt.Sprintf("hello from %s namespace", f.Namespace.Name)
command := fmt.Sprintf("echo '%s' > %s", originalMntTestData, datapath)

pod := StartInPodWithVolumeSource(cs, *volumeResource.VolSource, f.Namespace.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
if pattern.VolType == storageframework.GenericEphemeralVolume {
// We can test snapshotting of generic ephemeral volumes. It's just a bit more complicated:
// - we need to start the pod
// - wait for the pod to stop
// - don't delete the pod because once it is marked for deletion,
// the PVC also gets marked and snapshotting no longer works
// (even when a finalizer prevents actual removal of the PVC)
// - skip data validation because data flushing wasn't guaranteed
// (see comments below)
cleanupSteps = append(cleanupSteps, func() {
e2epod.DeletePodWithWait(cs, pod)
})
} else {
defer e2epod.DeletePodWithWait(cs, pod)
}
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow))
pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "check pod after it terminated")

// Get new copy of the claim
ginkgo.By("[init] checking the claim")
if pattern.VolType == storageframework.GenericEphemeralVolume {
pvcName = ephemeral.VolumeClaimName(pod, &pod.Spec.Volumes[0])
} else {
pvcName = volumeResource.Pvc.Name
}

err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvcNamespace, pvcName, framework.Poll, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)

pvc, err = cs.CoreV1().PersistentVolumeClaims(pvcNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
framework.ExpectNoError(err, "get PVC")
claimSize = pvc.Spec.Resources.Requests.Storage().String()
sc = volumeResource.Sc

// Get the bound PV
ginkgo.By("[init] checking the PV")
pv, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
framework.ExpectNoError(err)

if pattern.VolType == storageframework.GenericEphemeralVolume {
return
}

ginkgo.By("[init] deleting the pod")
StopPod(cs, pod)

// At this point we know that:
// - a pod was created with a PV that's supposed to have data
//
// However there's a caching issue that @jinxu97 explained and it's related with the pod & volume
// lifecycle, to understand it we first analyze what the volumemanager does:
// - when a pod is delete the volumemanager will try to cleanup the volume mounts
// - NodeUnpublishVolume: unbinds the bind mount from the container
// - Linux: the bind mount is removed, which does not flush any cache
// - Windows: we delete a symlink, data's not flushed yet to disk
// - NodeUnstageVolume: unmount the global mount
// - Linux: disk is unmounted and all caches flushed.
// - Windows: data is flushed to disk and the disk is detached
//
// Pod deletion might not guarantee a data flush to disk, however NodeUnstageVolume adds the logic
// to flush the data to disk (see #81690 for details). We need to wait for NodeUnstageVolume, as
// NodeUnpublishVolume only removes the bind mount, which doesn't force the caches to flush.
// It's possible to create empty snapshots if we don't wait (see #101279 for details).
//
// In the following code by checking if the PV is not in the node.Status.VolumesInUse field we
// ensure that the volume is not used by the node anymore (an indicator that NodeUnstageVolume has
// already finished)
nodeName := pod.Spec.NodeName
gomega.Expect(nodeName).NotTo(gomega.BeEmpty(), "pod.Spec.NodeName must not be empty")

// Snapshot tests are only executed for CSI drivers. When CSI drivers
// are attached to the node they use VolumeHandle instead of the pv.Name.
volumeName := pv.Spec.PersistentVolumeSource.CSI.VolumeHandle

ginkgo.By(fmt.Sprintf("[init] waiting until the node=%s is not using the volume=%s", nodeName, volumeName))
success := storageutils.WaitUntil(framework.Poll, f.Timeouts.PVDelete, func() bool {
node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
volumesInUse := node.Status.VolumesInUse
framework.Logf("current volumes in use: %+v", volumesInUse)
for i := 0; i < len(volumesInUse); i++ {
if strings.HasSuffix(string(volumesInUse[i]), volumeName) {
return false
}
}
return true
pod = StartInPodWithVolumeSource(cs, *volumeResource.VolSource, f.Namespace.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
cleanupSteps = append(cleanupSteps, func() {
e2epod.DeletePodWithWait(cs, pod)
})
framework.ExpectEqual(success, true)

// At this point a pod is running with a PVC. How to proceed depends on which test is running.
}

cleanup := func() {
@@ -255,32 +167,57 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
}

}
ginkgo.BeforeEach(func() {
init()
})
ginkgo.AfterEach(func() {
cleanup()
})

ginkgo.Context("", func() {
var (
vs *unstructured.Unstructured
vscontent *unstructured.Unstructured
vsc *unstructured.Unstructured
)
ginkgo.It("should check snapshot fields, check restore correctly works, check deletion (ephemeral)", func() {
if pattern.VolType != storageframework.GenericEphemeralVolume {
e2eskipper.Skipf("volume type %q is not ephemeral", pattern.VolType)
}
init()

ginkgo.BeforeEach(func() {
var sr *storageframework.SnapshotResource
// We can test snapshotting of generic
// ephemeral volumes by creating the snapshot
// while the pod is running (online). We cannot do it after pod deletion,
// because then the PVC also gets marked and snapshotting no longer works
// (even when a finalizer prevents actual removal of the PVC).
//
// Because data consistency cannot be
// guaranteed, this flavor of the test doesn't
// check the content of the snapshot.

framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow))
pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "check pod after it terminated")

// Get new copy of the claim
ginkgo.By("[init] checking the claim")
pvcName := ephemeral.VolumeClaimName(pod, &pod.Spec.Volumes[0])
pvcNamespace := pod.Namespace

parameters := map[string]string{}
sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvcName, pvcNamespace, f.Timeouts, parameters)
cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
})
parameters := map[string]string{}
sr = storageframework.CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace(), f.Timeouts, parameters)
vs = sr.Vs
vscontent = sr.Vscontent
vsc = sr.Vsclass
})
ginkgo.It("should check snapshot fields, check restore correctly works after modifying source data, check deletion", func() {
vs := sr.Vs
vsc := sr.Vsclass

err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvcNamespace, pvcName, framework.Poll, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)

pvc, err = cs.CoreV1().PersistentVolumeClaims(pvcNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
framework.ExpectNoError(err, "get PVC")
claimSize = pvc.Spec.Resources.Requests.Storage().String()
sc = volumeResource.Sc

// Get the bound PV
ginkgo.By("[init] checking the PV")
_, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
framework.ExpectNoError(err)

// Get new copy of the snapshot
ginkgo.By("checking the snapshot")
vs, err = dc.Resource(storageutils.SnapshotGVR).Namespace(vs.GetNamespace()).Get(context.TODO(), vs.GetName(), metav1.GetOptions{})
@@ -289,7 +226,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
// Get the bound snapshotContent
snapshotStatus := vs.Object["status"].(map[string]interface{})
snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
vscontent, err = dc.Resource(storageutils.SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
vscontent, err := dc.Resource(storageutils.SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
framework.ExpectNoError(err)

snapshotContentSpec := vscontent.Object["spec"].(map[string]interface{})
@@ -307,16 +244,6 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
framework.ExpectEqual(volumeSnapshotRef["name"], vs.GetName())
framework.ExpectEqual(volumeSnapshotRef["namespace"], vs.GetNamespace())

if pattern.VolType != storageframework.GenericEphemeralVolume {
ginkgo.By("Modifying source data test")
modifiedMntTestData := fmt.Sprintf("modified data from %s namespace", pvc.GetNamespace())

ginkgo.By("modifying the data in the source PVC")

command := fmt.Sprintf("echo '%s' > %s", modifiedMntTestData, datapath)
RunInPodWithVolume(cs, f.Timeouts, pvc.Namespace, pvc.Name, "pvc-snapshottable-data-tester", command, config.ClientNodeSelection)
}

ginkgo.By("creating a pvc from the snapshot")
restoredPVC = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: claimSize,
@@ -331,33 +258,13 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
Name: vs.GetName(),
}

if pattern.VolType != storageframework.GenericEphemeralVolume {
restoredPVC, err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Create(context.TODO(), restoredPVC, metav1.CreateOptions{})
framework.ExpectNoError(err)
cleanupSteps = append(cleanupSteps, func() {
framework.Logf("deleting claim %q/%q", restoredPVC.Namespace, restoredPVC.Name)
// typically this claim has already been deleted
err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Delete(context.TODO(), restoredPVC.Name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error deleting claim %q. Error: %v", restoredPVC.Name, err)
}
})
}

ginkgo.By("starting a pod to use the snapshot")
volSrc := v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: restoredPVC.Name,
},
}
if pattern.VolType == storageframework.GenericEphemeralVolume {
volSrc = v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: restoredPVC.Spec,
},
Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: restoredPVC.Spec,
},
}
},
}

restoredPod = StartInPodWithVolumeSource(cs, volSrc, restoredPVC.Namespace, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection)
@@ -378,15 +285,192 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
// Snapshot deletion and some are opposite.
err = storageutils.DeleteSnapshotWithoutWaiting(dc, vs.GetNamespace(), vs.GetName())
framework.ExpectNoError(err)
if pattern.VolType != storageframework.GenericEphemeralVolume {
framework.Logf("deleting restored pod %q/%q", restoredPod.Namespace, restoredPod.Name)
err = cs.CoreV1().Pods(restoredPod.Namespace).Delete(context.TODO(), restoredPod.Name, metav1.DeleteOptions{})

// Wait for the Snapshot to be actually deleted from API server
err = storageutils.WaitForNamespacedGVRDeletion(dc, storageutils.SnapshotGVR, vs.GetNamespace(), vs.GetNamespace(), framework.Poll, f.Timeouts.SnapshotDelete)
framework.ExpectNoError(err)

switch pattern.SnapshotDeletionPolicy {
case storageframework.DeleteSnapshot:
ginkgo.By("checking the SnapshotContent has been deleted")
err = utils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
framework.ExpectNoError(err)
framework.Logf("deleting restored PVC %q/%q", restoredPVC.Namespace, restoredPVC.Name)
case storageframework.RetainSnapshot:
ginkgo.By("checking the SnapshotContent has not been deleted")
err = utils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), 1*time.Second /* poll */, 30*time.Second /* timeout */)
framework.ExpectError(err)
}
})

ginkgo.It("should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)", func() {
if pattern.VolType == storageframework.GenericEphemeralVolume {
e2eskipper.Skipf("volume type %q is ephemeral", pattern.VolType)
}
init()

framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow))
pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "check pod after it terminated")

// Get new copy of the claim
ginkgo.By("[init] checking the claim")
pvcName := volumeResource.Pvc.Name
pvcNamespace := volumeResource.Pvc.Namespace

parameters := map[string]string{}
sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvcName, pvcNamespace, f.Timeouts, parameters)
cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
})
vs := sr.Vs
vsc := sr.Vsclass

err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvcNamespace, pvcName, framework.Poll, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)

pvc, err = cs.CoreV1().PersistentVolumeClaims(pvcNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
framework.ExpectNoError(err, "get PVC")
claimSize = pvc.Spec.Resources.Requests.Storage().String()
sc = volumeResource.Sc

// Get the bound PV
ginkgo.By("[init] checking the PV")
pv, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
framework.ExpectNoError(err)

ginkgo.By("[init] deleting the pod")
StopPod(cs, pod)

// At this point we know that:
// - a pod was created with a PV that's supposed to have data
//
// However there's a caching issue that @jinxu97 explained and it's related with the pod & volume
// lifecycle, to understand it we first analyze what the volumemanager does:
// - when a pod is delete the volumemanager will try to cleanup the volume mounts
// - NodeUnpublishVolume: unbinds the bind mount from the container
// - Linux: the bind mount is removed, which does not flush any cache
// - Windows: we delete a symlink, data's not flushed yet to disk
// - NodeUnstageVolume: unmount the global mount
// - Linux: disk is unmounted and all caches flushed.
// - Windows: data is flushed to disk and the disk is detached
//
// Pod deletion might not guarantee a data flush to disk, however NodeUnstageVolume adds the logic
// to flush the data to disk (see #81690 for details). We need to wait for NodeUnstageVolume, as
// NodeUnpublishVolume only removes the bind mount, which doesn't force the caches to flush.
// It's possible to create empty snapshots if we don't wait (see #101279 for details).
//
// In the following code by checking if the PV is not in the node.Status.VolumesInUse field we
// ensure that the volume is not used by the node anymore (an indicator that NodeUnstageVolume has
// already finished)
nodeName := pod.Spec.NodeName
gomega.Expect(nodeName).NotTo(gomega.BeEmpty(), "pod.Spec.NodeName must not be empty")

// Snapshot tests are only executed for CSI drivers. When CSI drivers
// are attached to the node they use VolumeHandle instead of the pv.Name.
volumeName := pv.Spec.PersistentVolumeSource.CSI.VolumeHandle

ginkgo.By(fmt.Sprintf("[init] waiting until the node=%s is not using the volume=%s", nodeName, volumeName))
success := storageutils.WaitUntil(framework.Poll, f.Timeouts.PVDelete, func() bool {
node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
volumesInUse := node.Status.VolumesInUse
framework.Logf("current volumes in use: %+v", volumesInUse)
for i := 0; i < len(volumesInUse); i++ {
if strings.HasSuffix(string(volumesInUse[i]), volumeName) {
return false
}
}
return true
})
if !success {
framework.Failf("timed out waiting for node=%s to not use the volume=%s", nodeName, volumeName)
}

// Get new copy of the snapshot
ginkgo.By("checking the snapshot")
vs, err = dc.Resource(storageutils.SnapshotGVR).Namespace(vs.GetNamespace()).Get(context.TODO(), vs.GetName(), metav1.GetOptions{})
framework.ExpectNoError(err)

// Get the bound snapshotContent
snapshotStatus := vs.Object["status"].(map[string]interface{})
snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
vscontent, err := dc.Resource(storageutils.SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
framework.ExpectNoError(err)

snapshotContentSpec := vscontent.Object["spec"].(map[string]interface{})
volumeSnapshotRef := snapshotContentSpec["volumeSnapshotRef"].(map[string]interface{})

var restoredPVC *v1.PersistentVolumeClaim
var restoredPod *v1.Pod

// Check SnapshotContent properties
ginkgo.By("checking the SnapshotContent")
// PreprovisionedCreatedSnapshot do not need to set volume snapshot class name
if pattern.SnapshotType != storageframework.PreprovisionedCreatedSnapshot {
framework.ExpectEqual(snapshotContentSpec["volumeSnapshotClassName"], vsc.GetName())
}
framework.ExpectEqual(volumeSnapshotRef["name"], vs.GetName())
framework.ExpectEqual(volumeSnapshotRef["namespace"], vs.GetNamespace())

ginkgo.By("Modifying source data test")
modifiedMntTestData := fmt.Sprintf("modified data from %s namespace", pvc.GetNamespace())

ginkgo.By("modifying the data in the source PVC")

command := fmt.Sprintf("echo '%s' > %s", modifiedMntTestData, datapath)
RunInPodWithVolume(cs, f.Timeouts, pvc.Namespace, pvc.Name, "pvc-snapshottable-data-tester", command, config.ClientNodeSelection)

ginkgo.By("creating a pvc from the snapshot")
restoredPVC = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: claimSize,
StorageClassName: &(sc.Name),
}, config.Framework.Namespace.Name)

group := "snapshot.storage.k8s.io"

restoredPVC.Spec.DataSource = &v1.TypedLocalObjectReference{
APIGroup: &group,
Kind: "VolumeSnapshot",
Name: vs.GetName(),
}

restoredPVC, err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Create(context.TODO(), restoredPVC, metav1.CreateOptions{})
framework.ExpectNoError(err)
cleanupSteps = append(cleanupSteps, func() {
framework.Logf("deleting claim %q/%q", restoredPVC.Namespace, restoredPVC.Name)
// typically this claim has already been deleted
err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Delete(context.TODO(), restoredPVC.Name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error deleting claim %q. Error: %v", restoredPVC.Name, err)
}
})

ginkgo.By("starting a pod to use the snapshot")
restoredPod = StartInPodWithVolume(cs, restoredPVC.Namespace, restoredPVC.Name, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection)
cleanupSteps = append(cleanupSteps, func() {
StopPod(cs, restoredPod)
})
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow))
if pattern.VolType != storageframework.GenericEphemeralVolume {
commands := e2evolume.GenerateReadFileCmd(datapath)
_, err = framework.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute)
framework.ExpectNoError(err)
}

ginkgo.By("should delete the VolumeSnapshotContent according to its deletion policy")

// Delete both Snapshot and restored Pod/PVC at the same time because different storage systems
// have different ordering of deletion. Some may require delete the restored PVC first before
// Snapshot deletion and some are opposite.
err = storageutils.DeleteSnapshotWithoutWaiting(dc, vs.GetNamespace(), vs.GetName())
framework.ExpectNoError(err)
framework.Logf("deleting restored pod %q/%q", restoredPod.Namespace, restoredPod.Name)
err = cs.CoreV1().Pods(restoredPod.Namespace).Delete(context.TODO(), restoredPod.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
framework.Logf("deleting restored PVC %q/%q", restoredPVC.Namespace, restoredPVC.Name)
err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Delete(context.TODO(), restoredPVC.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)

// Wait for the Snapshot to be actually deleted from API server
err = storageutils.WaitForNamespacedGVRDeletion(dc, storageutils.SnapshotGVR, vs.GetNamespace(), vs.GetNamespace(), framework.Poll, f.Timeouts.SnapshotDelete)