storage e2e: refactor snapshottable

During PR review it was pointed out that the branches for ephemeral
vs. persistent volumes make the test harder to read. Therefore all code that
depends on those if checks is moved into two separate versions of the test,
one that runs for ephemeral volumes and one for persistent volumes, each with
a skip statement at the beginning.
Patrick Ohly 2021-10-20 11:29:33 +02:00
parent 5462d97e62
commit c0bdf14942
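
Roughly, the resulting structure looks like this (a minimal sketch with the
test bodies elided; the defineTests wrapper is hypothetical, while ginkgo,
e2eskipper, and storageframework are the packages used in the diff below):

// Each variant gets its own ginkgo.It and skips immediately when the
// pattern under test does not match, instead of branching mid-test.
func defineTests(pattern storageframework.TestPattern) {
	ginkgo.It("... (ephemeral)", func() {
		if pattern.VolType != storageframework.GenericEphemeralVolume {
			e2eskipper.Skipf("volume type %q is not ephemeral", pattern.VolType)
		}
		// ephemeral-only flow: snapshot while the pod is still running
	})
	ginkgo.It("... (persistent)", func() {
		if pattern.VolType == storageframework.GenericEphemeralVolume {
			e2eskipper.Skipf("volume type %q is ephemeral", pattern.VolType)
		}
		// persistent-only flow: stop the pod, then snapshot the PVC
	})
}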


@@ -29,7 +29,6 @@ import (
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-helpers/storage/ephemeral"
@@ -119,6 +118,8 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
dc dynamic.Interface
pvc *v1.PersistentVolumeClaim
sc *storagev1.StorageClass
volumeResource *storageframework.VolumeResource
pod *v1.Pod
claimSize string
originalMntTestData string
)
@@ -134,46 +135,195 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
config, driverCleanup = driver.PrepareTest(f)
cleanupSteps = append(cleanupSteps, driverCleanup)
var volumeResource *storageframework.VolumeResource
cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(volumeResource.CleanupResource())
})
volumeResource = storageframework.CreateVolumeResource(dDriver, config, pattern, s.GetTestSuiteInfo().SupportedSizeRange)
pvcName := ""
pvcNamespace := f.Namespace.Name
ginkgo.By("[init] starting a pod to use the claim")
originalMntTestData = fmt.Sprintf("hello from %s namespace", pvcNamespace)
originalMntTestData = fmt.Sprintf("hello from %s namespace", f.Namespace.Name)
command := fmt.Sprintf("echo '%s' > %s", originalMntTestData, datapath)
pod := StartInPodWithVolumeSource(cs, *volumeResource.VolSource, f.Namespace.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
if pattern.VolType == storageframework.GenericEphemeralVolume {
// We can test snapshotting of generic ephemeral volumes. It's just a bit more complicated:
// - we need to start the pod
// - wait for the pod to stop
// - don't delete the pod because once it is marked for deletion,
// the PVC also gets marked and snapshotting no longer works
// (even when a finalizer prevents actual removal of the PVC)
// - skip data validation because data flushing wasn't guaranteed
// (see comments below)
pod = StartInPodWithVolumeSource(cs, *volumeResource.VolSource, f.Namespace.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
cleanupSteps = append(cleanupSteps, func() {
e2epod.DeletePodWithWait(cs, pod)
})
} else {
defer e2epod.DeletePodWithWait(cs, pod)
// At this point a pod is running with a PVC. How to proceed depends on which test is running.
}
cleanup := func() {
// Don't register an AfterEach then a cleanup step because the order
// of execution will do the AfterEach first then the cleanup step.
// Also AfterEach cleanup registration is not fine grained enough
// Adding to the cleanup steps allows you to register cleanup only when it is needed
// Ideally we could replace this with https://golang.org/pkg/testing/#T.Cleanup
// Depending on how far the test executed, cleanup accordingly
// Execute in reverse order, similar to defer stack
for i := len(cleanupSteps) - 1; i >= 0; i-- {
err := storageutils.TryFunc(cleanupSteps[i])
framework.ExpectNoError(err, "while running cleanup steps")
}
}
ginkgo.AfterEach(func() {
cleanup()
})
ginkgo.Context("", func() {
ginkgo.It("should check snapshot fields, check restore correctly works, check deletion (ephemeral)", func() {
if pattern.VolType != storageframework.GenericEphemeralVolume {
e2eskipper.Skipf("volume type %q is not ephemeral", pattern.VolType)
}
init()
// We can test snapshotting of generic
// ephemeral volumes by creating the snapshot
// while the pod is running (online). We cannot do it after pod deletion,
// because then the PVC also gets marked and snapshotting no longer works
// (even when a finalizer prevents actual removal of the PVC).
//
// Because data consistency cannot be
// guaranteed, this flavor of the test doesn't
// check the content of the snapshot.
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow))
pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "check pod after it terminated")
// Get new copy of the claim
ginkgo.By("[init] checking the claim")
if pattern.VolType == storageframework.GenericEphemeralVolume {
pvcName = ephemeral.VolumeClaimName(pod, &pod.Spec.Volumes[0])
} else {
pvcName = volumeResource.Pvc.Name
pvcName := ephemeral.VolumeClaimName(pod, &pod.Spec.Volumes[0])
pvcNamespace := pod.Namespace
parameters := map[string]string{}
sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvcName, pvcNamespace, f.Timeouts, parameters)
cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
})
vs := sr.Vs
vsc := sr.Vsclass
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvcNamespace, pvcName, framework.Poll, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
pvc, err = cs.CoreV1().PersistentVolumeClaims(pvcNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
framework.ExpectNoError(err, "get PVC")
claimSize = pvc.Spec.Resources.Requests.Storage().String()
sc = volumeResource.Sc
// Get the bound PV
ginkgo.By("[init] checking the PV")
_, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
framework.ExpectNoError(err)
// Get new copy of the snapshot
ginkgo.By("checking the snapshot")
vs, err = dc.Resource(storageutils.SnapshotGVR).Namespace(vs.GetNamespace()).Get(context.TODO(), vs.GetName(), metav1.GetOptions{})
framework.ExpectNoError(err)
// Get the bound snapshotContent
snapshotStatus := vs.Object["status"].(map[string]interface{})
snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
vscontent, err := dc.Resource(storageutils.SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
framework.ExpectNoError(err)
snapshotContentSpec := vscontent.Object["spec"].(map[string]interface{})
volumeSnapshotRef := snapshotContentSpec["volumeSnapshotRef"].(map[string]interface{})
var restoredPVC *v1.PersistentVolumeClaim
var restoredPod *v1.Pod
// Check SnapshotContent properties
ginkgo.By("checking the SnapshotContent")
// A PreprovisionedCreatedSnapshot does not need to set the volume snapshot class name
if pattern.SnapshotType != storageframework.PreprovisionedCreatedSnapshot {
framework.ExpectEqual(snapshotContentSpec["volumeSnapshotClassName"], vsc.GetName())
}
framework.ExpectEqual(volumeSnapshotRef["name"], vs.GetName())
framework.ExpectEqual(volumeSnapshotRef["namespace"], vs.GetNamespace())
ginkgo.By("creating a pvc from the snapshot")
restoredPVC = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: claimSize,
StorageClassName: &(sc.Name),
}, config.Framework.Namespace.Name)
group := "snapshot.storage.k8s.io"
restoredPVC.Spec.DataSource = &v1.TypedLocalObjectReference{
APIGroup: &group,
Kind: "VolumeSnapshot",
Name: vs.GetName(),
}
ginkgo.By("starting a pod to use the snapshot")
volSrc := v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: restoredPVC.Spec,
},
},
}
restoredPod = StartInPodWithVolumeSource(cs, volSrc, restoredPVC.Namespace, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection)
cleanupSteps = append(cleanupSteps, func() {
StopPod(cs, restoredPod)
})
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow))
if pattern.VolType != storageframework.GenericEphemeralVolume {
commands := e2evolume.GenerateReadFileCmd(datapath)
_, err = framework.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute)
framework.ExpectNoError(err)
}
ginkgo.By("should delete the VolumeSnapshotContent according to its deletion policy")
// Delete both the Snapshot and the restored Pod/PVC at the same time because different storage
// systems have different deletion ordering requirements: some require deleting the restored
// PVC before the Snapshot, and some the opposite.
err = storageutils.DeleteSnapshotWithoutWaiting(dc, vs.GetNamespace(), vs.GetName())
framework.ExpectNoError(err)
// Wait for the Snapshot to be actually deleted from API server
err = storageutils.WaitForNamespacedGVRDeletion(dc, storageutils.SnapshotGVR, vs.GetNamespace(), vs.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
framework.ExpectNoError(err)
switch pattern.SnapshotDeletionPolicy {
case storageframework.DeleteSnapshot:
ginkgo.By("checking the SnapshotContent has been deleted")
err = storageutils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
framework.ExpectNoError(err)
case storageframework.RetainSnapshot:
ginkgo.By("checking the SnapshotContent has not been deleted")
err = storageutils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), 1*time.Second /* poll */, 30*time.Second /* timeout */)
framework.ExpectError(err)
}
})
ginkgo.It("should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)", func() {
if pattern.VolType == storageframework.GenericEphemeralVolume {
e2eskipper.Skipf("volume type %q is ephemeral", pattern.VolType)
}
init()
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow))
pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "check pod after it terminated")
// Get new copy of the claim
ginkgo.By("[init] checking the claim")
pvcName := volumeResource.Pvc.Name
pvcNamespace := volumeResource.Pvc.Namespace
parameters := map[string]string{}
sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvcName, pvcNamespace, f.Timeouts, parameters)
cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
})
vs := sr.Vs
vsc := sr.Vsclass
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvcNamespace, pvcName, framework.Poll, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
@@ -188,10 +338,6 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
pv, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
framework.ExpectNoError(err)
if pattern.VolType == storageframework.GenericEphemeralVolume {
return
}
ginkgo.By("[init] deleting the pod")
StopPod(cs, pod)
@@ -236,51 +382,10 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
}
return true
})
framework.ExpectEqual(success, true)
if !success {
framework.Failf("timed out waiting for node=%s to not use the volume=%s", nodeName, volumeName)
}
cleanup := func() {
// Don't register an AfterEach then a cleanup step because the order
// of execution will do the AfterEach first then the cleanup step.
// Also AfterEach cleanup registration is not fine grained enough
// Adding to the cleanup steps allows you to register cleanup only when it is needed
// Ideally we could replace this with https://golang.org/pkg/testing/#T.Cleanup
// Depending on how far the test executed, cleanup accordingly
// Execute in reverse order, similar to defer stack
for i := len(cleanupSteps) - 1; i >= 0; i-- {
err := storageutils.TryFunc(cleanupSteps[i])
framework.ExpectNoError(err, "while running cleanup steps")
}
}
ginkgo.BeforeEach(func() {
init()
})
ginkgo.AfterEach(func() {
cleanup()
})
ginkgo.Context("", func() {
var (
vs *unstructured.Unstructured
vscontent *unstructured.Unstructured
vsc *unstructured.Unstructured
)
ginkgo.BeforeEach(func() {
var sr *storageframework.SnapshotResource
cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
})
parameters := map[string]string{}
sr = storageframework.CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace(), f.Timeouts, parameters)
vs = sr.Vs
vscontent = sr.Vscontent
vsc = sr.Vsclass
})
ginkgo.It("should check snapshot fields, check restore correctly works after modifying source data, check deletion", func() {
// Get new copy of the snapshot
ginkgo.By("checking the snapshot")
vs, err = dc.Resource(storageutils.SnapshotGVR).Namespace(vs.GetNamespace()).Get(context.TODO(), vs.GetName(), metav1.GetOptions{})
@@ -289,7 +394,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
// Get the bound snapshotContent
snapshotStatus := vs.Object["status"].(map[string]interface{})
snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
vscontent, err = dc.Resource(storageutils.SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
vscontent, err := dc.Resource(storageutils.SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
framework.ExpectNoError(err)
snapshotContentSpec := vscontent.Object["spec"].(map[string]interface{})
@@ -307,7 +412,6 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
framework.ExpectEqual(volumeSnapshotRef["name"], vs.GetName())
framework.ExpectEqual(volumeSnapshotRef["namespace"], vs.GetNamespace())
if pattern.VolType != storageframework.GenericEphemeralVolume {
ginkgo.By("Modifying source data test")
modifiedMntTestData := fmt.Sprintf("modified data from %s namespace", pvc.GetNamespace())
@@ -315,7 +419,6 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
command := fmt.Sprintf("echo '%s' > %s", modifiedMntTestData, datapath)
RunInPodWithVolume(cs, f.Timeouts, pvc.Namespace, pvc.Name, "pvc-snapshottable-data-tester", command, config.ClientNodeSelection)
}
ginkgo.By("creating a pvc from the snapshot")
restoredPVC = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
@@ -331,7 +434,6 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
Name: vs.GetName(),
}
if pattern.VolType != storageframework.GenericEphemeralVolume {
restoredPVC, err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Create(context.TODO(), restoredPVC, metav1.CreateOptions{})
framework.ExpectNoError(err)
cleanupSteps = append(cleanupSteps, func() {
@@ -342,25 +444,9 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
framework.Failf("Error deleting claim %q. Error: %v", restoredPVC.Name, err)
}
})
}
ginkgo.By("starting a pod to use the snapshot")
volSrc := v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: restoredPVC.Name,
},
}
if pattern.VolType == storageframework.GenericEphemeralVolume {
volSrc = v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: restoredPVC.Spec,
},
},
}
}
restoredPod = StartInPodWithVolumeSource(cs, volSrc, restoredPVC.Namespace, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection)
restoredPod = StartInPodWithVolume(cs, restoredPVC.Namespace, restoredPVC.Name, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection)
cleanupSteps = append(cleanupSteps, func() {
StopPod(cs, restoredPod)
})
@@ -378,14 +464,12 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
// Snapshot deletion and some are opposite.
err = storageutils.DeleteSnapshotWithoutWaiting(dc, vs.GetNamespace(), vs.GetName())
framework.ExpectNoError(err)
if pattern.VolType != storageframework.GenericEphemeralVolume {
framework.Logf("deleting restored pod %q/%q", restoredPod.Namespace, restoredPod.Name)
err = cs.CoreV1().Pods(restoredPod.Namespace).Delete(context.TODO(), restoredPod.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
framework.Logf("deleting restored PVC %q/%q", restoredPVC.Namespace, restoredPVC.Name)
err = cs.CoreV1().PersistentVolumeClaims(restoredPVC.Namespace).Delete(context.TODO(), restoredPVC.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
}
// Wait for the Snapshot to be actually deleted from API server
err = storageutils.WaitForNamespacedGVRDeletion(dc, storageutils.SnapshotGVR, vs.GetNamespace(), vs.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)