mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #107173 from mauriciopoppe/fix-snapshot-refactor
Fix order of commands in the snapshot tests for persistent volumes
commit a6299aa2ab
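The diff below reorders the [init] steps: the writer pod is checked for success first, then the claim and PV are verified, the pod is deleted so the node unpublishes the volume, and only then is the snapshot taken. As a reading aid, here is a condensed sketch of that ordering; it is not part of the commit, the parameter types are inferred from the identifiers in the diff, and it assumes it sits in the same testsuites package so StopPod and the framework helpers resolve.

package testsuites

import (
    v1 "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
    storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
)

// checkSourceThenSnapshot is a hypothetical condensation of the reordered flow:
// verify the writer pod, verify the claim, release the volume from the node,
// then cut the snapshot. It is not a function from the suite itself.
func checkSourceThenSnapshot(
    cs clientset.Interface,
    f *framework.Framework,
    pod *v1.Pod,
    pvc *v1.PersistentVolumeClaim,
    sDriver storageframework.SnapshottableTestDriver,
    config *storageframework.PerTestConfig,
    pattern storageframework.TestPattern,
) *storageframework.SnapshotResource {
    // 1. The writer pod has to exit successfully so the test data is on the volume.
    framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow))

    // 2. The claim has to be bound before pvc.Spec.VolumeName can be used to find the PV.
    framework.ExpectNoError(e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision))

    // 3. Delete the pod first, which forces NodeUnpublishVolume for non-ephemeral volumes.
    StopPod(cs, pod)

    // 4. Only then take the snapshot of the quiesced volume.
    parameters := map[string]string{}
    return storageframework.CreateSnapshotResource(sDriver, config, pattern, pvc.Name, pvc.Namespace, f.Timeouts, parameters)
}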
@@ -145,11 +145,8 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
        command := fmt.Sprintf("echo '%s' > %s", originalMntTestData, datapath)
        pod = StartInPodWithVolumeSource(cs, *volumeResource.VolSource, f.Namespace.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
        cleanupSteps = append(cleanupSteps, func() {
            e2epod.DeletePodWithWait(cs, pod)
        })

        // At this point a pod is running with a PVC. How to proceed depends on which test is running.
        // At this point a pod is created with a PVC. How to proceed depends on which test is running.
    }

    cleanup := func() {
@@ -178,6 +175,11 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
        }
        init()

        // delete the pod at the end of the test
        cleanupSteps = append(cleanupSteps, func() {
            e2epod.DeletePodWithWait(cs, pod)
        })

        // We can test snapshotting of generic
        // ephemeral volumes by creating the snapshot
        // while the pod is running (online). We cannot do it after pod deletion,
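The cleanupSteps slice used above is the suite's way of registering teardown work as resources are created; the cleanup function then runs the registered closures. A minimal self-contained sketch of that pattern follows; the last-registered-first ordering is an assumption about the suite, not something shown in this diff.

package main

import "fmt"

func main() {
    var cleanupSteps []func()

    // Register teardown work in the order resources are created,
    // mirroring `cleanupSteps = append(cleanupSteps, func() { ... })` in the suite.
    cleanupSteps = append(cleanupSteps, func() { fmt.Println("delete pod") })
    cleanupSteps = append(cleanupSteps, func() { fmt.Println("delete snapshot resource") })

    // cleanup runs the steps last-registered-first, so dependent resources
    // go away before the things they depend on (assumed LIFO ordering).
    cleanup := func() {
        for i := len(cleanupSteps) - 1; i >= 0; i-- {
            cleanupSteps[i]()
        }
    }
    cleanup()
}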
@@ -308,36 +310,31 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
        }
        init()

        framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow))
        pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
        framework.ExpectNoError(err, "check pod after it terminated")

        // Get new copy of the claim
        ginkgo.By("[init] checking the claim")
        pvcName := volumeResource.Pvc.Name
        pvcNamespace := volumeResource.Pvc.Namespace

        parameters := map[string]string{}
        sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvcName, pvcNamespace, f.Timeouts, parameters)
        cleanupSteps = append(cleanupSteps, func() {
            framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
        })
        vs := sr.Vs
        vsc := sr.Vsclass

        err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvcNamespace, pvcName, framework.Poll, f.Timeouts.ClaimProvision)
        framework.ExpectNoError(err)

        pvc, err = cs.CoreV1().PersistentVolumeClaims(pvcNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
        framework.ExpectNoError(err, "get PVC")
        claimSize = pvc.Spec.Resources.Requests.Storage().String()
        pvc = volumeResource.Pvc
        sc = volumeResource.Sc

        // Get the bound PV
        // The pod should be in the Success state.
        ginkgo.By("[init] check pod success")
        pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
        framework.ExpectNoError(err, "Failed to fetch pod: %v", err)
        framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow))
        // Sync the pod to know additional fields.
        pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
        framework.ExpectNoError(err, "Failed to fetch pod: %v", err)

        ginkgo.By("[init] checking the claim")
        err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision)
        framework.ExpectNoError(err)
        // Get new copy of the claim.
        pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
        framework.ExpectNoError(err)

        // Get the bound PV.
        ginkgo.By("[init] checking the PV")
        pv, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
        framework.ExpectNoError(err)

        // Delete the pod to force NodeUnpublishVolume (unlike the ephemeral case where the pod is deleted at the end of the test).
        ginkgo.By("[init] deleting the pod")
        StopPod(cs, pod)

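The failure message in the next hunk shows that, before taking the snapshot, the test waits for the node to stop using the volume, i.e. for NodeUnpublishVolume to take effect after the pod was deleted. The suite's own wait loop is not visible here; the following is only a hedged sketch of one way such a wait can be written against the core API, with an illustrative helper name and polling interval.

package storagehelpers

import (
    "context"
    "strings"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    clientset "k8s.io/client-go/kubernetes"
)

// waitForVolumeReleased polls the node object until its status no longer lists
// the volume as in use, which signals that the node has unpublished/unstaged it.
func waitForVolumeReleased(cs clientset.Interface, nodeName, volumeName string, timeout time.Duration) error {
    return wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
        node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        for _, v := range node.Status.VolumesInUse {
            if strings.Contains(string(v), volumeName) {
                return false, nil // still mounted on the node, keep polling
            }
        }
        return true, nil
    })
}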
@@ -386,6 +383,15 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
            framework.Failf("timed out waiting for node=%s to not use the volume=%s", nodeName, volumeName)
        }

        // Take the snapshot.
        parameters := map[string]string{}
        sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvc.Name, pvc.Namespace, f.Timeouts, parameters)
        cleanupSteps = append(cleanupSteps, func() {
            framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
        })
        vs := sr.Vs
        vsc := sr.Vsclass

        // Get new copy of the snapshot
        ginkgo.By("checking the snapshot")
        vs, err = dc.Resource(storageutils.SnapshotGVR).Namespace(vs.GetNamespace()).Get(context.TODO(), vs.GetName(), metav1.GetOptions{})
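The VolumeSnapshot fetched through the dynamic client above comes back as an *unstructured.Unstructured, so its fields have to be read out of nested maps. Below is a small illustrative helper, assuming the usual readyToUse and boundVolumeSnapshotContentName status fields; it is not a function from the suite.

package storagehelpers

import (
    "fmt"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// snapshotStatus extracts the fields the test cares about from a VolumeSnapshot
// fetched through the dynamic client (as in the diff's dc.Resource(...).Get call).
func snapshotStatus(vs *unstructured.Unstructured) (ready bool, contentName string, err error) {
    ready, found, err := unstructured.NestedBool(vs.Object, "status", "readyToUse")
    if err != nil || !found {
        return false, "", fmt.Errorf("status.readyToUse not set: found=%v err=%v", found, err)
    }
    contentName, found, err = unstructured.NestedString(vs.Object, "status", "boundVolumeSnapshotContentName")
    if err != nil || !found {
        return false, "", fmt.Errorf("status.boundVolumeSnapshotContentName not set: found=%v err=%v", found, err)
    }
    return ready, contentName, nil
}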
@@ -400,9 +406,6 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
        snapshotContentSpec := vscontent.Object["spec"].(map[string]interface{})
        volumeSnapshotRef := snapshotContentSpec["volumeSnapshotRef"].(map[string]interface{})

        var restoredPVC *v1.PersistentVolumeClaim
        var restoredPod *v1.Pod

        // Check SnapshotContent properties
        ginkgo.By("checking the SnapshotContent")
        // PreprovisionedCreatedSnapshot do not need to set volume snapshot class name
@@ -413,6 +416,8 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
        framework.ExpectEqual(volumeSnapshotRef["namespace"], vs.GetNamespace())

        ginkgo.By("Modifying source data test")
        var restoredPVC *v1.PersistentVolumeClaim
        var restoredPod *v1.Pod
        modifiedMntTestData := fmt.Sprintf("modified data from %s namespace", pvc.GetNamespace())

        ginkgo.By("modifying the data in the source PVC")
@@ -421,6 +426,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
        RunInPodWithVolume(cs, f.Timeouts, pvc.Namespace, pvc.Name, "pvc-snapshottable-data-tester", command, config.ClientNodeSelection)

        ginkgo.By("creating a pvc from the snapshot")
        claimSize = pvc.Spec.Resources.Requests.Storage().String()
        restoredPVC = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
            ClaimSize:        claimSize,
            StorageClassName: &(sc.Name),
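For the restored claim to actually be provisioned from the snapshot, its spec must reference the VolumeSnapshot as a data source. That assignment falls outside the visible hunk; the sketch below shows the usual shape with the core v1 types, and both the helper name and the wiring are assumptions rather than lines from this file.

package storagehelpers

import (
    v1 "k8s.io/api/core/v1"
)

// withSnapshotDataSource points a restored PVC at the snapshot it should be
// provisioned from; snapshotName would be vs.GetName() in the test.
func withSnapshotDataSource(pvc *v1.PersistentVolumeClaim, snapshotName string) *v1.PersistentVolumeClaim {
    group := "snapshot.storage.k8s.io"
    pvc.Spec.DataSource = &v1.TypedLocalObjectReference{
        APIGroup: &group,
        Kind:     "VolumeSnapshot",
        Name:     snapshotName,
    }
    return pvc
}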
@@ -451,11 +457,9 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
            StopPod(cs, restoredPod)
        })
        framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow))
        if pattern.VolType != storageframework.GenericEphemeralVolume {
            commands := e2evolume.GenerateReadFileCmd(datapath)
            _, err = framework.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute)
            framework.ExpectNoError(err)
        }

        ginkgo.By("should delete the VolumeSnapshotContent according to its deletion policy")
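The final ginkgo.By above leads into the deletion-policy check. As a hedged illustration of what such a verification can look like with the dynamic client, the sketch below waits for the bound VolumeSnapshotContent to disappear after the VolumeSnapshot is deleted and the policy is Delete; the GVR literal, helper name, and polling values are local assumptions, not the suite's own helpers.

package storagehelpers

import (
    "context"
    "time"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/dynamic"
)

var snapshotContentGVR = schema.GroupVersionResource{
    Group:    "snapshot.storage.k8s.io",
    Version:  "v1",
    Resource: "volumesnapshotcontents",
}

// waitForContentDeleted waits for a VolumeSnapshotContent to disappear after the
// VolumeSnapshot bound to it was deleted and the content's deletion policy is Delete.
func waitForContentDeleted(dc dynamic.Interface, contentName string, timeout time.Duration) error {
    return wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
        _, err := dc.Resource(snapshotContentGVR).Get(context.TODO(), contentName, metav1.GetOptions{})
        if apierrors.IsNotFound(err) {
            return true, nil // content is gone, as the Delete policy requires
        }
        return false, err
    })
}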