Merge pull request #86283 from haosdent/clean-e2e-framework-pv

e2e: move functions of framework/pv to e2e/storage
This commit is contained in:
Kubernetes Prow Robot
2019-12-26 21:17:39 -08:00
committed by GitHub
4 changed files with 16 additions and 24 deletions

View File

@@ -280,7 +280,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// (and test) succeed.
ginkgo.It("should test that a PV becomes Available and is clean after the PVC is deleted.", func() {
ginkgo.By("Writing to the volume.")
pod := e2epv.MakeWritePod(ns, pvc)
pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
pod, err = c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns))

View File

@@ -293,13 +293,26 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
return pv
}
// getBoundPV looks up the PersistentVolume currently bound to the given claim.
func getBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
	// Re-fetch the claim so we see the up-to-date Spec.VolumeName binding.
	boundClaim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	// Fetch and return the PV the claim is bound to.
	return client.CoreV1().PersistentVolumes().Get(boundClaim.Spec.VolumeName, metav1.GetOptions{})
}
// checkProvisioning verifies that the claim is bound and has the correct properties
func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
ginkgo.By("checking the claim")
pv, err := e2epv.GetBoundPV(client, claim)
pv, err := getBoundPV(client, claim)
framework.ExpectNoError(err)
// Check sizes
@@ -372,7 +385,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
pod = nil // Don't stop twice.
// Get a new copy of the PV
volume, err := e2epv.GetBoundPV(client, claim)
volume, err := getBoundPV(client, claim)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName))