Merge pull request #86283 from haosdent/clean-e2e-framework-pv

e2e: move funs of framework/pv to e2e/storage
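
The two helpers removed from test/e2e/framework/pv stay available to the storage tests: MakeWritePod is inlined at its single call site in persistent_volumes.go, and GetBoundPV reappears as the package-private getBoundPV in the provisioning testsuite (both hunks below). A minimal sketch of the caller-side change, using the e2epod.MakePod call exactly as it appears in the diff (the makeWritePod wrapper name here is hypothetical, for illustration only):

    package storage

    import (
        v1 "k8s.io/api/core/v1"
        e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    )

    // makeWritePod shows what callers now spell out themselves instead of
    // calling the removed e2epv.MakeWritePod: a privileged pod that mounts
    // the claim, writes a SUCCESS marker file to the volume, and checks
    // that the container's group list includes 777.
    func makeWritePod(ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
        return e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true,
            "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
    }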
Kubernetes Prow Robot 2019-12-26 21:17:39 -08:00 committed by GitHub
commit bcff8bf3aa
4 changed files with 16 additions and 24 deletions

test/e2e/framework/pv/BUILD

@@ -16,7 +16,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/pod:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
     ],
 )

test/e2e/framework/pv/pv.go

@@ -31,7 +31,6 @@ import (
     storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
     "k8s.io/kubernetes/pkg/volume/util"
     "k8s.io/kubernetes/test/e2e/framework"
-    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
 
 const (
@@ -678,12 +677,6 @@ func deletePD(pdName string) error {
     return framework.TestContext.CloudConfig.Provider.DeletePD(pdName)
 }
 
-// MakeWritePod returns a pod definition based on the namespace; the pod
-// references the PVC by name.
-func MakeWritePod(ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
-    return e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
-}
-
 // WaitForPVClaimBoundPhase waits until all PVCs' phases are set to Bound.
 func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) ([]*v1.PersistentVolume, error) {
     persistentvolumes := make([]*v1.PersistentVolume, len(pvclaims))
@@ -776,19 +769,6 @@ func DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
     return framework.TestContext.CloudConfig.Provider.DeletePVSource(pvSource)
 }
 
-// GetBoundPV returns the PV bound to the given claim.
-func GetBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
-    // Get new copy of the claim
-    claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
-    if err != nil {
-        return nil, err
-    }
-
-    // Get the bound PV
-    pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
-    return pv, err
-}
-
 // GetDefaultStorageClassName returns the default StorageClass name, or an error if none is set.
 func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
     list, err := c.StorageV1().StorageClasses().List(metav1.ListOptions{})
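
With GetBoundPV gone from the framework package, any caller outside test/e2e/storage inlines the same two client-go lookups the helper wrapped. A minimal self-contained sketch, assuming a typed clientset and the context-free Get signatures this codebase uses (pre client-go 1.18); the boundPVFor name is hypothetical:

    package storage

    import (
        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    // boundPVFor re-reads the claim to pick up Spec.VolumeName (set by the
    // binder once the claim is bound) and then fetches the PV it names. This
    // mirrors the helper that moves into the provisioning testsuite below.
    func boundPVFor(c clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
        claim, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
        if err != nil {
            return nil, err
        }
        return c.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
    }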

test/e2e/storage/persistent_volumes.go

@@ -280,7 +280,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
         // (and test) succeed.
         ginkgo.It("should test that a PV becomes Available and is clean after the PVC is deleted.", func() {
             ginkgo.By("Writing to the volume.")
-            pod := e2epv.MakeWritePod(ns, pvc)
+            pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
             pod, err = c.CoreV1().Pods(ns).Create(pod)
             framework.ExpectNoError(err)
             framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns))

test/e2e/storage/testsuites/provisioning.go

@@ -293,13 +293,26 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
     return pv
 }
 
+// getBoundPV returns the PV bound to the given claim.
+func getBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
+    // Get new copy of the claim
+    claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
+    if err != nil {
+        return nil, err
+    }
+
+    // Get the bound PV
+    pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
+    return pv, err
+}
+
 // checkProvisioning verifies that the claim is bound and has the correct properties
 func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
     err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
     framework.ExpectNoError(err)
 
     ginkgo.By("checking the claim")
-    pv, err := e2epv.GetBoundPV(client, claim)
+    pv, err := getBoundPV(client, claim)
     framework.ExpectNoError(err)
 
     // Check sizes
@@ -372,7 +385,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
     pod = nil // Don't stop twice.
 
     // Get a new copy of the PV
-    volume, err := e2epv.GetBoundPV(client, claim)
+    volume, err := getBoundPV(client, claim)
     framework.ExpectNoError(err)
 
     ginkgo.By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName))