mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-07-23 19:56:01 +00:00

improve error handling in e2e helpers by always returning error

parent 55042b0ba9
commit 0d81e4c87c
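Note: the commit applies one pattern across the e2e helpers: instead of asserting inside the framework package with gomega's Expect(err).NotTo(HaveOccurred()), each helper now returns an error (or a []error) and the call site decides whether to fail the spec. A minimal before/after sketch of the shape, using DeletePersistentVolume from the first hunks (surrounding package wiring assumed):

// Before: the helper aborted the running spec itself via a gomega assertion.
func DeletePersistentVolume(c clientset.Interface, pvName string) {
	err := c.CoreV1().PersistentVolumes().Delete(pvName, nil)
	Expect(err).NotTo(HaveOccurred())
}

// After: the helper reports the failure; "not found" is treated as success for a delete.
func DeletePersistentVolume(c clientset.Interface, pvName string) error {
	err := c.CoreV1().PersistentVolumes().Delete(pvName, nil)
	if err != nil && !apierrs.IsNotFound(err) {
		return fmt.Errorf("PV Delete API error: %v", err)
	}
	return nil
}

Call sites that still want fail-fast behavior wrap the returned error, e.g. framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name)).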
@@ -26,7 +26,6 @@ import (
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/ec2"
 	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
 	"google.golang.org/api/googleapi"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -92,93 +91,133 @@ type PersistentVolumeClaimConfig struct {
 }

 // Clean up a pv and pvc in a single pv/pvc test case.
-func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
-	DeletePersistentVolumeClaim(c, pvc.Name, ns)
-	DeletePersistentVolume(c, pv.Name)
+// Note: delete errors are appended to []error so that we can attempt to delete both the pvc and pv.
+func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) []error {
+	var errs []error
+
+	if pvc != nil {
+		err := DeletePersistentVolumeClaim(c, pvc.Name, ns)
+		if err != nil {
+			errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvc.Name, err))
+		}
+	} else {
+		Logf("pvc is nil")
+	}
+	if pv != nil {
+		err := DeletePersistentVolume(c, pv.Name)
+		if err != nil {
+			errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pv.Name, err))
+		}
+	} else {
+		Logf("pv is nil")
+	}
+	return errs
 }

-// Clean up pvs and pvcs in multi-pv-pvc test cases. All entries found in the pv and
-// claims maps are deleted.
-func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMap) {
+// Clean up pvs and pvcs in multi-pv-pvc test cases. Entries found in the pv and claim maps are
+// deleted as long as the Delete api call succeeds.
+// Note: delete errors are appended to []error so that as many pvcs and pvs as possible are deleted.
+func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMap) []error {
+	var errs []error
+
 	for pvcKey := range claims {
-		DeletePersistentVolumeClaim(c, pvcKey.Name, ns)
-		delete(claims, pvcKey)
+		err := DeletePersistentVolumeClaim(c, pvcKey.Name, ns)
+		if err != nil {
+			errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvcKey.Name, err))
+		} else {
+			delete(claims, pvcKey)
+		}
 	}

 	for pvKey := range pvols {
-		DeletePersistentVolume(c, pvKey)
-		delete(pvols, pvKey)
+		err := DeletePersistentVolume(c, pvKey)
+		if err != nil {
+			errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pvKey, err))
+		} else {
+			delete(pvols, pvKey)
+		}
 	}
+	return errs
 }

 // Delete the PV.
-func DeletePersistentVolume(c clientset.Interface, pvName string) {
+func DeletePersistentVolume(c clientset.Interface, pvName string) error {
 	if c != nil && len(pvName) > 0 {
-		Logf("Deleting PersistentVolume %v", pvName)
+		Logf("Deleting PersistentVolume %q", pvName)
 		err := c.CoreV1().PersistentVolumes().Delete(pvName, nil)
 		if err != nil && !apierrs.IsNotFound(err) {
-			Expect(err).NotTo(HaveOccurred())
+			return fmt.Errorf("PV Delete API error: %v", err)
 		}
 	}
+	return nil
 }

 // Delete the Claim
-func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) {
+func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) error {
 	if c != nil && len(pvcName) > 0 {
-		Logf("Deleting PersistentVolumeClaim %v", pvcName)
+		Logf("Deleting PersistentVolumeClaim %q", pvcName)
 		err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvcName, nil)
 		if err != nil && !apierrs.IsNotFound(err) {
-			Expect(err).NotTo(HaveOccurred())
+			return fmt.Errorf("PVC Delete API error: %v", err)
 		}
 	}
+	return nil
 }

 // Delete the PVC and wait for the PV to enter its expected phase. Validate that the PV
 // has been reclaimed (assumption here about reclaimPolicy). Caller tells this func which
 // phase value to expect for the pv bound to the to-be-deleted claim.
-func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) {
+func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {

 	pvname := pvc.Spec.VolumeName
 	Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
-	DeletePersistentVolumeClaim(c, pvc.Name, ns)
-	// Check that the PVC is really deleted.
-	pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
-	Expect(apierrs.IsNotFound(err)).To(BeTrue())
+	err := DeletePersistentVolumeClaim(c, pvc.Name, ns)
+	if err != nil {
+		return err
+	}

 	// Wait for the PV's phase to return to be `expectPVPhase`
 	Logf("Waiting for reclaim process to complete.")
 	err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, 1*time.Second, 300*time.Second)
-	Expect(err).NotTo(HaveOccurred())
+	if err != nil {
+		return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
+	}

 	// examine the pv's ClaimRef and UID and compare to expected values
 	pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred())
+	if err != nil {
+		return fmt.Errorf("PV Get API error: %v", err)
+	}
 	cr := pv.Spec.ClaimRef
 	if expectPVPhase == v1.VolumeAvailable {
-		if cr != nil { // may be ok if cr != nil
-			Expect(cr.UID).To(BeEmpty())
+		if cr != nil && len(cr.UID) > 0 {
+			return fmt.Errorf("PV is 'Available' but ClaimRef.UID is not empty")
 		}
 	} else if expectPVPhase == v1.VolumeBound {
-		Expect(cr).NotTo(BeNil())
-		Expect(cr.UID).NotTo(BeEmpty())
+		if cr == nil {
+			return fmt.Errorf("PV is 'Bound' but ClaimRef is nil")
+		}
+		if len(cr.UID) == 0 {
+			return fmt.Errorf("PV is 'Bound' but ClaimRef.UID is empty")
+		}
 	}

 	Logf("PV %v now in %q phase", pv.Name, expectPVPhase)
+	return nil
 }

-// Wraps deletePVCandValidatePV() by calling the function in a loop over the PV map. Only
-// bound PVs are deleted. Validates that the claim was deleted and the PV is in the relevant Phase (Released, Available,
-// Bound).
-// Note: if there are more claims than pvs then some of the remaining claims will bind to
-// the just-made-available pvs.
-func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) {
+// Wraps deletePVCandValidatePV() by calling the function in a loop over the PV map. Only bound PVs
+// are deleted. Validates that the claim was deleted and the PV is in the expected Phase (Released,
+// Available, Bound).
+// Note: if there are more claims than pvs then some of the remaining claims may bind to just made
+// available pvs.
+func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error {

 	var boundPVs, deletedPVCs int

 	for pvName := range pvols {
 		pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
-		Expect(apierrs.IsNotFound(err)).To(BeFalse())
+		if err != nil {
+			return fmt.Errorf("PV Get API error: %v", err)
+		}
 		cr := pv.Spec.ClaimRef
 		// if pv is bound then delete the pvc it is bound to
 		if cr != nil && len(cr.Name) > 0 {
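Two return conventions appear in the hunk above: the single-object helpers fail fast with error, while the cleanup helpers (PVPVCCleanup, PVPVCMapCleanup) accumulate a []error so that a failed PVC delete does not prevent the PV delete from being attempted. A generic sketch of the accumulate-then-report shape (the deleteAll helper here is hypothetical, not part of the diff):

// deleteAll runs every delete even when earlier ones fail; an empty
// slice means everything was removed.
func deleteAll(deletes ...func() error) []error {
	var errs []error
	for _, del := range deletes {
		if err := del(); err != nil {
			errs = append(errs, err)
		}
	}
	return errs
}

PVPVCCleanup follows exactly this shape, deleting the PVC before the PV so the volume can be reclaimed.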
@@ -186,30 +225,46 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap,
 			// Assert bound PVC is tracked in this test. Failing this might
 			// indicate external PVCs interfering with the test.
 			pvcKey := makePvcKey(ns, cr.Name)
-			_, found := claims[pvcKey]
-			Expect(found).To(BeTrue())
+			if _, found := claims[pvcKey]; !found {
+				return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey)
+			}
+			// get the pvc for the delete call below
 			pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(cr.Name, metav1.GetOptions{})
-			Expect(apierrs.IsNotFound(err)).To(BeFalse())
-			DeletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase)
+			if err == nil {
+				if err = DeletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase); err != nil {
+					return err
+				}
+			} else if !apierrs.IsNotFound(err) {
+				return fmt.Errorf("PVC Get API error: %v", err)
+			}
+			// delete pvckey from map even if apierrs.IsNotFound above is true and thus the
+			// claim was not actually deleted here
 			delete(claims, pvcKey)
 			deletedPVCs++
 		}
 	}
-	Expect(boundPVs).To(Equal(deletedPVCs))
+	if boundPVs != deletedPVCs {
+		return fmt.Errorf("expect number of bound PVs (%v) to equal number of deleted PVCs (%v)", boundPVs, deletedPVCs)
+	}
+	return nil
 }

 // create the PV resource. Fails test on error.
-func createPV(c clientset.Interface, pv *v1.PersistentVolume) *v1.PersistentVolume {
+func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
 	pv, err := c.CoreV1().PersistentVolumes().Create(pv)
-	Expect(err).NotTo(HaveOccurred())
-	return pv
+	if err != nil {
+		return nil, fmt.Errorf("PV Create API error: %v", err)
+	}
+	return pv, nil
 }

 // create the PVC resource. Fails test on error.
-func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
+func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
 	pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
-	Expect(err).NotTo(HaveOccurred())
-	return pvc
+	if err != nil {
+		return nil, fmt.Errorf("PVC Create API error: %v", err)
+	}
+	return pvc, nil
 }

 // Create a PVC followed by the PV based on the passed in nfs-server ip and
@@ -218,12 +273,10 @@ func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim)
 // Note: in the pre-bind case the real PVC name, which is generated, is not
 // known until after the PVC is instantiated. This is why the pvc is created
 // before the pv.
-func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
-	var preBindMsg string
-
-	// make the pvc definition first
+func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
+	// make the pvc spec
 	pvc := MakePersistentVolumeClaim(pvcConfig, ns)
+	preBindMsg := ""
 	if preBind {
 		preBindMsg = " pre-bound"
 		pvConfig.Prebind = pvc
@@ -232,16 +285,20 @@ func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
 	pv := MakePersistentVolume(pvConfig)

 	By(fmt.Sprintf("Creating a PVC followed by a%s PV", preBindMsg))
-	// instantiate the pvc
-	pvc = CreatePVC(c, ns, pvc)
+	pvc, err := CreatePVC(c, ns, pvc)
+	if err != nil {
+		return nil, nil, err
+	}

 	// instantiate the pv, handle pre-binding by ClaimRef if needed
 	if preBind {
 		pv.Spec.ClaimRef.Name = pvc.Name
 	}
-	pv = createPV(c, pv)
-	return pv, pvc
+	pv, err = createPV(c, pv)
+	if err != nil {
+		return nil, pvc, err
+	}
+	return pv, pvc, nil
 }

 // Create a PV followed by the PVC based on the passed in nfs-server ip and
@@ -251,8 +308,7 @@ func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
 // Note: in the pre-bind case the real PV name, which is generated, is not
 // known until after the PV is instantiated. This is why the pv is created
 // before the pvc.
-func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
-
+func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
 	preBindMsg := ""
 	if preBind {
 		preBindMsg = " pre-bound"
@@ -264,29 +320,33 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
 	pvc := MakePersistentVolumeClaim(pvcConfig, ns)

 	// instantiate the pv
-	pv = createPV(c, pv)
+	pv, err := createPV(c, pv)
+	if err != nil {
+		return nil, nil, err
+	}
 	// instantiate the pvc, handle pre-binding by VolumeName if needed
 	if preBind {
 		pvc.Spec.VolumeName = pv.Name
 	}
-	pvc = CreatePVC(c, ns, pvc)
-	return pv, pvc
+	pvc, err = CreatePVC(c, ns, pvc)
+	if err != nil {
+		return pv, nil, err
+	}
+	return pv, pvc, nil
 }

 // Create the desired number of PVs and PVCs and return them in separate maps. If the
 // number of PVs != the number of PVCs then the min of those two counts is the number of
-// PVs expected to bind.
-func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap) {
-
-	var i int
-	var pv *v1.PersistentVolume
-	var pvc *v1.PersistentVolumeClaim
+// PVs expected to bind. If a Create error occurs, the returned maps may contain pv and pvc
+// entries for the resources that were successfully created. In other words, when the caller
+// sees an error returned, it needs to decide what to do about entries in the maps.
+// Note: when the test suite deletes the namespace orphaned pvcs and pods are deleted. However,
+// orphaned pvs are not deleted and will remain after the suite completes.
+func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) {
 	pvMap := make(PVMap, numpvs)
 	pvcMap := make(PVCMap, numpvcs)
-	var extraPVs, extraPVCs int
-	extraPVs = numpvs - numpvcs
+	extraPVCs := 0
+	extraPVs := numpvs - numpvcs
 	if extraPVs < 0 {
 		extraPVCs = -extraPVs
 		extraPVs = 0
@@ -294,54 +354,76 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
 	pvsToCreate := numpvs - extraPVs // want the min(numpvs, numpvcs)

 	// create pvs and pvcs
-	for i = 0; i < pvsToCreate; i++ {
-		pv, pvc = CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
+	for i := 0; i < pvsToCreate; i++ {
+		pv, pvc, err := CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
+		if err != nil {
+			return pvMap, pvcMap, err
+		}
 		pvMap[pv.Name] = pvval{}
 		pvcMap[makePvcKey(ns, pvc.Name)] = pvcval{}
 	}

 	// create extra pvs or pvcs as needed
-	for i = 0; i < extraPVs; i++ {
-		pv = MakePersistentVolume(pvConfig)
-		pv = createPV(c, pv)
+	for i := 0; i < extraPVs; i++ {
+		pv := MakePersistentVolume(pvConfig)
+		pv, err := createPV(c, pv)
+		if err != nil {
+			return pvMap, pvcMap, err
+		}
 		pvMap[pv.Name] = pvval{}
 	}
-	for i = 0; i < extraPVCs; i++ {
-		pvc = MakePersistentVolumeClaim(pvcConfig, ns)
-		pvc = CreatePVC(c, ns, pvc)
+	for i := 0; i < extraPVCs; i++ {
+		pvc := MakePersistentVolumeClaim(pvcConfig, ns)
+		pvc, err := CreatePVC(c, ns, pvc)
+		if err != nil {
+			return pvMap, pvcMap, err
+		}
 		pvcMap[makePvcKey(ns, pvc.Name)] = pvcval{}
 	}
-	return pvMap, pvcMap
+	return pvMap, pvcMap, nil
 }

 // Wait for the pv and pvc to bind to each other.
-func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
+func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {

 	// Wait for newly created PVC to bind to the PV
 	Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
 	err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
-	Expect(err).NotTo(HaveOccurred())
+	if err != nil {
+		return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
+	}

 	// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
 	// since the PVC is already bound.
 	err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
-	Expect(err).NotTo(HaveOccurred())
+	if err != nil {
+		return fmt.Errorf("PV %q did not become Bound: %v", pv.Name, err)
+	}

 	// Re-get the pv and pvc objects
 	pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred())
-	// Re-get the pvc and
+	if err != nil {
+		return fmt.Errorf("PV Get API error: %v", err)
+	}
 	pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred())
+	if err != nil {
+		return fmt.Errorf("PVC Get API error: %v", err)
+	}

 	// The pv and pvc are both bound, but to each other?
 	// Check that the PersistentVolume.ClaimRef matches the PVC
-	Expect(pv.Spec.ClaimRef).NotTo(BeNil())
-	Expect(pv.Spec.ClaimRef.Name).To(Equal(pvc.Name))
-	Expect(pvc.Spec.VolumeName).To(Equal(pv.Name))
-	Expect(pv.Spec.ClaimRef.UID).To(Equal(pvc.UID))
+	if pv.Spec.ClaimRef == nil {
+		return fmt.Errorf("PV %q ClaimRef is nil", pv.Name)
+	}
+	if pv.Spec.ClaimRef.Name != pvc.Name {
+		return fmt.Errorf("PV %q ClaimRef's name (%q) should be %q", pv.Name, pv.Spec.ClaimRef.Name, pvc.Name)
+	}
+	if pvc.Spec.VolumeName != pv.Name {
+		return fmt.Errorf("PVC %q VolumeName (%q) should be %q", pvc.Name, pvc.Spec.VolumeName, pv.Name)
+	}
+	if pv.Spec.ClaimRef.UID != pvc.UID {
+		return fmt.Errorf("PV %q ClaimRef's UID (%q) should be %q", pv.Name, pv.Spec.ClaimRef.UID, pvc.UID)
+	}
+	return nil
 }

 // Search for bound PVs and PVCs by examining pvols for non-nil claimRefs.
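WaitOnPVandPVC above now returns an error both when a phase wait times out and when the bind is one-sided. Condensed, the bidirectional check is (error strings simplified here):

// A bind only counts when both sides agree: the PV's ClaimRef names the
// PVC (including its UID) and the PVC's VolumeName names the PV.
if pv.Spec.ClaimRef == nil || pv.Spec.ClaimRef.Name != pvc.Name || pv.Spec.ClaimRef.UID != pvc.UID {
	return fmt.Errorf("PV %q is not bound to PVC %q", pv.Name, pvc.Name)
}
if pvc.Spec.VolumeName != pv.Name {
	return fmt.Errorf("PVC %q is not bound to PV %q", pvc.Name, pv.Name)
}

Checking the UID as well as the name guards against a PV that was re-bound to a newer claim with the same name.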
@@ -350,8 +432,7 @@ func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, p
 // to situations where the maximum wait times are reached several times in succession,
 // extending test time. Thus, it is recommended to keep the delta between PVs and PVCs
 // small.
-func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, testExpected bool) {
-
+func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, testExpected bool) error {
 	var actualBinds int
 	expectedBinds := len(pvols)
 	if expectedBinds > len(claims) { // want the min of # pvs or #pvcs
@@ -365,69 +446,108 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
 			Logf(" This may be ok since there are more pvs than pvcs")
 			continue
 		}
-		Expect(err).NotTo(HaveOccurred())
+		if err != nil {
+			return fmt.Errorf("PV %q did not become Bound: %v", pvName, err)
+		}

 		pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
-		if cr := pv.Spec.ClaimRef; cr != nil && len(cr.Name) > 0 {
+		if err != nil {
+			return fmt.Errorf("PV Get API error: %v", err)
+		}
+		cr := pv.Spec.ClaimRef
+		if cr != nil && len(cr.Name) > 0 {
 			// Assert bound pvc is a test resource. Failing assertion could
 			// indicate non-test PVC interference or a bug in the test
 			pvcKey := makePvcKey(ns, cr.Name)
-			_, found := claims[pvcKey]
-			Expect(found).To(BeTrue(), fmt.Sprintf("PersistentVolume (%q) ClaimRef (%q) does not match any test claims.", pv.Name, cr.Name))
+			if _, found := claims[pvcKey]; !found {
+				return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey)
+			}

-			err = WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, 3*time.Second, 180*time.Second)
-			Expect(err).NotTo(HaveOccurred())
+			err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, 3*time.Second, 180*time.Second)
+			if err != nil {
+				return fmt.Errorf("PVC %q did not become Bound: %v", cr.Name, err)
+			}
 			actualBinds++
 		}
 	}

-	if testExpected {
-		Expect(actualBinds).To(Equal(expectedBinds))
+	if testExpected && actualBinds != expectedBinds {
+		return fmt.Errorf("expect number of bound PVs (%v) to equal number of claims (%v)", actualBinds, expectedBinds)
 	}
+	return nil
 }

 // Test the pod's exit code to be zero.
-func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) {
+func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {

 	By("Pod should terminate with exitcode 0 (success)")
-	err := WaitForPodSuccessInNamespace(c, pod.Name, ns)
-	Expect(err).NotTo(HaveOccurred())
+	if err := WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
+		return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
+	}
 	Logf("Pod %v succeeded ", pod.Name)
+	return nil
 }

 // Deletes the passed-in pod and waits for the pod to be terminated. Resilient to the pod
 // not existing.
-func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) {
+func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) error {

 	if pod == nil {
-		return
+		return nil
 	}
 	Logf("Deleting pod %v", pod.Name)
 	err := c.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil)
 	if err != nil {
 		if apierrs.IsNotFound(err) {
-			return // assume pod was deleted already
+			return nil // assume pod was deleted already
 		}
-		Expect(err).NotTo(HaveOccurred())
+		return fmt.Errorf("pod Get API error: %v", err)
 	}

 	// wait for pod to terminate
 	err = f.WaitForPodTerminated(pod.Name, "")
-	if err != nil {
-		Expect(apierrs.IsNotFound(err)).To(BeTrue(), fmt.Sprintf("Expected 'IsNotFound' error deleting pod \"%v/%v\", instead got: %v", pod.Namespace, pod.Name, err))
-		Logf("Ignore \"not found\" error above")
+	if err != nil && !apierrs.IsNotFound(err) {
+		return fmt.Errorf("error deleting pod %q: %v", pod.Name, err)
 	}
-	Logf("Pod %q successfully deleted", pod.Name)
+	if apierrs.IsNotFound(err) {
+		Logf("Ignore \"not found\" error above. Pod %q successfully deleted", pod.Name)
+	}
+	return nil
+}
+
+// Create the test pod, wait for (hopefully) success, and then delete the pod.
+// Note: need named return value so that the err assignment in the defer sets the returned error.
+// Has been shown to be necessary using Go 1.7.
+func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (err error) {
+	Logf("Creating nfs test pod")
+	pod := MakeWritePod(ns, pvc)
+	runPod, err := c.CoreV1().Pods(ns).Create(pod)
+	if err != nil {
+		return fmt.Errorf("pod Create API error: %v", err)
+	}
+	defer func() {
+		delErr := DeletePodWithWait(f, c, runPod)
+		if err == nil { // don't override previous err value
+			err = delErr // assign to returned err, can be nil
+		}
+	}()
+
+	err = testPodSuccessOrFail(c, ns, runPod)
+	if err != nil {
+		return fmt.Errorf("pod %q did not exit with Success: %v", runPod.Name, err)
+	}
+	return // note: named return value
 }

 // Sanity check for GCE testing. Verify the persistent disk attached to the node.
-func VerifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
+func VerifyGCEDiskAttached(diskName string, nodeName types.NodeName) (bool, error) {
 	gceCloud, err := GetGCECloud()
-	Expect(err).NotTo(HaveOccurred())
+	if err != nil {
+		return false, fmt.Errorf("GetGCECloud error: %v", err)
+	}
 	isAttached, err := gceCloud.DiskIsAttached(diskName, nodeName)
-	Expect(err).NotTo(HaveOccurred())
-	return isAttached
+	if err != nil {
+		return false, fmt.Errorf("cannot verify if GCE disk is attached: %v", err)
+	}
+	return isAttached, nil
 }

 // Return a pvckey struct.
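The new CreateWaitAndDeletePod above is the one helper that needs a named return value: its deferred pod deletion must be able to set the function's error after the body has returned, without clobbering an earlier failure. A runnable toy of the idiom (work and cleanup are hypothetical stand-ins, not names from the diff):

package main

import "fmt"

func work() error    { return fmt.Errorf("boom") }
func cleanup() error { return nil }

// doWork mirrors CreateWaitAndDeletePod: the deferred cleanup error is
// surfaced only when the body itself succeeded.
func doWork() (err error) {
	defer func() {
		delErr := cleanup()
		if err == nil { // don't override the body's error
			err = delErr
		}
	}()
	return work()
}

func main() { fmt.Println(doWork()) } // prints "boom", not the cleanup result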
@@ -438,14 +558,11 @@ func makePvcKey(ns, name string) types.NamespacedName {
 // Returns a PV definition based on the nfs server IP. If the PVC is not nil
 // then the PV is defined with a ClaimRef which includes the PVC's namespace.
 // If the PVC is nil then the PV is not defined with a ClaimRef. If no reclaimPolicy
-// is assigned, assumes "Retain".
-// Note: the passed-in claim does not have a name until it is created
-// (instantiated) and thus the PV's ClaimRef cannot be completely filled-in in
-// this func. Therefore, the ClaimRef's name is added later in
-// createPVCPV.
+// is assigned, assumes "Retain". Specs are expected to match the test's PVC.
+// Note: the passed-in claim does not have a name until it is created and thus the PV's
+// ClaimRef cannot be completely filled-in in this func. Therefore, the ClaimRef's name
+// is added later in createPVCPV.
 func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume {
-	// Specs are expected to match the test's PersistentVolumeClaim

 	var claimRef *v1.ObjectReference
 	// If the reclaimPolicy is not provided, assume Retain
 	if pvConfig.ReclaimPolicy == "" {
@@ -513,30 +630,31 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
 }

 func CreatePDWithRetry() (string, error) {
-	newDiskName := ""
 	var err error
 	for start := time.Now(); time.Since(start) < PDRetryTimeout; time.Sleep(PDRetryPollTime) {
-		if newDiskName, err = createPD(); err != nil {
-			Logf("Couldn't create a new PD. Sleeping 5 seconds (%v)", err)
+		newDiskName, err := createPD()
+		if err != nil {
+			Logf("Couldn't create a new PD, sleeping 5 seconds: %v", err)
 			continue
 		}
 		Logf("Successfully created a new PD: %q.", newDiskName)
-		break
+		return newDiskName, nil
 	}
-	return newDiskName, err
+	return "", err
 }

-func DeletePDWithRetry(diskName string) {
+func DeletePDWithRetry(diskName string) error {
 	var err error
 	for start := time.Now(); time.Since(start) < PDRetryTimeout; time.Sleep(PDRetryPollTime) {
-		if err = deletePD(diskName); err != nil {
-			Logf("Couldn't delete PD %q. Sleeping 5 seconds (%v)", diskName, err)
+		err = deletePD(diskName)
+		if err != nil {
+			Logf("Couldn't delete PD %q, sleeping %v: %v", diskName, PDRetryPollTime, err)
 			continue
 		}
 		Logf("Successfully deleted PD %q.", diskName)
-		break
+		return nil
 	}
-	ExpectNoError(err, "Error deleting PD")
+	return fmt.Errorf("unable to delete PD %q: %v", diskName, err)
 }

 func createPD() (string, error) {
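Both PD helpers above share a poll-until-timeout loop: retry createPD/deletePD every PDRetryPollTime until PDRetryTimeout elapses, returning as soon as one attempt succeeds. One wrinkle worth noting: in the new CreatePDWithRetry the loop-local "newDiskName, err := createPD()" shadows the outer "var err error", so the timeout path's "return "", err" reports nil rather than the last failure. A sketch that keeps the last error visible (hypothetical lowercase name, same framework constants):

func createPDWithRetry() (string, error) {
	var name string
	var err error
	for start := time.Now(); time.Since(start) < PDRetryTimeout; time.Sleep(PDRetryPollTime) {
		// plain '=' keeps the outer err, so a timeout reports the last failure
		if name, err = createPD(); err == nil {
			return name, nil
		}
		Logf("Couldn't create a new PD, retrying: %v", err)
	}
	return "", fmt.Errorf("timed out creating PD: %v", err)
}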
@@ -572,7 +690,7 @@ func createPD() (string, error) {
 		volumeName := "aws://" + az + "/" + awsID
 		return volumeName, nil
 	} else {
-		return "", fmt.Errorf("Provider does not support volume creation")
+		return "", fmt.Errorf("provider does not support volume creation")
 	}
 }

@@ -591,7 +709,7 @@ func deletePD(pdName string) error {
 			return nil
 		}

-		Logf("Error deleting PD %q: %v", pdName, err)
+		Logf("error deleting PD %q: %v", pdName, err)
 	}
 	return err
 } else if TestContext.Provider == "aws" {
@@ -604,30 +722,17 @@ func deletePD(pdName string) error {
 	_, err := client.DeleteVolume(request)
 	if err != nil {
 		if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
-			Logf("Volume deletion implicitly succeeded because volume %q does not exist.", pdName)
+			Logf("volume deletion implicitly succeeded because volume %q does not exist.", pdName)
 		} else {
 			return fmt.Errorf("error deleting EBS volumes: %v", err)
 		}
 	}
 	return nil
 } else {
-	return fmt.Errorf("Provider does not support volume deletion")
+	return fmt.Errorf("provider does not support volume deletion")
 }
 }

-// Create the test pod, wait for success, and then delete the pod.
-func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) {
-	Logf("Creating nfs test pod")
-	pod := MakeWritePod(ns, pvc)
-	runPod, err := c.CoreV1().Pods(ns).Create(pod)
-	Expect(err).NotTo(HaveOccurred())
-	Expect(runPod).NotTo(BeNil())
-	defer DeletePodWithWait(f, c, runPod)
-
-	// Wait for the test pod to complete its lifecycle
-	testPodSuccessOrFail(c, ns, runPod)
-}
-
 // Returns a pod definition based on the namespace. The pod references the PVC's
 // name.
 func MakeWritePod(ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
@@ -677,38 +782,49 @@ func MakePod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool,
 }

 // create pod with given claims
-func CreatePod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
-	podSpec := MakePod(namespace, pvclaims, isPrivileged, command)
-	pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
-	Expect(err).NotTo(HaveOccurred())
+func CreatePod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
+	pod := MakePod(namespace, pvclaims, isPrivileged, command)
+	pod, err := client.CoreV1().Pods(namespace).Create(pod)
+	if err != nil {
+		return nil, fmt.Errorf("pod Create API error: %v", err)
+	}
 	// Waiting for pod to be running
-	Expect(WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
+	err = WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
+	if err != nil {
+		return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
+	}
 	// get fresh pod info
 	pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred())
-	return pod
+	if err != nil {
+		return pod, fmt.Errorf("pod Get API error: %v", err)
+	}
+	return pod, nil
 }

 // Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
-func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
+func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
 	return CreatePod(c, ns, []*v1.PersistentVolumeClaim{pvc}, true, "")
 }

 // wait until all pvcs phase set to bound
-func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim) []*v1.PersistentVolume {
-	var persistentvolumes = make([]*v1.PersistentVolume, len(pvclaims))
+func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim) ([]*v1.PersistentVolume, error) {
+	persistentvolumes := make([]*v1.PersistentVolume, len(pvclaims))

 	for index, claim := range pvclaims {
 		err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, Poll, ClaimProvisionTimeout)
-		Expect(err).NotTo(HaveOccurred())
+		if err != nil {
+			return persistentvolumes, err
+		}
 		// Get new copy of the claim
-		claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
+		claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
+		if err != nil {
+			return persistentvolumes, fmt.Errorf("PVC Get API error: %v", err)
+		}
 		// Get the bounded PV
 		persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
+		if err != nil {
+			return persistentvolumes, fmt.Errorf("PV Get API error: %v", err)
+		}
 	}
-	return persistentvolumes
+	return persistentvolumes, nil
 }
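The remaining hunks update the call sites in the test files. Where a failure should still abort the spec immediately, the returned error is wrapped in the framework's existing ExpectNoError(err error, explain ...interface{}) helper, so the fail-fast decision now lives at the call site rather than inside the helper:

// fail-fast at the call site
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)

// or keep going during cleanup and just record the problem
if err := framework.DeletePodWithWait(f, c, pod); err != nil {
	framework.Logf("cleanup continuing despite: %v", err)
}

(The second form is a sketch of the option the refactor enables, not a call that appears in this diff.)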
@@ -457,8 +457,8 @@ var _ = framework.KubeDescribe("kubelet", func() {
 		})

 		AfterEach(func() {
-			framework.DeletePodWithWait(f, c, pod)
-			framework.DeletePodWithWait(f, c, nfsServerPod)
+			framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "AfterEach: Failed to delete pod ", pod.Name)
+			framework.ExpectNoError(framework.DeletePodWithWait(f, c, nfsServerPod), "AfterEach: Failed to delete pod ", nfsServerPod.Name)
 		})

 		// execute It blocks from above table of tests
@@ -470,7 +470,7 @@ var _ = framework.KubeDescribe("kubelet", func() {
 			stopNfsServer(nfsServerPod)

 			By("Delete the pod mounted to the NFS volume")
-			framework.DeletePodWithWait(f, c, pod)
+			framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)
 			// pod object is now stale, but is intentionally not nil

 			By("Check if pod's host has been cleaned up -- expect not")
|
@ -52,6 +52,7 @@ go_library(
|
|||||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||||
|
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||||
|
@ -521,7 +521,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
|
|||||||
framework.SkipUnlessProviderIs("gce")
|
framework.SkipUnlessProviderIs("gce")
|
||||||
|
|
||||||
By("delete a PD")
|
By("delete a PD")
|
||||||
framework.DeletePDWithRetry("non-exist")
|
framework.ExpectNoError(framework.DeletePDWithRetry("non-exist"))
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
@ -697,7 +697,7 @@ func detachAndDeletePDs(diskName string, hosts []types.NodeName) {
|
|||||||
waitForPDDetach(diskName, host)
|
waitForPDDetach(diskName, host)
|
||||||
}
|
}
|
||||||
By(fmt.Sprintf("Deleting PD %q", diskName))
|
By(fmt.Sprintf("Deleting PD %q", diskName))
|
||||||
framework.DeletePDWithRetry(diskName)
|
framework.ExpectNoError(framework.DeletePDWithRetry(diskName))
|
||||||
}
|
}
|
||||||
|
|
||||||
func waitForPDInVolumesInUse(
|
func waitForPDInVolumesInUse(
|
||||||
|
@ -184,7 +184,7 @@ func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framew
|
|||||||
|
|
||||||
By("Restarting the kubelet.")
|
By("Restarting the kubelet.")
|
||||||
kubeletCommand(kStop, c, clientPod)
|
kubeletCommand(kStop, c, clientPod)
|
||||||
framework.DeletePodWithWait(f, c, clientPod)
|
framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name)
|
||||||
kubeletCommand(kStart, c, clientPod)
|
kubeletCommand(kStart, c, clientPod)
|
||||||
|
|
||||||
By("Expecting the volume mount not to be found.")
|
By("Expecting the volume mount not to be found.")
|
||||||
@@ -195,13 +195,16 @@ func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framew
 	framework.Logf("Volume mount detected on pod and written file is readable post-restart.")
 }

-// initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed by the test
+// initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed
+// by the test.
 func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framework.PersistentVolumeConfig, pvcConfig framework.PersistentVolumeClaimConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
-	pv, pvc := framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
+	pv, pvc, err := framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
+	Expect(err).NotTo(HaveOccurred())
 	pod := framework.MakePod(ns, []*v1.PersistentVolumeClaim{pvc}, true, "")
 	pod.Spec.NodeName = nodeName
 	framework.Logf("Creating nfs client Pod %s on node %s", pod.Name, nodeName)
-	pod, err := c.CoreV1().Pods(ns).Create(pod)
+	pod, err = c.CoreV1().Pods(ns).Create(pod)
 	Expect(err).NotTo(HaveOccurred())
 	err = framework.WaitForPodRunningInNamespace(c, pod)
 	Expect(err).NotTo(HaveOccurred())
@@ -217,9 +220,9 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framew

 // tearDownTestCase destroy resources created by initTestCase.
 func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string, pod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
-	framework.DeletePodWithWait(f, c, pod)
-	framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
-	framework.DeletePersistentVolume(c, pv.Name)
+	framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "tearDown: Failed to delete pod ", pod.Name)
+	framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "tearDown: Failed to delete PVC ", pvc.Name)
+	framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "tearDown: Failed to delete PV ", pv.Name)
 }

 // kubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod.
@@ -24,6 +24,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -41,11 +42,13 @@ func verifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
 // initializeGCETestSpec creates a PV, PVC, and ClientPod that will run until killed by test or clean up.
 func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig framework.PersistentVolumeConfig, pvcConfig framework.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
 	By("Creating the PV and PVC")
-	pv, pvc := framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, isPrebound)
-	framework.WaitOnPVandPVC(c, ns, pv, pvc)
+	pv, pvc, err := framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, isPrebound)
+	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))

 	By("Creating the Client Pod")
-	clientPod := framework.CreateClientPod(c, ns, pvc)
+	clientPod, err := framework.CreateClientPod(c, ns, pvc)
+	Expect(err).NotTo(HaveOccurred())
 	return clientPod, pv, pvc
 }

@@ -104,11 +107,13 @@ var _ = framework.KubeDescribe("PersistentVolumes:GCEPD [Volume]", func() {
 	AfterEach(func() {
 		framework.Logf("AfterEach: Cleaning up test resources")
 		if c != nil {
-			framework.DeletePodWithWait(f, c, clientPod)
-			framework.PVPVCCleanup(c, ns, pv, pvc)
+			framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod))
+			if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
+				framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
+			}
 			clientPod, pv, pvc, node = nil, nil, nil, ""
 			if diskName != "" {
-				framework.DeletePDWithRetry(diskName)
+				framework.ExpectNoError(framework.DeletePDWithRetry(diskName))
 			}
 		}
 	})
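Since PVPVCCleanup returns a []error, the AfterEach above folds the slice into a single failure with utilerrors.NewAggregate; the BUILD and import hunks earlier add k8s.io/apimachinery/pkg/util/errors for exactly this. A self-contained sketch of the aggregate behavior:

package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	var errs []error
	errs = append(errs, errors.New(`failed to delete PVC "pvc-1"`))
	errs = append(errs, errors.New(`failed to delete PV "pv-1"`))
	// NewAggregate returns nil for an empty slice and a single error
	// combining all messages otherwise.
	if agg := utilerrors.NewAggregate(errs); agg != nil {
		fmt.Println(agg) // both messages, reported as one error
	}
}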
@@ -118,15 +123,14 @@ var _ = framework.KubeDescribe("PersistentVolumes:GCEPD [Volume]", func() {
 	It("should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach", func() {

 		By("Deleting the Claim")
-		framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
-		verifyGCEDiskAttached(diskName, node)
+		framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Unable to delete PVC ", pvc.Name)
+		Expect(verifyGCEDiskAttached(diskName, node)).To(BeTrue())

 		By("Deleting the Pod")
-		framework.DeletePodWithWait(f, c, clientPod)
+		framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name)

 		By("Verifying Persistent Disk detach")
-		err = waitForPDDetach(diskName, node)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach")
 	})

 	// Attach a persistent disk to a pod using a PVC.
@@ -134,15 +138,14 @@ var _ = framework.KubeDescribe("PersistentVolumes:GCEPD [Volume]", func() {
 	It("should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach", func() {

 		By("Deleting the Persistent Volume")
-		framework.DeletePersistentVolume(c, pv.Name)
-		verifyGCEDiskAttached(diskName, node)
+		framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
+		Expect(verifyGCEDiskAttached(diskName, node)).To(BeTrue())

 		By("Deleting the client pod")
-		framework.DeletePodWithWait(f, c, clientPod)
+		framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name)

 		By("Verifying Persistent Disk detaches")
-		err = waitForPDDetach(diskName, node)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach")
 	})

 	// Test that a Pod and PVC attached to a GCEPD successfully unmounts and detaches when the encompassing Namespace is deleted.
@@ -156,7 +159,6 @@ var _ = framework.KubeDescribe("PersistentVolumes:GCEPD [Volume]", func() {
 		Expect(err).NotTo(HaveOccurred())

 		By("Verifying Persistent Disk detaches")
-		err = waitForPDDetach(diskName, node)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach")
 	})
})
@@ -17,7 +17,6 @@ limitations under the License.
 package storage

 import (
-	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/kubernetes/pkg/api/v1"
@@ -95,11 +94,13 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
 			}
 		}
 		By("Creating the PV and PVC")
-		pv, pvc = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
-		framework.WaitOnPVandPVC(c, ns, pv, pvc)
+		pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
+		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))

 		By("Creating the Client Pod")
-		clientPod = framework.CreateClientPod(c, ns, pvc)
+		clientPod, err = framework.CreateClientPod(c, ns, pvc)
+		Expect(err).NotTo(HaveOccurred())
 		node := types.NodeName(clientPod.Spec.NodeName)

 		By("Verify disk should be attached to the node")
@@ -111,18 +112,13 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
 	AfterEach(func() {
 		framework.Logf("AfterEach: Cleaning up test resources")
 		if c != nil {
-			if clientPod != nil {
-				clientPod, err = c.CoreV1().Pods(ns).Get(clientPod.Name, metav1.GetOptions{})
-				if !apierrs.IsNotFound(err) {
-					framework.DeletePodWithWait(f, c, clientPod)
-				}
-			}
+			framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name)

 			if pv != nil {
-				framework.DeletePersistentVolume(c, pv.Name)
+				framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "AfterEach: failed to delete PV ", pv.Name)
 			}
 			if pvc != nil {
-				framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
+				framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "AfterEach: failed to delete PVC ", pvc.Name)
 			}
 		}
 	})
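The old AfterEach first fetched the pod and only deleted it when the Get did not report NotFound; the new one calls DeletePodWithWait unconditionally. That simplification only works if the helper treats an already-gone pod as success. A stdlib sketch of that idempotent-delete assumption (all names here are illustrative, not the framework's):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("pod not found") // stand-in for an API-server 404

func apiDelete(name string) error { return errNotFound } // stand-in API call

// deletePodWithWait is idempotent: a missing pod is not an error, so cleanup
// code can call it unconditionally without a Get/IsNotFound pre-check.
func deletePodWithWait(name string) error {
	if err := apiDelete(name); err != nil && !errors.Is(err, errNotFound) {
		return err
	}
	return nil // already gone counts as deleted
}

func main() {
	fmt.Println(deletePodWithWait("client-pod")) // prints <nil>
}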
@@ -150,16 +146,11 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {

 	It("should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach", func() {
 		By("Deleting the Claim")
-		framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
+		framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)

-		pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
-		if !apierrs.IsNotFound(err) {
-			Expect(err).NotTo(HaveOccurred())
-		}
 		pvc = nil
-		By("Deleting the Pod")
-		framework.DeletePodWithWait(f, c, clientPod)

+		By("Deleting the Pod")
+		framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name)
 	})

 	/*
@@ -171,13 +162,10 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
 	*/
 	It("should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach", func() {
 		By("Deleting the Persistent Volume")
-		framework.DeletePersistentVolume(c, pv.Name)
-		pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
-		if !apierrs.IsNotFound(err) {
-			Expect(err).NotTo(HaveOccurred())
-		}
+		framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
 		pv = nil

 		By("Deleting the pod")
-		framework.DeletePodWithWait(f, c, clientPod)
+		framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name)
 	})
})
@@ -18,12 +18,14 @@ package storage

 import (
 	"fmt"
+	"strings"
 	"time"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -32,19 +34,18 @@ import (
 // Validate PV/PVC, create and verify writer pod, delete the PVC, and validate the PV's
 // phase. Note: the PV is deleted in the AfterEach, not here.
 func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {

 	// 1. verify that the PV and PVC have bound correctly
 	By("Validating the PV-PVC binding")
-	framework.WaitOnPVandPVC(c, ns, pv, pvc)
+	framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))

 	// 2. create the nfs writer pod, test if the write was successful,
 	// then delete the pod and verify that it was deleted
 	By("Checking pod has write access to PersistentVolume")
-	framework.CreateWaitAndDeletePod(f, c, ns, pvc)
+	framework.ExpectNoError(framework.CreateWaitAndDeletePod(f, c, ns, pvc))

 	// 3. delete the PVC, wait for PV to become "Released"
 	By("Deleting the PVC to invoke the reclaim policy.")
-	framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeReleased)
+	framework.ExpectNoError(framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeReleased))
 }

 // Validate pairs of PVs and PVCs, create and verify writer pod, delete PVC and validate
@@ -52,26 +53,36 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *
 // Note: the PV is deleted in the AfterEach, not here.
 // Note: this func is serialized, we wait for each pod to be deleted before creating the
 // next pod. Adding concurrency is a TODO item.
-func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, pvols framework.PVMap, claims framework.PVCMap, expectPhase v1.PersistentVolumePhase) {
+func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, pvols framework.PVMap, claims framework.PVCMap, expectPhase v1.PersistentVolumePhase) error {
+	var err error

 	// 1. verify each PV permits write access to a client pod
 	By("Checking pod has write access to PersistentVolumes")
 	for pvcKey := range claims {
 		pvc, err := c.CoreV1().PersistentVolumeClaims(pvcKey.Namespace).Get(pvcKey.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
+		if err != nil {
+			return fmt.Errorf("error getting pvc %q: %v", pvcKey.Name, err)
+		}
 		if len(pvc.Spec.VolumeName) == 0 {
 			continue // claim is not bound
 		}
 		// sanity test to ensure our maps are in sync
 		_, found := pvols[pvc.Spec.VolumeName]
-		Expect(found).To(BeTrue())
+		if !found {
+			return fmt.Errorf("internal: pvols map is missing volume %q", pvc.Spec.VolumeName)
+		}
 		// TODO: currently a serialized test of each PV
-		framework.CreateWaitAndDeletePod(f, c, pvcKey.Namespace, pvc)
+		if err = framework.CreateWaitAndDeletePod(f, c, pvcKey.Namespace, pvc); err != nil {
+			return err
+		}
 	}

 	// 2. delete each PVC, wait for its bound PV to reach `expectedPhase`
-	By("Deleting PVCs to invoke recycler")
-	framework.DeletePVCandValidatePVGroup(c, ns, pvols, claims, expectPhase)
+	By("Deleting PVCs to invoke reclaim policy")
+	if err = framework.DeletePVCandValidatePVGroup(c, ns, pvols, claims, expectPhase); err != nil {
+		return err
+	}
+	return nil
 }

 // initNFSserverPod wraps volumes.go's startVolumeServer to return a running nfs host pod
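completeMultiTest now returns its first failure instead of asserting mid-loop, leaving the single assertion to the It block. Reduced to its shape, with stand-ins in place of the real framework calls, the refactor looks like this:

package main

import (
	"fmt"
	"log"
)

func createWaitAndDeletePod(claim string) error { return nil } // stand-in

// verifyClaims has the completeMultiTest shape: stop at the first failure
// and hand the error back to the caller rather than failing the test here.
func verifyClaims(claims []string) error {
	for _, c := range claims {
		if err := createWaitAndDeletePod(c); err != nil {
			return fmt.Errorf("claim %q: %v", c, err)
		}
	}
	return nil
}

func main() {
	// In the tests the caller is framework.ExpectNoError(completeMultiTest(...)).
	if err := verifyClaims([]string{"pvc-1", "pvc-2"}); err != nil {
		log.Fatal(err)
	}
	fmt.Println("all claims verified")
}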
@@ -99,6 +110,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume]", func() {
 		selector *metav1.LabelSelector
 		pv       *v1.PersistentVolume
 		pvc      *v1.PersistentVolumeClaim
+		err      error
 	)

 	BeforeEach(func() {
@@ -143,17 +155,18 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume]", func() {
 	})

 	AfterEach(func() {
-		framework.DeletePodWithWait(f, c, nfsServerPod)
+		framework.ExpectNoError(framework.DeletePodWithWait(f, c, nfsServerPod), "AfterEach: Failed to delete pod ", nfsServerPod.Name)
 		pv, pvc = nil, nil
 		pvConfig, pvcConfig = framework.PersistentVolumeConfig{}, framework.PersistentVolumeClaimConfig{}
 	})

 	Context("with Single PV - PVC pairs", func() {

 		// Note: this is the only code where the pv is deleted.
 		AfterEach(func() {
 			framework.Logf("AfterEach: Cleaning up test resources.")
-			framework.PVPVCCleanup(c, ns, pv, pvc)
+			if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
+				framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
+			}
 		})

 		// Individual tests follow:
@@ -162,7 +175,8 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume]", func() {
 		// contains the claim. Verify that the PV and PVC bind correctly, and
 		// that the pod can write to the nfs volume.
 		It("should create a non-pre-bound PV and PVC: test write access ", func() {
-			pv, pvc = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
+			pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
+			Expect(err).NotTo(HaveOccurred())
 			completeTest(f, c, ns, pv, pvc)
 		})

@@ -170,7 +184,8 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume]", func() {
 		// pod that contains the claim. Verify that the PV and PVC bind
 		// correctly, and that the pod can write to the nfs volume.
 		It("create a PVC and non-pre-bound PV: test write access", func() {
-			pv, pvc = framework.CreatePVCPV(c, pvConfig, pvcConfig, ns, false)
+			pv, pvc, err = framework.CreatePVCPV(c, pvConfig, pvcConfig, ns, false)
+			Expect(err).NotTo(HaveOccurred())
 			completeTest(f, c, ns, pv, pvc)
 		})

@@ -178,7 +193,8 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume]", func() {
 		// and a pod that contains the claim. Verify that the PV and PVC bind
 		// correctly, and that the pod can write to the nfs volume.
 		It("create a PVC and a pre-bound PV: test write access", func() {
-			pv, pvc = framework.CreatePVCPV(c, pvConfig, pvcConfig, ns, true)
+			pv, pvc, err = framework.CreatePVCPV(c, pvConfig, pvcConfig, ns, true)
+			Expect(err).NotTo(HaveOccurred())
 			completeTest(f, c, ns, pv, pvc)
 		})

@@ -186,7 +202,8 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume]", func() {
 		// and a pod that contains the claim. Verify that the PV and PVC bind
 		// correctly, and that the pod can write to the nfs volume.
 		It("create a PV and a pre-bound PVC: test write access", func() {
-			pv, pvc = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
+			pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
+			Expect(err).NotTo(HaveOccurred())
 			completeTest(f, c, ns, pv, pvc)
 		})
 	})
@@ -205,40 +222,51 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume]", func() {
 		// define the maximum number of PVs and PVCs supported by these tests
 		const maxNumPVs = 10
 		const maxNumPVCs = 10
-		// create the pv and pvc maps to be reused in the It blocks
-		pvols := make(framework.PVMap, maxNumPVs)
-		claims := make(framework.PVCMap, maxNumPVCs)
+		// scope the pv and pvc maps to be available in the AfterEach
+		// note: these maps are created fresh in CreatePVsPVCs()
+		var pvols framework.PVMap
+		var claims framework.PVCMap

 		AfterEach(func() {
 			framework.Logf("AfterEach: deleting %v PVCs and %v PVs...", len(claims), len(pvols))
-			framework.PVPVCMapCleanup(c, ns, pvols, claims)
+			errs := framework.PVPVCMapCleanup(c, ns, pvols, claims)
+			if len(errs) > 0 {
+				errmsg := []string{}
+				for _, e := range errs {
+					errmsg = append(errmsg, e.Error())
+				}
+				framework.Failf("AfterEach: Failed to delete 1 or more PVs/PVCs. Errors: %v", strings.Join(errmsg, "; "))
+			}
 		})

 		// Create 2 PVs and 4 PVCs.
 		// Note: PVs are created before claims and no pre-binding
 		It("should create 2 PVs and 4 PVCs: test write access", func() {
 			numPVs, numPVCs := 2, 4
-			pvols, claims = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
-			framework.WaitAndVerifyBinds(c, ns, pvols, claims, true)
-			completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased)
+			pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
+			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(framework.WaitAndVerifyBinds(c, ns, pvols, claims, true))
+			framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
 		})

 		// Create 3 PVs and 3 PVCs.
 		// Note: PVs are created before claims and no pre-binding
 		It("should create 3 PVs and 3 PVCs: test write access", func() {
 			numPVs, numPVCs := 3, 3
-			pvols, claims = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
-			framework.WaitAndVerifyBinds(c, ns, pvols, claims, true)
-			completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased)
+			pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
+			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(framework.WaitAndVerifyBinds(c, ns, pvols, claims, true))
+			framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
 		})

 		// Create 4 PVs and 2 PVCs.
 		// Note: PVs are created before claims and no pre-binding.
 		It("should create 4 PVs and 2 PVCs: test write access", func() {
 			numPVs, numPVCs := 4, 2
-			pvols, claims = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
-			framework.WaitAndVerifyBinds(c, ns, pvols, claims, true)
-			completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased)
+			pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
+			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(framework.WaitAndVerifyBinds(c, ns, pvols, claims, true))
+			framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
 		})
 	})

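This AfterEach formats the collected errors by hand with strings.Join rather than utilerrors.NewAggregate; the two produce near-identical messages, as this stdlib-only sketch shows (the error values are fabricated for illustration):

package main

import (
	"errors"
	"fmt"
	"strings"
)

func main() {
	errs := []error{
		errors.New("failed to delete PV \"pv-1\": timeout"),
		errors.New("failed to delete PVC \"pvc-2\": conflict"),
	}
	if len(errs) > 0 {
		errmsg := []string{}
		for _, e := range errs {
			errmsg = append(errmsg, e.Error())
		}
		// In the test this string feeds framework.Failf.
		fmt.Println("Failed to delete 1 or more PVs/PVCs. Errors: " + strings.Join(errmsg, "; "))
	}
}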
@@ -248,13 +276,16 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume]", func() {
 	Context("when invoking the Recycle reclaim policy", func() {
 		BeforeEach(func() {
 			pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRecycle
-			pv, pvc = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
-			framework.WaitOnPVandPVC(c, ns, pv, pvc)
+			pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
+			Expect(err).NotTo(HaveOccurred(), "BeforeEach: Failed to create PV/PVC")
+			framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed")
 		})

 		AfterEach(func() {
 			framework.Logf("AfterEach: Cleaning up test resources.")
-			framework.PVPVCCleanup(c, ns, pv, pvc)
+			if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 {
+				framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
+			}
 		})

 		// This It() tests a scenario where a PV is written to by a Pod, recycled, then the volume checked
@@ -263,18 +294,18 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume]", func() {
 		It("should test that a PV becomes Available and is clean after the PVC is deleted. [Volume]", func() {
 			By("Writing to the volume.")
 			pod := framework.MakeWritePod(ns, pvc)
-			pod, err := c.CoreV1().Pods(ns).Create(pod)
-			Expect(err).NotTo(HaveOccurred())
-			err = framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)
+			pod, err = c.CoreV1().Pods(ns).Create(pod)
 			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns))

-			framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable)
+			By("Deleting the claim")
+			framework.ExpectNoError(framework.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable))

 			By("Re-mounting the volume.")
 			pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns)
-			pvc = framework.CreatePVC(c, ns, pvc)
-			err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, 60*time.Second)
+			pvc, err = framework.CreatePVC(c, ns, pvc)
 			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, 60*time.Second), "Failed to reach 'Bound' for PVC ", pvc.Name)

 			// If a file is detected in /mnt, fail the pod and do not restart it.
 			By("Verifying the mount has been cleaned.")
@@ -282,8 +313,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume]", func() {
 			pod = framework.MakePod(ns, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
 			pod, err = c.CoreV1().Pods(ns).Create(pod)
 			Expect(err).NotTo(HaveOccurred())
-			err = framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)
-			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns))
 			framework.Logf("Pod exited without failure; the volume has been recycled.")
 		})
 	})
@@ -119,14 +119,14 @@ var _ = framework.KubeDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func
 			writeContentToVSpherePV(c, pvc, volumeFileContent)

 			By("Delete PVC")
-			framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
+			framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
 			pvc = nil

 			By("Verify PV is retained")
 			framework.Logf("Waiting for PV %v to become Released", pv.Name)
 			err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second)
 			Expect(err).NotTo(HaveOccurred())
-			framework.DeletePersistentVolume(c, pv.Name)
+			framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)

 			By("Creating the PV for same volume path")
 			pv = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, nil)
@@ -139,7 +139,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func
 			Expect(err).NotTo(HaveOccurred())

 			By("wait for the pv and pvc to bind")
-			framework.WaitOnPVandPVC(c, ns, pv, pvc)
+			framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
 			verifyContentOfVSpherePV(c, pvc, volumeFileContent)

 		})
@@ -173,10 +173,10 @@ func testCleanupVSpherePersistentVolumeReclaim(vsp *vsphere.VSphere, c clientset
 		vsp.DeleteVolume(volumePath)
 	}
 	if pv != nil {
-		framework.DeletePersistentVolume(c, pv.Name)
+		framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
 	}
 	if pvc != nil {
-		framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
+		framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
 	}
 }

@@ -185,10 +185,10 @@ func deletePVCAfterBind(c clientset.Interface, ns string, pvc *v1.PersistentVolu
 	var err error

 	By("wait for the pv and pvc to bind")
-	framework.WaitOnPVandPVC(c, ns, pv, pvc)
+	framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))

 	By("delete pvc")
-	framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
+	framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
 	pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
 	if !apierrs.IsNotFound(err) {
 		Expect(err).NotTo(HaveOccurred())
@@ -81,14 +81,14 @@ var _ = framework.KubeDescribe("PersistentVolumes [Feature:LabelSelector]", func
 			Expect(err).NotTo(HaveOccurred())

 			By("wait for the pvc_ssd to bind with pv_ssd")
-			framework.WaitOnPVandPVC(c, ns, pv_ssd, pvc_ssd)
+			framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv_ssd, pvc_ssd))

 			By("Verify status of pvc_vvol is pending")
 			err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvc_vvol.Name, 3*time.Second, 300*time.Second)
 			Expect(err).NotTo(HaveOccurred())

 			By("delete pvc_ssd")
-			framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns)
+			framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)

 			By("verify pv_ssd is deleted")
 			err = framework.WaitForPersistentVolumeDeleted(c, pv_ssd.Name, 3*time.Second, 300*time.Second)
@@ -96,7 +96,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Feature:LabelSelector]", func
 			volumePath = ""

 			By("delete pvc_vvol")
-			framework.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns)
+			framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
 		})
 	})
})
@@ -139,12 +139,12 @@ func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, volume
 		vsp.DeleteVolume(volumePath)
 	}
 	if pvc_ssd != nil {
-		framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns)
+		framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)
 	}
 	if pvc_vvol != nil {
-		framework.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns)
+		framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
 	}
 	if pv_ssd != nil {
-		framework.DeletePersistentVolume(c, pv_ssd.Name)
+		framework.ExpectNoError(framework.DeletePersistentVolume(c, pv_ssd.Name), "Failed to delete PV ", pv_ssd.Name)
 	}
 }
@@ -68,7 +68,7 @@ func testDynamicProvisioning(t storageClassTest, client clientset.Interface, cla
 		Expect(err).NotTo(HaveOccurred())
 		defer func() {
 			framework.Logf("deleting storage class %s", class.Name)
-			client.StorageV1().StorageClasses().Delete(class.Name, nil)
+			framework.ExpectNoError(client.StorageV1().StorageClasses().Delete(class.Name, nil))
 		}()
 	}

@@ -76,8 +76,12 @@ func testDynamicProvisioning(t storageClassTest, client clientset.Interface, cla
 	claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
 	Expect(err).NotTo(HaveOccurred())
 	defer func() {
-		framework.Logf("deleting claim %s/%s", claim.Namespace, claim.Name)
-		client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
+		framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
+		// typically this claim has already been deleted
+		err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
+		if err != nil && !apierrs.IsNotFound(err) {
+			framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
+		}
 	}()
 	err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
 	Expect(err).NotTo(HaveOccurred())
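The deferred claim delete now distinguishes "already gone" from a real failure: only errors other than NotFound fail the test. A stdlib sketch of that guard, with a sentinel error standing in for apierrs.IsNotFound:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found") // stand-in for an API-server 404

func deleteClaim(name string) error { return errNotFound } // stand-in

func main() {
	defer func() {
		// Typically the claim is already deleted by the time the defer runs,
		// so a NotFound here is expected and must not fail the test.
		if err := deleteClaim("pvc-1"); err != nil && !errors.Is(err, errNotFound) {
			fmt.Printf("Error deleting claim %q. Error: %v\n", "pvc-1", err) // would be framework.Failf
		}
	}()
	fmt.Println("test body deletes the claim itself")
}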
@@ -125,7 +129,7 @@ func testDynamicProvisioning(t storageClassTest, client clientset.Interface, cla
 	By("checking the created volume is readable and retains data")
 	runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data")

-	By("deleting the claim")
+	By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
 	framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))

 	// Wait for the PV to get deleted. Technically, the first few delete
|
|||||||
// kubelet is slowly cleaning up the previous pod, however it should succeed
|
// kubelet is slowly cleaning up the previous pod, however it should succeed
|
||||||
// in a couple of minutes. Wait 20 minutes to recover from random cloud
|
// in a couple of minutes. Wait 20 minutes to recover from random cloud
|
||||||
// hiccups.
|
// hiccups.
|
||||||
|
By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
|
||||||
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
|
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -391,8 +396,8 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
 			}
 		})

-		// NOTE: Slow! The test will wait up to 5 minutes (framework.ClaimProvisionTimeout) when there is
-		// no regression.
+		// NOTE: Slow! The test will wait up to 5 minutes (framework.ClaimProvisionTimeout)
+		// when there is no regression.
 		It("should not provision a volume in an unmanaged GCE zone. [Slow] [Volume]", func() {
 			framework.SkipUnlessProviderIs("gce", "gke")
 			var suffix string = "unmananged"
@@ -410,6 +415,7 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {

 			// Get a list of all zones in the project
 			zones, err := gceCloud.GetComputeService().Zones.List(framework.TestContext.CloudConfig.ProjectID).Do()
+			Expect(err).NotTo(HaveOccurred())
 			for _, z := range zones.Items {
 				allZones.Insert(z.Name)
 			}
@@ -440,7 +446,9 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
 			pvc.Spec.StorageClassName = &sc.Name
 			pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
 			Expect(err).NotTo(HaveOccurred())
-			defer framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
+			defer func() {
+				framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
+			}()

 			// The claim should timeout phase:Pending
 			err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionTimeout)
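The switch from `defer framework.DeletePersistentVolumeClaim(...)` to a deferred closure is not cosmetic: a plain defer discards the call's return value, so the delete error could never be checked. A minimal illustration of the difference (the delete function is a stand-in):

package main

import (
	"errors"
	"fmt"
)

func deleteClaim(name string) error { return errors.New("still attached") } // stand-in

func main() {
	name := "pvc-1"

	// defer deleteClaim(name) // compiles, but the returned error is silently dropped

	defer func() {
		// The closure runs at function exit and can react to the error,
		// which is what wrapping the call in an assertion requires.
		if err := deleteClaim(name); err != nil {
			fmt.Printf("Failed to delete PVC %s: %v\n", name, err)
		}
	}()
	fmt.Println("test body")
}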
@@ -475,17 +483,16 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
 				suffix := fmt.Sprintf("race-%d", i)
 				claim := newClaim(test, ns, suffix)
 				claim.Spec.StorageClassName = &class.Name
-				tmpClaim := framework.CreatePVC(c, ns, claim)
-				framework.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns)
+				tmpClaim, err := framework.CreatePVC(c, ns, claim)
+				Expect(err).NotTo(HaveOccurred())
+				framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns))
 			}

 			By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name))
 			residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name)
-			if err != nil {
-				// Cleanup the test resources before breaking
-				deleteProvisionedVolumesAndDisks(c, residualPVs)
-				Expect(err).NotTo(HaveOccurred())
-			}
+			Expect(err).NotTo(HaveOccurred())
+			// Cleanup the test resources before breaking
+			defer deleteProvisionedVolumesAndDisks(c, residualPVs)

 			// Report indicators of regression
 			if len(residualPVs) > 0 {
@@ -548,9 +555,6 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
 				expectedSize: "2Gi",
 			}
 			claim := newClaim(test, ns, "default")
-			defer func() {
-				framework.DeletePersistentVolumeClaim(c, claim.Name, ns)
-			}()
 			testDynamicProvisioning(test, c, claim, nil)
 		})

@@ -573,7 +577,7 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
 			claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
 			Expect(err).NotTo(HaveOccurred())
 			defer func() {
-				framework.DeletePersistentVolumeClaim(c, claim.Name, ns)
+				framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, claim.Name, ns))
 			}()

 			// The claim should timeout phase:Pending
@@ -604,7 +608,7 @@ var _ = framework.KubeDescribe("Dynamic Provisioning", func() {
 			claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
 			Expect(err).NotTo(HaveOccurred())
 			defer func() {
-				framework.DeletePersistentVolumeClaim(c, claim.Name, ns)
+				framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, claim.Name, ns))
 			}()

 			// The claim should timeout phase:Pending
@@ -731,10 +735,10 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
 		},
 	}
 	pod, err := c.CoreV1().Pods(ns).Create(pod)
+	framework.ExpectNoError(err, "Failed to create pod: %v", err)
 	defer func() {
 		framework.DeletePodOrFail(c, ns, pod.Name)
 	}()
-	framework.ExpectNoError(err, "Failed to create pod: %v", err)
 	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
 }

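Moving ExpectNoError ahead of the defer here is a real ordering fix: in the old order, a failed Create still registered a cleanup that dereferences pod.Name, risking a nil-pointer panic before the failure was ever reported. A sketch of the hazard and the fixed order (types and functions are stand-ins):

package main

import (
	"errors"
	"fmt"
)

type pod struct{ Name string }

func createPod() (*pod, error) { return nil, errors.New("quota exceeded") } // stand-in

func main() {
	p, err := createPod()
	if err != nil { // check first, as the fixed code does...
		fmt.Println("create failed:", err)
		return
	}
	defer fmt.Println("deleting", p.Name) // ...so the cleanup can never see a nil p
}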
@@ -907,8 +911,8 @@ func deleteStorageClass(c clientset.Interface, className string) {
 // deleteProvisionedVolumes [gce||gke only] iteratively deletes persistent volumes and attached GCE PDs.
 func deleteProvisionedVolumesAndDisks(c clientset.Interface, pvs []*v1.PersistentVolume) {
 	for _, pv := range pvs {
-		framework.DeletePDWithRetry(pv.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName)
-		framework.DeletePersistentVolume(c, pv.Name)
+		framework.ExpectNoError(framework.DeletePDWithRetry(pv.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName))
+		framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name))
 	}
 }

@@ -517,9 +517,8 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
 			By("creating a test gce pd volume")
 			volumeName, err := framework.CreatePDWithRetry()
 			Expect(err).NotTo(HaveOccurred())

 			defer func() {
-				framework.DeletePDWithRetry(volumeName)
+				framework.ExpectNoError(framework.DeletePDWithRetry(volumeName))
 			}()

 			defer func() {
@@ -99,21 +99,24 @@ var _ = framework.KubeDescribe("vsphere volume operations storm [Volume]", func(
 		By("Creating PVCs using the Storage Class")
 		count := 0
 		for count < volume_ops_scale {
-			pvclaims[count] = framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, storageclass))
+			pvclaims[count], err = framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, storageclass))
+			Expect(err).NotTo(HaveOccurred())
 			count++
 		}

 		By("Waiting for all claims to be in bound phase")
-		persistentvolumes = framework.WaitForPVClaimBoundPhase(client, pvclaims)
+		persistentvolumes, err = framework.WaitForPVClaimBoundPhase(client, pvclaims)
+		Expect(err).NotTo(HaveOccurred())

 		By("Creating pod to attach PVs to the node")
-		pod := framework.CreatePod(client, namespace, pvclaims, false, "")
+		pod, err := framework.CreatePod(client, namespace, pvclaims, false, "")
+		Expect(err).NotTo(HaveOccurred())

 		By("Verify all volumes are accessible and available in the pod")
 		verifyVSphereVolumesAccessible(pod, persistentvolumes, vsp)

 		By("Deleting pod")
-		framework.DeletePodWithWait(f, client, pod)
+		framework.ExpectNoError(framework.DeletePodWithWait(f, client, pod))

 		By("Waiting for volumes to be detached from the node")
 		for _, pv := range persistentvolumes {
@@ -277,8 +277,8 @@ var _ = framework.KubeDescribe("Volume Placement [Volume]", func() {

 		defer func() {
 			By("clean up undeleted pods")
-			framework.DeletePodWithWait(f, c, podA)
-			framework.DeletePodWithWait(f, c, podB)
+			framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "defer: Failed to delete pod ", podA.Name)
+			framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "defer: Failed to delete pod ", podB.Name)
 			By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name))
 			for _, volumePath := range volumePaths {
 				waitForVSphereDiskToDetach(vsp, volumePath, types.NodeName(node1Name))
@@ -319,9 +319,9 @@ var _ = framework.KubeDescribe("Volume Placement [Volume]", func() {
 			verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles)

 			By("Deleting pod-A")
-			framework.DeletePodWithWait(f, c, podA)
+			framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "Failed to delete pod ", podA.Name)
 			By("Deleting pod-B")
-			framework.DeletePodWithWait(f, c, podB)
+			framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "Failed to delete pod ", podB.Name)
 		}
 	})
})
@@ -377,7 +377,7 @@ func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyfile

 func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, vsp *vsphere.VSphere, nodeName string, volumePaths []string) {
 	By("Deleting pod")
-	framework.DeletePodWithWait(f, c, pod)
+	framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)

 	By("Waiting for volume to be detached from the node")
 	for _, volumePath := range volumePaths {
@@ -42,6 +42,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
    ],
@@ -17,10 +17,12 @@ limitations under the License.
 package upgrades

 import (
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/test/e2e/framework"

 	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
 )

 // PersistentVolumeUpgradeTest test that a pv is available before and after a cluster upgrade.
@@ -50,13 +52,14 @@ func (t *PersistentVolumeUpgradeTest) createGCEVolume() *v1.PersistentVolumeSour
 		},
 	}
 }
-func (t *PersistentVolumeUpgradeTest) deleteGCEVolume(pvSource *v1.PersistentVolumeSource) {
-	framework.DeletePDWithRetry(pvSource.GCEPersistentDisk.PDName)
+func (t *PersistentVolumeUpgradeTest) deleteGCEVolume(pvSource *v1.PersistentVolumeSource) error {
+	return framework.DeletePDWithRetry(pvSource.GCEPersistentDisk.PDName)
 }

 // Setup creates a pv and then verifies that a pod can consume it. The pod writes data to the volume.
 func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {

+	var err error
 	// TODO: generalize this to other providers
 	framework.SkipUnlessProviderIs("gce", "gke")

@@ -76,8 +79,9 @@ func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {
 	}

 	By("Creating the PV and PVC")
-	t.pv, t.pvc = framework.CreatePVPVC(f.ClientSet, pvConfig, pvcConfig, ns, true)
-	framework.WaitOnPVandPVC(f.ClientSet, ns, t.pv, t.pvc)
+	t.pv, t.pvc, err = framework.CreatePVPVC(f.ClientSet, pvConfig, pvcConfig, ns, true)
+	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(framework.WaitOnPVandPVC(f.ClientSet, ns, t.pv, t.pvc))

 	By("Consuming the PV before upgrade")
 	t.testPod(f, pvWriteCmd+";"+pvReadCmd)
@@ -93,8 +97,13 @@ func (t *PersistentVolumeUpgradeTest) Test(f *framework.Framework, done <-chan s

 // Teardown cleans up any remaining resources.
 func (t *PersistentVolumeUpgradeTest) Teardown(f *framework.Framework) {
-	framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, t.pv, t.pvc)
-	t.deleteGCEVolume(t.pvSource)
+	errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, t.pv, t.pvc)
+	if err := t.deleteGCEVolume(t.pvSource); err != nil {
+		errs = append(errs, err)
+	}
+	if len(errs) > 0 {
+		framework.Failf("Failed to delete 1 or more PVs/PVCs and/or the GCE volume. Errors: %v", utilerrors.NewAggregate(errs))
+	}
 }

 // testPod creates a pod that consumes a pv and prints it out. The output is then verified.