Merge pull request #40177 from copejon/isolate-recycler-test

Automatic merge from submit-queue (batch tested with PRs 41134, 41410, 40177, 41049, 41313)

Isolate recycler behavior in PV E2E

**What this PR does / why we need it**:
Sets the default `reclaimPolicy` for PV E2E to `Retain` and isolates `Recycle` tests into their own context. The purpose is to future-proof the PV test suite against the possible deprecation of the `Recycle` behavior: all recycling test code is consolidated into a single Context block that can be removed en masse without affecting the rest of the suite.
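The isolation pattern reduces to a single Ginkgo `Context` whose `BeforeEach` overrides the suite-wide `Retain` default; a sketch condensed from the diff below:

```go
// All Recycle-specific setup and tests live in one Context, so the whole
// block can be deleted without touching the rest of the suite.
Context("when invoking the Recycle reclaim policy", func() {
	BeforeEach(func() {
		// Override the suite-wide default reclaim policy (Retain) for this context only.
		pvConfig.reclaimPolicy = v1.PersistentVolumeReclaimRecycle
		pv, pvc = createPVPVC(c, pvConfig, ns, false)
		waitOnPVandPVC(c, ns, pv, pvc)
	})
	AfterEach(func() {
		pvPvcCleanup(c, ns, pv, pvc)
	})
	// Recycle-specific It() blocks go here.
})
```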

Second, adds a liveness check for the NFS server pod prior to each test, so that tests fail fast instead of exhausting their timeouts when the NFS server becomes unavailable.
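The liveness check itself is not part of the excerpt below. A minimal sketch of the idea, assuming the suite keeps a handle to the server pod (`nfsServerPod` is a hypothetical name here) and using the framework's pod-running wait:

```go
// Hypothetical pre-test guard: fail fast if the NFS server pod has died,
// instead of waiting out the 300s PV-phase timeout in each test.
BeforeEach(func() {
	err := framework.WaitForPodRunningInNamespace(c, nfsServerPod)
	Expect(err).NotTo(HaveOccurred(), "NFS server pod is no longer running")
})
```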

cc @saad-ali @jeffvance
Kubernetes Submit Queue, 2017-02-15 05:58:19 -08:00 (committed by GitHub)
commit f2da9d8cba
2 changed files with 88 additions and 45 deletions


@@ -17,6 +17,7 @@ limitations under the License.
package e2e
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
@@ -32,7 +33,7 @@ import (
// phase. Note: the PV is deleted in the AfterEach, not here.
func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
// 1. verify that the PV and PVC have binded correctly
// 1. verify that the PV and PVC have bound correctly
By("Validating the PV-PVC binding")
waitOnPVandPVC(c, ns, pv, pvc)
@@ -41,9 +42,9 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *
By("Checking pod has write access to PersistentVolume")
createWaitAndDeletePod(f, c, ns, pvc.Name)
// 3. delete the PVC, wait for PV to become "Available"
By("Deleting the PVC to invoke the recycler")
deletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable)
// 3. delete the PVC, wait for PV to become "Released"
By("Deleting the PVC to invoke the reclaim policy.")
deletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeReleased)
}
// Validate pairs of PVs and PVCs, create and verify writer pod, delete PVC and validate
@@ -51,8 +52,7 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *
// Note: the PV is deleted in the AfterEach, not here.
// Note: this func is serialized, we wait for each pod to be deleted before creating the
// next pod. Adding concurrency is a TODO item.
// Note: this func is called recursively when there are more claims than pvs.
func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, pvols pvmap, claims pvcmap) {
func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, pvols pvmap, claims pvcmap, expectPhase v1.PersistentVolumePhase) {
// 1. verify each PV permits write access to a client pod
By("Checking pod has write access to PersistentVolumes")
@@ -69,9 +69,9 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
createWaitAndDeletePod(f, c, pvcKey.Namespace, pvcKey.Name)
}
// 2. delete each PVC, wait for its bound PV to become "Available"
// 2. delete each PVC, wait for its bound PV to reach `expectPhase`
By("Deleting PVCs to invoke recycler")
deletePVCandValidatePVGroup(c, ns, pvols, claims)
deletePVCandValidatePVGroup(c, ns, pvols, claims, expectPhase)
}
// Creates a PV, PVC, and ClientPod that will run until killed by test or clean up.
@@ -218,7 +218,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
numPVs, numPVCs := 2, 4
pvols, claims = createPVsPVCs(numPVs, numPVCs, c, ns, pvConfig)
waitAndVerifyBinds(c, ns, pvols, claims, true)
completeMultiTest(f, c, ns, pvols, claims)
completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased)
})
// Create 3 PVs and 3 PVCs.
@@ -227,7 +227,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
numPVs, numPVCs := 3, 3
pvols, claims = createPVsPVCs(numPVs, numPVCs, c, ns, pvConfig)
waitAndVerifyBinds(c, ns, pvols, claims, true)
completeMultiTest(f, c, ns, pvols, claims)
completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased)
})
// Create 4 PVs and 2 PVCs.
@@ -236,7 +236,57 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
numPVs, numPVCs := 4, 2
pvols, claims = createPVsPVCs(numPVs, numPVCs, c, ns, pvConfig)
waitAndVerifyBinds(c, ns, pvols, claims, true)
completeMultiTest(f, c, ns, pvols, claims)
completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased)
})
})
// This Context isolates and tests the "Recycle" reclaim behavior. On deprecation of the
// Recycler, this entire context can be removed without affecting the test suite or leaving behind
// dead code.
Context("when invoking the Recycle reclaim policy", func() {
var pv *v1.PersistentVolume
var pvc *v1.PersistentVolumeClaim
BeforeEach(func() {
pvConfig.reclaimPolicy = v1.PersistentVolumeReclaimRecycle
pv, pvc = createPVPVC(c, pvConfig, ns, false)
waitOnPVandPVC(c, ns, pv, pvc)
})
AfterEach(func() {
framework.Logf("AfterEach: Cleaning up test resources.")
pvPvcCleanup(c, ns, pv, pvc)
})
// This It() tests a scenario where a PV is written to by a Pod, recycled, then the volume is
// checked for files. If files are found, the checking Pod fails, failing the test. Otherwise,
// the pod (and the test) succeeds.
It("should test that a PV becomes Available and is clean after the PVC is deleted. [Volume][Serial][Flaky]", func() {
By("Writing to the volume.")
pod := makeWritePod(ns, pvc.Name)
pod, err := c.Core().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)
Expect(err).NotTo(HaveOccurred())
deletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable)
By("Re-mounting the volume.")
pvc = makePersistentVolumeClaim(ns)
pvc = createPVC(c, ns, pvc)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, 60*time.Second)
Expect(err).NotTo(HaveOccurred())
// If a file is detected in /mnt, fail the pod and do not restart it.
By("Verifying the mount has been cleaned.")
mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath
pod = makePod(ns, pvc.Name, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
pod, err = c.Core().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Pod exited without failure; the volume has been recycled.")
})
})
})


@@ -57,9 +57,10 @@ type pvcmap map[types.NamespacedName]pvcval
// },
// }
type persistentVolumeConfig struct {
pvSource v1.PersistentVolumeSource
prebind *v1.PersistentVolumeClaim
namePrefix string
pvSource v1.PersistentVolumeSource
prebind *v1.PersistentVolumeClaim
reclaimPolicy v1.PersistentVolumeReclaimPolicy
namePrefix string
}
// Delete the nfs-server pod. Only done once per KubeDescribe().
@@ -117,48 +118,48 @@ func deletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns strin
}
}
// Delete the PVC and wait for the PV to become Available again. Validate that the PV
// has recycled (assumption here about reclaimPolicy). Caller tells this func which
// Delete the PVC and wait for the PV to enter its expected phase. Validate that the PV
// has been reclaimed (assumption here about reclaimPolicy). Caller tells this func which
// phase value to expect for the pv bound to the to-be-deleted claim.
func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expctPVPhase v1.PersistentVolumePhase) {
func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) {
pvname := pvc.Spec.VolumeName
framework.Logf("Deleting PVC %v to trigger recycling of PV %v", pvc.Name, pvname)
framework.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
deletePersistentVolumeClaim(c, pvc.Name, ns)
// Check that the PVC is really deleted.
pvc, err := c.Core().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
Expect(apierrs.IsNotFound(err)).To(BeTrue())
// Wait for the PV's phase to return to Available
framework.Logf("Waiting for recycling process to complete.")
err = framework.WaitForPersistentVolumePhase(expctPVPhase, c, pv.Name, 1*time.Second, 300*time.Second)
// Wait for the PV's phase to reach `expectPVPhase`
framework.Logf("Waiting for reclaim process to complete.")
err = framework.WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, 1*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred())
// examine the pv's ClaimRef and UID and compare to expected values
pv, err = c.Core().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
cr := pv.Spec.ClaimRef
if expctPVPhase == v1.VolumeAvailable {
if expectPVPhase == v1.VolumeAvailable {
if cr != nil { // may be ok if cr != nil
Expect(len(cr.UID)).To(BeZero())
Expect(cr.UID).To(BeEmpty())
}
} else if expctPVPhase == v1.VolumeBound {
} else if expectPVPhase == v1.VolumeBound {
Expect(cr).NotTo(BeNil())
Expect(len(cr.UID)).NotTo(BeZero())
Expect(cr.UID).NotTo(BeEmpty())
}
framework.Logf("PV %v now in %q phase", pv.Name, expctPVPhase)
framework.Logf("PV %v now in %q phase", pv.Name, expectPVPhase)
}
// Wraps deletePVCandValidatePV() by calling the function in a loop over the PV map. Only
// bound PVs are deleted. Validates that the claim was deleted and the PV is Available.
// bound PVs are deleted. Validates that the claim was deleted and the PV is in the relevant Phase (Released, Available,
// Bound).
// Note: if there are more claims than pvs then some of the remaining claims will bind to
// the just-made-available pvs.
func deletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols pvmap, claims pvcmap) {
func deletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols pvmap, claims pvcmap, expectPVPhase v1.PersistentVolumePhase) {
var boundPVs, deletedPVCs int
var expctPVPhase v1.PersistentVolumePhase
for pvName := range pvols {
pv, err := c.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{})
@@ -174,17 +175,7 @@ func deletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols pvmap,
Expect(found).To(BeTrue())
pvc, err := c.Core().PersistentVolumeClaims(ns).Get(cr.Name, metav1.GetOptions{})
Expect(apierrs.IsNotFound(err)).To(BeFalse())
// what Phase do we expect the PV that was bound to the claim to
// be in after that claim is deleted?
expctPVPhase = v1.VolumeAvailable
if len(claims) > len(pvols) {
// there are excess pvcs so expect the previously bound
// PV to become bound again
expctPVPhase = v1.VolumeBound
}
deletePVCandValidatePV(c, ns, pvc, pv, expctPVPhase)
deletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase)
delete(claims, pvcKey)
deletedPVCs++
}
@@ -453,24 +444,26 @@ func makePvcKey(ns, name string) types.NamespacedName {
// Returns a PV definition based on the nfs server IP. If the PVC is not nil
// then the PV is defined with a ClaimRef which includes the PVC's namespace.
// If the PVC is nil then the PV is not defined with a ClaimRef.
// If the PVC is nil then the PV is not defined with a ClaimRef. If no reclaimPolicy
// is assigned, assumes "Retain".
// Note: the passed-in claim does not have a name until it is created
// (instantiated) and thus the PV's ClaimRef cannot be completely filled-in in
// this func. Therefore, the ClaimRef's name is added later in
// createPVCPV.
func makePersistentVolume(pvConfig persistentVolumeConfig) *v1.PersistentVolume {
// Specs are expected to match this test's PersistentVolumeClaim
var claimRef *v1.ObjectReference
// If the reclaimPolicy is not provided, assume Retain
if pvConfig.reclaimPolicy == "" {
pvConfig.reclaimPolicy = v1.PersistentVolumeReclaimRetain
}
if pvConfig.prebind != nil {
claimRef = &v1.ObjectReference{
Name: pvConfig.prebind.Name,
Namespace: pvConfig.prebind.Namespace,
}
}
return &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
GenerateName: pvConfig.namePrefix,
@@ -479,7 +472,7 @@ func makePersistentVolume(pvConfig persistentVolumeConfig) *v1.PersistentVolume
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimRecycle,
PersistentVolumeReclaimPolicy: pvConfig.reclaimPolicy,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
},