Merge pull request #31732 from copejon/multi-pv-pvc-pair-testing

Automatic merge from submit-queue

Add multiple PV/PVC pair handling to persistent volume e2e test

Adds the framework for creating, validating, and deleting groups of PVs and PVCs.
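
A rough sketch of how the new helpers compose inside one of the multi PV/PVC It() blocks added below (condensed from the "2 PVs and 4 PVCs" case; all assertions happen inside the helpers via Gomega):

    // Condensed sketch of the "2 PVs and 4 PVCs" test added in this diff.
    pvols, claims := createPVsPVCs(2, 4, c, ns, serverIP) // create PVs/PVCs, tracked in maps
    waitAndVerifyBinds(c, ns, pvols, claims, true)        // expect min(#PVs, #PVCs) bindings
    completeMultiTest(f, c, ns, pvols, claims)            // writer pod per bound claim, then delete PVCs
    // The Context's AfterEach calls pvPvcCleanup(c, ns, pvols, claims) to remove leftovers.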
Kubernetes Submit Queue 2016-10-26 13:39:24 -07:00 committed by GitHub
commit 95542190db


@@ -17,22 +17,39 @@ limitations under the License.
package e2e

import (
    "fmt"
    "time"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"

    "k8s.io/kubernetes/pkg/api"
    apierrs "k8s.io/kubernetes/pkg/api/errors"
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/api/unversioned"
    "k8s.io/kubernetes/pkg/apimachinery/registered"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
    "k8s.io/kubernetes/test/e2e/framework"
)
// Map of all PVs used in the multi pv-pvc tests. The key is the PV's name, which is
// guaranteed to be unique. The value is {} (empty struct) since we're only interested
// in the PV's name and whether it is present. We must always Get the pv object before
// referencing any of its values, eg. its ClaimRef.
type pvval struct{}
type pvmap map[string]pvval

// Map of all PVCs used in the multi pv-pvc tests. The key is "namespace/pvc.Name". The
// value is {} (empty struct) since we're only interested in the PVC's name and whether
// it is present. We must always Get the pvc object before referencing any of its
// values, eg. its VolumeName.
// Note: it's unsafe to add keys to a map while iterating over it: whether a key added
// during iteration is produced by that iteration is unspecified, so the loop's
// behavior becomes unpredictable.
type pvcval struct{}
type pvcmap map[types.NamespacedName]pvcval

// Delete the nfs-server pod. Only done once per KubeDescription().
func nfsServerPodCleanup(c clientset.Interface, config VolumeTestConfig) {
    defer GinkgoRecover()
@@ -41,110 +58,142 @@ func nfsServerPodCleanup(c clientset.Interface, config VolumeTestConfig) {
    if config.serverImage != "" {
        podName := config.prefix + "-server"
        err := podClient.Delete(podName, nil)
        Expect(err).NotTo(HaveOccurred())
    }
}
// Clean up pvs and pvcs in multi-pv-pvc test cases. All entries found in the pv and
// claims maps are deleted.
// Note: this is the only code that deletes PV objects.
func pvPvcCleanup(c clientset.Interface, ns string, pvols pvmap, claims pvcmap) {

    if c != nil && len(ns) > 0 {
        for pvcKey := range claims {
            _, err := c.Core().PersistentVolumeClaims(pvcKey.Namespace).Get(pvcKey.Name)
            if !apierrs.IsNotFound(err) {
                Expect(err).NotTo(HaveOccurred())
                framework.Logf("  deleting PVC %v ...", pvcKey)
                err = c.Core().PersistentVolumeClaims(pvcKey.Namespace).Delete(pvcKey.Name, nil)
                Expect(err).NotTo(HaveOccurred())
                framework.Logf("  deleted PVC %v", pvcKey)
            }
            delete(claims, pvcKey)
        }

        for name := range pvols {
            _, err := c.Core().PersistentVolumes().Get(name)
            if !apierrs.IsNotFound(err) {
                Expect(err).NotTo(HaveOccurred())
                framework.Logf("  deleting PV %v ...", name)
                err = c.Core().PersistentVolumes().Delete(name, nil)
                Expect(err).NotTo(HaveOccurred())
                framework.Logf("  deleted PV %v", name)
            }
            delete(pvols, name)
        }
    }
}
// Delete the PVC and wait for the PV to become Available again. Validate that the PV
// has recycled (assumption here about reclaimPolicy). Caller tells this func which
// phase value to expect for the pv bound to the to-be-deleted claim.
func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *api.PersistentVolumeClaim, pv *api.PersistentVolume, expctPVPhase api.PersistentVolumePhase) {

    pvname := pvc.Spec.VolumeName
    framework.Logf("Deleting PVC %v to trigger recycling of PV %v", pvc.Name, pvname)
    err := c.Core().PersistentVolumeClaims(ns).Delete(pvc.Name, nil)
    Expect(err).NotTo(HaveOccurred())

    // Check that the PVC is really deleted.
    pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name)
    Expect(apierrs.IsNotFound(err)).To(BeTrue())

    // Wait for the PV's phase to return to the expected Phase
    framework.Logf("Waiting for recycling process to complete.")
    err = framework.WaitForPersistentVolumePhase(expctPVPhase, c, pv.Name, 1*time.Second, 300*time.Second)
    Expect(err).NotTo(HaveOccurred())

    // examine the pv's ClaimRef and UID and compare to expected values
    pv, err = c.Core().PersistentVolumes().Get(pv.Name)
    Expect(err).NotTo(HaveOccurred())
    cr := pv.Spec.ClaimRef
    if expctPVPhase == api.VolumeAvailable {
        if cr != nil { // may be ok if cr != nil
            Expect(len(cr.UID)).To(BeZero())
        }
    } else if expctPVPhase == api.VolumeBound {
        Expect(cr).NotTo(BeNil())
        Expect(len(cr.UID)).NotTo(BeZero())
    }

    framework.Logf("PV %v now in %q phase", pv.Name, expctPVPhase)
}
// Wraps deletePVCandValidatePV() by calling the function in a loop over the PV map. Only
// bound PVs are deleted. Validates that the claim was deleted and the PV is Available.
// Note: if there are more claims than pvs then some of the remaining claims will bind to
// the just-made-available pvs.
func deletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols pvmap, claims pvcmap) {

    var boundPVs, deletedPVCs int
    var expctPVPhase api.PersistentVolumePhase

    for pvName := range pvols {
        pv, err := c.Core().PersistentVolumes().Get(pvName)
        Expect(apierrs.IsNotFound(err)).To(BeFalse())
        cr := pv.Spec.ClaimRef
        // if pv is bound then delete the pvc it is bound to
        if cr != nil && len(cr.Name) > 0 {
            boundPVs++
            // Assert bound PVC is tracked in this test. Failing this might
            // indicate external PVCs interfering with the test.
            pvcKey := makePvcKey(ns, cr.Name)
            _, found := claims[pvcKey]
            Expect(found).To(BeTrue())
            pvc, err := c.Core().PersistentVolumeClaims(ns).Get(cr.Name)
            Expect(apierrs.IsNotFound(err)).To(BeFalse())

            // what Phase do we expect the PV that was bound to the claim to
            // be in after that claim is deleted?
            expctPVPhase = api.VolumeAvailable
            if len(claims) > len(pvols) {
                // there are excess pvcs so expect the previously bound
                // PV to become bound again
                expctPVPhase = api.VolumeBound
            }

            deletePVCandValidatePV(c, ns, pvc, pv, expctPVPhase)
            delete(claims, pvcKey)
            deletedPVCs++
        }
    }
    Expect(boundPVs).To(Equal(deletedPVCs))
}
// create the PV resource. Fails test on error.
func createPV(c clientset.Interface, pv *api.PersistentVolume) *api.PersistentVolume {

    pv, err := c.Core().PersistentVolumes().Create(pv)
    Expect(err).NotTo(HaveOccurred())
    return pv
}

// create the PVC resource. Fails test on error.
func createPVC(c clientset.Interface, ns string, pvc *api.PersistentVolumeClaim) *api.PersistentVolumeClaim {

    pvc, err := c.Core().PersistentVolumeClaims(ns).Create(pvc)
    Expect(err).NotTo(HaveOccurred())
    return pvc
}
// Create a PVC followed by the PV based on the passed in nfs-server ip and
// namespace. If the "preBind" bool is true then pre-bind the PV to the PVC
// via the PV's ClaimRef. Return the pv and pvc to reflect the created objects.
// Note: in the pre-bind case the real PVC name, which is generated, is not
// known until after the PVC is instantiated. This is why the pvc is created
// before the pv.
func createPVCPV(c clientset.Interface, serverIP, ns string, preBind bool) (*api.PersistentVolume, *api.PersistentVolumeClaim) {

    var bindTo *api.PersistentVolumeClaim
    var preBindMsg string

@@ -159,166 +208,187 @@ func createPVCPV(c clientset.Interface, serverIP, ns string, preBind bool) (*api
    pv := makePersistentVolume(serverIP, bindTo)

    By(fmt.Sprintf("Creating a PVC followed by a%s PV", preBindMsg))

    // instantiate the pvc
    pvc = createPVC(c, ns, pvc)

    // instantiate the pv, handle pre-binding by ClaimRef if needed
    if preBind {
        pv.Spec.ClaimRef.Name = pvc.Name
    }
    pv = createPV(c, pv)

    return pv, pvc
}
// Create a PV followed by the PVC based on the passed in nfs-server ip and
// namespace. If the "preBind" bool is true then pre-bind the PVC to the PV
// via the PVC's VolumeName. Return the pv and pvc to reflect the created
// objects.
// Note: in the pre-bind case the real PV name, which is generated, is not
// known until after the PV is instantiated. This is why the pv is created
// before the pvc.
func createPVPVC(c clientset.Interface, serverIP, ns string, preBind bool) (*api.PersistentVolume, *api.PersistentVolumeClaim) {

    preBindMsg := ""
    if preBind {
        preBindMsg = " pre-bound"
    }
    framework.Logf("Creating a PV followed by a%s PVC", preBindMsg)

    // make the pv and pvc definitions
    pv := makePersistentVolume(serverIP, nil)
    pvc := makePersistentVolumeClaim(ns)

    // instantiate the pv
    pv = createPV(c, pv)
    // instantiate the pvc, handle pre-binding by VolumeName if needed
    if preBind {
        pvc.Spec.VolumeName = pv.Name
    }
    pvc = createPVC(c, ns, pvc)

    return pv, pvc
}
// Create the desired number of PVs and PVCs and return them in separate maps. If the
// number of PVs != the number of PVCs then the min of those two counts is the number of
// PVs expected to bind.
func createPVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns, serverIP string) (pvmap, pvcmap) {

    var i int
    var pv *api.PersistentVolume
    var pvc *api.PersistentVolumeClaim
    pvMap := make(pvmap, numpvs)
    pvcMap := make(pvcmap, numpvcs)

    var extraPVs, extraPVCs int
    extraPVs = numpvs - numpvcs
    if extraPVs < 0 {
        extraPVCs = -extraPVs
        extraPVs = 0
    }
    pvsToCreate := numpvs - extraPVs // want the min(numpvs, numpvcs)

    // create pvs and pvcs
    for i = 0; i < pvsToCreate; i++ {
        pv, pvc = createPVPVC(c, serverIP, ns, false)
        pvMap[pv.Name] = pvval{}
        pvcMap[makePvcKey(ns, pvc.Name)] = pvcval{}
    }

    // create extra pvs or pvcs as needed
    for i = 0; i < extraPVs; i++ {
        pv = makePersistentVolume(serverIP, nil)
        pv = createPV(c, pv)
        pvMap[pv.Name] = pvval{}
    }
    for i = 0; i < extraPVCs; i++ {
        pvc = makePersistentVolumeClaim(ns)
        pvc = createPVC(c, ns, pvc)
        pvcMap[makePvcKey(ns, pvc.Name)] = pvcval{}
    }

    return pvMap, pvcMap
}
// Wait for the pv and pvc to bind to each other.
func waitOnPVandPVC(c clientset.Interface, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) {

    // Wait for newly created PVC to bind to the PV
    framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
    err := framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
    Expect(err).NotTo(HaveOccurred())

    // Wait for PersistentVolume.Status.Phase to be Bound, which it should be
    // since the PVC is already bound.
    err = framework.WaitForPersistentVolumePhase(api.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
    Expect(err).NotTo(HaveOccurred())

    // Re-get the pv and pvc objects
    pv, err = c.Core().PersistentVolumes().Get(pv.Name)
    Expect(err).NotTo(HaveOccurred())
    pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name)
    Expect(err).NotTo(HaveOccurred())

    // The pv and pvc are both bound, but to each other?
    // Check that the PersistentVolume.ClaimRef matches the PVC
    Expect(pv.Spec.ClaimRef).NotTo(BeNil())
    Expect(pv.Spec.ClaimRef.Name).To(Equal(pvc.Name))
    Expect(pvc.Spec.VolumeName).To(Equal(pv.Name))
    Expect(pv.Spec.ClaimRef.UID).To(Equal(pvc.UID))
}
// Search for bound PVs and PVCs by examining pvols for non-nil claimRefs.
// NOTE: Each iteration waits for a maximum of 3 minutes per PV and, if the PV is bound,
// up to 3 minutes for the PVC. When the number of PVs != number of PVCs, this can lead
// to situations where the maximum wait times are reached several times in succession,
// extending test time. Thus, it is recommended to keep the delta between PVs and PVCs
// small.
func waitAndVerifyBinds(c clientset.Interface, ns string, pvols pvmap, claims pvcmap, testExpected bool) {

    var actualBinds int
    expectedBinds := len(pvols)
    if expectedBinds > len(claims) { // want the min of # pvs or #pvcs
        expectedBinds = len(claims)
    }

    for pvName := range pvols {
        err := framework.WaitForPersistentVolumePhase(api.VolumeBound, c, pvName, 3*time.Second, 180*time.Second)
        if err != nil && len(pvols) > len(claims) {
            framework.Logf("WARN: pv %v is not bound after max wait", pvName)
            framework.Logf("      This may be ok since there are more pvs than pvcs")
            continue
        }
        Expect(err).NotTo(HaveOccurred())

        pv, err := c.Core().PersistentVolumes().Get(pvName)
        Expect(err).NotTo(HaveOccurred())
        if cr := pv.Spec.ClaimRef; cr != nil && len(cr.Name) > 0 {
            // Assert bound pvc is a test resource. Failing assertion could
            // indicate non-test PVC interference or a bug in the test
            pvcKey := makePvcKey(ns, cr.Name)
            _, found := claims[pvcKey]
            Expect(found).To(BeTrue())

            err = framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, cr.Name, 3*time.Second, 180*time.Second)
            Expect(err).NotTo(HaveOccurred())
            actualBinds++
        }
    }

    if testExpected {
        Expect(actualBinds).To(Equal(expectedBinds))
    }
}
// Test the pod's exit code to be zero.
func testPodSuccessOrFail(c clientset.Interface, ns string, pod *api.Pod) {

    By("Pod should terminate with exitcode 0 (success)")
    err := framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)
    Expect(err).NotTo(HaveOccurred())
    framework.Logf("Pod %v succeeded", pod.Name)
}
// Delete the passed in pod.
func deletePod(f *framework.Framework, c clientset.Interface, ns string, pod *api.Pod) {

    framework.Logf("Deleting pod %v", pod.Name)
    err := c.Core().Pods(ns).Delete(pod.Name, nil)
    Expect(err).NotTo(HaveOccurred())

    // Wait for pod to terminate. Expect apierr NotFound
    err = f.WaitForPodTerminated(pod.Name, "")
    Expect(err).To(HaveOccurred())
    Expect(apierrs.IsNotFound(err)).To(BeTrue())

    framework.Logf("Ignore \"not found\" error above. Pod %v successfully deleted", pod.Name)
}
// Create the test pod, wait for (hopefully) success, and then delete the pod.
func createWaitAndDeletePod(f *framework.Framework, c clientset.Interface, ns string, claimName string) {

    framework.Logf("Creating nfs test pod")

@@ -327,89 +397,70 @@ func createWaitAndDeletePod(f *framework.Framework, c clientset.Interface, ns st
    // Instantiate pod (Create)
    runPod, err := c.Core().Pods(ns).Create(pod)
    Expect(err).NotTo(HaveOccurred())
    Expect(runPod).NotTo(BeNil())

    defer deletePod(f, c, ns, runPod)

    // Wait for the test pod to complete its lifecycle
    testPodSuccessOrFail(c, ns, runPod)
}
// Validate PV/PVC, create and verify writer pod, delete the PVC, and validate the PV's
// phase. Note: the PV is deleted in the AfterEach, not here.
func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) {

    // 1. verify that the PV and PVC have bound correctly
    By("Validating the PV-PVC binding")
    waitOnPVandPVC(c, ns, pv, pvc)

    // 2. create the nfs writer pod, test if the write was successful,
    //    then delete the pod and verify that it was deleted
    By("Checking pod has write access to PersistentVolume")
    createWaitAndDeletePod(f, c, ns, pvc.Name)

    // 3. delete the PVC, wait for PV to become "Available"
    By("Deleting the PVC to invoke the recycler")
    deletePVCandValidatePV(c, ns, pvc, pv, api.VolumeAvailable)
}
// Validate pairs of PVs and PVCs, create and verify writer pod, delete PVC and validate
// PV. Ensure each step succeeds.
// Note: the PV is deleted in the AfterEach, not here.
// Note: this func is serialized; we wait for each pod to be deleted before creating the
//   next pod. Adding concurrency is a TODO item.
// Note: this func is called recursively when there are more claims than pvs.
func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, pvols pvmap, claims pvcmap) {

    // 1. verify each PV permits write access to a client pod
    By("Checking pod has write access to PersistentVolumes")
    for pvcKey := range claims {
        pvc, err := c.Core().PersistentVolumeClaims(pvcKey.Namespace).Get(pvcKey.Name)
        Expect(err).NotTo(HaveOccurred())
        if len(pvc.Spec.VolumeName) == 0 {
            continue // claim is not bound
        }
        // sanity test to ensure our maps are in sync
        _, found := pvols[pvc.Spec.VolumeName]
        Expect(found).To(BeTrue())
        // TODO: currently a serialized test of each PV
        createWaitAndDeletePod(f, c, pvcKey.Namespace, pvcKey.Name)
    }

    // 2. delete each PVC, wait for its bound PV to become "Available"
    By("Deleting PVCs to invoke recycler")
    deletePVCandValidatePVGroup(c, ns, pvols, claims)
}
var _ = framework.KubeDescribe("PersistentVolumes", func() {

    // global vars for the Context()s and It()'s below
    f := framework.NewDefaultFramework("pv")
    var c clientset.Interface
    var ns string
    var NFSconfig VolumeTestConfig
    var serverIP string
    var nfsServerPod *api.Pod

    // config for the nfs-server pod in the default namespace
    NFSconfig = VolumeTestConfig{
@@ -424,9 +475,9 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() {
        c = f.ClientSet
        ns = f.Namespace.Name

        // If it doesn't exist, create the nfs server pod in the "default" ns.
        // The "default" ns is used so that individual tests can delete their
        // ns without impacting the nfs-server pod.
        if nfsServerPod == nil {
            nfsServerPod = startVolumeServer(c, NFSconfig)
            serverIP = nfsServerPod.Status.PodIP

@@ -434,112 +485,141 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() {
        }
    })
    // Execute after *all* the tests have run
    AddCleanupAction(func() {
        if nfsServerPod != nil && c != nil {
            framework.Logf("AfterSuite: deleting nfs-server pod: %v", nfsServerPod.Name)
            nfsServerPodCleanup(c, NFSconfig)
            nfsServerPod = nil
        }
    })
Context("with Single PV - PVC pairs", func() {
var pv *api.PersistentVolume
var pvc *api.PersistentVolumeClaim
// Note: this is the only code where the pv is deleted.
AfterEach(func() {
if c != nil && len(ns) > 0 {
if pvc != nil && len(pvc.Name) > 0 {
_, err := c.Core().PersistentVolumeClaims(ns).Get(pvc.Name)
if !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
framework.Logf("AfterEach: deleting PVC %v", pvc.Name)
err = c.Core().PersistentVolumeClaims(ns).Delete(pvc.Name, nil)
Expect(err).NotTo(HaveOccurred())
framework.Logf("AfterEach: deleted PVC %v", pvc.Name)
}
}
pvc = nil
if pv != nil && len(pv.Name) > 0 {
_, err := c.Core().PersistentVolumes().Get(pv.Name)
if !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
framework.Logf("AfterEach: deleting PV %v", pv.Name)
err := c.Core().PersistentVolumes().Delete(pv.Name, nil)
Expect(err).NotTo(HaveOccurred())
framework.Logf("AfterEach: deleted PV %v", pv.Name)
}
}
pv = nil
}
})
        // Individual tests follow:
        //
        // Create an nfs PV, then a claim that matches the PV, and a pod that
        // contains the claim. Verify that the PV and PVC bind correctly, and
        // that the pod can write to the nfs volume.
        It("should create a non-pre-bound PV and PVC: test write access [Flaky]", func() {
            pv, pvc = createPVPVC(c, serverIP, ns, false)
            completeTest(f, c, ns, pv, pvc)
        })

        // Create a claim first, then a nfs PV that matches the claim, and a
        // pod that contains the claim. Verify that the PV and PVC bind
        // correctly, and that the pod can write to the nfs volume.
        It("create a PVC and non-pre-bound PV: test write access [Flaky]", func() {
            pv, pvc = createPVCPV(c, serverIP, ns, false)
            completeTest(f, c, ns, pv, pvc)
        })

        // Create a claim first, then a pre-bound nfs PV that matches the claim,
        // and a pod that contains the claim. Verify that the PV and PVC bind
        // correctly, and that the pod can write to the nfs volume.
        It("create a PVC and a pre-bound PV: test write access [Flaky]", func() {
            pv, pvc = createPVCPV(c, serverIP, ns, true)
            completeTest(f, c, ns, pv, pvc)
        })

        // Create a nfs PV first, then a pre-bound PVC that matches the PV,
        // and a pod that contains the claim. Verify that the PV and PVC bind
        // correctly, and that the pod can write to the nfs volume.
        It("create a PV and a pre-bound PVC: test write access [Flaky]", func() {
            pv, pvc = createPVPVC(c, serverIP, ns, true)
            completeTest(f, c, ns, pv, pvc)
        })
    })
    // Create multiple pvs and pvcs, all in the same namespace. The PVs-PVCs are
    // verified to bind, though it's not known in advance which PV will bind to
    // which claim. For each pv-pvc pair create a pod that writes to the nfs mount.
    // Note: when the number of PVs exceeds the number of PVCs the max binding wait
    //   time will occur for each PV in excess. This is expected but the delta
    //   should be kept small so that the tests aren't unnecessarily slow.
    // Note: future tests may wish to incorporate the following:
    //   a) pre-binding, b) create pvcs before pvs, c) create pvcs and pods
    //   in different namespaces.
    Context("with multiple PVs and PVCs all in same ns", func() {

        // define the maximum number of PVs and PVCs supported by these tests
        const maxNumPVs = 10
        const maxNumPVCs = 10
        // create the pv and pvc maps to be reused in the It blocks
        pvols := make(pvmap, maxNumPVs)
        claims := make(pvcmap, maxNumPVCs)

        AfterEach(func() {
            framework.Logf("AfterEach: deleting %v PVCs and %v PVs...", len(claims), len(pvols))
            pvPvcCleanup(c, ns, pvols, claims)
        })

        // Create 2 PVs and 4 PVCs.
        // Note: PVs are created before claims and no pre-binding
        It("should create 2 PVs and 4 PVCs: test write access[Flaky]", func() {
            numPVs, numPVCs := 2, 4
            pvols, claims = createPVsPVCs(numPVs, numPVCs, c, ns, serverIP)
            waitAndVerifyBinds(c, ns, pvols, claims, true)
            completeMultiTest(f, c, ns, pvols, claims)
        })

        // Create 3 PVs and 3 PVCs.
        // Note: PVs are created before claims and no pre-binding
        It("should create 3 PVs and 3 PVCs: test write access[Flaky]", func() {
            numPVs, numPVCs := 3, 3
            pvols, claims = createPVsPVCs(numPVs, numPVCs, c, ns, serverIP)
            waitAndVerifyBinds(c, ns, pvols, claims, true)
            completeMultiTest(f, c, ns, pvols, claims)
        })

        // Create 4 PVs and 2 PVCs.
        // Note: PVs are created before claims and no pre-binding.
        It("should create 4 PVs and 2 PVCs: test write access[Flaky]", func() {
            numPVs, numPVCs := 4, 2
            pvols, claims = createPVsPVCs(numPVs, numPVCs, c, ns, serverIP)
            waitAndVerifyBinds(c, ns, pvols, claims, true)
            completeMultiTest(f, c, ns, pvols, claims)
        })
    })
})
// Return a pvckey struct.
func makePvcKey(ns, name string) types.NamespacedName {
    return types.NamespacedName{Namespace: ns, Name: name}
}

// Returns a PV definition based on the nfs server IP. If the PVC is not nil
// then the PV is defined with a ClaimRef which includes the PVC's namespace.
// If the PVC is nil then the PV is not defined with a ClaimRef.