Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-01 15:58:37 +00:00)
Overhauled pv e2e test to reflect common lifecycle and de-flake
This commit is contained in:
parent 8f4f682659
commit 9b28212eb6
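The overhauled test drives a PersistentVolume through the common create → bind → write → recycle lifecycle. For orientation only (this sketch is not part of the commit), a minimal, self-contained Go snippet of the phase ordering the test expects could look like the following; the phase strings mirror the api.VolumeAvailable / api.VolumeBound constants used in the diff below, and the sequence checker is purely illustrative.

package main

import "fmt"

// expectedLifecycle lists the PersistentVolume phases in the order the test
// expects to observe them: created and unclaimed, bound to a PVC, released
// when the PVC is deleted, then back to Available once the recycler finishes.
var expectedLifecycle = []string{
	"Available", // PV created, not yet claimed
	"Bound",     // PVC created and bound to the PV
	"Released",  // PVC deleted; recycler has not finished yet
	"Available", // recycle complete; PV is reusable
}

// verifyLifecycle reports whether an observed sequence of phases matches the
// expected create -> bind -> release -> recycle ordering.
func verifyLifecycle(observed []string) error {
	if len(observed) != len(expectedLifecycle) {
		return fmt.Errorf("expected %d phases, observed %d", len(expectedLifecycle), len(observed))
	}
	for i, phase := range observed {
		if phase != expectedLifecycle[i] {
			return fmt.Errorf("step %d: expected phase %q, observed %q", i, expectedLifecycle[i], phase)
		}
	}
	return nil
}

func main() {
	if err := verifyLifecycle([]string{"Available", "Bound", "Released", "Available"}); err != nil {
		fmt.Println("lifecycle mismatch:", err)
		return
	}
	fmt.Println("PV lifecycle matches the expected create/bind/recycle ordering")
}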
@@ -22,6 +22,7 @@ import (
. "github.com/onsi/ginkgo"
"k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
@@ -29,72 +30,144 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
)
// Clean both server and client pods.
func persistentVolumeTestCleanup(client *client.Client, config VolumeTestConfig) {
// Delete the nfs-server pod.
func nfsServerPodCleanup(c *client.Client, config VolumeTestConfig) {
defer GinkgoRecover()

podClient := client.Pods(config.namespace)
podClient := c.Pods(config.namespace)

if config.serverImage != "" {
err := podClient.Delete(config.prefix+"-server", nil)
podName := config.prefix+"-server"
err := podClient.Delete(podName, nil)
if err != nil {
framework.Failf("Failed to delete the server pod: %v", err)
framework.Failf("Delete of %v pod failed: %v", podName, err)
}
}
}

// Delete the PV. Fail test if delete fails.
func deletePersistentVolume(c *client.Client, pv *api.PersistentVolume) {
// Delete the PersistentVolume
framework.Logf("Deleting PersistentVolume")
err := c.PersistentVolumes().Delete(pv.Name)
if err != nil {
framework.Failf("Delete PersistentVolume failed: %v", err)
framework.Failf("Delete PersistentVolume %v failed: %v", pv.Name, err)
}
// Wait for PersistentVolume to Delete
framework.WaitForPersistentVolumeDeleted(c, pv.Name, 3*time.Second, 30*time.Second)
}
// Test the pod's exitcode to be zero, delete the pod, wait for it to be deleted,
// and fail if these steps return an error.
func testPodSuccessOrFail(f *framework.Framework, c *client.Client, ns string, pod *api.Pod) {

By("Pod should terminate with exitcode 0 (success)")

err := framework.WaitForPodSuccessInNamespace(c, pod.Name, pod.Spec.Containers[0].Name, ns)
if err != nil {
framework.Failf("Pod %v returned non-zero exitcode: %+v", pod.Name, err)
}

framework.Logf("Deleting pod %v after it exited successfully", pod.Name)
err = c.Pods(ns).Delete(pod.Name, nil)
if err != nil {
framework.Failf("Pod %v exited successfully but failed to delete: %+v", pod.Name, err)
}

// Wait for pod to terminate
err = f.WaitForPodTerminated(pod.Name, "")
if err != nil && !apierrs.IsNotFound(err) {
framework.Failf("Pod %v has exitcode 0 but will not terminate: %v", pod.Name, err)
}
framework.Logf("Pod %v exited SUCCESSFULLY and was deleted", pod.Name)
}
var _ = framework.KubeDescribe("PersistentVolumes", func() {
f := framework.NewDefaultFramework("pv")
var c *client.Client
var ns string
var NFSconfig VolumeTestConfig
var serverIP string
var nfsServerPod *api.Pod
var checkPod *api.Pod
var pv *api.PersistentVolume
var pvc *api.PersistentVolumeClaim
var err error

// config for the nfs-server pod in the default namespace
NFSconfig = VolumeTestConfig{
namespace: api.NamespaceDefault,
prefix: "nfs",
serverImage: "gcr.io/google_containers/volume-nfs:0.6",
serverPorts: []int{2049},
}

BeforeEach(func() {
c = f.Client
ns = f.Namespace.Name

// If it doesn't exist, create the nfs server pod in "default" ns
if nfsServerPod == nil {
nfsServerPod = startVolumeServer(c, NFSconfig)
serverIP = nfsServerPod.Status.PodIP
framework.Logf("NFS server IP address: %v", serverIP)
}
})
It("should create a PersistentVolume, Claim, and a client Pod that will test the read/write access of the volume[Flaky]", func() {
|
||||
config := VolumeTestConfig{
|
||||
namespace: ns,
|
||||
prefix: "nfs",
|
||||
serverImage: "gcr.io/google_containers/volume-nfs:0.6",
|
||||
serverPorts: []int{2049},
|
||||
AfterEach(func() {
|
||||
if c != nil && len(ns) > 0 {
|
||||
if checkPod != nil {
|
||||
// Wait for checkpod to complete termination
|
||||
err = c.Pods(ns).Delete(checkPod.Name, nil)
|
||||
if err != nil {
|
||||
framework.Failf("AfterEach: pod %v delete ierror: %v", checkPod.Name, err)
|
||||
}
|
||||
checkPod = nil
|
||||
}
|
||||
|
||||
if pvc != nil {
|
||||
// Delete the PersistentVolumeClaim
|
||||
err = c.PersistentVolumeClaims(ns).Delete(pvc.Name)
|
||||
if err != nil && !apierrs.IsNotFound(err) {
|
||||
framework.Failf("AfterEach: delete of PersistentVolumeClaim %v experienced an unexpected error: %v", pvc.Name, err)
|
||||
}
|
||||
pvc = nil
|
||||
}
|
||||
if pv != nil {
|
||||
deletePersistentVolume(c, pv)
|
||||
pv = nil
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
defer func() {
persistentVolumeTestCleanup(c, config)
}()
// Execute after *all* the tests have run
AddCleanupAction(func() {
if nfsServerPod != nil && c != nil {
nfsServerPodCleanup(c, NFSconfig)
nfsServerPod = nil
}
})

// Create the nfs server pod
pod := startVolumeServer(c, config)
serverIP := pod.Status.PodIP
framework.Logf("NFS server IP address: %v", serverIP)

// Individual tests follow:
It("should create a PersistentVolume, Claim, and a client Pod that will test the read/write access of the volume", func() {
|
||||
|
||||
// Define the PersistentVolume and PersistentVolumeClaim
|
||||
pv := makePersistentVolume(serverIP)
|
||||
pvc := makePersistentVolumeClaim(ns)
|
||||
|
||||
// Create the PersistentVolume and wait for PersistentVolume.Status.Phase to be Available
|
||||
// defer deletion to clean up the PV should the test fail post-creation.
|
||||
By("Creating PV and PVC and waiting for Bound status")
|
||||
framework.Logf("Creating PersistentVolume")
|
||||
pv, err := c.PersistentVolumes().Create(pv)
|
||||
if err != nil {
|
||||
framework.Failf("Create PersistentVolume failed: %v", err)
|
||||
}
|
||||
defer deletePersistentVolume(c, pv)
|
||||
// Wait for PV to become Available.
|
||||
framework.WaitForPersistentVolumePhase(api.VolumeAvailable, c, pv.Name, 1*time.Second, 20*time.Second)
|
||||
|
||||
// Create the PersistentVolumeClaim and wait for Bound phase
|
||||
// Create the PersistentVolumeClaim and wait for Bound phase, can take several minutes.
|
||||
framework.Logf("Creating PersistentVolumeClaim")
|
||||
pvc, err = c.PersistentVolumeClaims(ns).Create(pvc)
|
||||
if err != nil {
|
||||
@@ -102,55 +175,46 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() {
}
framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)

// Wait for PersistentVolume.Status.Phase to be Bound. Can take several minutes.
// Wait for PersistentVolume.Status.Phase to be Bound, which it should already be since the PVC is bound.
err = framework.WaitForPersistentVolumePhase(api.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
if err != nil {
framework.Failf("PersistentVolume failed to enter a bound state: %+v", err)
framework.Failf("PersistentVolume failed to enter a bound state even though PVC is Bound: %+v", err)
}
// Check the PersistentVolume.ClaimRef.UID for non-nil value as confirmation of the bound state.

// Check the PersistentVolume.ClaimRef is valid and matches the PVC
framework.Logf("Checking PersistentVolume ClaimRef is non-nil")
pv, err = c.PersistentVolumes().Get(pv.Name)
if pv.Spec.ClaimRef == nil || len(pv.Spec.ClaimRef.UID) == 0 {
pvJson, _ := json.MarshalIndent(pv, "", " ")
framework.Failf("Expected PersistentVolume to be bound, but got nil ClaimRef or UID: %+v", string(pvJson))
if err != nil {
framework.Failf("Cannot re-get PersistentVolume %v: %v", pv.Name, err)
}
// Check the PersistentVolumeClaim.Status.Phase for Bound state
framework.Logf("Checking PersistentVolumeClaim status is Bound")
pvc, err = c.PersistentVolumeClaims(ns).Get(pvc.Name)
if pvcPhase := pvc.Status.Phase; pvcPhase != "Bound" {
framework.Failf("Expected PersistentVolumeClaim status Bound. Actual: %+v. Error: %+v", pvcPhase, err)
}

// Check that the PersistentVolume's ClaimRef contains the UID of the PersistentVolumeClaim
if pvc.ObjectMeta.UID != pv.Spec.ClaimRef.UID {
framework.Failf("Binding failed: PersistentVolumeClaim UID does not match PersistentVolume's ClaimRef UID.")
}
// writePod writes to the nfs volume
framework.Logf("Creating writePod")
pvc, _ = c.PersistentVolumeClaims(ns).Get(pvc.Name)
writePod := makeWritePod(ns, pvc.Name)
writePod, err = c.Pods(ns).Create(writePod)
if err != nil {
framework.Failf("Create writePod failed: %+v", err)
framework.Failf("Cannot re-get PersistentVolumeClaim %v: %v", pvc.Name, err)
}
if pv.Spec.ClaimRef == nil || pv.Spec.ClaimRef.UID != pvc.UID {
pvJson, _ := json.MarshalIndent(pv.Spec.ClaimRef, "", " ")
framework.Failf("Expected Bound PersistentVolume %v to have valid ClaimRef: %+v", pv.Name, string(pvJson))
}
// Wait for the writePod to complete its lifecycle
err = framework.WaitForPodSuccessInNamespace(c, writePod.Name, writePod.Spec.Containers[0].Name, writePod.Namespace)
// checkPod writes to the nfs volume
By("Checking pod has write access to PersistentVolume")
framework.Logf("Creating checkPod")
checkPod := makeWritePod(ns, pvc.Name)
checkPod, err = c.Pods(ns).Create(checkPod)
if err != nil {
framework.Failf("WritePod exited with error: %+v", err)
} else {
framework.Logf("WritePod exited without error.")
framework.Failf("Create checkPod failed: %+v", err)
}
// Wait for the checkPod to complete its lifecycle
testPodSuccessOrFail(f, c, ns, checkPod)
checkPod = nil
// Delete the PersistentVolumeClaim
By("Deleting PersistentVolumeClaim to trigger PV Recycling")
framework.Logf("Deleting PersistentVolumeClaim to trigger PV Recycling")
err = c.PersistentVolumeClaims(ns).Delete(pvc.Name)
if err != nil {
framework.Failf("Delete PersistentVolumeClaim failed: %v", err)
}

// Wait for the PersistentVolume phase to return to Available
framework.Logf("Waiting for recycling process to complete.")
err = framework.WaitForPersistentVolumePhase(api.VolumeAvailable, c, pv.Name, 3*time.Second, 300*time.Second)
@@ -158,13 +222,30 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() {
framework.Failf("Recycling failed: %v", err)
}

// Examine the PersistentVolume.ClaimRef and UID. Expect nil values.
// Examine the PersistentVolume.ClaimRef and UID. Expect nil values.
pv, err = c.PersistentVolumes().Get(pv.Name)
if pv.Spec.ClaimRef != nil && len(pv.Spec.ClaimRef.UID) > 0 {
crjson, _ := json.MarshalIndent(pv.Spec.ClaimRef, "", " ")
framework.Failf("Expected a nil ClaimRef or UID. Found: %v", string(crjson))
framework.Failf("Expected a nil pv.ClaimRef or empty UID. Found: %v", string(crjson))
}

// Delete the PersistentVolume
By("Deleting PersistentVolume")
deletePersistentVolume(c, pv)
})
It("should create another pod.... testing...", func() {
|
||||
checkPod = makeTestPod(ns, serverIP)
|
||||
checkPod, err = c.Pods(ns).Create(checkPod)
|
||||
if err != nil {
|
||||
framework.Failf("Error during testpod create: %v", err)
|
||||
}
|
||||
|
||||
// Wait for checkpod to complete it's lifecycle
|
||||
testPodSuccessOrFail(f, c, ns, checkPod)
|
||||
checkPod = nil // for AfterEach above
|
||||
|
||||
})
|
||||
})
|
||||
|
||||
@@ -264,3 +345,51 @@ func makeWritePod(ns string, pvcName string) *api.Pod {
},
}
}

func makeTestPod(ns string, nfsserver string) *api.Pod {
// Prepare pod that mounts the NFS volume again and
// checks that the volume can be written to via the mount.

var isPrivileged bool = true
return &api.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: testapi.Default.GroupVersion().String(),
},
ObjectMeta: api.ObjectMeta{
GenerateName: "test-pod-",
Namespace: ns,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "test-pod",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh"},
Args: []string{"-c", "touch /mnt/FOO && exit 0 || exit 1"},
VolumeMounts: []api.VolumeMount{
{
Name: "nfs-vol",
MountPath: "/mnt",
},
},
SecurityContext: &api.SecurityContext{
Privileged: &isPrivileged,
},
},
},
Volumes: []api.Volume{
{
Name: "nfs-vol",
VolumeSource: api.VolumeSource{
NFS: &api.NFSVolumeSource{
Server: nfsserver,
Path: "/",
},
},
},
},
},
}
}