From 6c20b6766203568fa5ac602b252f3906bbb75061 Mon Sep 17 00:00:00 2001
From: divyenpatel
Date: Fri, 31 Mar 2017 10:15:23 -0700
Subject: [PATCH] addressed jeffvance's review comments

---
 test/e2e/framework/pv_util.go             | 190 ++++++++++++----------
 test/e2e/persistent_volumes-disruptive.go |   2 +-
 test/e2e/persistent_volumes.go            |   9 +-
 test/e2e/upgrades/persistent_volumes.go   |   2 +-
 test/e2e/vsphere_utils.go                 |  21 ++-
 test/e2e/vsphere_volume_ops_storm.go      |  86 +---------
 6 files changed, 130 insertions(+), 180 deletions(-)

diff --git a/test/e2e/framework/pv_util.go b/test/e2e/framework/pv_util.go
index a6c7029655d..e58c9b4c4e5 100644
--- a/test/e2e/framework/pv_util.go
+++ b/test/e2e/framework/pv_util.go
@@ -422,25 +422,6 @@ func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) {
 	Logf("Ignore \"not found\" error above. Pod %v successfully deleted", pod.Name)
 }
 
-// Create the test pod, wait for (hopefully) success, and then delete the pod.
-func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, claimName string) {
-
-	Logf("Creating nfs test pod")
-
-	// Make pod spec
-	pod := MakeWritePod(ns, claimName)
-
-	// Instantiate pod (Create)
-	runPod, err := c.CoreV1().Pods(ns).Create(pod)
-	Expect(err).NotTo(HaveOccurred())
-	Expect(runPod).NotTo(BeNil())
-
-	defer DeletePodWithWait(f, c, runPod)
-
-	// Wait for the test pod to complete its lifecycle
-	testPodSuccessOrFail(c, ns, runPod)
-}
-
 // Sanity check for GCE testing. Verify the persistent disk attached to the node.
 func VerifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
 	gceCloud, err := GetGCECloud()
@@ -532,75 +513,6 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
 	}
 }
 
-// Returns a pod definition based on the namespace. The pod references the PVC's
-// name.
-func MakeWritePod(ns string, pvcName string) *v1.Pod {
-	return MakePod(ns, pvcName, true, "touch /mnt/SUCCESS && (id -G | grep -E '\\b777\\b')")
-}
-
-// Returns a pod definition based on the namespace. The pod references the PVC's
-// name. A slice of BASH commands can be supplied as args to be run by the pod
-func MakePod(ns string, pvcName string, isPrivileged bool, command string) *v1.Pod {
-
-	if len(command) == 0 {
-		command = "while true; do sleep 1; done"
-	}
-	return &v1.Pod{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "Pod",
-			APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			GenerateName: "pvc-tester-",
-			Namespace:    ns,
-		},
-		Spec: v1.PodSpec{
-			Containers: []v1.Container{
-				{
-					Name:    "write-pod",
-					Image:   "gcr.io/google_containers/busybox:1.24",
-					Command: []string{"/bin/sh"},
-					Args:    []string{"-c", command},
-					VolumeMounts: []v1.VolumeMount{
-						{
-							Name:      pvcName,
-							MountPath: "/mnt",
-						},
-					},
-					SecurityContext: &v1.SecurityContext{
-						Privileged: &isPrivileged,
-					},
-				},
-			},
-			RestartPolicy: v1.RestartPolicyOnFailure,
-			Volumes: []v1.Volume{
-				{
-					Name: pvcName,
-					VolumeSource: v1.VolumeSource{
-						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: pvcName,
-						},
-					},
-				},
-			},
-		},
-	}
-}
-
-// Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
-func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
-	clientPod := MakePod(ns, pvc.Name, true, "")
-	clientPod, err := c.CoreV1().Pods(ns).Create(clientPod)
-	Expect(err).NotTo(HaveOccurred())
-
-	// Verify the pod is running before returning it
-	err = WaitForPodRunningInNamespace(c, clientPod)
-	Expect(err).NotTo(HaveOccurred())
-	clientPod, err = c.CoreV1().Pods(ns).Get(clientPod.Name, metav1.GetOptions{})
-	Expect(apierrs.IsNotFound(err)).To(BeFalse())
-	return clientPod
-}
-
 func CreatePDWithRetry() (string, error) {
 	newDiskName := ""
 	var err error
@@ -703,3 +615,105 @@ func deletePD(pdName string) error {
 		return fmt.Errorf("Provider does not support volume deletion")
 	}
 }
+
+// Create the test pod, wait for (hopefully) success, and then delete the pod.
+func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) {
+	Logf("Creating nfs test pod")
+	// Make pod spec
+	pod := MakeWritePod(ns, pvc)
+
+	// Instantiate pod (Create)
+	runPod, err := c.CoreV1().Pods(ns).Create(pod)
+	Expect(err).NotTo(HaveOccurred())
+	Expect(runPod).NotTo(BeNil())
+
+	defer DeletePodWithWait(f, c, runPod)
+
+	// Wait for the test pod to complete its lifecycle
+	testPodSuccessOrFail(c, ns, runPod)
+}
+
+// Returns a pod definition based on the namespace. The pod references the PVC's
+// name.
+func MakeWritePod(ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
+	return MakePod(ns, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
+}
+
+// Returns a pod definition based on the namespace. The pod references the PVCs by
+// name. An optional shell command can be supplied; it is run in the pod via /bin/sh -c.
+func MakePod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
+	if len(command) == 0 {
+		command = "while true; do sleep 1; done"
+	}
+	podSpec := &v1.Pod{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Pod",
+			APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: "pvc-tester-",
+			Namespace:    ns,
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name:    "write-pod",
+					Image:   "gcr.io/google_containers/busybox:1.24",
+					Command: []string{"/bin/sh"},
+					Args:    []string{"-c", command},
+					SecurityContext: &v1.SecurityContext{
+						Privileged: &isPrivileged,
+					},
+				},
+			},
+			RestartPolicy: v1.RestartPolicyOnFailure,
+		},
+	}
+	var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
+	var volumes = make([]v1.Volume, len(pvclaims))
+	for index, pvclaim := range pvclaims {
+		volumename := fmt.Sprintf("volume%v", index+1)
+		volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
+		volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
+	}
+	podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
+	podSpec.Spec.Volumes = volumes
+	return podSpec
+}
+
+// Create a pod with the given claims and wait for it to be running.
+func CreatePod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
+	podSpec := MakePod(namespace, pvclaims, isPrivileged, command)
+	pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
+	Expect(err).NotTo(HaveOccurred())
+
+	// Waiting for pod to be running
+	Expect(WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
+
+	// get fresh pod info
+	pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred())
+	return pod
+}
+
+// Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
+func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
+	return CreatePod(c, ns, []*v1.PersistentVolumeClaim{pvc}, true, "")
+}
+
+// Wait until all PVCs' phases are set to Bound and return the bound PVs.
+func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim) []*v1.PersistentVolume {
+	var persistentvolumes = make([]*v1.PersistentVolume, len(pvclaims))
+	for index, claim := range pvclaims {
+		err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, Poll, ClaimProvisionTimeout)
+		Expect(err).NotTo(HaveOccurred())
+		// Get new copy of the claim
+		claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
+		Expect(err).NotTo(HaveOccurred())
+
+		// Get the bound PV
+		persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
+		Expect(err).NotTo(HaveOccurred())
+	}
+	return persistentvolumes
+}
diff --git a/test/e2e/persistent_volumes-disruptive.go b/test/e2e/persistent_volumes-disruptive.go
index adefdd5c62c..f457081ff5e 100644
--- a/test/e2e/persistent_volumes-disruptive.go
+++ b/test/e2e/persistent_volumes-disruptive.go
@@ -202,7 +202,7 @@ func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framew
 // initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed by the test
 func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framework.PersistentVolumeConfig, pvcConfig framework.PersistentVolumeClaimConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
 	pv, pvc := framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
-	pod := framework.MakePod(ns, pvc.Name, true, "")
+	pod := framework.MakePod(ns, []*v1.PersistentVolumeClaim{pvc}, true, "")
 	pod.Spec.NodeName = nodeName
 	framework.Logf("Creating nfs client Pod %s on node %s", pod.Name, nodeName)
 	pod, err := c.CoreV1().Pods(ns).Create(pod)
diff --git a/test/e2e/persistent_volumes.go b/test/e2e/persistent_volumes.go
index 950791ab876..7ee8a8a3933 100644
--- a/test/e2e/persistent_volumes.go
+++ b/test/e2e/persistent_volumes.go
@@ -40,7 +40,7 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *
 	// 2. create the nfs writer pod, test if the write was successful,
 	//    then delete the pod and verify that it was deleted
 	By("Checking pod has write access to PersistentVolume")
-	framework.CreateWaitAndDeletePod(f, c, ns, pvc.Name)
+	framework.CreateWaitAndDeletePod(f, c, ns, pvc)
 
 	// 3. delete the PVC, wait for PV to become "Released"
 	By("Deleting the PVC to invoke the reclaim policy.")
@@ -66,7 +66,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
 		_, found := pvols[pvc.Spec.VolumeName]
 		Expect(found).To(BeTrue())
 		// TODO: currently a serialized test of each PV
-		framework.CreateWaitAndDeletePod(f, c, pvcKey.Namespace, pvcKey.Name)
+		framework.CreateWaitAndDeletePod(f, c, pvcKey.Namespace, pvc)
 	}
 
 	// 2. delete each PVC, wait for its bound PV to reach `expectedPhase`
@@ -262,7 +262,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume]", func() {
 		// (and test) succeed.
 		It("should test that a PV becomes Available and is clean after the PVC is deleted. [Volume] [Flaky]", func() {
 			By("Writing to the volume.")
-			pod := framework.MakeWritePod(ns, pvc.Name)
+			pod := framework.MakeWritePod(ns, pvc)
 			pod, err := c.CoreV1().Pods(ns).Create(pod)
 			Expect(err).NotTo(HaveOccurred())
 			err = framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)
@@ -279,8 +279,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume]", func() {
 			// If a file is detected in /mnt, fail the pod and do not restart it.
 			By("Verifying the mount has been cleaned.")
 			mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath
-			pod = framework.MakePod(ns, pvc.Name, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
-
+			pod = framework.MakePod(ns, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
 			pod, err = c.CoreV1().Pods(ns).Create(pod)
 			Expect(err).NotTo(HaveOccurred())
 			err = framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)
diff --git a/test/e2e/upgrades/persistent_volumes.go b/test/e2e/upgrades/persistent_volumes.go
index 58a9a361491..337ab307eb9 100644
--- a/test/e2e/upgrades/persistent_volumes.go
+++ b/test/e2e/upgrades/persistent_volumes.go
@@ -99,7 +99,7 @@ func (t *PersistentVolumeUpgradeTest) Teardown(f *framework.Framework) {
 
 // testPod creates a pod that consumes a pv and prints it out. The output is then verified.
 func (t *PersistentVolumeUpgradeTest) testPod(f *framework.Framework, cmd string) {
-	pod := framework.MakePod(f.Namespace.Name, t.pvc.Name, false, cmd)
+	pod := framework.CreatePod(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{t.pvc}, false, cmd)
 	expectedOutput := []string{pvTestData}
 	f.TestContainerOutput("pod consumes pv", pod, 0, expectedOutput)
 }
diff --git a/test/e2e/vsphere_utils.go b/test/e2e/vsphere_utils.go
index fba93f92e10..deeb2e727db 100644
--- a/test/e2e/vsphere_utils.go
+++ b/test/e2e/vsphere_utils.go
@@ -17,15 +17,16 @@ limitations under the License.
 package e2e
 
 import (
+	"fmt"
+	"path/filepath"
 	"strconv"
 	"time"
 
-	"fmt"
-
 	. "github.com/onsi/gomega"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	k8stype "k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/api/v1"
@@ -332,3 +333,19 @@ func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths
 		Expect(err).NotTo(HaveOccurred())
 	}
 }
+
+// Verify that the volumes are attached to the node and are accessible from the pod.
+func verifyVSphereVolumesAccessible(pod *v1.Pod, persistentvolumes []*v1.PersistentVolume, vsp *vsphere.VSphere) {
+	nodeName := pod.Spec.NodeName
+	namespace := pod.Namespace
+	for index, pv := range persistentvolumes {
+		// Verify disks are attached to the node
+		isAttached, err := verifyVSphereDiskAttached(vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(nodeName))
+		Expect(err).NotTo(HaveOccurred())
+		Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached to the node", pv.Spec.VsphereVolume.VolumePath))
+		// Verify Volumes are accessible
+		filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
+		_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute)
+		Expect(err).NotTo(HaveOccurred())
+	}
+}
diff --git a/test/e2e/vsphere_volume_ops_storm.go b/test/e2e/vsphere_volume_ops_storm.go
index 18a2b07fd7a..641d0a58b5f 100644
--- a/test/e2e/vsphere_volume_ops_storm.go
+++ b/test/e2e/vsphere_volume_ops_storm.go
@@ -19,13 +19,10 @@ package e2e
 import (
 	"fmt"
 	"os"
-	"path/filepath"
 	"strconv"
-	"time"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	k8stype "k8s.io/apimachinery/pkg/types"
 	"k8s.io/kubernetes/pkg/api/v1"
 	storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
@@ -107,13 +104,13 @@ var _ = framework.KubeDescribe("vsphere volume operations storm [Volume]", func(
 		}
 
 		By("Waiting for all claims to be in bound phase")
-		persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims)
+		persistentvolumes = framework.WaitForPVClaimBoundPhase(client, pvclaims)
 
 		By("Creating pod to attach PVs to the node")
-		pod := createPod(client, namespace, pvclaims)
+		pod := framework.CreatePod(client, namespace, pvclaims, false, "")
 
 		By("Verify all volumes are accessible and available in the pod")
-		verifyVolumesAccessible(pod, persistentvolumes, vsp)
+		verifyVSphereVolumesAccessible(pod, persistentvolumes, vsp)
 
 		By("Deleting pod")
 		framework.DeletePodWithWait(f, client, pod)
@@ -124,80 +121,3 @@ var _ = framework.KubeDescribe("vsphere volume operations storm [Volume]", func(
 		}
 	})
 })
-
-// create pod with given claims
-func createPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim) *v1.Pod {
-	podSpec := &v1.Pod{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "Pod",
-			APIVersion: "v1",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			GenerateName: "pod-many-volumes-",
-		},
-		Spec: v1.PodSpec{
-			Containers: []v1.Container{
-				{
-					Name:    "volume-tester",
-					Image:   "gcr.io/google_containers/busybox:1.24",
-					Command: []string{"/bin/sh"},
-					Args:    []string{"-c", "while true ; do sleep 2 ; done"},
-				},
-			},
-			RestartPolicy: v1.RestartPolicyNever,
-		},
-	}
-	var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
-	var volumes = make([]v1.Volume, len(pvclaims))
-	for index, pvclaim := range pvclaims {
-		volumename := fmt.Sprintf("volume%v", index+1)
-		volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
-		volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
-	}
-	podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
-	podSpec.Spec.Volumes = volumes
-
-	pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
-	Expect(err).NotTo(HaveOccurred())
-
-	// Waiting for pod to be running
-	Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
-
-	// get fresh pod info
-	pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred())
-	return pod
-}
-
-// verify volumes are attached to the node and are accessible in pod
-func verifyVolumesAccessible(pod *v1.Pod, persistentvolumes []*v1.PersistentVolume, vsp *vsphere.VSphere) {
-	nodeName := pod.Spec.NodeName
-	namespace := pod.Namespace
-	for index, pv := range persistentvolumes {
-		// Verify disks are attached to the node
-		isAttached, err := verifyVSphereDiskAttached(vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(nodeName))
-		Expect(err).NotTo(HaveOccurred())
-		Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
-		// Verify Volumes are accessible
-		filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
-		_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute)
-		Expect(err).NotTo(HaveOccurred())
-	}
-}
-
-// wait until all pvcs phase set to bound
-func waitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim) []*v1.PersistentVolume {
-	var persistentvolumes = make([]*v1.PersistentVolume, len(pvclaims))
-	for index, claim := range pvclaims {
-		err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
-		Expect(err).NotTo(HaveOccurred())
-		// Get new copy of the claim
-		claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
-
-		// Get the bounded PV
-		persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
-	}
-	return persistentvolumes
-}
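
For illustration only (this block is not part of the patch): a minimal sketch of how an e2e spec might use the helpers relocated into the framework above, assuming the usual e2e imports and that `f`, `client`, `namespace`, and `pvclaims` are already set up by the surrounding test. The wrapper function name `exampleMultiVolumePod` is hypothetical.

// exampleMultiVolumePod shows the intended flow of the new framework helpers:
// wait for the claims to bind, run a pod that mounts all of them, then clean up.
func exampleMultiVolumePod(f *framework.Framework, client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim) {
	// Wait until every claim is Bound and collect the backing PVs.
	persistentvolumes := framework.WaitForPVClaimBoundPhase(client, pvclaims)
	_ = persistentvolumes // the PVs would typically be verified by the test

	// Create a pod that mounts claim i at /mnt/volume<i+1> (non-privileged,
	// default sleep-loop command) and wait until it is Running.
	pod := framework.CreatePod(client, namespace, pvclaims, false, "")

	// ... exercise the volumes here, e.g. write a file under each mount path ...

	// Delete the pod and wait for it to disappear.
	framework.DeletePodWithWait(f, client, pod)
}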