addressed jeffvance's review comments

divyenpatel 2017-03-31 10:15:23 -07:00
parent 7adf38094d
commit 6c20b67662
6 changed files with 130 additions and 180 deletions

View File

@@ -422,25 +422,6 @@ func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) {
Logf("Ignore \"not found\" error above. Pod %v successfully deleted", pod.Name)
}
// Create the test pod, wait for (hopefully) success, and then delete the pod.
func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, claimName string) {
Logf("Creating nfs test pod")
// Make pod spec
pod := MakeWritePod(ns, claimName)
// Instantiate pod (Create)
runPod, err := c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
Expect(runPod).NotTo(BeNil())
defer DeletePodWithWait(f, c, runPod)
// Wait for the test pod to complete its lifecycle
testPodSuccessOrFail(c, ns, runPod)
}
// Sanity check for GCE testing. Verify the persistent disk attached to the node.
func VerifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
gceCloud, err := GetGCECloud()
@@ -532,75 +513,6 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
}
}
// Returns a pod definition based on the namespace. The pod references the PVC's
// name.
func MakeWritePod(ns string, pvcName string) *v1.Pod {
return MakePod(ns, pvcName, true, "touch /mnt/SUCCESS && (id -G | grep -E '\\b777\\b')")
}
// Returns a pod definition based on the namespace. The pod references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod
func MakePod(ns string, pvcName string, isPrivileged bool, command string) *v1.Pod {
if len(command) == 0 {
command = "while true; do sleep 1; done"
}
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-tester-",
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "write-pod",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
VolumeMounts: []v1.VolumeMount{
{
Name: pvcName,
MountPath: "/mnt",
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &isPrivileged,
},
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
Volumes: []v1.Volume{
{
Name: pvcName,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
},
},
},
},
},
}
}
// Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
clientPod := MakePod(ns, pvc.Name, true, "")
clientPod, err := c.CoreV1().Pods(ns).Create(clientPod)
Expect(err).NotTo(HaveOccurred())
// Verify the pod is running before returning it
err = WaitForPodRunningInNamespace(c, clientPod)
Expect(err).NotTo(HaveOccurred())
clientPod, err = c.CoreV1().Pods(ns).Get(clientPod.Name, metav1.GetOptions{})
Expect(apierrs.IsNotFound(err)).To(BeFalse())
return clientPod
}
func CreatePDWithRetry() (string, error) {
newDiskName := ""
var err error
@@ -703,3 +615,105 @@ func deletePD(pdName string) error {
return fmt.Errorf("Provider does not support volume deletion")
}
}
// Create the test pod, wait for (hopefully) success, and then delete the pod.
func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) {
Logf("Creating nfs test pod")
// Make pod spec
pod := MakeWritePod(ns, pvc)
// Instantiate pod (Create)
runPod, err := c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
Expect(runPod).NotTo(BeNil())
defer DeletePodWithWait(f, c, runPod)
// Wait for the test pod to complete its lifecycle
testPodSuccessOrFail(c, ns, runPod)
}
// Returns a pod definition based on the namespace. The pod references the PVC's
// name.
func MakeWritePod(ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
return MakePod(ns, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
}
// Returns a pod definition based on the namespace. The pod references each given
// PVC by name. A BASH command string can be supplied to be run by the pod's container.
func MakePod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
if len(command) == 0 {
command = "while true; do sleep 1; done"
}
podSpec := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-tester-",
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "write-pod",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
SecurityContext: &v1.SecurityContext{
Privileged: &isPrivileged,
},
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
},
}
var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
var volumes = make([]v1.Volume, len(pvclaims))
for index, pvclaim := range pvclaims {
volumename := fmt.Sprintf("volume%v", index+1)
volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
}
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
podSpec.Spec.Volumes = volumes
return podSpec
}
// Create a pod with the given claims mounted, wait for it to be running, and return a fresh copy of it.
func CreatePod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
podSpec := MakePod(namespace, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
Expect(err).NotTo(HaveOccurred())
// Waiting for pod to be running
Expect(WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
return pod
}
// Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
return CreatePod(c, ns, []*v1.PersistentVolumeClaim{pvc}, true, "")
}
// Wait until all PVCs reach the Bound phase and return the PVs they are bound to.
func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim) []*v1.PersistentVolume {
var persistentvolumes = make([]*v1.PersistentVolume, len(pvclaims))
for index, claim := range pvclaims {
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, Poll, ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
// Get a fresh copy of the claim
claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Get the bound PV
persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
}
return persistentvolumes
}
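As a reading aid (not part of this commit), a minimal sketch of how an e2e test could chain the relocated helpers; pvc1, pvc2, client, ns, and f are placeholders for objects the surrounding test already owns:
// Hypothetical usage of the helpers added above.
pvclaims := []*v1.PersistentVolumeClaim{pvc1, pvc2}
// Block until every claim is Bound and collect the backing PVs.
persistentvolumes := framework.WaitForPVClaimBoundPhase(client, pvclaims)
// Create a pod that mounts the claims at /mnt/volume1, /mnt/volume2, ...
pod := framework.CreatePod(client, ns, pvclaims, false, "")
defer framework.DeletePodWithWait(f, client, pod)
framework.Logf("pod %v is running with %v bound volumes", pod.Name, len(persistentvolumes))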

View File

@@ -202,7 +202,7 @@ func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framew
// initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed by the test
func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framework.PersistentVolumeConfig, pvcConfig framework.PersistentVolumeClaimConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
pv, pvc := framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
pod := framework.MakePod(ns, pvc.Name, true, "")
pod := framework.MakePod(ns, []*v1.PersistentVolumeClaim{pvc}, true, "")
pod.Spec.NodeName = nodeName
framework.Logf("Creating nfs client Pod %s on node %s", pod.Name, nodeName)
pod, err := c.CoreV1().Pods(ns).Create(pod)

View File

@@ -40,7 +40,7 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *
// 2. create the nfs writer pod, test if the write was successful,
// then delete the pod and verify that it was deleted
By("Checking pod has write access to PersistentVolume")
framework.CreateWaitAndDeletePod(f, c, ns, pvc.Name)
framework.CreateWaitAndDeletePod(f, c, ns, pvc)
// 3. delete the PVC, wait for PV to become "Released"
By("Deleting the PVC to invoke the reclaim policy.")
@@ -66,7 +66,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
_, found := pvols[pvc.Spec.VolumeName]
Expect(found).To(BeTrue())
// TODO: currently a serialized test of each PV
framework.CreateWaitAndDeletePod(f, c, pvcKey.Namespace, pvcKey.Name)
framework.CreateWaitAndDeletePod(f, c, pvcKey.Namespace, pvc)
}
// 2. delete each PVC, wait for its bound PV to reach `expectedPhase`
@@ -262,7 +262,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume]", func() {
// (and test) succeed.
It("should test that a PV becomes Available and is clean after the PVC is deleted. [Volume] [Flaky]", func() {
By("Writing to the volume.")
pod := framework.MakeWritePod(ns, pvc.Name)
pod := framework.MakeWritePod(ns, pvc)
pod, err := c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)
@@ -279,8 +279,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume]", func() {
// If a file is detected in /mnt, fail the pod and do not restart it.
By("Verifying the mount has been cleaned.")
mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath
pod = framework.MakePod(ns, pvc.Name, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
pod = framework.MakePod(ns, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)

View File

@@ -99,7 +99,7 @@ func (t *PersistentVolumeUpgradeTest) Teardown(f *framework.Framework) {
// testPod creates a pod that consumes a pv and prints it out. The output is then verified.
func (t *PersistentVolumeUpgradeTest) testPod(f *framework.Framework, cmd string) {
pod := framework.MakePod(f.Namespace.Name, t.pvc.Name, false, cmd)
pod := framework.CreatePod(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{t.pvc}, false, cmd)
expectedOutput := []string{pvTestData}
f.TestContainerOutput("pod consumes pv", pod, 0, expectedOutput)
}

View File

@@ -17,15 +17,16 @@ limitations under the License.
package e2e
import (
"fmt"
"path/filepath"
"strconv"
"time"
"fmt"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
k8stype "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
@@ -332,3 +333,19 @@ func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths
Expect(err).NotTo(HaveOccurred())
}
}
// Verify that volumes are attached to the node and are accessible in the pod
func verifyVSphereVolumesAccessible(pod *v1.Pod, persistentvolumes []*v1.PersistentVolume, vsp *vsphere.VSphere) {
nodeName := pod.Spec.NodeName
namespace := pod.Namespace
for index, pv := range persistentvolumes {
// Verify disks are attached to the node
isAttached, err := verifyVSphereDiskAttached(vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(nodeName))
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached to the node", pv.Spec.VsphereVolume.VolumePath))
// Verify Volumes are accessible
filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute)
Expect(err).NotTo(HaveOccurred())
}
}
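Worth noting (again, not part of the diff): this helper assumes the pod was built by the framework's MakePod/CreatePod above, since both hard-code the same /mnt/volumeN layout. A sketch of the coupling for the claim at position index; mountPath and probePath are illustrative names only:
// MakePod mounts pvclaims[index] at:
mountPath := fmt.Sprintf("/mnt/volume%v", index+1)
// and verifyVSphereVolumesAccessible probes the matching directory:
probePath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")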

View File

@@ -19,13 +19,10 @@ package e2e
import (
"fmt"
"os"
"path/filepath"
"strconv"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stype "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/api/v1"
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
@@ -107,13 +104,13 @@ var _ = framework.KubeDescribe("vsphere volume operations storm [Volume]", func(
}
By("Waiting for all claims to be in bound phase")
persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims)
persistentvolumes = framework.WaitForPVClaimBoundPhase(client, pvclaims)
By("Creating pod to attach PVs to the node")
pod := createPod(client, namespace, pvclaims)
pod := framework.CreatePod(client, namespace, pvclaims, false, "")
By("Verify all volumes are accessible and available in the pod")
verifyVolumesAccessible(pod, persistentvolumes, vsp)
verifyVSphereVolumesAccessible(pod, persistentvolumes, vsp)
By("Deleting pod")
framework.DeletePodWithWait(f, client, pod)
@@ -124,80 +121,3 @@ var _ = framework.KubeDescribe("vsphere volume operations storm [Volume]", func(
}
})
})
// create pod with given claims
func createPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim) *v1.Pod {
podSpec := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pod-many-volumes-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "volume-tester",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh"},
Args: []string{"-c", "while true ; do sleep 2 ; done"},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
var volumes = make([]v1.Volume, len(pvclaims))
for index, pvclaim := range pvclaims {
volumename := fmt.Sprintf("volume%v", index+1)
volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
}
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
podSpec.Spec.Volumes = volumes
pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
Expect(err).NotTo(HaveOccurred())
// Waiting for pod to be running
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
return pod
}
// verify volumes are attached to the node and are accessible in pod
func verifyVolumesAccessible(pod *v1.Pod, persistentvolumes []*v1.PersistentVolume, vsp *vsphere.VSphere) {
nodeName := pod.Spec.NodeName
namespace := pod.Namespace
for index, pv := range persistentvolumes {
// Verify disks are attached to the node
isAttached, err := verifyVSphereDiskAttached(vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(nodeName))
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
// Verify Volumes are accessible
filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute)
Expect(err).NotTo(HaveOccurred())
}
}
// wait until all pvcs phase set to bound
func waitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim) []*v1.PersistentVolume {
var persistentvolumes = make([]*v1.PersistentVolume, len(pvclaims))
for index, claim := range pvclaims {
err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
// Get new copy of the claim
claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Get the bounded PV
persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
}
return persistentvolumes
}