diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index d3bcb4a33e5..cd3be6d87e1 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -4033,7 +4033,7 @@ func GetSigner(provider string) (ssh.Signer, error) {
 			return sshutil.MakePrivateKeySignerFromFile(keyfile)
 		}
 		return nil, fmt.Errorf("VAGRANT_SSH_KEY env variable should be provided")
-	case "local":
+	case "local", "vsphere":
 		keyfile = os.Getenv("LOCAL_SSH_KEY") // maybe?
 		if len(keyfile) == 0 {
 			keyfile = "id_rsa"
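The GetSigner change above is what unlocks the SSH-based disruptive checks for vSphere: the framework's SSH helper resolves its key through GetSigner(provider), and "vsphere" now shares the "local" lookup (LOCAL_SSH_KEY, falling back to id_rsa). A minimal sketch of that shared lookup, assuming the framework joins the key name against ~/.ssh as its other cases do; localKeyPath is a hypothetical name, not a framework helper:

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // localKeyPath mirrors the "local"/"vsphere" case body above: prefer
    // LOCAL_SSH_KEY, fall back to id_rsa, and resolve under ~/.ssh (assumed).
    func localKeyPath() string {
    	keyfile := os.Getenv("LOCAL_SSH_KEY")
    	if len(keyfile) == 0 {
    		keyfile = "id_rsa"
    	}
    	return filepath.Join(os.Getenv("HOME"), ".ssh", keyfile)
    }

    func main() {
    	// Prints e.g. /home/user/.ssh/id_rsa when LOCAL_SSH_KEY is unset.
    	fmt.Println(localKeyPath())
    }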
diff --git a/test/e2e/storage/persistent_volumes-disruptive.go b/test/e2e/storage/persistent_volumes-disruptive.go
index c54c9f791cb..fbe7ea0522f 100644
--- a/test/e2e/storage/persistent_volumes-disruptive.go
+++ b/test/e2e/storage/persistent_volumes-disruptive.go
@@ -134,7 +134,7 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Disruptive][Flaky]",
 	// to runTest.
 	disruptiveTestTable := []disruptiveTest{
 		{
-			testItStmt: "Should test that a file written to the mount before kubelet restart is stat-able after restart.",
+			testItStmt: "Should test that a file written to the mount before kubelet restart can be read after restart.",
 			runTest:    testKubeletRestartsAndRestoresMount,
 		},
 		{
@@ -159,16 +159,16 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Disruptive][Flaky]",
 func testKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
 	By("Writing to the volume.")
 	file := "/mnt/_SUCCESS"
-	_, err := podExec(clientPod, "touch "+file)
+	_, err := podExec(clientPod, fmt.Sprintf("touch %s", file))
 	Expect(err).NotTo(HaveOccurred())
 
 	By("Restarting kubelet")
 	kubeletCommand(kRestart, c, clientPod)
 
 	By("Testing that written file is accessible.")
-	_, err = podExec(clientPod, "cat "+file)
+	_, err = podExec(clientPod, fmt.Sprintf("cat %s", file))
 	Expect(err).NotTo(HaveOccurred())
-	framework.Logf("Pod %s detected %s after kubelet restart", clientPod.Name, file)
+	framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, file)
 }
 
 // testVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
@@ -178,7 +178,7 @@ func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framew
 	nodeIP = nodeIP + ":22"
 
 	By("Expecting the volume mount to be found.")
-	result, err := framework.SSH("mount | grep "+string(clientPod.UID), nodeIP, framework.TestContext.Provider)
+	result, err := framework.SSH(fmt.Sprintf("mount | grep %s", string(clientPod.UID)), nodeIP, framework.TestContext.Provider)
 	Expect(err).NotTo(HaveOccurred())
 	Expect(result.Code).To(BeZero())
 
@@ -188,11 +188,10 @@ func testVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framew
 	kubeletCommand(kStart, c, clientPod)
 
 	By("Expecting the volume mount not to be found.")
-	result, err = framework.SSH("mount | grep "+string(clientPod.UID), nodeIP, framework.TestContext.Provider)
+	result, err = framework.SSH(fmt.Sprintf("mount | grep %s", string(clientPod.UID)), nodeIP, framework.TestContext.Provider)
 	Expect(err).NotTo(HaveOccurred())
 	Expect(result.Code).NotTo(BeZero())
-
-	framework.Logf("Volume mount detected on pod and written file is readable post-restart.")
+	framework.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)
 }
 
 // initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed
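For context on the testItStmt rename in the hunk above: the disruptive suite is table-driven, so each disruptiveTest entry surfaces as its own ginkgo spec named by its testItStmt. A sketch of the dispatch loop inside the KubeDescribe body; the shape is inferred from the table entries in this diff, not quoted from the file:

    // Each entry becomes an It() spec. Passing test into the closure pins the
    // entry for that spec instead of letting all specs share the loop variable.
    for _, test := range disruptiveTestTable {
    	func(t disruptiveTest) {
    		It(t.testItStmt, func() {
    			t.runTest(c, f, clientPod, pvc, pv)
    		})
    	}(test)
    }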
diff --git a/test/e2e/storage/persistent_volumes-vsphere.go b/test/e2e/storage/persistent_volumes-vsphere.go
index 8475025c68e..8d6e7bd70ab 100644
--- a/test/e2e/storage/persistent_volumes-vsphere.go
+++ b/test/e2e/storage/persistent_volumes-vsphere.go
@@ -17,16 +17,17 @@ limitations under the License.
 package storage
 
 import (
+	"time"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
 	"k8s.io/kubernetes/test/e2e/framework"
-
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-	"k8s.io/apimachinery/pkg/labels"
 )
 
 // Testing configurations of a single PV/PVC pair attached to a vSphere Disk
@@ -144,7 +145,7 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
 		2. Delete POD, POD deletion should succeed.
 	*/
 
-	It("should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach", func() {
+	It("should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach", func() {
 		By("Deleting the Claim")
 		framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
 		pvc = nil
@@ -160,7 +161,7 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
 		1. Delete PV.
 		2. Delete POD, POD deletion should succeed.
 	*/
-	It("should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach", func() {
+	It("should test that deleting the PV before the pod does not cause pod deletion to fail on vsphere volume detach", func() {
 		By("Deleting the Persistent Volume")
 		framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
 		pv = nil
@@ -168,4 +169,49 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
 		By("Deleting the pod")
 		framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name)
 	})
+	/*
+		This test verifies that a volume mounted to a pod remains mounted after the kubelet restarts.
+
+		Steps:
+		1. Write to the volume.
+		2. Restart kubelet.
+		3. Verify that the written file is accessible after kubelet restart.
+	*/
+	It("should test that a file written to the vsphere volume mount before kubelet restart can be read after restart [Disruptive]", func() {
+		testKubeletRestartsAndRestoresMount(c, f, clientPod, pvc, pv)
+	})
+
+	/*
+		This test verifies that a volume mounted to a pod that is deleted while the kubelet is down
+		is unmounted when the kubelet returns.
+
+		Steps:
+		1. Verify the volume is mounted on the node.
+		2. Stop kubelet.
+		3. Delete the pod.
+		4. Start kubelet.
+		5. Verify that the volume mount is no longer found on the node.
+	*/
+	It("should test that a vsphere volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() {
+		testVolumeUnmountsFromDeletedPod(c, f, clientPod, pvc, pv)
+	})
+
+	/*
+		This test verifies that deleting the Namespace of a PVC and Pod causes the successful detach of the vsphere volume.
+
+		Steps:
+		1. Delete the Namespace.
+		2. Wait for the namespace to be deleted. (Namespace deletion should trigger deletion of the pods belonging to it.)
+		3. Verify that the volume is detached from the node.
+	*/
+	It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume", func() {
+		By("Deleting the Namespace")
+		err := c.CoreV1().Namespaces().Delete(ns, nil)
+		Expect(err).NotTo(HaveOccurred())
+
+		err = framework.WaitForNamespacesDeleted(c, []string{ns}, 3*time.Minute)
+		Expect(err).NotTo(HaveOccurred())
+
+		By("Verifying Persistent Disk detaches")
+		waitForVSphereDiskToDetach(vsp, volumePath, node)
+	})
 })
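waitForVSphereDiskToDetach is called above but not defined in this diff. Purely as an illustration of the polling shape such a helper usually takes: the function name, intervals, timeout, and the DiskIsAttached call below are assumptions, not the actual implementation:

    package storage

    import (
    	"time"

    	"k8s.io/apimachinery/pkg/types"
    	"k8s.io/apimachinery/pkg/util/wait"
    	vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
    )

    // waitForDetachSketch is a hypothetical stand-in: poll the vSphere cloud
    // provider until the disk is no longer attached to the node, or time out.
    func waitForDetachSketch(vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) error {
    	return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
    		attached, err := vsp.DiskIsAttached(volumePath, nodeName) // assumed provider method
    		if err != nil {
    			return false, err
    		}
    		return !attached, nil
    	})
    }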