diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD
index 9823147d014..abbfa825bec 100644
--- a/test/e2e/storage/BUILD
+++ b/test/e2e/storage/BUILD
@@ -21,6 +21,7 @@ go_library(
         "volumes.go",
         "vsphere_utils.go",
         "vsphere_volume_diskformat.go",
+        "vsphere_volume_fstype.go",
         "vsphere_volume_ops_storm.go",
         "vsphere_volume_placement.go",
     ],
diff --git a/test/e2e/storage/persistent_volumes-vsphere.go b/test/e2e/storage/persistent_volumes-vsphere.go
index 8d6e7bd70ab..f8e72fc46de 100644
--- a/test/e2e/storage/persistent_volumes-vsphere.go
+++ b/test/e2e/storage/persistent_volumes-vsphere.go
@@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
 	*/
 	framework.AddCleanupAction(func() {
 		if len(volumePath) > 0 {
-			waitForVSphereDiskToDetach(vsp, volumePath, node)
+			framework.ExpectNoError(waitForVSphereDiskToDetach(vsp, volumePath, node))
 			vsp.DeleteVolume(volumePath)
 		}
 	})
diff --git a/test/e2e/storage/vsphere_utils.go b/test/e2e/storage/vsphere_utils.go
index 988c8eb7564..2171b7610ec 100644
--- a/test/e2e/storage/vsphere_utils.go
+++ b/test/e2e/storage/vsphere_utils.go
@@ -53,7 +53,7 @@ func verifyVSphereDiskAttached(vsp *vsphere.VSphere, volumePath string, nodeName
 }
 
 // Wait until vsphere vmdk is detached from the given node or time out after 5 minutes
-func waitForVSphereDiskToDetach(vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) {
+func waitForVSphereDiskToDetach(vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) error {
 	var (
 		err            error
 		diskAttached   = true
@@ -62,7 +62,9 @@ func waitForVSphereDiskToDetach(vsp *vsphere.VSphere, volumePath string, nodeNam
 	)
 	if vsp == nil {
 		vsp, err = vsphere.GetVSphere()
-		Expect(err).NotTo(HaveOccurred())
+		if err != nil {
+			return err
+		}
 	}
 	err = wait.Poll(detachPollTime, detachTimeout, func() (bool, error) {
 		diskAttached, err = verifyVSphereDiskAttached(vsp, volumePath, nodeName)
@@ -77,11 +79,13 @@ func waitForVSphereDiskToDetach(vsp *vsphere.VSphere, volumePath string, nodeNam
 		framework.Logf("Waiting for Volume %q to detach from %q.", volumePath, nodeName)
 		return false, nil
 	})
-	Expect(err).NotTo(HaveOccurred())
-	if diskAttached {
-		Expect(fmt.Errorf("Gave up waiting for Volume %q to detach from %q after %v", volumePath, nodeName, detachTimeout)).NotTo(HaveOccurred())
+	if err != nil {
+		return err
 	}
-
+	if diskAttached {
+		return fmt.Errorf("Gave up waiting for Volume %q to detach from %q after %v", volumePath, nodeName, detachTimeout)
+	}
+	return nil
 }
 
 // function to create vsphere volume spec with given VMDK volume path, Reclaim Policy and labels
diff --git a/test/e2e/storage/vsphere_volume_fstype.go b/test/e2e/storage/vsphere_volume_fstype.go
new file mode 100644
index 00000000000..73d60b843c0
--- /dev/null
+++ b/test/e2e/storage/vsphere_volume_fstype.go
@@ -0,0 +1,146 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storage
+
+import (
+	"time"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	k8stype "k8s.io/apimachinery/pkg/types"
+	"k8s.io/kubernetes/pkg/api/v1"
+	storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
+	"k8s.io/kubernetes/test/e2e/framework"
+)
+
+/*
+	Test to verify that the fstype specified in the StorageClass is honored after volume creation.
+
+	Steps
+	1. Create StorageClass with fstype set to valid type (default case included).
+	2. Create PVC which uses the StorageClass created in step 1.
+	3. Wait for PV to be provisioned.
+	4. Wait for PVC's status to become Bound.
+	5. Create pod using PVC on specific node.
+	6. Wait for disk to be attached to the node.
+	7. Execute command in the pod to get fstype.
+	8. Delete pod and wait for volume disk to be detached from the node.
+	9. Delete PVC, PV and StorageClass.
+*/
+
+var _ = framework.KubeDescribe("vsphere Volume fstype [Volume]", func() {
+	f := framework.NewDefaultFramework("volume-fstype")
+	var (
+		client       clientset.Interface
+		namespace    string
+		storageclass *storage.StorageClass
+		pvclaim      *v1.PersistentVolumeClaim
+	)
+	BeforeEach(func() {
+		framework.SkipUnlessProviderIs("vsphere")
+		client = f.ClientSet
+		namespace = f.Namespace.Name
+		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+		Expect(len(nodeList.Items)).NotTo(BeZero(), "Unable to find ready and schedulable Node")
+	})
+	AfterEach(func() {
+		var scDeleteError error
+		var pvDeleteError error
+		if storageclass != nil {
+			scDeleteError = client.StorageV1beta1().StorageClasses().Delete(storageclass.Name, nil)
+		}
+		if pvclaim != nil {
+			pvDeleteError = client.CoreV1().PersistentVolumeClaims(namespace).Delete(pvclaim.Name, nil)
+		}
+		framework.ExpectNoError(scDeleteError)
+		framework.ExpectNoError(pvDeleteError)
+		storageclass = nil
+		pvclaim = nil
+	})
+
+	It("verify fstype - ext3 formatted volume", func() {
+		By("Invoking Test for fstype: ext3")
+		storageclass, pvclaim = invokeTestForFstype(f, client, namespace, "ext3", "ext3")
+	})
+
+	It("verify fstype - default value should be ext4", func() {
+		By("Invoking Test for fstype: Default Value")
+		storageclass, pvclaim = invokeTestForFstype(f, client, namespace, "", "ext4")
+	})
+})
+
+func invokeTestForFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string, expectedContent string) (*storage.StorageClass, *v1.PersistentVolumeClaim) {
+
+	framework.Logf("Invoking Test for fstype: %s", fstype)
+	scParameters := make(map[string]string)
+	scParameters["fstype"] = fstype
+
+	By("Creating Storage Class With Fstype")
+	storageClassSpec := getVSphereStorageClassSpec("fstype", scParameters)
+	storageclass, err := client.StorageV1beta1().StorageClasses().Create(storageClassSpec)
+	Expect(err).NotTo(HaveOccurred())
+
+	By("Creating PVC using the Storage Class")
+	pvclaimSpec := getVSphereClaimSpecWithStorageClassAnnotation(namespace, storageclass)
+	pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(pvclaimSpec)
+	Expect(err).NotTo(HaveOccurred())
+
+	By("Waiting for claim to be in bound phase")
+	err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
+	Expect(err).NotTo(HaveOccurred())
+
+	// Get a fresh copy of the claim
+	pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(pvclaim.Name, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred())
+
+	// Get the bound PV
+	pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred())
+
+	By("Creating pod to attach PV to the node")
+	// Create pod to attach the volume to a node. The pod command records the mounted filesystem type ("df -T" column 2) in /mnt/test/fstype for verification below.
+	podSpec := getVSpherePodSpecWithClaim(pvclaim.Name, nil, "/bin/df -T /mnt/test | /bin/awk 'FNR == 2 {print $2}' > /mnt/test/fstype && while true ; do sleep 2 ; done")
+	pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
+	Expect(err).NotTo(HaveOccurred())
+
+	By("Waiting for pod to be running")
+	Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
+
+	pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred())
+
+	// Assert that the right disk is attached to the node running the pod
+	vsp, err := vsphere.GetVSphere()
+	Expect(err).NotTo(HaveOccurred())
+	isAttached, err := verifyVSphereDiskAttached(vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
+	Expect(err).NotTo(HaveOccurred())
+	Expect(isAttached).To(BeTrue(), "disk is not attached to the node")
+
+	_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/test/fstype"}, expectedContent, time.Minute)
+	Expect(err).NotTo(HaveOccurred())
+
+	var volumePaths []string
+	volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath)
+
+	By("Delete pod and wait for volume to be detached from node")
+	deletePodAndWaitForVolumeToDetach(f, client, pod, vsp, pod.Spec.NodeName, volumePaths)
+
+	return storageclass, pvclaim
+}
diff --git a/test/e2e/storage/vsphere_volume_placement.go b/test/e2e/storage/vsphere_volume_placement.go
index 7291e623e0a..079218523c4 100644
--- a/test/e2e/storage/vsphere_volume_placement.go
+++ b/test/e2e/storage/vsphere_volume_placement.go
@@ -281,7 +281,7 @@ var _ = framework.KubeDescribe("Volume Placement [Volume]", func() {
 			framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "defer: Failed to delete pod ", podB.Name)
 			By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name))
 			for _, volumePath := range volumePaths {
-				waitForVSphereDiskToDetach(vsp, volumePath, types.NodeName(node1Name))
+				framework.ExpectNoError(waitForVSphereDiskToDetach(vsp, volumePath, types.NodeName(node1Name)))
 			}
 		}()
 
@@ -381,6 +381,6 @@ func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Inter
 
 	By("Waiting for volume to be detached from the node")
 	for _, volumePath := range volumePaths {
-		waitForVSphereDiskToDetach(vsp, volumePath, types.NodeName(nodeName))
+		framework.ExpectNoError(waitForVSphereDiskToDetach(vsp, volumePath, types.NodeName(nodeName)))
 	}
 }
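
A note on the vsphere_utils.go refactor above: waitForVSphereDiskToDetach used to fail the test from inside the helper via Gomega assertions; it now returns an error, and each call site decides how to react by wrapping the call in framework.ExpectNoError. The following is a minimal, self-contained sketch of the same poll-and-return-error pattern; waitForDetach and the isDetached callback are hypothetical stand-ins for the real helper and for verifyVSphereDiskAttached, not code from this change.

package main

import (
	"fmt"
	"time"
)

// waitForDetach polls isDetached until it reports true, or gives up after
// timeout. It returns an error rather than asserting, mirroring the change
// to waitForVSphereDiskToDetach: the caller chooses whether failure is fatal.
func waitForDetach(isDetached func() (bool, error), pollInterval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		detached, err := isDetached()
		if err != nil {
			return err // propagated instead of Expect(err).NotTo(HaveOccurred())
		}
		if detached {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("gave up waiting for volume to detach after %v", timeout)
		}
		time.Sleep(pollInterval)
	}
}

func main() {
	// Toy caller: the "volume" detaches on the third poll.
	polls := 0
	err := waitForDetach(func() (bool, error) {
		polls++
		return polls >= 3, nil
	}, 10*time.Millisecond, time.Second)
	fmt.Println("err:", err) // err: <nil>
}

Keeping test-framework control flow out of the helper is what makes it safe to call from cleanup paths such as framework.AddCleanupAction above: the assertion moves to the caller, which can decide how much of the remaining cleanup should still run.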
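
For context on what the new test provisions: getVSphereStorageClassSpec and getVSphereClaimSpecWithStorageClassAnnotation are helpers defined elsewhere in the test/e2e/storage package, and their bodies are not part of this diff. Assuming the usual in-tree vSphere wiring (provisioner name kubernetes.io/vsphere-volume), the StorageClass the test creates is roughly equivalent to the sketch below; the helper internals here are an assumption, not the actual implementation.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
)

// sketchStorageClass approximates what getVSphereStorageClassSpec("fstype",
// map[string]string{"fstype": "ext3"}) plausibly returns. The Parameters map
// is passed through to the vSphere volume plugin; the fstype value ends up on
// the provisioned PV, and the VMDK is formatted with that filesystem when it
// is first mounted. An empty fstype falls back to the ext4 default, which is
// exactly what the second It() block above verifies.
func sketchStorageClass(name string, parameters map[string]string) *storage.StorageClass {
	return &storage.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: name},
		Provisioner: "kubernetes.io/vsphere-volume",
		Parameters:  parameters, // e.g. {"fstype": "ext3"}
	}
}

func main() {
	sc := sketchStorageClass("fstype", map[string]string{"fstype": "ext3"})
	fmt.Println(sc.Name, sc.Provisioner, sc.Parameters)
}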