diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD
index c85995f1f0e..7f173a8dda3 100644
--- a/test/e2e/storage/BUILD
+++ b/test/e2e/storage/BUILD
@@ -25,7 +25,9 @@ go_library(
         "volumes.go",
         "vsphere_utils.go",
         "vsphere_volume_cluster_ds.go",
+        "vsphere_volume_datastore.go",
         "vsphere_volume_diskformat.go",
+        "vsphere_volume_disksize.go",
         "vsphere_volume_fstype.go",
         "vsphere_volume_ops_storm.go",
         "vsphere_volume_placement.go",
diff --git a/test/e2e/storage/vsphere_utils.go b/test/e2e/storage/vsphere_utils.go
index 29c94743de6..a0b15ff3432 100644
--- a/test/e2e/storage/vsphere_utils.go
+++ b/test/e2e/storage/vsphere_utils.go
@@ -207,7 +207,7 @@ func getVSphereStorageClassSpec(name string, scParameters map[string]string) *st
 	return sc
 }
 
-func getVSphereClaimSpecWithStorageClassAnnotation(ns string, storageclass *storage.StorageClass) *v1.PersistentVolumeClaim {
+func getVSphereClaimSpecWithStorageClassAnnotation(ns string, diskSize string, storageclass *storage.StorageClass) *v1.PersistentVolumeClaim {
 	scAnnotation := make(map[string]string)
 	scAnnotation[v1.BetaStorageClassAnnotation] = storageclass.Name
 
@@ -223,7 +223,7 @@ func getVSphereClaimSpecWithStorageClassAnnotation(ns string, storageclass *stor
 			},
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
-					v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
+					v1.ResourceName(v1.ResourceStorage): resource.MustParse(diskSize),
 				},
 			},
 		},
diff --git a/test/e2e/storage/vsphere_volume_datastore.go b/test/e2e/storage/vsphere_volume_datastore.go
new file mode 100644
index 00000000000..a80f22e9119
--- /dev/null
+++ b/test/e2e/storage/vsphere_volume_datastore.go
@@ -0,0 +1,95 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storage
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/test/e2e/framework"
+)
+
+const (
+	InvalidDatastore = "invalidDatastore"
+	DatastoreSCName  = "datastoresc"
+)
+
+/*
+	Test to verify datastore specified in storage-class is being honored during volume creation.
+
+	Steps
+	1. Create StorageClass with invalid datastore.
+	2. Create PVC which uses the StorageClass created in step 1.
+	3. Expect the PVC to fail.
+	4. Verify the error returned on PVC failure is correct.
+*/
+
+var _ = SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", func() {
+	f := framework.NewDefaultFramework("volume-datastore")
+	var (
+		client       clientset.Interface
+		namespace    string
+		scParameters map[string]string
+	)
+	BeforeEach(func() {
+		framework.SkipUnlessProviderIs("vsphere")
+		client = f.ClientSet
+		namespace = f.Namespace.Name
+		scParameters = make(map[string]string)
+		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+		if !(len(nodeList.Items) > 0) {
+			framework.Failf("Unable to find ready and schedulable Node")
+		}
+	})
+
+	It("verify dynamically provisioned pv using storageclass fails on an invalid datastore", func() {
+		By("Invoking Test for invalid datastore")
+		scParameters[Datastore] = InvalidDatastore
+		scParameters[DiskFormat] = ThinDisk
+		err := invokeInvalidDatastoreTestNeg(client, namespace, scParameters)
+		Expect(err).To(HaveOccurred())
+		errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": datastore '` + InvalidDatastore + `' not found`
+		if !strings.Contains(err.Error(), errorMsg) {
+			Expect(err).NotTo(HaveOccurred(), errorMsg)
+		}
+	})
+})
+
+func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
+	By("Creating Storage Class With Invalid Datastore")
+	storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DatastoreSCName, scParameters))
+	Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
+	defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
+
+	By("Creating PVC using the Storage Class")
+	pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, "2Gi", storageclass))
+	Expect(err).NotTo(HaveOccurred())
+	defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
+
+	By("Expect claim to fail provisioning volume")
+	err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
+	Expect(err).To(HaveOccurred())
+
+	eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
+	return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
+}
diff --git a/test/e2e/storage/vsphere_volume_diskformat.go b/test/e2e/storage/vsphere_volume_diskformat.go
index 61090d03563..d6ecf9e7633 100644
--- a/test/e2e/storage/vsphere_volume_diskformat.go
+++ b/test/e2e/storage/vsphere_volume_diskformat.go
@@ -52,7 +52,7 @@ import (
 	11. Delete PVC, PV and Storage Class
 */
 
-var _ = SIGDescribe("Volume Disk Format", func() {
+var _ = SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
 	f := framework.NewDefaultFramework("volume-disk-format")
 	var (
 		client clientset.Interface
@@ -114,7 +114,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
 	defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
 
 	By("Creating PVC using the Storage Class")
-	pvclaimSpec := getVSphereClaimSpecWithStorageClassAnnotation(namespace, storageclass)
+	pvclaimSpec := getVSphereClaimSpecWithStorageClassAnnotation(namespace, "2Gi", storageclass)
 	pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(pvclaimSpec)
 	Expect(err).NotTo(HaveOccurred())
 
diff --git a/test/e2e/storage/vsphere_volume_disksize.go b/test/e2e/storage/vsphere_volume_disksize.go
new file mode 100644
index 00000000000..f69983accdc
--- /dev/null
+++ b/test/e2e/storage/vsphere_volume_disksize.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storage
+
+import (
+	"fmt"
+	"os"
+	"strings"
+	"time"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/test/e2e/framework"
+)
+
+const (
+	DiskSizeSCName = "disksizesc"
+)
+
+/*
+	Test to verify disk size specified in PVC is being honored during volume creation.
+
+	Steps
+	1. Create StorageClass.
+	2. Create PVC with an invalid disk size which uses the StorageClass created in step 1.
+	3. Expect the PVC to fail.
+	4. Verify the error returned on PVC failure is correct.
+*/
+
+var _ = SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
+	f := framework.NewDefaultFramework("volume-disksize")
+	var (
+		client       clientset.Interface
+		namespace    string
+		scParameters map[string]string
+		datastore    string
+	)
+	BeforeEach(func() {
+		framework.SkipUnlessProviderIs("vsphere")
+		client = f.ClientSet
+		namespace = f.Namespace.Name
+		scParameters = make(map[string]string)
+		datastore = os.Getenv("VSPHERE_DATASTORE")
+		Expect(datastore).NotTo(BeEmpty())
+		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+		if !(len(nodeList.Items) > 0) {
+			framework.Failf("Unable to find ready and schedulable Node")
+		}
+	})
+
+	It("verify dynamically provisioned pv using storageclass with an invalid disk size fails", func() {
+		By("Invoking Test for invalid disk size")
+		scParameters[Datastore] = datastore
+		scParameters[DiskFormat] = ThinDisk
+		diskSize := "1"
+		err := invokeInvalidDiskSizeTestNeg(client, namespace, scParameters, diskSize)
+		Expect(err).To(HaveOccurred())
+		errorMsg := `Failed to provision volume with StorageClass \"` + DiskSizeSCName + `\": A specified parameter was not correct`
+		if !strings.Contains(err.Error(), errorMsg) {
+			Expect(err).NotTo(HaveOccurred(), errorMsg)
+		}
+	})
+})
+
+func invokeInvalidDiskSizeTestNeg(client clientset.Interface, namespace string, scParameters map[string]string, diskSize string) error {
+	By("Creating Storage Class With invalid disk size")
+	storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DiskSizeSCName, scParameters))
+	Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
+	defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
+
+	By("Creating PVC using the Storage Class")
+	pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, diskSize, storageclass))
+	Expect(err).NotTo(HaveOccurred())
+	defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
+
+	By("Expect claim to fail provisioning volume")
+	err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
+	Expect(err).To(HaveOccurred())
+
+	eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
+	return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
+}
diff --git a/test/e2e/storage/vsphere_volume_fstype.go b/test/e2e/storage/vsphere_volume_fstype.go
index aa1d33c4e6a..3e61338e1dc 100644
--- a/test/e2e/storage/vsphere_volume_fstype.go
+++ b/test/e2e/storage/vsphere_volume_fstype.go
@@ -17,17 +17,26 @@ limitations under the License.
 package storage
 
 import (
+	"strings"
 	"time"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	k8stype "k8s.io/apimachinery/pkg/types"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
+	vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
 	"k8s.io/kubernetes/test/e2e/framework"
 )
 
+const (
+	Ext4FSType    = "ext4"
+	Ext3FSType    = "ext3"
+	InvalidFSType = "ext10"
+	ExecCommand   = "/bin/df -T /mnt/volume1 | /bin/awk 'FNR == 2 {print $2}' > /mnt/volume1/fstype && while true ; do sleep 2 ; done"
+)
+
 /*
 	Test to verify fstype specified in storage-class is being honored after volume creation.
 
@@ -41,9 +50,20 @@ import (
 	7. Execute command in the pod to get fstype.
 	8. Delete pod and Wait for Volume Disk to be detached from the Node.
 	9. Delete PVC, PV and Storage Class.
+
+	Test to verify if an invalid fstype specified in storage class fails pod creation.
+
+	Steps
+	1. Create StorageClass with an invalid fstype.
+	2. Create PVC which uses the StorageClass created in step 1.
+	3. Wait for PV to be provisioned.
+	4. Wait for PVC's status to become Bound.
+	5. Create pod using PVC.
+	6. Verify if the pod creation fails.
+	7. Verify if MountVolume.MountDevice fails because it is unable to find the file system executable file on the node.
 */
 
-var _ = SIGDescribe("vsphere Volume fstype", func() {
+var _ = SIGDescribe("Volume FStype [Feature:vsphere]", func() {
 	f := framework.NewDefaultFramework("volume-fstype")
 	var (
 		client clientset.Interface
@@ -59,12 +79,17 @@ var _ = SIGDescribe("vsphere Volume fstype", func() {
 
 	It("verify fstype - ext3 formatted volume", func() {
 		By("Invoking Test for fstype: ext3")
-		invokeTestForFstype(f, client, namespace, "ext3", "ext3")
+		invokeTestForFstype(f, client, namespace, Ext3FSType, Ext3FSType)
 	})
 
-	It("verify disk format type - default value should be ext4", func() {
-		By("Invoking Test for fstype: Default Value")
-		invokeTestForFstype(f, client, namespace, "", "ext4")
+	It("verify fstype - default value should be ext4", func() {
+		By("Invoking Test for fstype: Default Value - ext4")
+		invokeTestForFstype(f, client, namespace, "", Ext4FSType)
+	})
+
+	It("verify invalid fstype", func() {
+		By("Invoking Test for fstype: invalid Value")
+		invokeTestForInvalidFstype(f, client, namespace, InvalidFSType)
 	})
 })
 
@@ -72,40 +97,96 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam
 	framework.Logf("Invoking Test for fstype: %s", fstype)
 	scParameters := make(map[string]string)
 	scParameters["fstype"] = fstype
+	vsp, err := vsphere.GetVSphere()
+	Expect(err).NotTo(HaveOccurred())
 
+	// Create Persistent Volume
 	By("Creating Storage Class With Fstype")
+	pvclaim, persistentvolumes := createVolume(client, namespace, scParameters)
+
+	// Create Pod and verify the persistent volume is accessible
+	pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes, vsp)
+	_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute)
+	Expect(err).NotTo(HaveOccurred())
+
+	// Detach and delete volume
+	detachVolume(f, client, vsp, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
+	deleteVolume(client, pvclaim.Name, namespace)
+}
+
+func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string) {
+	scParameters := make(map[string]string)
+	scParameters["fstype"] = fstype
+	vsp, err := vsphere.GetVSphere()
+	Expect(err).NotTo(HaveOccurred())
+
+	// Create Persistent Volume
+	By("Creating Storage Class With Invalid Fstype")
+	pvclaim, persistentvolumes := createVolume(client, namespace, scParameters)
+
+	By("Creating pod to attach PV to the node")
+	var pvclaims []*v1.PersistentVolumeClaim
+	pvclaims = append(pvclaims, pvclaim)
+	// Create pod to attach Volume to Node
+	pod, err := framework.CreatePod(client, namespace, pvclaims, false, ExecCommand)
+	Expect(err).To(HaveOccurred())
+
+	eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{})
+
+	// Detach and delete volume
+	detachVolume(f, client, vsp, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
+	deleteVolume(client, pvclaim.Name, namespace)
+
+	Expect(eventList.Items).NotTo(BeEmpty())
+	errorMsg := `MountVolume.MountDevice failed for volume "` + persistentvolumes[0].Name + `" : executable file not found`
+	isFound := false
+	for _, item := range eventList.Items {
+		if strings.Contains(item.Message, errorMsg) {
+			isFound = true
+		}
+	}
+	Expect(isFound).To(BeTrue(), "Unable to verify MountVolume.MountDevice failure")
+}
+
+func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) {
 	storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("fstype", scParameters))
 	Expect(err).NotTo(HaveOccurred())
 	defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
 
 	By("Creating PVC using the Storage Class")
-	pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(getVSphereClaimSpecWithStorageClassAnnotation(namespace, storageclass))
+	pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(getVSphereClaimSpecWithStorageClassAnnotation(namespace, "2Gi", storageclass))
 	Expect(err).NotTo(HaveOccurred())
-	defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
 
 	var pvclaims []*v1.PersistentVolumeClaim
 	pvclaims = append(pvclaims, pvclaim)
 	By("Waiting for claim to be in bound phase")
 	persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
 	Expect(err).NotTo(HaveOccurred())
+	return pvclaim, persistentvolumes
+}
 
+func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume, vsp *vsphere.VSphere) *v1.Pod {
+	var pvclaims []*v1.PersistentVolumeClaim
+	pvclaims = append(pvclaims, pvclaim)
 	By("Creating pod to attach PV to the node")
 	// Create pod to attach Volume to Node
-	pod, err := framework.CreatePod(client, namespace, pvclaims, false, "")
+	pod, err := framework.CreatePod(client, namespace, pvclaims, false, ExecCommand)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Asserts: Right disk is attached to the pod
-	vsp, err := vsphere.GetVSphere()
-	Expect(err).NotTo(HaveOccurred())
 	By("Verify the volume is accessible and available in the pod")
 	verifyVSphereVolumesAccessible(pod, persistentvolumes, vsp)
+	return pod
+}
 
-	_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/test/fstype"}, expectedContent, time.Minute)
-	Expect(err).NotTo(HaveOccurred())
-
+func detachVolume(f *framework.Framework, client clientset.Interface, vsp *vsphere.VSphere, pod *v1.Pod, volPath string) {
 	By("Deleting pod")
 	framework.DeletePodWithWait(f, client, pod)
 
 	By("Waiting for volumes to be detached from the node")
-	waitForVSphereDiskToDetach(vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
+	waitForVSphereDiskToDetach(vsp, volPath, k8stype.NodeName(pod.Spec.NodeName))
+}
+
+func deleteVolume(client clientset.Interface, pvclaimName string, namespace string) {
+	framework.DeletePersistentVolumeClaim(client, pvclaimName, namespace)
 }
diff --git a/test/e2e/storage/vsphere_volume_ops_storm.go b/test/e2e/storage/vsphere_volume_ops_storm.go
index b06faec693c..10177bac120 100644
--- a/test/e2e/storage/vsphere_volume_ops_storm.go
+++ b/test/e2e/storage/vsphere_volume_ops_storm.go
@@ -47,7 +47,7 @@ import (
 	10. Delete storage class.
 */
 
-var _ = SIGDescribe("vsphere volume operations storm", func() {
+var _ = SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
 	f := framework.NewDefaultFramework("volume-ops-storm")
 	const DEFAULT_VOLUME_OPS_SCALE = 30
 	var (
@@ -99,7 +99,7 @@ var _ = SIGDescribe("vsphere volume operations storm", func() {
 		By("Creating PVCs using the Storage Class")
 		count := 0
 		for count < volume_ops_scale {
-			pvclaims[count], err = framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, storageclass))
+			pvclaims[count], err = framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, "2Gi", storageclass))
 			Expect(err).NotTo(HaveOccurred())
 			count++
 		}
diff --git a/test/e2e/storage/vsphere_volume_vsan_policy.go b/test/e2e/storage/vsphere_volume_vsan_policy.go
index 43ffd2cb8f7..8ad986d7f6b 100644
--- a/test/e2e/storage/vsphere_volume_vsan_policy.go
+++ b/test/e2e/storage/vsphere_volume_vsan_policy.go
@@ -90,7 +90,7 @@ const (
 
 */
 
-var _ = SIGDescribe("vSphere Storage policy support for dynamic provisioning", func() {
+var _ = SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsphere]", func() {
 	f := framework.NewDefaultFramework("volume-vsan-policy")
 	var (
 		client clientset.Interface
@@ -280,7 +280,7 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n
 	defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
 
 	By("Creating PVC using the Storage Class")
-	pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, storageclass))
+	pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, "2Gi", storageclass))
 	Expect(err).NotTo(HaveOccurred())
 	defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
 
@@ -314,7 +314,7 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc
 	defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
 
 	By("Creating PVC using the Storage Class")
-	pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, storageclass))
+	pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, "2Gi", storageclass))
 	Expect(err).NotTo(HaveOccurred())
 	defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
 
@@ -333,7 +333,7 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, namespa
 	defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
 
 	By("Creating PVC using the Storage Class")
-	pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, storageclass))
+	pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, "2Gi", storageclass))
 	Expect(err).NotTo(HaveOccurred())
 
 	var pvclaims []*v1.PersistentVolumeClaim