diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD
index 7f173a8dda3..cae7333ec47 100644
--- a/test/e2e/storage/BUILD
+++ b/test/e2e/storage/BUILD
@@ -23,6 +23,7 @@ go_library(
         "volume_metrics.go",
         "volume_provisioning.go",
         "volumes.go",
+        "vsphere_statefulsets.go",
         "vsphere_utils.go",
         "vsphere_volume_cluster_ds.go",
         "vsphere_volume_datastore.go",
diff --git a/test/e2e/storage/vsphere_statefulsets.go b/test/e2e/storage/vsphere_statefulsets.go
new file mode 100644
index 00000000000..c2823c72d85
--- /dev/null
+++ b/test/e2e/storage/vsphere_statefulsets.go
@@ -0,0 +1,156 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storage
+
+import (
+	"fmt"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
+	"k8s.io/kubernetes/test/e2e/framework"
+)
+
+/*
+	Test performs the following operations
+
+	Steps
+	1. Create a storage class with thin diskformat.
+	2. Create the nginx service.
+	3. Create the nginx statefulset with 3 replicas.
+	4. Wait until all Pods are ready and all PVCs are bound to PVs.
+	5. Verify volumes are accessible in all statefulset pods by creating an empty file on each volume.
+	6. Scale down the statefulset to 2 replicas.
+	7. Scale the statefulset back up to 3 replicas.
+	8. Scale down the statefulset to 0 replicas and delete all pods.
+	9. Delete all PVCs from the test namespace.
+	10. Delete the storage class.
+*/
+
+const (
+	manifestPath     = "test/e2e/testing-manifests/statefulset/nginx"
+	mountPath        = "/usr/share/nginx/html"
+	storageclassname = "nginx-sc"
+)
+
+var _ = SIGDescribe("vsphere statefulset", func() {
+	f := framework.NewDefaultFramework("vsphere-statefulset")
+	var (
+		namespace string
+		client    clientset.Interface
+	)
+	BeforeEach(func() {
+		framework.SkipUnlessProviderIs("vsphere")
+		namespace = f.Namespace.Name
+		client = f.ClientSet
+	})
+	AfterEach(func() {
+		framework.Logf("Deleting all statefulsets in namespace: %v", namespace)
+		framework.DeleteAllStatefulSets(client, namespace)
+	})
+
+	It("vsphere statefulset testing", func() {
+		By("Creating StorageClass for Statefulset")
+		scParameters := make(map[string]string)
+		scParameters["diskformat"] = "thin"
+		scSpec := getVSphereStorageClassSpec(storageclassname, scParameters)
+		sc, err := client.StorageV1().StorageClasses().Create(scSpec)
+		Expect(err).NotTo(HaveOccurred())
+		defer client.StorageV1().StorageClasses().Delete(sc.Name, nil)
+
+		By("Creating statefulset")
+		statefulsetTester := framework.NewStatefulSetTester(client)
+		statefulset := statefulsetTester.CreateStatefulSet(manifestPath, namespace)
+		replicas := *(statefulset.Spec.Replicas)
+		// Wait until all pods of the statefulset are Ready.
+		statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas)
+		Expect(statefulsetTester.CheckMount(statefulset, mountPath)).NotTo(HaveOccurred())
+		ssPodsBeforeScaleDown := statefulsetTester.GetPodList(statefulset)
+		Expect(ssPodsBeforeScaleDown.Items).NotTo(BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
+		Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(BeTrue(), "Number of Pods in the statefulset should match the number of replicas")
+
+		// Get the list of volumes attached to the pods before scale down.
+		volumesBeforeScaleDown := make(map[string]string)
+		for _, sspod := range ssPodsBeforeScaleDown.Items {
+			_, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{})
+			Expect(err).NotTo(HaveOccurred())
+			for _, volumespec := range sspod.Spec.Volumes {
+				if volumespec.PersistentVolumeClaim != nil {
+					volumePath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
+					volumesBeforeScaleDown[volumePath] = volumespec.PersistentVolumeClaim.ClaimName
+				}
+			}
+		}
+
+		By(fmt.Sprintf("Scaling down statefulset to %v replicas", replicas-1))
+		_, scaledownErr := statefulsetTester.Scale(statefulset, replicas-1)
+		Expect(scaledownErr).NotTo(HaveOccurred())
+		statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas-1)
+
+		vsp, err := vsphere.GetVSphere()
+		Expect(err).NotTo(HaveOccurred())
+
+		// After scale down, verify vsphere volumes are detached from the deleted pods.
+		By("Verify volumes are detached from nodes after the statefulset is scaled down")
+		for _, sspod := range ssPodsBeforeScaleDown.Items {
+			_, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{})
+			if err != nil {
+				// The only acceptable error is NotFound, i.e. the pod was removed by the scale down.
+				Expect(apierrs.IsNotFound(err)).To(BeTrue())
+				for _, volumespec := range sspod.Spec.Volumes {
+					if volumespec.PersistentVolumeClaim != nil {
+						vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
+						framework.Logf("Waiting for Volume: %q to detach from Node: %q", vSpherediskPath, sspod.Spec.NodeName)
+						Expect(waitForVSphereDiskToDetach(vsp, vSpherediskPath, types.NodeName(sspod.Spec.NodeName))).NotTo(HaveOccurred())
+					}
+				}
+			}
+		}
+
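+		// Scaling back up re-creates the deleted pods with the same ordinals, and the
+		// StatefulSet controller re-uses the PVCs created from volumeClaimTemplates,
+		// so the volumes recorded in volumesBeforeScaleDown are expected to be
+		// re-attached to the node VMs rather than newly provisioned.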
By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas)) + _, scaleupErr := statefulsetTester.Scale(statefulset, replicas) + Expect(scaleupErr).NotTo(HaveOccurred()) + statefulsetTester.WaitForStatusReplicas(statefulset, replicas) + statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas) + + ssPodsAfterScaleUp := statefulsetTester.GetPodList(statefulset) + Expect(ssPodsAfterScaleUp.Items).NotTo(BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) + Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(BeTrue(), "Number of Pods in the statefulset should match with number of replicas") + + // After scale up, verify all vsphere volumes are attached to node VMs. + By("Verify all volumes are attached to Nodes after Statefulsets is scaled up") + for _, sspod := range ssPodsAfterScaleUp.Items { + err := framework.WaitForPodsReady(client, statefulset.Namespace, sspod.Name, 0) + Expect(err).NotTo(HaveOccurred()) + pod, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + for _, volumespec := range pod.Spec.Volumes { + if volumespec.PersistentVolumeClaim != nil { + vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) + framework.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName) + // Verify scale up has re-attached the same volumes and not introduced new volume + Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(BeFalse()) + isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(vsp, vSpherediskPath, types.NodeName(sspod.Spec.NodeName)) + Expect(isVolumeAttached).To(BeTrue()) + Expect(verifyDiskAttachedError).NotTo(HaveOccurred()) + } + } + } + }) +}) diff --git a/test/e2e/storage/vsphere_utils.go b/test/e2e/storage/vsphere_utils.go index b615819f59d..e3b93d0b355 100644 --- a/test/e2e/storage/vsphere_utils.go +++ b/test/e2e/storage/vsphere_utils.go @@ -354,3 +354,12 @@ func verifyVSphereVolumesAccessible(pod *v1.Pod, persistentvolumes []*v1.Persist Expect(err).NotTo(HaveOccurred()) } } + +// Get vSphere Volume Path from PVC +func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string, claimName string) string { + pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return pv.Spec.VsphereVolume.VolumePath +} diff --git a/test/e2e/testing-manifests/statefulset/nginx/service.yaml b/test/e2e/testing-manifests/statefulset/nginx/service.yaml new file mode 100644 index 00000000000..5499b375c34 --- /dev/null +++ b/test/e2e/testing-manifests/statefulset/nginx/service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: nginx + labels: + app: nginx +spec: + ports: + - port: 80 + name: web + clusterIP: None + selector: + app: nginx \ No newline at end of file diff --git a/test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml b/test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml new file mode 100644 index 00000000000..a483fd5dd3f --- /dev/null +++ b/test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1beta2 +kind: StatefulSet +metadata: + name: web +spec: + serviceName: "nginx" + replicas: 3 + selector: + matchLabels: + 
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - name: nginx
+        image: gcr.io/google_containers/nginx-slim:0.8
+        ports:
+        - containerPort: 80
+          name: web
+        volumeMounts:
+        - name: www
+          mountPath: /usr/share/nginx/html
+  volumeClaimTemplates:
+  - metadata:
+      name: www
+      annotations:
+        volume.beta.kubernetes.io/storage-class: nginx-sc
+    spec:
+      accessModes: [ "ReadWriteOnce" ]
+      resources:
+        requests:
+          storage: 1Gi
\ No newline at end of file
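For reference, the StorageClass that the test constructs in code via getVSphereStorageClassSpec(storageclassname, scParameters), and that the volumeClaimTemplates above select through the volume.beta.kubernetes.io/storage-class: nginx-sc annotation, corresponds roughly to the manifest below. This is an illustrative sketch, assuming the helper sets only the provisioner and the diskformat parameter:

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nginx-sc                           # matches the storageclassname constant in the test
provisioner: kubernetes.io/vsphere-volume
parameters:
  diskformat: thin                         # passed via scParameters["diskformat"] in the test

Creating the class in code rather than shipping a manifest lets the test set the thin diskformat parameter and clean the class up with the deferred Delete call.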