Merge pull request #53218 from vmware/e2e-vsphere-statefulsets
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Adding e2e test for statefulsets for vSphere cloud provider

**What this PR does / why we need it**: This PR adds a new e2e test for statefulsets for the vSphere cloud provider. The test performs the following steps:

- Create a storage class with thin diskformat.
- Create an nginx service.
- Create an nginx statefulset with 3 replicas.
- Wait until all pods are ready and all PVCs are bound to PVs.
- Verify volumes are accessible in all statefulset pods by creating an empty file on each.
- Scale the statefulset down to 2 replicas.
- Scale the statefulset back up to 3 replicas.
- Scale the statefulset down to 0 replicas and delete all pods.
- Delete all PVCs from the test namespace.
- Delete the storage class.

**Which issue this PR fixes**: fixes https://github.com/vmware/kubernetes/issues/275

**Special notes for your reviewer**: Test logs:

```
root@k8s-dev-vm-02:~/divyenp/kubernetes# go run hack/e2e.go --check-version-skew=false --v --test --test_args='--ginkgo.focus=vsphere\sstatefulset\stesting'
flag provided but not defined: -check-version-skew
Usage of /tmp/go-build247641121/command-line-arguments/_obj/exe/e2e:
  -get
        go get -u kubetest if old or not installed (default true)
  -old duration
        Consider kubetest old if it exceeds this (default 24h0m0s)
2017/10/18 19:24:33 e2e.go:55: NOTICE: go run hack/e2e.go is now a shim for test-infra/kubetest
2017/10/18 19:24:33 e2e.go:56: Usage: go run hack/e2e.go [--get=true] [--old=24h0m0s] -- [KUBETEST_ARGS]
2017/10/18 19:24:33 e2e.go:57: The separator is required to use --get or --old flags
2017/10/18 19:24:33 e2e.go:58: The -- flag separator also suppresses this message
2017/10/18 19:24:33 e2e.go:77: Calling kubetest --check-version-skew=false --v --test --test_args=--ginkgo.focus=vsphere\sstatefulset\stesting...
2017/10/18 19:24:33 util.go:154: Running: ./cluster/kubectl.sh --match-server-version=false version
2017/10/18 19:24:34 util.go:156: Step './cluster/kubectl.sh --match-server-version=false version' finished in 290.682219ms
2017/10/18 19:24:34 util.go:154: Running: ./hack/e2e-internal/e2e-status.sh
Skeleton Provider: prepare-e2e not implemented
Client Version: version.Info{Major:"1", Minor:"9+", GitVersion:"v1.9.0-alpha.1.1217+8b041da0f996c1-dirty", GitCommit:"8b041da0f996c185438a7ed8282f92734a2ed0e7", GitTreeState:"dirty", BuildDate:"2017-10-19T00:46:00Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"9+", GitVersion:"v1.9.0-alpha.1.1293+d462bac7805f53", GitCommit:"d462bac7805f536a43c7d5fb98aca138ba1237eb", GitTreeState:"clean", BuildDate:"2017-10-18T07:07:08Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"}
2017/10/18 19:24:34 util.go:156: Step './hack/e2e-internal/e2e-status.sh' finished in 305.965323ms
2017/10/18 19:24:34 util.go:154: Running: ./hack/ginkgo-e2e.sh --ginkgo.focus=vsphere\sstatefulset\stesting
Conformance test: not doing test setup.
Oct 18 19:24:35.808: INFO: Overriding default scale value of zero to 1
Oct 18 19:24:35.808: INFO: Overriding default milliseconds value of zero to 5000
I1018 19:24:36.073718 7768 e2e.go:383] Starting e2e run "a63561de-b474-11e7-8f6b-0050569c26b8" on Ginkgo node 1
Running Suite: Kubernetes e2e suite
===================================
Random Seed: 1508379875 - Will randomize all specs
Will run 1 of 713 specs

Oct 18 19:24:36.132: INFO: >>> kubeConfig: /root/.kube/config
Oct 18 19:24:36.139: INFO: Waiting up to 4h0m0s for all (but 0) nodes to be schedulable
Oct 18 19:24:36.177: INFO: Waiting up to 10m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
Oct 18 19:24:36.321: INFO: 13 / 13 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
Oct 18 19:24:36.321: INFO: expected 4 pod replicas in namespace 'kube-system', 4 are Running and Ready.
Oct 18 19:24:36.326: INFO: Waiting for pods to enter Success, but no pods in "kube-system" match label map[name:e2e-image-puller]
Oct 18 19:24:36.326: INFO: Dumping network health container logs from all nodes...
Oct 18 19:24:36.338: INFO: Client version: v1.9.0-alpha.1.1217+8b041da0f996c1-dirty
Oct 18 19:24:36.340: INFO: Server version: v1.9.0-alpha.1.1293+d462bac7805f53
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] vsphere statefulset
  vsphere statefulset testing
  /root/divyenp/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/vsphere_statefulsets.go:155
[BeforeEach] [sig-storage] vsphere statefulset
  /root/divyenp/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:133
STEP: Creating a kubernetes client
Oct 18 19:24:36.349: INFO: >>> kubeConfig: /root/.kube/config
STEP: Building a namespace api object
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [sig-storage] vsphere statefulset
  /root/divyenp/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/vsphere_statefulsets.go:63
[It] vsphere statefulset testing
  /root/divyenp/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/vsphere_statefulsets.go:155
STEP: Creating StorageClass for Statefulset
STEP: Creating statefulset
Oct 18 19:24:36.489: INFO: Parsing statefulset from test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml
Oct 18 19:24:36.503: INFO: Parsing service from test/e2e/testing-manifests/statefulset/nginx/service.yaml
Oct 18 19:24:36.514: INFO: creating web service
Oct 18 19:24:36.527: INFO: creating statefulset e2e-tests-vsphere-statefulset-gnfmp/web with 3 replicas and selector &LabelSelector{MatchLabels:map[string]string{app: nginx,},MatchExpressions:[],}
Oct 18 19:24:36.561: INFO: Found 0 stateful pods, waiting for 3
Oct 18 19:24:46.567: INFO: Found 1 stateful pods, waiting for 3
Oct 18 19:24:56.568: INFO: Found 1 stateful pods, waiting for 3
Oct 18 19:25:06.568: INFO: Found 1 stateful pods, waiting for 3
Oct 18 19:25:16.566: INFO: Found 1 stateful pods, waiting for 3
Oct 18 19:25:26.567: INFO: Found 1 stateful pods, waiting for 3
Oct 18 19:25:36.567: INFO: Found 1 stateful pods, waiting for 3
Oct 18 19:25:46.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:25:56.568: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:26:06.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:26:16.568: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:26:26.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:26:36.568: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:26:46.568: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:26:56.571: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:27:06.568: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:27:16.569: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:27:26.568: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:27:36.569: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:27:46.569: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:27:56.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:28:06.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:28:16.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:28:26.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:28:36.574: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:28:46.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:28:56.571: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:29:06.569: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:29:16.568: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:29:26.566: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:29:36.568: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:29:46.566: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:29:56.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:30:06.568: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:06.568: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:06.568: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:30:16.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:16.567: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:16.567: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:30:26.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:26.567: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:26.567: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:30:36.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:36.567: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:36.567: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:30:46.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:46.567: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:46.567: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:30:56.566: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:56.567: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:56.567: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:31:06.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:06.568: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:06.568: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:31:16.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:16.567: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:16.567: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:31:26.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:26.568: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:26.568: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:31:36.568: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:36.568: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:36.568: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:31:46.568: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:46.568: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:46.568: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:31:56.568: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:56.568: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:56.568: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:32:06.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:32:06.567: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:32:06.567: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:32:16.571: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:32:16.571: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:32:16.571: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:32:26.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:32:26.567: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:32:26.567: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:32:26.567: INFO: Waiting for statefulset status.replicas updated to 3
Oct 18 19:32:26.605: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-0 -- /bin/sh -c ls -idlh /usr/share/nginx/html'
Oct 18 19:32:27.170: INFO: stderr: ""
Oct 18 19:32:27.170: INFO: stdout of ls -idlh /usr/share/nginx/html on web-0:
2 drwxr-xr-x 3 root root 4.0K Oct 19 02:25 /usr/share/nginx/html
Oct 18 19:32:27.171: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-1 -- /bin/sh -c ls -idlh /usr/share/nginx/html'
Oct 18 19:32:27.687: INFO: stderr: ""
Oct 18 19:32:27.688: INFO: stdout of ls -idlh /usr/share/nginx/html on web-1:
2 drwxr-xr-x 3 root root 4.0K Oct 19 02:29 /usr/share/nginx/html
Oct 18 19:32:27.688: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-2 -- /bin/sh -c ls -idlh /usr/share/nginx/html'
Oct 18 19:32:28.177: INFO: stderr: ""
Oct 18 19:32:28.177: INFO: stdout of ls -idlh /usr/share/nginx/html on web-2:
2 drwxr-xr-x 3 root root 4.0K Oct 19 02:32 /usr/share/nginx/html
Oct 18 19:32:28.183: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-0 -- /bin/sh -c find /usr/share/nginx/html'
Oct 18 19:32:28.690: INFO: stderr: ""
Oct 18 19:32:28.690: INFO: stdout of find /usr/share/nginx/html on web-0:
/usr/share/nginx/html
/usr/share/nginx/html/lost+found
Oct 18 19:32:28.690: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-1 -- /bin/sh -c find /usr/share/nginx/html'
Oct 18 19:32:29.166: INFO: stderr: ""
Oct 18 19:32:29.166: INFO: stdout of find /usr/share/nginx/html on web-1:
/usr/share/nginx/html
/usr/share/nginx/html/lost+found
Oct 18 19:32:29.166: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-2 -- /bin/sh -c find /usr/share/nginx/html'
Oct 18 19:32:29.696: INFO: stderr: ""
Oct 18 19:32:29.696: INFO: stdout of find /usr/share/nginx/html on web-2:
/usr/share/nginx/html
/usr/share/nginx/html/lost+found
Oct 18 19:32:29.707: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-0 -- /bin/sh -c touch /usr/share/nginx/html/1508380346587629054'
Oct 18 19:32:30.171: INFO: stderr: ""
Oct 18 19:32:30.171: INFO: stdout of touch /usr/share/nginx/html/1508380346587629054 on web-0:
Oct 18 19:32:30.171: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-1 -- /bin/sh -c touch /usr/share/nginx/html/1508380346587629054'
Oct 18 19:32:30.653: INFO: stderr: ""
Oct 18 19:32:30.653: INFO: stdout of touch /usr/share/nginx/html/1508380346587629054 on web-1:
Oct 18 19:32:30.654: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-2 -- /bin/sh -c touch /usr/share/nginx/html/1508380346587629054'
Oct 18 19:32:31.149: INFO: stderr: ""
Oct 18 19:32:31.150: INFO: stdout of touch /usr/share/nginx/html/1508380346587629054 on web-2:
STEP: Scaling down statefulsets to number of Replica: 2
Oct 18 19:32:31.263: INFO: Scaling statefulset web to 2
Oct 18 19:32:51.314: INFO: Waiting for statefulset status.replicas updated to 2
STEP: Verify Volumes are detached from Nodes after Statefulsets is scaled down
Oct 18 19:32:51.524: INFO: Waiting for Volume: "[vsanDatastore] 1874c359-f300-a0cc-fd7e-02002a623c85/kubernetes-dynamic-pvc-67b7e88c-b475-11e7-a38c-0050569c555f.vmdk" to detach from Node: "kubernetes-node2"
Oct 18 19:33:01.657: INFO: Volume "[vsanDatastore] 1874c359-f300-a0cc-fd7e-02002a623c85/kubernetes-dynamic-pvc-67b7e88c-b475-11e7-a38c-0050569c555f.vmdk" appears to have successfully detached from "kubernetes-node2".
STEP: Scaling up statefulsets to number of Replica: 3
Oct 18 19:33:01.657: INFO: Scaling statefulset web to 3
Oct 18 19:33:11.731: INFO: Waiting for statefulset status.replicas updated to 3
Oct 18 19:33:11.747: INFO: Waiting for statefulset status.replicas updated to 3
STEP: Verify all volumes are attached to Nodes after Statefulsets is scaled up
Oct 18 19:33:13.823: INFO: Verify Volume: "[vsanDatastore] 1874c359-f300-a0cc-fd7e-02002a623c85/kubernetes-dynamic-pvc-a6cf15ef-b474-11e7-a38c-0050569c555f.vmdk" is attached to the Node: "kubernetes-node4"
Oct 18 19:33:15.990: INFO: Verify Volume: "[vsanDatastore] 1874c359-f300-a0cc-fd7e-02002a623c85/kubernetes-dynamic-pvc-cfb65f92-b474-11e7-a38c-0050569c555f.vmdk" is attached to the Node: "kubernetes-node3"
Oct 18 19:33:18.154: INFO: Verify Volume: "[vsanDatastore] 1874c359-f300-a0cc-fd7e-02002a623c85/kubernetes-dynamic-pvc-67b7e88c-b475-11e7-a38c-0050569c555f.vmdk" is attached to the Node: "kubernetes-node2"
[AfterEach] [sig-storage] vsphere statefulset
  /root/divyenp/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:134
Oct 18 19:33:18.323: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-vsphere-statefulset-gnfmp" for this suite.
Oct 18 19:33:44.960: INFO: namespace: e2e-tests-vsphere-statefulset-gnfmp, resource: bindings, ignored listing per whitelist
Oct 18 19:33:44.960: INFO: namespace e2e-tests-vsphere-statefulset-gnfmp deletion completed in 26.620223678s
[AfterEach] [sig-storage] vsphere statefulset
  /root/divyenp/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/vsphere_statefulsets.go:67
Oct 18 19:33:44.960: INFO: Deleting all statefulset in namespace: e2e-tests-vsphere-statefulset-gnfmp
• [SLOW TEST:548.654 seconds]
[sig-storage] vsphere statefulset
/root/divyenp/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/framework.go:22
  vsphere statefulset testing
  /root/divyenp/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/vsphere_statefulsets.go:155
------------------------------
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
Oct 18 19:33:45.006: INFO: Running AfterSuite actions on all node
Oct 18 19:33:45.006: INFO: Running AfterSuite actions on node 1

Ran 1 of 713 Specs in 548.875 seconds
SUCCESS! -- 1 Passed | 0 Failed | 0 Pending | 712 Skipped
PASS

Ginkgo ran 1 suite in 9m9.728218415s
Test Suite Passed
2017/10/18 19:33:45 util.go:156: Step './hack/ginkgo-e2e.sh --ginkgo.focus=vsphere\sstatefulset\stesting' finished in 9m10.656371481s
2017/10/18 19:33:45 e2e.go:81: Done
```

VMware Reviewers: @rohitjogvmw @BaluDontu @tusharnt

**Release note**:

```release-note
NONE
```
This commit is contained in: commit 1e152dba52
test/e2e/storage/BUILD

```diff
@@ -23,6 +23,7 @@ go_library(
         "volume_metrics.go",
         "volume_provisioning.go",
         "volumes.go",
+        "vsphere_statefulsets.go",
         "vsphere_utils.go",
         "vsphere_volume_cluster_ds.go",
         "vsphere_volume_datastore.go",
```
test/e2e/storage/vsphere_statefulsets.go (new file, 156 lines)
```go
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"fmt"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
	"k8s.io/kubernetes/test/e2e/framework"
)

/*
Test performs the following operations

Steps
1. Create a storage class with thin diskformat.
2. Create nginx service.
3. Create nginx statefulsets with 3 replicas.
4. Wait until all Pods are ready and PVCs are bound to PVs.
5. Verify volumes are accessible in all statefulset pods by creating an empty file.
6. Scale down statefulsets to 2 replicas.
7. Scale up statefulsets back to 3 replicas.
8. Scale down statefulsets to 0 replicas and delete all pods.
9. Delete all PVCs from the test namespace.
10. Delete the storage class.
*/

const (
	manifestPath     = "test/e2e/testing-manifests/statefulset/nginx"
	mountPath        = "/usr/share/nginx/html"
	storageclassname = "nginx-sc"
)

var _ = SIGDescribe("vsphere statefulset", func() {
	f := framework.NewDefaultFramework("vsphere-statefulset")
	var (
		namespace string
		client    clientset.Interface
	)
	BeforeEach(func() {
		framework.SkipUnlessProviderIs("vsphere")
		namespace = f.Namespace.Name
		client = f.ClientSet
	})
	AfterEach(func() {
		framework.Logf("Deleting all statefulset in namespace: %v", namespace)
		framework.DeleteAllStatefulSets(client, namespace)
	})

	It("vsphere statefulset testing", func() {
		By("Creating StorageClass for Statefulset")
		scParameters := make(map[string]string)
		scParameters["diskformat"] = "thin"
		scSpec := getVSphereStorageClassSpec(storageclassname, scParameters)
		sc, err := client.StorageV1().StorageClasses().Create(scSpec)
		Expect(err).NotTo(HaveOccurred())
		defer client.StorageV1().StorageClasses().Delete(sc.Name, nil)

		By("Creating statefulset")
		statefulsetTester := framework.NewStatefulSetTester(client)
		statefulset := statefulsetTester.CreateStatefulSet(manifestPath, namespace)
		replicas := *(statefulset.Spec.Replicas)
		// Waiting for pods status to be Ready
		statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas)
		Expect(statefulsetTester.CheckMount(statefulset, mountPath)).NotTo(HaveOccurred())
		ssPodsBeforeScaleDown := statefulsetTester.GetPodList(statefulset)
		Expect(ssPodsBeforeScaleDown.Items).NotTo(BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
		Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(BeTrue(), "Number of Pods in the statefulset should match with number of replicas")

		// Get the list of Volumes attached to Pods before scale down
		volumesBeforeScaleDown := make(map[string]string)
		for _, sspod := range ssPodsBeforeScaleDown.Items {
			_, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{})
			Expect(err).NotTo(HaveOccurred())
			for _, volumespec := range sspod.Spec.Volumes {
				if volumespec.PersistentVolumeClaim != nil {
					volumePath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
					volumesBeforeScaleDown[volumePath] = volumespec.PersistentVolumeClaim.ClaimName
				}
			}
		}

		By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1))
		_, scaledownErr := statefulsetTester.Scale(statefulset, replicas-1)
		Expect(scaledownErr).NotTo(HaveOccurred())
		statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas-1)

		vsp, err := vsphere.GetVSphere()
		Expect(err).NotTo(HaveOccurred())

		// After scale down, verify vsphere volumes are detached from deleted pods
		By("Verify Volumes are detached from Nodes after Statefulsets is scaled down")
		for _, sspod := range ssPodsBeforeScaleDown.Items {
			_, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{})
			if err != nil {
				Expect(apierrs.IsNotFound(err)).To(BeTrue())
				for _, volumespec := range sspod.Spec.Volumes {
					if volumespec.PersistentVolumeClaim != nil {
						vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
						framework.Logf("Waiting for Volume: %q to detach from Node: %q", vSpherediskPath, sspod.Spec.NodeName)
						Expect(waitForVSphereDiskToDetach(vsp, vSpherediskPath, types.NodeName(sspod.Spec.NodeName))).NotTo(HaveOccurred())
					}
				}
			}
		}

		By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas))
		_, scaleupErr := statefulsetTester.Scale(statefulset, replicas)
		Expect(scaleupErr).NotTo(HaveOccurred())
		statefulsetTester.WaitForStatusReplicas(statefulset, replicas)
		statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas)

		ssPodsAfterScaleUp := statefulsetTester.GetPodList(statefulset)
		Expect(ssPodsAfterScaleUp.Items).NotTo(BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
		Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(BeTrue(), "Number of Pods in the statefulset should match with number of replicas")

		// After scale up, verify all vsphere volumes are attached to node VMs.
		By("Verify all volumes are attached to Nodes after Statefulsets is scaled up")
		for _, sspod := range ssPodsAfterScaleUp.Items {
			err := framework.WaitForPodsReady(client, statefulset.Namespace, sspod.Name, 0)
			Expect(err).NotTo(HaveOccurred())
			pod, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{})
			Expect(err).NotTo(HaveOccurred())
			for _, volumespec := range pod.Spec.Volumes {
				if volumespec.PersistentVolumeClaim != nil {
					vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
					framework.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
					// Verify scale up has re-attached the same volumes and not introduced new volumes
					Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(BeFalse())
					isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(vsp, vSpherediskPath, types.NodeName(sspod.Spec.NodeName))
					Expect(isVolumeAttached).To(BeTrue())
					Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
				}
			}
		}
	})
})
```
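The test calls getVSphereStorageClassSpec, a storage-utility helper that is not part of this diff. As a rough sketch of what such a helper plausibly returns, assuming the in-tree kubernetes.io/vsphere-volume provisioner and the k8s.io/api/storage/v1 types used by the Create call above (the function name and field layout here are illustrative, not the PR's actual code):

```go
package storage

import (
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// getVSphereStorageClassSpecSketch is an illustrative stand-in for the real
// getVSphereStorageClassSpec helper: it builds a StorageClass backed by the
// in-tree vSphere provisioner, carrying the given parameters
// (here, diskformat: thin).
func getVSphereStorageClassSpecSketch(name string, scParameters map[string]string) *storagev1.StorageClass {
	return &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			// e.g. "nginx-sc", referenced by the manifest's storage-class annotation
			Name: name,
		},
		Provisioner: "kubernetes.io/vsphere-volume",
		Parameters:  scParameters,
	}
}
```

The statefulset manifest below references the same class through its volume.beta.kubernetes.io/storage-class: nginx-sc annotation, which is how each volumeClaimTemplate PVC ends up dynamically provisioned as a thin-format VMDK.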
test/e2e/storage/vsphere_utils.go

```diff
@@ -354,3 +354,12 @@ func verifyVSphereVolumesAccessible(pod *v1.Pod, persistentvolumes []*v1.Persist
 		Expect(err).NotTo(HaveOccurred())
 	}
 }
+
+// Get vSphere Volume Path from PVC
+func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string, claimName string) string {
+	pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred())
+	pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred())
+	return pv.Spec.VsphereVolume.VolumePath
+}
```
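waitForVSphereDiskToDetach and verifyVSphereDiskAttached, called from the test above, are likewise utility helpers that live outside this hunk. A minimal sketch of the shape the detach wait could take, assuming the cloud provider exposes a DiskIsAttached check and using the apimachinery wait package (the function name, polling intervals, and the DiskIsAttached signature are assumptions for illustration, not the PR's code):

```go
package storage

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
)

// waitForVSphereDiskToDetachSketch polls until the given VMDK is no longer
// attached to the node VM, or the timeout expires. Illustrative only; the
// real helper in vsphere_utils.go may differ in names and details.
func waitForVSphereDiskToDetachSketch(vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) error {
	return wait.Poll(5*time.Second, 6*time.Minute, func() (bool, error) {
		// DiskIsAttached (assumed here) reports whether the VMDK is currently
		// attached to the node VM.
		attached, err := vsp.DiskIsAttached(volumePath, nodeName)
		if err != nil {
			return false, fmt.Errorf("error checking disk attach state: %v", err)
		}
		// Stop polling once the disk has detached.
		return !attached, nil
	})
}
```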
test/e2e/testing-manifests/statefulset/nginx/service.yaml (new file, 13 lines)
```yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
```
test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml (new file, 34 lines)

```yaml
apiVersion: apps/v1beta2
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: "nginx"
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: gcr.io/google_containers/nginx-slim:0.8
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
      annotations:
        volume.beta.kubernetes.io/storage-class: nginx-sc
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
```