Merge pull request #40756 from vmware/e2eTestsUpdate

Automatic merge from submit-queue (batch tested with PRs 38796, 40823, 40756, 41083, 41105)

e2e tests for vSphere cloud provider

**What this PR does / why we need it**:
This PR updates the existing e2e volume provisioning test cases so they can run on the vSphere cloud provider.

**Summary of changes made to existing e2e test cases**

**Added test/e2e/persistent_volumes-vsphere.go**
- This test verifies that deleting a PVC before the pod, and likewise deleting the PV before the pod, does not cause pod deletion to fail on PD detach.
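Both cases reduce to roughly the following sketch (helper names are from this PR; error handling elided):

```go
// Delete the claim (or, in the second case, the PV) first, then the pod.
// Pod deletion must still succeed, and the disk must detach cleanly afterwards.
deletePersistentVolumeClaim(c, pvc.Name, ns)
deletePodWithWait(f, c, clientPod)
```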

**test/e2e/volume_provisioning.go**
- This test creates a StorageClass and a claim with the dynamic provisioning and alpha dynamic provisioning annotations, and verifies that the required volumes are created. The test also verifies that the created volume is readable and retains data.
- Added vsphere as a supported cloud provider, and set pluginName to "kubernetes.io/vsphere-volume" for the vsphere cloud provider.
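For vsphere, the StorageClass built by newStorageClass ends up roughly like this (a sketch; the object name is illustrative, and only the provisioner string is taken from the diff below):

```go
class := &storage.StorageClass{
	ObjectMeta:  metav1.ObjectMeta{Name: "vsphere-test-sc"}, // illustrative name
	Provisioner: "kubernetes.io/vsphere-volume",
}
```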

**test/e2e/volumes.go**
- Added test spec for vsphere
- This test creates the requested volume, mounts it on a pod, writes content to /opt/0/index.html, and verifies the file contents match the expected string, so we don't pick up content from previous test runs.
- The test also passes 1234 as the fsGroup for the volume mount and verifies that the fsGroup is set correctly.
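Passing the fsGroup through testVolumeClient amounts to setting the pod-level security context; roughly (a sketch of the effect, not the framework's exact code):

```go
fsGroup := int64(1234)
pod.Spec.SecurityContext = &v1.PodSecurityContext{FSGroup: &fsGroup}
```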

**Added test/e2e/vsphere_utils.go**
- Added function verifyVSphereDiskAttached - verifies the persistent disk is attached to the node.
- Added function waitForVSphereDiskToDetach - waits until the vSphere VMDK is detached from the given node, or times out after 5 minutes.
- Added getVSpherePersistentVolumeSpec - creates a vSphere persistent volume spec with the given VMDK volume path, reclaim policy, and labels.
- Added getVSpherePersistentVolumeClaimSpec - creates a vSphere persistent volume claim spec with the given selector labels.
- Added createVSphereVolume - creates a VMDK volume.
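Together these helpers form the common setup path used by the new tests; a condensed sketch (error handling and API object creation elided; all names are from vsphere_utils.go below):

```go
vsp, _ := vsphere.GetVSphere()
volumePath, _ := createVSphereVolume(vsp, nil)
pv := getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, nil)
pvc := getVSpherePersistentVolumeClaimSpec(ns, nil)
// ... create the PV/PVC through the API and run a pod against the claim, then:
isAttached, _ := verifyVSphereDiskAttached(vsp, volumePath, node)
// ... and once the pod is deleted:
waitForVSphereDiskToDetach(vsp, volumePath, node)
```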

**Summary of new e2e tests added in this PR**

**test/e2e/vsphere_volume_placement.go**
- Contains volume placement tests using a node label selector
- Tests back-to-back pod creation/deletion with the same volume source on the same worker node
- Tests back-to-back pod creation/deletion with the same volume source attached/detached across different worker nodes
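Placement is driven by a plain node selector on the pod spec, set from the label applied during test setup; roughly (see getPodSpec in vsphere_volume_placement.go below):

```go
// Pin the pod to the labeled node so the volume attach happens there.
pod.Spec.NodeSelector = map[string]string{"vsphere_e2e_label": node1LabelValue}
```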

**test/e2e/pv_reclaimpolicy.go**
- Contains tests for the PV/PVC reclaim policy
- Verifies that a persistent volume is deleted when reclaimPolicy on the PV is set to Delete and the associated claim is deleted
- Also verifies that a persistent volume is retained when reclaimPolicy on the PV is set to Retain and the associated claim is deleted
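The two cases differ only in the reclaim policy passed when building the PV spec (helper from vsphere_utils.go):

```go
pv := getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimDelete, nil)
// or v1.PersistentVolumeReclaimRetain to keep the volume after its claim is deleted
```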

**test/e2e/pvc_label_selector.go** 
- This is a functional test for the selector-label volume binding feature.
- Verifies that the volume with the matching label is bound to the PVC.
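The claim-side matching is just a label selector on the PVC spec, as getVSpherePersistentVolumeClaimSpec sets below:

```go
pvc.Spec.Selector = &metav1.LabelSelector{
	MatchLabels: map[string]string{"volume-type": "ssd"},
}
```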

**Other changes**
Updated pkg/cloudprovider/providers/vsphere/BUILD and test/e2e/BUILD


**Which issue this PR fixes**:
fixes #41087

**Special notes for your reviewer**:
The updated tests were executed against the Kubernetes v1.4.8 release on vSphere.
Test steps are provided in comments.


@kerneltime @BaluDontu
Kubernetes Submit Queue 2017-02-08 00:49:47 -08:00 committed by GitHub
commit 44980eb55c
10 changed files with 1060 additions and 7 deletions


@@ -10,7 +10,10 @@ load(
go_library(
name = "go_default_library",
srcs = ["vsphere.go"],
srcs = [
"vsphere.go",
"vsphere_util.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",


@@ -0,0 +1,52 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"context"
"os"
"runtime"
"strings"
)
// Reads the vSphere configuration from environment variables and constructs a VSphere object
func GetVSphere() (*VSphere, error) {
var cfg VSphereConfig
var err error
cfg.Global.VCenterIP = os.Getenv("VSPHERE_VCENTER")
cfg.Global.VCenterPort = os.Getenv("VSPHERE_VCENTER_PORT")
cfg.Global.User = os.Getenv("VSPHERE_USER")
cfg.Global.Password = os.Getenv("VSPHERE_PASSWORD")
cfg.Global.Datacenter = os.Getenv("VSPHERE_DATACENTER")
cfg.Global.Datastore = os.Getenv("VSPHERE_DATASTORE")
cfg.Global.WorkingDir = os.Getenv("VSPHERE_WORKING_DIR")
cfg.Global.InsecureFlag = false
if strings.ToLower(os.Getenv("VSPHERE_INSECURE")) == "true" {
cfg.Global.InsecureFlag = true
}
c, err := newClient(context.TODO(), &cfg)
if err != nil {
return nil, err
}
vs := VSphere{
client: c,
cfg: &cfg,
localInstanceID: "",
}
runtime.SetFinalizer(&vs, logout)
return &vs, nil
}


@@ -69,11 +69,14 @@ go_library(
"pd.go",
"persistent_volumes.go",
"persistent_volumes-disruptive.go",
"persistent_volumes-vsphere.go",
"pod_gc.go",
"pods.go",
"portforward.go",
"pre_stop.go",
"proxy.go",
"pv_reclaimpolicy.go",
"pvc_label_selector.go",
"pvutil.go",
"rc.go",
"reboot.go",
@@ -95,6 +98,8 @@ go_library(
"util_iperf.go",
"volume_provisioning.go",
"volumes.go",
"vsphere_utils.go",
"vsphere_volume_placement.go",
],
tags = ["automanaged"],
deps = [
@@ -122,6 +127,7 @@ go_library(
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/aws:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/cloudprovider/providers/vsphere:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/endpoint:go_default_library",


@@ -0,0 +1,169 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Testing configurations of a single PV/PVC pair attached to a vSphere Disk
var _ = framework.KubeDescribe("PersistentVolumes:vsphere", func() {
var (
c clientset.Interface
ns string
volumePath string
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
clientPod *v1.Pod
pvConfig persistentVolumeConfig
vsp *vsphere.VSphere
err error
node types.NodeName
)
f := framework.NewDefaultFramework("pv")
/*
Test Setup
1. Create volume (vmdk)
2. Create PV with volume path for the vmdk.
3. Create PVC to bind with PV.
4. Create a POD using the PVC.
5. Verify the disk is attached to the node.
*/
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
c = f.ClientSet
ns = f.Namespace.Name
clientPod = nil
pvc = nil
pv = nil
if vsp == nil {
vsp, err = vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
}
if volumePath == "" {
volumePath, err = createVSphereVolume(vsp, nil)
Expect(err).NotTo(HaveOccurred())
pvConfig = persistentVolumeConfig{
namePrefix: "vspherepv-",
pvSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
FSType: "ext4",
},
},
prebind: nil,
}
}
By("Creating the PV and PVC")
pv, pvc = createPVPVC(c, pvConfig, ns, false)
waitOnPVandPVC(c, ns, pv, pvc)
By("Creating the Client Pod")
clientPod = createClientPod(c, ns, pvc)
node = types.NodeName(clientPod.Spec.NodeName) // assign (not :=) so the cleanup action below sees the node
By("Verify disk should be attached to the node")
isAttached, err := verifyVSphereDiskAttached(vsp, volumePath, node)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "disk is not attached with the node")
})
AfterEach(func() {
framework.Logf("AfterEach: Cleaning up test resources")
if c != nil {
if clientPod != nil {
clientPod, err = c.CoreV1().Pods(ns).Get(clientPod.Name, metav1.GetOptions{})
if !apierrs.IsNotFound(err) {
deletePodWithWait(f, c, clientPod)
}
}
if pv != nil {
deletePersistentVolume(c, pv.Name)
}
if pvc != nil {
deletePersistentVolumeClaim(c, pvc.Name, ns)
}
}
})
/*
Clean up
1. Wait and verify volume is detached from the node
2. Delete PV
3. Delete Volume (vmdk)
*/
AddCleanupAction(func() {
if len(volumePath) > 0 {
waitForVSphereDiskToDetach(vsp, volumePath, node)
vsp.DeleteVolume(volumePath)
}
})
/*
Delete the PVC and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete.
Test Steps:
1. Delete PVC.
2. Delete POD, POD deletion should succeed.
*/
It("should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach", func() {
By("Deleting the Claim")
deletePersistentVolumeClaim(c, pvc.Name, ns)
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
if !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
pvc = nil
By("Deleting the Pod")
deletePodWithWait(f, c, clientPod)
})
/*
Delete the PV and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete.
Test Steps:
1. Delete PV.
2. Delete POD, POD deletion should succeed.
*/
It("should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach", func() {
By("Deleting the Persistent Volume")
deletePersistentVolume(c, pv.Name)
pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
if !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
pv = nil
By("Deleting the pod")
deletePodWithWait(f, c, clientPod)
})
})


@@ -0,0 +1,196 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"strconv"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
)
var _ = framework.KubeDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
f := framework.NewDefaultFramework("persistentvolumereclaim")
var (
c clientset.Interface
ns string
volumePath string
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
)
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
})
framework.KubeDescribe("persistentvolumereclaim:vsphere", func() {
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
pv = nil
pvc = nil
volumePath = ""
})
AfterEach(func() {
vsp, err := vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
testCleanupVSpherePersistentVolumeReclaim(vsp, c, ns, volumePath, pv, pvc)
})
/*
This test verifies that the persistent volume is deleted when reclaimPolicy on the PV is set to Delete and
the associated claim is deleted
Test Steps:
1. Create vmdk
2. Create PV Spec with volume path set to VMDK file created in Step-1, and PersistentVolumeReclaimPolicy is set to Delete
3. Create PVC with the storage request set to PV's storage capacity.
4. Wait for PV and PVC to bind.
5. Delete PVC
6. Verify PV is deleted automatically.
*/
It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func() {
vsp, err := vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(vsp, c, ns, v1.PersistentVolumeReclaimDelete)
Expect(err).NotTo(HaveOccurred())
deletePVCAfterBind(c, ns, pvc, pv)
pvc = nil
By("verify pv is deleted")
err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred())
pv = nil
volumePath = ""
})
/*
This test verifies that the persistent volume is retained when reclaimPolicy on the PV is set to Retain
and the associated claim is deleted
Test Steps:
1. Create vmdk
2. Create PV Spec with volume path set to VMDK file created in Step-1, and PersistentVolumeReclaimPolicy is set to Retain
3. Create PVC with the storage request set to PV's storage capacity.
4. Wait for PV and PVC to bind.
5. Write some content in the volume.
6. Delete PVC
7. Verify PV is retained.
8. Delete retained PV.
9. Create PV Spec with the same volume path used in step 2.
10. Create PVC with the storage request set to PV's storage capacity.
11. Create a POD using the PVC created in Step 10 and verify the volume content matches.
*/
It("should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted", func() {
var volumeFileContent = "hello from vsphere cloud provider, Random Content is :" + strconv.FormatInt(time.Now().UnixNano(), 10)
vsp, err := vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(vsp, c, ns, v1.PersistentVolumeReclaimRetain)
Expect(err).NotTo(HaveOccurred())
writeContentToVSpherePV(c, pvc, volumeFileContent)
By("Delete PVC")
deletePersistentVolumeClaim(c, pvc.Name, ns)
pvc = nil
By("Verify PV is retained")
framework.Logf("Waiting for PV %v to become Released", pv.Name)
err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred())
deletePersistentVolume(c, pv.Name)
By("Creating the PV for same volume path")
pv = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, nil)
pv, err = c.CoreV1().PersistentVolumes().Create(pv)
Expect(err).NotTo(HaveOccurred())
By("creating the pvc")
pvc = getVSpherePersistentVolumeClaimSpec(ns, nil)
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
Expect(err).NotTo(HaveOccurred())
By("wait for the pv and pvc to bind")
waitOnPVandPVC(c, ns, pv, pvc)
verifyContentOfVSpherePV(c, pvc, volumeFileContent)
})
})
})
// Test Setup for persistentvolumereclaim tests for vSphere Provider
func testSetupVSpherePersistentVolumeReclaim(vsp *vsphere.VSphere, c clientset.Interface, ns string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy) (volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, err error) {
By("running testSetupVSpherePersistentVolumeReclaim")
By("creating vmdk")
volumePath, err = createVSphereVolume(vsp, nil)
if err != nil {
return
}
By("creating the pv")
pv = getVSpherePersistentVolumeSpec(volumePath, persistentVolumeReclaimPolicy, nil)
pv, err = c.CoreV1().PersistentVolumes().Create(pv)
if err != nil {
return
}
By("creating the pvc")
pvc = getVSpherePersistentVolumeClaimSpec(ns, nil)
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
return
}
// Test Cleanup for persistentvolumereclaim tests for vSphere Provider
func testCleanupVSpherePersistentVolumeReclaim(vsp *vsphere.VSphere, c clientset.Interface, ns string, volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
By("running testCleanupVSpherePersistentVolumeReclaim")
if len(volumePath) > 0 {
vsp.DeleteVolume(volumePath)
}
if pv != nil {
deletePersistentVolume(c, pv.Name)
}
if pvc != nil {
deletePersistentVolumeClaim(c, pvc.Name, ns)
}
}
// func to wait until PV and PVC bind and once bind completes, delete the PVC
func deletePVCAfterBind(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
var err error
By("wait for the pv and pvc to bind")
waitOnPVandPVC(c, ns, pv, pvc)
By("delete pvc")
deletePersistentVolumeClaim(c, pvc.Name, ns)
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
if !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
}


@@ -0,0 +1,150 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
)
/*
This is a functional test for the Selector-Label Volume Binding feature.
The test verifies that a volume with the matching label is bound to the PVC.
Test Steps
----------
1. Create VMDK.
2. Create PV with label volume-type:ssd, volume path set to the VMDK created in the previous step, and PersistentVolumeReclaimPolicy set to Delete.
3. Create PVC (pvc_vvol) with label selector to match volume-type:vvol.
4. Create PVC (pvc_ssd) with label selector to match volume-type:ssd.
5. Wait and verify that pvc_ssd is bound to the PV.
6. Verify that the status of pvc_vvol is still pending.
7. Delete pvc_ssd.
8. Verify that the associated PV is also deleted.
9. Delete pvc_vvol.
*/
var _ = framework.KubeDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
f := framework.NewDefaultFramework("pvclabelselector")
var (
c clientset.Interface
ns string
pv_ssd *v1.PersistentVolume
pvc_ssd *v1.PersistentVolumeClaim
pvc_vvol *v1.PersistentVolumeClaim
volumePath string
ssdlabels map[string]string
vvollabels map[string]string
err error
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
ssdlabels = make(map[string]string)
ssdlabels["volume-type"] = "ssd"
vvollabels = make(map[string]string)
vvollabels["volume-type"] = "vvol"
})
framework.KubeDescribe("Selector-Label Volume Binding:vsphere", func() {
AfterEach(func() {
By("Running clean up actions")
if framework.ProviderIs("vsphere") {
testCleanupVSpherePVClabelselector(c, ns, volumePath, pv_ssd, pvc_ssd, pvc_vvol)
}
})
It("should bind volume with claim for given label", func() {
volumePath, pv_ssd, pvc_ssd, pvc_vvol, err = testSetupVSpherePVClabelselector(c, ns, ssdlabels, vvollabels)
Expect(err).NotTo(HaveOccurred())
By("wait for the pvc_ssd to bind with pv_ssd")
waitOnPVandPVC(c, ns, pv_ssd, pvc_ssd)
By("Verify status of pvc_vvol is pending")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvc_vvol.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred())
By("delete pvc_ssd")
deletePersistentVolumeClaim(c, pvc_ssd.Name, ns)
By("verify pv_ssd is deleted")
err = framework.WaitForPersistentVolumeDeleted(c, pv_ssd.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred())
volumePath = ""
By("delete pvc_vvol")
deletePersistentVolumeClaim(c, pvc_vvol.Name, ns)
})
})
})
func testSetupVSpherePVClabelselector(c clientset.Interface, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim, err error) {
volumePath = ""
By("creating vmdk")
vsp, err := vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
volumePath, err = createVSphereVolume(vsp, nil)
if err != nil {
return
}
By("creating the pv with lable volume-type:ssd")
pv_ssd = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimDelete, ssdlabels)
pv_ssd, err = c.CoreV1().PersistentVolumes().Create(pv_ssd)
if err != nil {
return
}
By("creating pvc with label selector to match with volume-type:vvol")
pvc_vvol = getVSpherePersistentVolumeClaimSpec(ns, vvollabels)
pvc_vvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_vvol)
if err != nil {
return
}
By("creating pvc with label selector to match with volume-type:ssd")
pvc_ssd = getVSpherePersistentVolumeClaimSpec(ns, ssdlabels)
pvc_ssd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_ssd)
return
}
func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim) {
By("running testCleanupVSpherePVClabelselector")
if len(volumePath) > 0 {
vsp, err := vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
vsp.DeleteVolume(volumePath)
}
if pvc_ssd != nil {
deletePersistentVolumeClaim(c, pvc_ssd.Name, ns)
}
if pvc_vvol != nil {
deletePersistentVolumeClaim(c, pvc_vvol.Name, ns)
}
if pv_ssd != nil {
deletePersistentVolume(c, pv_ssd.Name)
}
}


@@ -34,9 +34,10 @@ import (
const (
// Requested size of the volume
requestedSize = "1500Mi"
// Expected size of the volume is 2GiB, because all three supported cloud
// providers allocate volumes in 1GiB chunks.
// Expected size of the volume is 2GiB, for "openstack", "gce", "aws", "gke", as they allocate volumes in 1GiB chunks
expectedSize = "2Gi"
// vsphere provider does not allocate volumes in 1GiB chunks, so setting expected size equal to requestedSize
vsphereExpectedSize = "1500Mi"
)
func testDynamicProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim) {
@@ -54,12 +55,15 @@ func testDynamicProvisioning(client clientset.Interface, claim *v1.PersistentVol
// Check sizes
expectedCapacity := resource.MustParse(expectedSize)
if framework.ProviderIs("vsphere") {
expectedCapacity = resource.MustParse(vsphereExpectedSize)
}
pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))
Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()), "pvCapacity is not equal to expectedCapacity")
requestedCapacity := resource.MustParse(requestedSize)
claimCapacity := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))
Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()), "claimCapacity is not equal to requestedCapacity")
// Check PV properties
Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(v1.PersistentVolumeReclaimDelete))
@@ -103,7 +107,7 @@ var _ = framework.KubeDescribe("Dynamic provisioning", func() {
framework.KubeDescribe("DynamicProvisioner", func() {
It("should create and delete persistent volumes [Slow] [Volume]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke")
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere")
By("creating a StorageClass")
class := newStorageClass()
@@ -125,7 +129,7 @@ var _ = framework.KubeDescribe("Dynamic provisioning", func() {
framework.KubeDescribe("DynamicProvisioner Alpha", func() {
It("should create and delete alpha persistent volumes [Slow] [Volume]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke")
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere")
By("creating a claim with an alpha dynamic provisioning annotation")
claim := newClaim(ns, true)
@@ -229,6 +233,8 @@ func newStorageClass() *storage.StorageClass {
pluginName = "kubernetes.io/aws-ebs"
case framework.ProviderIs("openstack"):
pluginName = "kubernetes.io/cinder"
case framework.ProviderIs("vsphere"):
pluginName = "kubernetes.io/vsphere-volume"
}
return &storage.StorageClass{


@@ -50,6 +50,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
"github.com/golang/glog"
@@ -928,4 +929,58 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
testVolumeClient(cs, config, nil, tests)
})
})
////////////////////////////////////////////////////////////////////////
// vSphere
////////////////////////////////////////////////////////////////////////
framework.KubeDescribe("vsphere", func() {
It("should be mountable", func() {
framework.SkipUnlessProviderIs("vsphere")
var (
volumePath string
)
config := VolumeTestConfig{
namespace: namespace.Name,
prefix: "vsphere",
}
By("creating a test vsphere volume")
vsp, err := vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
volumePath, err = createVSphereVolume(vsp, nil)
Expect(err).NotTo(HaveOccurred())
defer func() {
vsp.DeleteVolume(volumePath)
}()
defer func() {
if clean {
framework.Logf("Running volumeTestCleanup")
volumeTestCleanup(f, config)
}
}()
tests := []VolumeTest{
{
volume: v1.VolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
FSType: "ext4",
},
},
file: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
expectedContent: "Hello from vSphere from namespace " + namespace.Name,
},
}
injectHtml(cs, config, tests[0].volume, tests[0].expectedContent)
fsGroup := int64(1234)
testVolumeClient(cs, config, &fsGroup, tests)
})
})
})

test/e2e/vsphere_utils.go

@@ -0,0 +1,180 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"strconv"
"time"
"fmt"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/test/e2e/framework"
)
// Sanity check for vSphere testing. Verifies that the persistent disk is attached to the node.
func verifyVSphereDiskAttached(vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) (bool, error) {
var (
isAttached bool
err error
)
if vsp == nil {
vsp, err = vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
}
isAttached, err = vsp.DiskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
return isAttached, err
}
// Wait until the vSphere VMDK is detached from the given node, or time out after 5 minutes
func waitForVSphereDiskToDetach(vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) {
var (
err error
diskAttached = true
detachTimeout = 5 * time.Minute
detachPollTime = 10 * time.Second
)
if vsp == nil {
vsp, err = vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
}
err = wait.Poll(detachPollTime, detachTimeout, func() (bool, error) {
diskAttached, err = verifyVSphereDiskAttached(vsp, volumePath, nodeName)
if err != nil {
return true, err
}
if !diskAttached {
framework.Logf("Volume %q appears to have successfully detached from %q.",
volumePath, nodeName)
return true, nil
}
framework.Logf("Waiting for Volume %q to detach from %q.", volumePath, nodeName)
return false, nil
})
Expect(err).NotTo(HaveOccurred())
if diskAttached {
Expect(fmt.Errorf("Gave up waiting for Volume %q to detach from %q after %v", volumePath, nodeName, detachTimeout)).NotTo(HaveOccurred())
}
}
// Creates a vSphere persistent volume spec with the given VMDK volume path, reclaim policy, and labels
func getVSpherePersistentVolumeSpec(volumePath string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy, labels map[string]string) *v1.PersistentVolume {
var (
pvConfig persistentVolumeConfig
pv *v1.PersistentVolume
claimRef *v1.ObjectReference
)
pvConfig = persistentVolumeConfig{
namePrefix: "vspherepv-",
pvSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
FSType: "ext4",
},
},
prebind: nil,
}
pv = &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
GenerateName: pvConfig.namePrefix,
Annotations: map[string]string{
volumehelper.VolumeGidAnnotationKey: "777",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
},
PersistentVolumeSource: pvConfig.pvSource,
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
ClaimRef: claimRef,
},
}
if labels != nil {
pv.Labels = labels
}
return pv
}
// Returns a vSphere persistent volume claim spec with the given selector labels
func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]string) *v1.PersistentVolumeClaim {
var (
pvc *v1.PersistentVolumeClaim
)
pvc = &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
Namespace: namespace,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
},
},
},
}
if labels != nil {
pvc.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels}
}
return pvc
}
// Creates a VMDK volume; a nil volumeOptions gets default capacity and a generated name
func createVSphereVolume(vsp *vsphere.VSphere, volumeOptions *vsphere.VolumeOptions) (string, error) {
var (
volumePath string
err error
)
if volumeOptions == nil {
volumeOptions = new(vsphere.VolumeOptions)
volumeOptions.CapacityKB = 2097152
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
}
volumePath, err = vsp.CreateVolume(volumeOptions)
Expect(err).NotTo(HaveOccurred())
return volumePath, err
}
// Writes content to the volume backed by the given PVC
func writeContentToVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
runInPodWithVolume(client, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data")
framework.Logf("Done with writing content to volume")
}
// Verifies that the content of the volume backed by the given PVC matches the expected content
func verifyContentOfVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
runInPodWithVolume(client, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data")
framework.Logf("Successfully verified content of the volume")
}
}


@@ -0,0 +1,236 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
)
var _ = framework.KubeDescribe("Volume Placement [Feature:Volume]", func() {
f := framework.NewDefaultFramework("volume-placement")
var (
c clientset.Interface
ns string
vsp *vsphere.VSphere
volumePath string
node1Name string
node1LabelValue string
node1KeyValueLabel map[string]string
node2Name string
node2LabelValue string
node2KeyValueLabel map[string]string
isNodeLabeled bool
)
/*
Steps
1. Create VMDK volume
2. Find two nodes with the status available and ready for scheduling.
3. Add labels to both nodes (vsphere_e2e_label: random UUID).
*/
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
By("creating vmdk")
var err error
vsp, err = vsphere.GetVSphere() // assign to the outer vsp (no :=) so the tests below can use it
Expect(err).NotTo(HaveOccurred())
volumePath, err = createVSphereVolume(vsp, nil)
Expect(err).NotTo(HaveOccurred())
if !isNodeLabeled {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) < 2 {
framework.Failf("Unable to find at least two ready and schedulable Nodes")
}
node1Name = nodeList.Items[0].Name
node2Name = nodeList.Items[1].Name
node1LabelValue = "vsphere_e2e_" + string(uuid.NewUUID())
node1KeyValueLabel = make(map[string]string)
node1KeyValueLabel["vsphere_e2e_label"] = node1LabelValue
framework.AddOrUpdateLabelOnNode(c, node1Name, "vsphere_e2e_label", node1LabelValue)
node2LabelValue = "vsphere_e2e_" + string(uuid.NewUUID())
node2KeyValueLabel = make(map[string]string)
node2KeyValueLabel["vsphere_e2e_label"] = node2LabelValue
framework.AddOrUpdateLabelOnNode(c, node2Name, "vsphere_e2e_label", node2LabelValue)
}
})
/*
Steps
1. Remove labels assigned to node 1 and node 2
2. Delete VMDK volume
*/
AddCleanupAction(func() {
if len(node1LabelValue) > 0 {
framework.RemoveLabelOffNode(c, node1Name, "vsphere_e2e_label")
}
if len(node2LabelValue) > 0 {
framework.RemoveLabelOffNode(c, node2Name, "vsphere_e2e_label")
}
if len(volumePath) > 0 {
vsp, err := vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
vsp.DeleteVolume(volumePath)
}
})
framework.KubeDescribe("provision pod on node with matching labels", func() {
/*
Steps
1. Create POD Spec with volume path of the vmdk and NodeSelector set to label assigned to node1.
2. Create POD and wait for POD to become ready.
3. Verify volume is attached to the node1.
4. Delete POD.
5. Wait for volume to be detached from the node1.
6. Repeat steps 1 to 5 to make sure back-to-back pod creation on the same worker node with the same volume works as expected.
*/
It("should create and delete pod with the same volume source on the same worker node", func() {
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePath)
deletePodAndWaitForVolumeToDetach(c, ns, vsp, node1Name, pod, volumePath)
By("Creating pod on the same node: " + node1Name)
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePath)
deletePodAndWaitForVolumeToDetach(c, ns, vsp, node1Name, pod, volumePath)
})
/*
Steps
1. Create POD Spec with volume path of the vmdk and NodeSelector set to node1's label.
2. Create POD and wait for POD to become ready.
3. Verify volume is attached to the node1.
4. Delete POD.
5. Wait for volume to be detached from the node1.
6. Create POD Spec with volume path of the vmdk and NodeSelector set to node2's label.
7. Create POD and wait for POD to become ready.
8. Verify volume is attached to the node2.
9. Delete POD.
*/
It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() {
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePath)
deletePodAndWaitForVolumeToDetach(c, ns, vsp, node1Name, pod, volumePath)
By("Creating pod on the another node: " + node2Name)
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node2Name, node2KeyValueLabel, volumePath)
deletePodAndWaitForVolumeToDetach(c, ns, vsp, node2Name, pod, volumePath)
})
})
})
func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, vsp *vsphere.VSphere, nodeName string, nodeKeyValueLabel map[string]string, volumePath string) *v1.Pod {
var pod *v1.Pod
var err error
By("Creating pod on the node: " + nodeName)
podspec := getPodSpec(volumePath, nodeKeyValueLabel, nil)
pod, err = client.CoreV1().Pods(namespace).Create(podspec)
Expect(err).NotTo(HaveOccurred())
By("Waiting for pod to be ready")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
By("Verify volume is attached to the node: " + nodeName)
isAttached, err := verifyVSphereDiskAttached(vsp, volumePath, types.NodeName(nodeName))
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "disk is not attached with the node")
return pod
}
func deletePodAndWaitForVolumeToDetach(client clientset.Interface, namespace string, vsp *vsphere.VSphere, nodeName string, pod *v1.Pod, volumePath string) {
var err error
By("Deleting pod")
err = client.CoreV1().Pods(namespace).Delete(pod.Name, nil)
Expect(err).NotTo(HaveOccurred())
By("Waiting for volume to be detached from the node")
waitForVSphereDiskToDetach(vsp, volumePath, types.NodeName(nodeName))
}
func getPodSpec(volumePath string, keyValuelabel map[string]string, commands []string) *v1.Pod {
if len(commands) == 0 {
commands = []string{"/bin/sh", "-c", "while true ; do sleep 2 ; done"}
}
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "vsphere-e2e-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "vsphere-e2e-container-" + string(uuid.NewUUID()),
Image: "gcr.io/google_containers/busybox:1.24",
Command: commands,
VolumeMounts: []v1.VolumeMount{
{
Name: "vsphere-volume",
MountPath: "/mnt/vsphere-volume",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "vsphere-volume",
VolumeSource: v1.VolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
FSType: "ext4",
},
},
},
},
},
}
if keyValuelabel != nil {
pod.Spec.NodeSelector = keyValuelabel
}
return pod
}