Merge pull request #72732 from sandeeppsunny/vsphere_volume_zone

Added e2e tests for multi-zone volume provisioning through VCP
This commit is contained in:
Kubernetes Prow Robot 2019-02-20 09:01:27 -08:00 committed by GitHub
commit 21e3c15dfe
18 changed files with 652 additions and 24 deletions

View File

@@ -35,6 +35,7 @@ go_library(
"vsphere_volume_placement.go",
"vsphere_volume_vpxd_restart.go",
"vsphere_volume_vsan_policy.go",
"vsphere_zone_support.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/vsphere",
deps = [
@@ -59,6 +60,8 @@ go_library(
"//vendor/github.com/vmware/govmomi/find:go_default_library",
"//vendor/github.com/vmware/govmomi/object:go_default_library",
"//vendor/github.com/vmware/govmomi/session:go_default_library",
"//vendor/github.com/vmware/govmomi/vapi/rest:go_default_library",
"//vendor/github.com/vmware/govmomi/vapi/tags:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library",

View File

@@ -55,5 +55,10 @@ func bootstrapOnce() {
if err != nil {
framework.Failf("Failed to bootstrap vSphere with error: %v", err)
}
// 4. Generate Zone to Datastore mapping
err = TestContext.NodeMapper.GenerateZoneToDatastoreMap()
if err != nil {
framework.Failf("Failed to generate zone to datastore mapping with error: %v", err)
}
close(waiting)
}

View File

@@ -23,9 +23,14 @@ import (
"sync"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vapi/rest"
"github.com/vmware/govmomi/vapi/tags"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
neturl "net/url"
)
type NodeMapper struct {
@@ -35,11 +40,20 @@ type NodeInfo struct {
Name string
DataCenterRef types.ManagedObjectReference
VirtualMachineRef types.ManagedObjectReference
HostSystemRef types.ManagedObjectReference
VSphere *VSphere
Zones []string
}
const (
DatacenterType = "Datacenter"
ClusterComputeResourceType = "ClusterComputeResource"
HostSystemType = "HostSystem"
)
var (
nameToNodeInfo = make(map[string]*NodeInfo)
vcToZoneDatastoresMap = make(map[string](map[string][]string))
)
// GenerateNodeMap populates node name to node info map
@@ -104,9 +118,11 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node
continue
}
if vm != nil {
framework.Logf("Found node %s as vm=%+v in vc=%s and datacenter=%s",
n.Name, vm, res.vs.Config.Hostname, res.datacenter.Name())
nodeInfo := &NodeInfo{Name: n.Name, DataCenterRef: res.datacenter.Reference(), VirtualMachineRef: vm.Reference(), VSphere: res.vs}
hostSystemRef := res.vs.GetHostFromVMReference(ctx, vm.Reference())
zones := retrieveZoneInformationForNode(n.Name, res.vs, hostSystemRef)
framework.Logf("Found node %s as vm=%+v placed on host=%+v under zones %s in vc=%s and datacenter=%s",
n.Name, vm, hostSystemRef, zones, res.vs.Config.Hostname, res.datacenter.Name())
nodeInfo := &NodeInfo{Name: n.Name, DataCenterRef: res.datacenter.Reference(), VirtualMachineRef: vm.Reference(), HostSystemRef: hostSystemRef, VSphere: res.vs, Zones: zones}
nm.SetNodeInfo(n.Name, nodeInfo)
break
}
@@ -123,6 +139,144 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node
return nil
}
// Establish rest connection to retrieve tag manager stub
func withTagsClient(ctx context.Context, connection *VSphere, f func(c *rest.Client) error) error {
c := rest.NewClient(connection.Client.Client)
user := neturl.UserPassword(connection.Config.Username, connection.Config.Password)
if err := c.Login(ctx, user); err != nil {
return err
}
defer c.Logout(ctx)
return f(c)
}
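The login/logout pattern above is reusable on its own. A minimal standalone sketch, listing tag categories instead of node tags (listCategories is a hypothetical name; rest.NewClient, Login/Logout, and the tag manager's GetCategories are existing govmomi APIs, and a connected *govmomi.Client with valid credentials is assumed):
func listCategories(ctx context.Context, vc *govmomi.Client, user *neturl.Userinfo) ([]string, error) {
    // Same pattern as withTagsClient: login, defer logout, then use the tag manager.
    c := rest.NewClient(vc.Client)
    if err := c.Login(ctx, user); err != nil {
        return nil, err
    }
    defer c.Logout(ctx)
    m := tags.NewManager(c)
    categories, err := m.GetCategories(ctx)
    if err != nil {
        return nil, err
    }
    var names []string
    for _, category := range categories {
        names = append(names, category.Name)
    }
    return names, nil
}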
// retrieveZoneInformationForNode returns the zones in which the given node's host is placed
func retrieveZoneInformationForNode(nodeName string, connection *VSphere, hostSystemRef types.ManagedObjectReference) []string {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var zones []string
pc := connection.Client.ServiceContent.PropertyCollector
withTagsClient(ctx, connection, func(c *rest.Client) error {
client := tags.NewManager(c)
// Example result (ordered from root to leaf): ["Datacenter", "Cluster", "Host"]
ancestors, err := mo.Ancestors(ctx, connection.Client, pc, hostSystemRef)
if err != nil {
return err
}
var validAncestors []mo.ManagedEntity
// Keep only the Datacenter, ClusterComputeResource and HostSystem objects. These appear
// in the order ["Datacenter" < "ClusterComputeResource" < "HostSystem"], so zones attached
// to the HostSystem type receive the highest precedence.
for _, ancestor := range ancestors {
moType := ancestor.ExtensibleManagedObject.Self.Type
if moType == DatacenterType || moType == ClusterComputeResourceType || moType == HostSystemType {
validAncestors = append(validAncestors, ancestor)
}
}
for _, ancestor := range validAncestors {
var zonesAttachedToObject []string
tags, err := client.ListAttachedTags(ctx, ancestor)
if err != nil {
return err
}
for _, value := range tags {
tag, err := client.GetTag(ctx, value)
if err != nil {
return err
}
category, err := client.GetCategory(ctx, tag.CategoryID)
if err != nil {
return err
}
switch {
case category.Name == "k8s-zone":
framework.Logf("Found %s associated with %s for %s", tag.Name, ancestor.Name, nodeName)
zonesAttachedToObject = append(zonesAttachedToObject, tag.Name)
case category.Name == "k8s-region":
framework.Logf("Found %s associated with %s for %s", tag.Name, ancestor.Name, nodeName)
}
}
// Overwrite zone information if it exists for this object
if len(zonesAttachedToObject) != 0 {
zones = zonesAttachedToObject
}
}
return nil
})
return zones
}
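The precedence rule reduces to a simple overwrite loop. A worked sketch with the vSphere calls stripped out (zone names are illustrative):
func exampleZonePrecedence() []string {
    // Ancestors arrive ordered Datacenter -> ClusterComputeResource -> HostSystem,
    // so zones found on a more specific object replace zones found on a broader one.
    ancestorZones := [][]string{
        {},         // Datacenter: untagged
        {"zone-b"}, // ClusterComputeResource tagged with zone-b
        {"zone-a"}, // HostSystem tagged with zone-a: highest precedence
    }
    var zones []string
    for _, attached := range ancestorZones {
        if len(attached) != 0 {
            zones = attached
        }
    }
    return zones // ["zone-a"]
}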
// Generate zone to datastore mapping for easily verifying volume placement
func (nm *NodeMapper) GenerateZoneToDatastoreMap() error {
// 1. Create zone to hosts map for each VC
var vcToZoneHostsMap = make(map[string](map[string][]string))
// 2. Create host to datastores map for each VC
var vcToHostDatastoresMap = make(map[string](map[string][]string))
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// 3. Populate vcToZoneHostsMap and vcToHostDatastoresMap
for _, nodeInfo := range nameToNodeInfo {
vc := nodeInfo.VSphere.Config.Hostname
host := nodeInfo.HostSystemRef.Value
for _, zone := range nodeInfo.Zones {
if vcToZoneHostsMap[vc] == nil {
vcToZoneHostsMap[vc] = make(map[string][]string)
}
// Populating vcToZoneHostsMap using the HostSystemRef and Zone fields from each NodeInfo
hosts := vcToZoneHostsMap[vc][zone]
hosts = append(hosts, host)
vcToZoneHostsMap[vc][zone] = hosts
}
if vcToHostDatastoresMap[vc] == nil {
vcToHostDatastoresMap[vc] = make(map[string][]string)
}
datastores := vcToHostDatastoresMap[vc][host]
// Populating vcToHostDatastoresMap by finding out the datastores mounted on node's host
datastoreRefs := nodeInfo.VSphere.GetDatastoresMountedOnHost(ctx, nodeInfo.HostSystemRef)
for _, datastore := range datastoreRefs {
datastores = append(datastores, datastore.Value)
}
vcToHostDatastoresMap[vc][host] = datastores
}
// 4. Populate vcToZoneDatastoresMap from vcToZoneHostsMap and vcToHostDatastoresMap
for vc, zoneToHostsMap := range vcToZoneHostsMap {
for zone, hosts := range zoneToHostsMap {
commonDatastores := retrieveCommonDatastoresAmongHosts(hosts, vcToHostDatastoresMap[vc])
if vcToZoneDatastoresMap[vc] == nil {
vcToZoneDatastoresMap[vc] = make(map[string][]string)
}
vcToZoneDatastoresMap[vc][zone] = commonDatastores
}
}
framework.Logf("Zone to datastores map : %+v", vcToZoneDatastoresMap)
return nil
}
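For a testbed like the one described in vsphere_zone_support.go below, the resulting map would have roughly this shape (the vCenter hostname and datastore MoRef IDs here are hypothetical; only the vc -> zone -> datastores structure matters):
var exampleVcToZoneDatastoresMap = map[string]map[string][]string{
    "vc-1.example.com": {
        "zone-a": {"datastore-11", "datastore-21"}, // vsanDatastore, sharedVmfs-0
        "zone-b": {"datastore-12", "datastore-21"}, // vsanDatastore (1), sharedVmfs-0
        "zone-c": nil,                              // no datastore is shared by every zone-c host
    },
}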
// Retrieves the common datastores from the specified hosts
func retrieveCommonDatastoresAmongHosts(hosts []string, hostToDatastoresMap map[string][]string) []string {
var datastoreCountMap = make(map[string]int)
for _, host := range hosts {
for _, datastore := range hostToDatastoresMap[host] {
datastoreCountMap[datastore] = datastoreCountMap[datastore] + 1
}
}
var commonDatastores []string
numHosts := len(hosts)
for datastore, count := range datastoreCountMap {
if count == numHosts {
commonDatastores = append(commonDatastores, datastore)
}
}
return commonDatastores
}
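A usage sketch of the counting trick above, with made-up host and datastore names; a datastore survives exactly when its count equals the number of hosts, which generalizes pairwise set intersection to any number of hosts:
func exampleCommonDatastores() {
    // Hypothetical inventory: sharedVmfs-0 is the only datastore mounted on all three hosts.
    hostToDatastores := map[string][]string{
        "host-1": {"vsanDatastore", "sharedVmfs-0", "local-1"},
        "host-2": {"vsanDatastore", "sharedVmfs-0"},
        "host-3": {"sharedVmfs-0", "local-3"},
    }
    hosts := []string{"host-1", "host-2", "host-3"}
    // Map iteration order is randomized, so the result order is not guaranteed.
    fmt.Println(retrieveCommonDatastoresAmongHosts(hosts, hostToDatastores))
    // Output: [sharedVmfs-0]
}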
// Get all the datastores in the specified zone
func (nm *NodeMapper) GetDatastoresInZone(vc string, zone string) []string {
return vcToZoneDatastoresMap[vc][zone]
}
// GetNodeInfo returns the NodeInfo for the given nodeName
func (nm *NodeMapper) GetNodeInfo(nodeName string) *NodeInfo {
return nameToNodeInfo[nodeName]
}
View File

@@ -27,6 +27,7 @@ import (
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/kubernetes/test/e2e/framework"
@@ -84,6 +85,33 @@ func (vs *VSphere) GetVMByUUID(ctx context.Context, vmUUID string, dc object.Ref
return s.FindByUuid(ctx, datacenter, vmUUID, true, nil)
}
// Get host object reference of the host on which the specified VM resides
func (vs *VSphere) GetHostFromVMReference(ctx context.Context, vm types.ManagedObjectReference) types.ManagedObjectReference {
Connect(ctx, vs)
var vmMo mo.VirtualMachine
// Fail fast if the property retrieval errors out; otherwise vmMo.Summary would be nil below.
if err := vs.Client.RetrieveOne(ctx, vm, []string{"summary.runtime.host"}, &vmMo); err != nil {
framework.Failf("Failed to retrieve host for vm %v: %v", vm, err)
}
return *vmMo.Summary.Runtime.Host
}
// Get the datastore references of all the datastores mounted on the specified host
func (vs *VSphere) GetDatastoresMountedOnHost(ctx context.Context, host types.ManagedObjectReference) []types.ManagedObjectReference {
Connect(ctx, vs)
var hostMo mo.HostSystem
// Check the retrieval error rather than silently returning an empty datastore list.
if err := vs.Client.RetrieveOne(ctx, host, []string{"datastore"}, &hostMo); err != nil {
framework.Failf("Failed to retrieve datastores for host %v: %v", host, err)
}
return hostMo.Datastore
}
// Get the datastore reference of the specified datastore
func (vs *VSphere) GetDatastoreRefFromName(ctx context.Context, dc object.Reference, datastoreName string) (types.ManagedObjectReference, error) {
Connect(ctx, vs)
datacenter := object.NewDatacenter(vs.Client.Client, dc.Reference())
finder := find.NewFinder(vs.Client.Client, false)
finder.SetDatacenter(datacenter)
datastore, err := finder.Datastore(ctx, datastoreName)
if err != nil {
// Return the zero reference instead of dereferencing a nil datastore on lookup failure.
return types.ManagedObjectReference{}, err
}
return datastore.Reference(), nil
}
// GetFolderByPath gets the Folder Object Reference from the given folder path
// folderPath should be the full path to folder
func (vs *VSphere) GetFolderByPath(ctx context.Context, dc object.Reference, folderPath string) (vmFolderMor types.ManagedObjectReference, err error) {

View File

@@ -52,6 +52,17 @@ const (
VCPPerfIterations = "VCP_PERF_ITERATIONS"
)
const (
VCPZoneVsanDatastore1 = "VCP_ZONE_VSANDATASTORE1"
VCPZoneVsanDatastore2 = "VCP_ZONE_VSANDATASTORE2"
VCPZoneCompatPolicyName = "VCP_ZONE_COMPATPOLICY_NAME"
VCPZoneNonCompatPolicyName = "VCP_ZONE_NONCOMPATPOLICY_NAME"
VCPZoneA = "VCP_ZONE_A"
VCPZoneB = "VCP_ZONE_B"
VCPZoneC = "VCP_ZONE_C"
VCPZoneD = "VCP_ZONE_D"
)
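These variables are read at suite start-up via GetAndExpectStringEnvVar below. Against the testbed described in vsphere_zone_support.go, a run would export values along these lines (shown via os.Setenv purely for illustration; in practice they are exported in the shell before invoking the suite, and all values are assumptions mirroring that testbed):
func exampleZoneTestEnv() {
    // zone-d deliberately names a zone that does not exist in the inventory.
    os.Setenv("VCP_ZONE_VSANDATASTORE1", "vsanDatastore")
    os.Setenv("VCP_ZONE_VSANDATASTORE2", "vsanDatastore (1)")
    os.Setenv("VCP_ZONE_COMPATPOLICY_NAME", "compatpolicy")
    os.Setenv("VCP_ZONE_NONCOMPATPOLICY_NAME", "noncompatpolicy")
    os.Setenv("VCP_ZONE_A", "zone-a")
    os.Setenv("VCP_ZONE_B", "zone-b")
    os.Setenv("VCP_ZONE_C", "zone-c")
    os.Setenv("VCP_ZONE_D", "zone-d")
}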
func GetAndExpectStringEnvVar(varName string) string {
varValue := os.Getenv(varName)
Expect(varValue).NotTo(BeEmpty(), "ENV "+varName+" is not set")

View File

@@ -129,7 +129,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
case storageclass4:
scParams[Datastore] = datastoreName
}
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(scname, scParams))
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(scname, scParams, nil))
Expect(sc).NotTo(BeNil(), "Storage class is empty")
Expect(err).NotTo(HaveOccurred(), "Failed to create storage class")
defer client.StorageV1().StorageClasses().Delete(scname, nil)

View File

@@ -70,7 +70,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
By("Creating StorageClass for Statefulset")
scParameters := make(map[string]string)
scParameters["diskformat"] = "thin"
scSpec := getVSphereStorageClassSpec(storageclassname, scParameters)
scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil)
sc, err := client.StorageV1().StorageClasses().Create(scSpec)
Expect(err).NotTo(HaveOccurred())
defer client.StorageV1().StorageClasses().Delete(sc.Name, nil)

View File

@@ -85,22 +85,22 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
var err error
switch scname {
case storageclass1:
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil))
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil, nil))
case storageclass2:
var scVSanParameters map[string]string
scVSanParameters = make(map[string]string)
scVSanParameters[Policy_HostFailuresToTolerate] = "1"
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters))
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil))
case storageclass3:
var scSPBMPolicyParameters map[string]string
scSPBMPolicyParameters = make(map[string]string)
scSPBMPolicyParameters[SpbmStoragePolicy] = policyName
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters))
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil))
case storageclass4:
var scWithDSParameters map[string]string
scWithDSParameters = make(map[string]string)
scWithDSParameters[Datastore] = datastoreName
scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters)
scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil)
sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec)
}
Expect(sc).NotTo(BeNil())

View File

@@ -235,7 +235,7 @@ func verifyContentOfVSpherePV(client clientset.Interface, pvc *v1.PersistentVolu
framework.Logf("Successfully verified content of the volume")
}
func getVSphereStorageClassSpec(name string, scParameters map[string]string) *storage.StorageClass {
func getVSphereStorageClassSpec(name string, scParameters map[string]string, zones []string) *storage.StorageClass {
var sc *storage.StorageClass
sc = &storage.StorageClass{
@@ -250,6 +250,17 @@ func getVSphereStorageClassSpec(name string, scParameters map[string]string) *st
if scParameters != nil {
sc.Parameters = scParameters
}
if zones != nil {
term := v1.TopologySelectorTerm{
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
{
Key: v1.LabelZoneFailureDomain,
Values: zones,
},
},
}
sc.AllowedTopologies = append(sc.AllowedTopologies, term)
}
return sc
}
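For zones = []string{"zone-a", "zone-b"}, the helper therefore yields a StorageClass equivalent to the sketch below (the name and provisioner shown here are illustrative assumptions about the rest of the spec, and the usual metav1 import is assumed):
var exampleZoneSC = &storage.StorageClass{
    ObjectMeta:  metav1.ObjectMeta{Name: "zone-sc"},
    Provisioner: "kubernetes.io/vsphere-volume",
    AllowedTopologies: []v1.TopologySelectorTerm{{
        MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{{
            Key:    v1.LabelZoneFailureDomain, // "failure-domain.beta.kubernetes.io/zone"
            Values: []string{"zone-a", "zone-b"},
        }},
    }},
}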
@@ -399,6 +410,39 @@ func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persiste
}
}
// verifyVolumeCreationOnRightZone verifies that each volume was provisioned on a datastore shared by all the specified zones
func verifyVolumeCreationOnRightZone(persistentvolumes []*v1.PersistentVolume, nodeName string, zones []string) {
for _, pv := range persistentvolumes {
volumePath := pv.Spec.VsphereVolume.VolumePath
// Extract datastoreName from the volume path in the pv spec
// For example : "vsanDatastore" is extracted from "[vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk"
datastorePathObj, _ := getDatastorePathObjFromVMDiskPath(volumePath)
datastoreName := datastorePathObj.Datastore
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Get the datastore object reference from the datastore name
datastoreRef, err := nodeInfo.VSphere.GetDatastoreRefFromName(ctx, nodeInfo.DataCenterRef, datastoreName)
Expect(err).NotTo(HaveOccurred())
// Find common datastores among the specified zones
var datastoreCountMap = make(map[string]int)
numZones := len(zones)
var commonDatastores []string
for _, zone := range zones {
datastoreInZone := TestContext.NodeMapper.GetDatastoresInZone(nodeInfo.VSphere.Config.Hostname, zone)
for _, datastore := range datastoreInZone {
datastoreCountMap[datastore] = datastoreCountMap[datastore] + 1
if datastoreCountMap[datastore] == numZones {
commonDatastores = append(commonDatastores, datastore)
}
}
}
Expect(commonDatastores).To(ContainElement(datastoreRef.Value), "PV was created in an unsupported zone.")
}
}
// Get vSphere Volume Path from PVC
func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string, claimName string) string {
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})

View File

@@ -79,7 +79,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]",
func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
By("Creating Storage Class With Invalid Datastore")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DatastoreSCName, scParameters))
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DatastoreSCName, scParameters, nil))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)

View File

@@ -106,7 +106,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
scParameters["diskformat"] = diskFormat
By("Creating Storage Class With DiskFormat")
storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters)
storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters, nil)
storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec)
Expect(err).NotTo(HaveOccurred())

View File

@@ -77,7 +77,7 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
func invokeInvalidDiskSizeTestNeg(client clientset.Interface, namespace string, scParameters map[string]string, diskSize string) error {
By("Creating Storage Class With invalid disk size")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DiskSizeSCName, scParameters))
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DiskSizeSCName, scParameters, nil))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)

View File

@@ -146,7 +146,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
}
func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) {
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("fstype", scParameters))
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("fstype", scParameters, nil))
Expect(err).NotTo(HaveOccurred())
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)

View File

@@ -75,7 +75,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
*/
It("verify volume status after node power off", func() {
By("Creating a Storage Class")
storageClassSpec := getVSphereStorageClassSpec("test-sc", nil)
storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil)
storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)

View File

@@ -87,7 +87,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
By("Creating Storage Class")
scParameters := make(map[string]string)
scParameters["diskformat"] = "thin"
storageclass, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("thinsc", scParameters))
storageclass, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("thinsc", scParameters, nil))
Expect(err).NotTo(HaveOccurred())
By("Creating PVCs using the Storage Class")

View File

@@ -128,22 +128,22 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName
var err error
switch scname {
case storageclass1:
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil))
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil, nil))
case storageclass2:
var scVSanParameters map[string]string
scVSanParameters = make(map[string]string)
scVSanParameters[Policy_HostFailuresToTolerate] = "1"
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters))
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil))
case storageclass3:
var scSPBMPolicyParameters map[string]string
scSPBMPolicyParameters = make(map[string]string)
scSPBMPolicyParameters[SpbmStoragePolicy] = policyName
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters))
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil))
case storageclass4:
var scWithDSParameters map[string]string
scWithDSParameters = make(map[string]string)
scWithDSParameters[Datastore] = datastoreName
scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters)
scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil)
sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec)
}
Expect(sc).NotTo(BeNil())

View File

@@ -273,7 +273,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, namespace string, scParameters map[string]string) {
By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters))
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
@@ -305,7 +305,7 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n
func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters))
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
@@ -324,7 +324,7 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc
func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterNode string, namespace string, clusterName string, scParameters map[string]string) {
By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters))
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)

View File

@@ -0,0 +1,383 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
Test to verify multi-zone support for dynamic volume provisioning in kubernetes.
The test environment is illustrated below:
datacenter
--->cluster-vsan-1 (zone-a)    ____________________      _________________
    --->host-1 : master       |                    |    |                 |
    --->host-2 : node1        |   vsanDatastore    |    |                 |
    --->host-3 : node2        |____________________|    |                 |
                                                        |                 |
                                                        |  sharedVmfs-0   |
--->cluster-vsan-2 (zone-b)    ____________________     |                 |
    --->host-4 : node3        |                    |    |                 |
    --->host-5 : node4        | vsanDatastore (1)  |    |                 |
    --->host-6                |____________________|    |_________________|

--->cluster-3 (zone-c)         ________________
    --->host-7 : node5        |                |
                              | localDatastore |
                              |________________|

                               ____________________
--->host-8 (zone-c) : node6   |                    |
                              | localDatastore (1) |
                              |____________________|
Testbed description :
1. cluster-vsan-1 is tagged with zone-a. So, vsanDatastore inherits zone-a since all the hosts under zone-a have vsanDatastore mounted on them. (A govmomi sketch of this tagging setup follows this comment block.)
2. cluster-vsan-2 is tagged with zone-b. So, vsanDatastore (1) inherits zone-b since all the hosts under zone-b have vsanDatastore (1) mounted on them.
3. sharedVmfs-0 inherits both zone-a and zone-b since all the hosts in both zone-a and zone-b have this datastore mounted on them.
4. cluster-3 is tagged with zone-c. cluster-3 only contains host-7.
5. host-8 is not under any cluster and is tagged with zone-c.
6. Since there are no shared datastores between host-7 under cluster-3 and host-8, no datastores in the environment inherit zone-c.
7. The six worker nodes are distributed among the hosts as shown in the above illustration.
8. Two storage policies are created on VC. One is a VSAN storage policy named compatpolicy with the hostFailuresToTolerate capability set to 1.
The second is a VSAN storage policy named noncompatpolicy with the hostFailuresToTolerate capability set to 4.
Testsuite description :
1. Tests to verify that zone labels are set correctly on a dynamically created PV.
2. Tests to verify dynamic pv creation fails if availability zones are not specified or if there are no shared datastores under the specified zones.
3. Tests to verify dynamic pv creation using availability zones works in combination with other storage class parameters such as storage policy,
datastore and VSAN capabilities.
4. Tests to verify dynamic pv creation using availability zones fails in combination with other storage class parameters such as storage policy,
datastore and VSAN capabilities, when any of those parameters are incompatible with the rest.
*/
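Standing up such a testbed means attaching tags from the k8s-zone category to clusters and hosts before the suite runs. A hedged govmomi sketch of tagging a single object (tagZone is a hypothetical helper; GetTagForCategory and AttachTag are existing vapi/tags manager methods, and the category and tag are assumed to already exist):
// tagZone attaches an existing zone tag (e.g. "zone-a") from the "k8s-zone"
// category to a cluster or host reference, mirroring the testbed layout above.
func tagZone(ctx context.Context, c *rest.Client, ref types.ManagedObjectReference, zone string) error {
    m := tags.NewManager(c)
    tag, err := m.GetTagForCategory(ctx, zone, "k8s-zone")
    if err != nil {
        return err
    }
    return m.AttachTag(ctx, tag.ID, ref)
}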
var _ = utils.SIGDescribe("Zone Support", func() {
f := framework.NewDefaultFramework("zone-support")
var (
client clientset.Interface
namespace string
scParameters map[string]string
zones []string
vsanDatastore1 string
vsanDatastore2 string
compatPolicy string
nonCompatPolicy string
zoneA string
zoneB string
zoneC string
zoneD string
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
vsanDatastore1 = GetAndExpectStringEnvVar(VCPZoneVsanDatastore1)
vsanDatastore2 = GetAndExpectStringEnvVar(VCPZoneVsanDatastore2)
compatPolicy = GetAndExpectStringEnvVar(VCPZoneCompatPolicyName)
nonCompatPolicy = GetAndExpectStringEnvVar(VCPZoneNonCompatPolicyName)
zoneA = GetAndExpectStringEnvVar(VCPZoneA)
zoneB = GetAndExpectStringEnvVar(VCPZoneB)
zoneC = GetAndExpectStringEnvVar(VCPZoneC)
zoneD = GetAndExpectStringEnvVar(VCPZoneD)
scParameters = make(map[string]string)
zones = make([]string, 0)
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) == 0 {
framework.Failf("Unable to find ready and schedulable Node")
}
})
It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func() {
By(fmt.Sprintf("Creating storage class with the following zones : %s", zoneA))
zones = append(zones, zoneA)
verifyPVZoneLabels(client, namespace, nil, zones)
})
It("Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels", func() {
By(fmt.Sprintf("Creating storage class with the following zones : %s, %s", zoneA, zoneB))
zones = append(zones, zoneA)
zones = append(zones, zoneB)
verifyPVZoneLabels(client, namespace, nil, zones)
})
It("Verify PVC creation with invalid zone specified in storage class fails", func() {
By(fmt.Sprintf("Creating storage class with unknown zone : %s", zoneD))
zones = append(zones, zoneD)
err := verifyPVCCreationFails(client, namespace, nil, zones)
Expect(err).To(HaveOccurred())
errorMsg := "Failed to find a shared datastore matching zone [" + zoneD + "]"
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func() {
By(fmt.Sprintf("Creating storage class with zones :%s", zoneA))
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones)
})
It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ", func() {
By(fmt.Sprintf("Creating storage class with zones :%s, %s", zoneA, zoneB))
zones = append(zones, zoneA)
zones = append(zones, zoneB)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones)
})
It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func() {
By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneA, vsanDatastore1))
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones)
})
It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func() {
By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneC, vsanDatastore1))
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneC)
err := verifyPVCCreationFails(client, namespace, scParameters, zones)
errorMsg := "The specified datastore " + scParameters[Datastore] + " does not match the provided zones : [" + zoneC + "]"
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() {
By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones)
})
It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func() {
By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, nonCompatPolicy))
scParameters[SpbmStoragePolicy] = nonCompatPolicy
zones = append(zones, zoneA)
err := verifyPVCCreationFails(client, namespace, scParameters, zones)
errorMsg := "No compatible datastores found that satisfy the storage policy requirements"
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones, datastore and storage policy specified in storage class", func() {
By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones)
})
It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails", func() {
By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, nonCompatPolicy))
scParameters[SpbmStoragePolicy] = nonCompatPolicy
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA)
err := verifyPVCCreationFails(client, namespace, scParameters, zones)
errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + nonCompatPolicy + "\\\"."
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
It("Verify PVC creation with incompatible zone along with compatible storagePolicy and datastore combination specified in storage class fails", func() {
By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneC, vsanDatastore2, compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy
scParameters[Datastore] = vsanDatastore2
zones = append(zones, zoneC)
err := verifyPVCCreationFails(client, namespace, scParameters, zones)
errorMsg := "The specified datastore " + scParameters[Datastore] + " does not match the provided zones : [" + zoneC + "]"
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
It("Verify PVC creation fails if no zones are specified in the storage class (No shared datastores exist among all the nodes)", func() {
By(fmt.Sprintf("Creating storage class with no zones"))
err := verifyPVCCreationFails(client, namespace, nil, nil)
errorMsg := "No shared datastores found in the Kubernetes cluster"
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
It("Verify PVC creation fails if only datastore is specified in the storage class (No shared datastores exist among all the nodes)", func() {
By(fmt.Sprintf("Creating storage class with datastore :%s", vsanDatastore1))
scParameters[Datastore] = vsanDatastore1
err := verifyPVCCreationFails(client, namespace, scParameters, nil)
errorMsg := "No shared datastores found in the Kubernetes cluster"
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
It("Verify PVC creation fails if only storage policy is specified in the storage class (No shared datastores exist among all the nodes)", func() {
By(fmt.Sprintf("Creating storage class with storage policy :%s", compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy
err := verifyPVCCreationFails(client, namespace, scParameters, nil)
errorMsg := "No shared datastores found in the Kubernetes cluster"
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
It("Verify PVC creation with compatible policy and datastore without any zones specified in the storage class fails (No shared datastores exist among all the nodes)", func() {
By(fmt.Sprintf("Creating storage class with storage policy :%s and datastore :%s", compatPolicy, vsanDatastore1))
scParameters[SpbmStoragePolicy] = compatPolicy
scParameters[Datastore] = vsanDatastore1
err := verifyPVCCreationFails(client, namespace, scParameters, nil)
errorMsg := "No shared datastores found in the Kubernetes cluster"
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
It("Verify PVC creation fails if the availability zone specified in the storage class have no shared datastores under it.", func() {
By(fmt.Sprintf("Creating storage class with zone :%s", zoneC))
zones = append(zones, zoneC)
err := verifyPVCCreationFails(client, namespace, nil, zones)
errorMsg := "Failed to find a shared datastore matching zone [" + zoneC + "]"
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in the storage class. (No shared datastores exist among both zones)", func() {
By(fmt.Sprintf("Creating storage class with the following zones :%s and %s", zoneA, zoneC))
zones = append(zones, zoneA)
zones = append(zones, zoneC)
err := verifyPVCCreationFails(client, namespace, nil, zones)
errorMsg := "Failed to find a shared datastore matching zone [" + zoneA + " " + zoneC + "]"
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
It("Verify PVC creation with an invalid VSAN capability along with a compatible zone combination specified in storage class fails", func() {
By(fmt.Sprintf("Creating storage class with %s :%s and zone :%s", Policy_HostFailuresToTolerate, HostFailuresToTolerateCapabilityInvalidVal, zoneA))
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
zones = append(zones, zoneA)
err := verifyPVCCreationFails(client, namespace, scParameters, zones)
errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "."
if !strings.Contains(err.Error(), errorMsg) {
Expect(err).NotTo(HaveOccurred(), errorMsg)
}
})
It("Verify a pod is created and attached to a dynamically created PV, based on a VSAN capability, datastore and compatible zone specified in storage class", func() {
By(fmt.Sprintf("Creating storage class with %s :%s, %s :%s, datastore :%s and zone :%s", Policy_ObjectSpaceReservation, ObjectSpaceReservationCapabilityVal, Policy_IopsLimit, IopsLimitCapabilityVal, vsanDatastore1, zoneA))
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones)
})
})
func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) {
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
By("Creating pod to attach PV to the node")
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())
By("Verify persistent volume was created on the right zone")
verifyVolumeCreationOnRightZone(persistentvolumes, pod.Spec.NodeName, zones)
By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
By("Deleting pod")
framework.DeletePodWithWait(f, client, pod)
By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
}
func verifyPVCCreationFails(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) error {
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
Expect(err).To(HaveOccurred())
eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
framework.Logf("Failure message : %+q", eventList.Items[0].Message)
return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
}
func verifyPVZoneLabels(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) {
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
By("Creating PVC using the storage class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
Expect(err).NotTo(HaveOccurred())
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
By("Verify zone information is present in the volume labels")
for _, pv := range persistentvolumes {
// Multiple zones are separated with "__"
pvZoneLabels := strings.Split(pv.ObjectMeta.Labels["failure-domain.beta.kubernetes.io/zone"], "__")
for _, zone := range zones {
Expect(pvZoneLabels).Should(ContainElement(zone), "Incorrect or missing zone labels in pv.")
}
}
}
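For reference, multiple zones are flattened into a single label value before being split here; a tiny sketch of the round trip (values illustrative):
func exampleZoneLabelRoundTrip() []string {
    // A PV provisioned across zone-a and zone-b carries the label value "zone-a__zone-b".
    labelValue := "zone-a__zone-b"
    return strings.Split(labelValue, "__") // ["zone-a", "zone-b"]
}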