Added e2e test to verify zone support across datacenters in the same vCenter

Sandeep Sunny 2019-09-10 21:59:41 -07:00
parent 0f46a8a4c8
commit 6b0748d3b5
2 changed files with 49 additions and 29 deletions

View File

@@ -57,12 +57,14 @@ const (
 const (
 	VCPZoneVsanDatastore1      = "VCP_ZONE_VSANDATASTORE1"
 	VCPZoneVsanDatastore2      = "VCP_ZONE_VSANDATASTORE2"
+	VCPZoneLocalDatastore      = "VCP_ZONE_LOCALDATASTORE"
 	VCPZoneCompatPolicyName    = "VCP_ZONE_COMPATPOLICY_NAME"
 	VCPZoneNonCompatPolicyName = "VCP_ZONE_NONCOMPATPOLICY_NAME"
 	VCPZoneA                   = "VCP_ZONE_A"
 	VCPZoneB                   = "VCP_ZONE_B"
 	VCPZoneC                   = "VCP_ZONE_C"
 	VCPZoneD                   = "VCP_ZONE_D"
+	VCPInvalidZone             = "VCP_INVALID_ZONE"
 )

 func GetAndExpectStringEnvVar(varName string) string {
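Note: the two new constants name environment variables (VCP_ZONE_LOCALDATASTORE and VCP_INVALID_ZONE) that the zone specs read through GetAndExpectStringEnvVar, so both must be exported in the test environment before the suite runs. The helper's body is not part of this diff; the sketch below is only a plausible reading of what a "get and expect" helper does, with the package clause and the gomega assertion assumed for illustration.

package vsphere // package name assumed for illustration; not taken from this diff

import (
	"os"

	"github.com/onsi/gomega"
)

// GetAndExpectStringEnvVar reads varName and fails the spec if it is unset,
// so a misconfigured testbed fails fast instead of provisioning against a
// missing datastore, policy, or zone tag.
func GetAndExpectStringEnvVar(varName string) string {
	varValue := os.Getenv(varName)
	gomega.Expect(varValue).NotTo(gomega.BeEmpty(), "ENV "+varName+" is not set")
	return varValue
}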

View File

@@ -39,38 +39,38 @@ import (
 	Test to verify multi-zone support for dynamic volume provisioning in kubernetes.
 	The test environment is illustrated below:

-	datacenter
-	--->cluster-vsan-1 (zone-a)          ____________________      _________________
-	    --->host-1 : master             |                    |    |                 |
-	    --->host-2 : node1              |   vsanDatastore    |    |                 |
-	    --->host-3 : node2              |____________________|    |                 |
-	                                                              |                 |
-	                                                              |  sharedVmfs-0   |
-	--->cluster-vsan-2 (zone-b)          ____________________     |                 |
-	    --->host-4 : node3              |                    |    |                 |
-	    --->host-5 : node4              |  vsanDatastore (1) |    |                 |
-	    --->host-6                      |____________________|    |_________________|
-
-	--->cluster-3 (zone-c)               ________________
-	    --->host-7 : node5              |                |
-	                                    | localDatastore |
-	                                    |________________|
-	                                     ____________________
-	--->host-8 (zone-c) : node6         |                    |
-	                                    | localDatastore (1) |
-	                                    |____________________|
+	datacenter-1
+	--->cluster-vsan-1 (zone-a)                            ____________________     _________________
+	    --->host-1 : master                               |                    |   |                 |
+	    --->host-2 : node1           ___________________  |                    |   |                 |
+	    --->host-3 (zone-c): node2  |                   | |   vsanDatastore    |   |                 |
+	                                |  localDatastore   | |                    |   |                 |
+	                                |___________________| |____________________|   |  sharedVmfs-0   |
+	--->cluster-vsan-2 (zone-b)                            ____________________    |                 |
+	    --->host-4 : node3                                |                    |   |                 |
+	    --->host-5 : node4                                |  vsanDatastore (1) |   |                 |
+	    --->host-6                                        |                    |   |                 |
+	                                                      |____________________|   |_________________|
+
+	--->cluster-3 (zone-c)                                 ___________________
+	    --->host-7 : node5                                |                   |
+	                                                      | localDatastore (1)|
+	                                                      |___________________|
+	datacenter-2
+	--->cluster-1 (zone-d)                                 ___________________
+	    --->host-8 : node6                                |                   |
+	                                                      | localDatastore    |
+	                                                      |___________________|

 	Testbed description :
 	1. cluster-vsan-1 is tagged with zone-a. So, vsanDatastore inherits zone-a since all the hosts under zone-a have vsanDatastore mounted on them.
 	2. cluster-vsan-2 is tagged with zone-b. So, vsanDatastore (1) inherits zone-b since all the hosts under zone-b have vsanDatastore (1) mounted on them.
 	3. sharedVmfs-0 inherits both zone-a and zone-b since all the hosts in both zone-a and zone-b have this datastore mounted on them.
 	4. cluster-3 is tagged with zone-c. cluster-3 only contains host-7.
-	5. host-8 is not under any cluster and is tagged with zone-c.
-	6. Since there are no shared datastores between host-7 under cluster-3 and host-8, no datastores in the environment inherit zone-c.
-	7. The six worker nodes are distributed among the hosts as shown in the above illustration.
-	8. Two storage policies are created on VC. One is a VSAN storage policy named as compatpolicy with hostFailuresToTolerate capability set to 1.
+	5. host-3 under cluster-vsan-1 is tagged with zone-c.
+	6. Since there are no shared datastores between host-7 under cluster-3 and host-3 under cluster-vsan-1, no datastores in the environment inherit zone-c.
+	7. host-8 under datacenter-2 and cluster-1 is tagged with zone-d. So, localDatastore attached to host-8 inherits zone-d.
+	8. The six worker nodes are distributed among the hosts as shown in the above illustration.
+	9. Two storage policies are created on VC. One is a VSAN storage policy named as compatpolicy with hostFailuresToTolerate capability set to 1.
 	   Second is a VSAN storage policy named as noncompatpolicy with hostFailuresToTolerate capability set to 4.

 	Testsuite description :
 	1. Tests to verify that zone labels are set correctly on a dynamically created PV.
@@ -79,6 +79,7 @@ import (
 	   datastore and VSAN capabilities.
 	4. Tests to verify dynamic pv creation using availability zones fails in combination with other storage class parameters such as storage policy,
 	   datastore and VSAN capabilities specifications when any of the former mentioned parameters are incompatible with the rest.
+	5. Tests to verify dynamic pv creation using availability zones work across different datacenters in the same VC.
 */

 var _ = utils.SIGDescribe("Zone Support", func() {
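Note on what "allowed zones specified in storage class" means in these specs: each test passes a zones slice plus optional scParameters (datastore name, SPBM policy, VSAN capabilities) into helpers such as verifyPVCAndPodCreationSucceeds, which build a StorageClass for the kubernetes.io/vsphere-volume provisioner. A StorageClass restricted to those zones would look roughly like the sketch below; the function name is hypothetical and the zone-to-allowedTopologies mapping is an assumption, since the real helper is not part of this diff.

package vsphere // illustrative sketch, not part of this change

import (
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newZonedVSphereStorageClass restricts dynamic provisioning to the given zones
// via allowedTopologies and passes the remaining VCP parameters (for example
// "datastore" or "storagePolicyName") straight through to the provisioner.
func newZonedVSphereStorageClass(name string, params map[string]string, zones []string) *storagev1.StorageClass {
	return &storagev1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: name},
		Provisioner: "kubernetes.io/vsphere-volume",
		Parameters:  params,
		AllowedTopologies: []v1.TopologySelectorTerm{{
			MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{{
				Key:    v1.LabelZoneFailureDomain,
				Values: zones,
			}},
		}},
	}
}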
@@ -90,12 +91,14 @@ var _ = utils.SIGDescribe("Zone Support", func() {
 		zones           []string
 		vsanDatastore1  string
 		vsanDatastore2  string
+		localDatastore  string
 		compatPolicy    string
 		nonCompatPolicy string
 		zoneA           string
 		zoneB           string
 		zoneC           string
 		zoneD           string
+		invalidZone     string
 	)

 	ginkgo.BeforeEach(func() {
 		framework.SkipUnlessProviderIs("vsphere")
@@ -104,12 +107,14 @@ var _ = utils.SIGDescribe("Zone Support", func() {
 		namespace = f.Namespace.Name
 		vsanDatastore1 = GetAndExpectStringEnvVar(VCPZoneVsanDatastore1)
 		vsanDatastore2 = GetAndExpectStringEnvVar(VCPZoneVsanDatastore2)
+		localDatastore = GetAndExpectStringEnvVar(VCPZoneLocalDatastore)
 		compatPolicy = GetAndExpectStringEnvVar(VCPZoneCompatPolicyName)
 		nonCompatPolicy = GetAndExpectStringEnvVar(VCPZoneNonCompatPolicyName)
 		zoneA = GetAndExpectStringEnvVar(VCPZoneA)
 		zoneB = GetAndExpectStringEnvVar(VCPZoneB)
 		zoneC = GetAndExpectStringEnvVar(VCPZoneC)
 		zoneD = GetAndExpectStringEnvVar(VCPZoneD)
+		invalidZone = GetAndExpectStringEnvVar(VCPInvalidZone)
 		scParameters = make(map[string]string)
 		zones = make([]string, 0)
 		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
@@ -132,11 +137,11 @@ var _ = utils.SIGDescribe("Zone Support", func() {
 	})

 	ginkgo.It("Verify PVC creation with invalid zone specified in storage class fails", func() {
-		ginkgo.By(fmt.Sprintf("Creating storage class with unknown zone : %s", zoneD))
-		zones = append(zones, zoneD)
+		ginkgo.By(fmt.Sprintf("Creating storage class with unknown zone : %s", invalidZone))
+		zones = append(zones, invalidZone)
 		err := verifyPVCCreationFails(client, namespace, nil, zones, "")
 		framework.ExpectError(err)
-		errorMsg := "Failed to find a shared datastore matching zone [" + zoneD + "]"
+		errorMsg := "Failed to find a shared datastore matching zone [" + invalidZone + "]"
 		if !strings.Contains(err.Error(), errorMsg) {
 			framework.ExpectNoError(err, errorMsg)
 		}
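Note: the positive specs above, and the two added in the next hunk, all funnel into verifyPVCAndPodCreationSucceeds, which is not shown in this diff. Its expected flow is to create the zoned StorageClass, create a PVC against it, let the claim provision and bind, then run a pod and check that the volume attaches in the requested zone. The claim side of that flow would look roughly like this sketch; the helper name, the 2Gi size, and the object names are illustrative, and the types match the client-go generation contemporary with this change.

package vsphere // illustrative sketch, not part of this change

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newZonedPVC returns a claim that requests storage from the zoned StorageClass,
// which is what drives the zone-aware dynamic provisioning exercised by these tests.
func newZonedPVC(namespace, storageClassName string) *v1.PersistentVolumeClaim {
	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "zone-pvc-", Namespace: namespace},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			StorageClassName: &storageClassName,
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: resource.MustParse("2Gi"),
				},
			},
		},
	}
}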
@@ -311,6 +316,19 @@ var _ = utils.SIGDescribe("Zone Support", func() {
 		verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
 	})

+	ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones specified in storage class when the datastore under the zone is present in another datacenter", func() {
+		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneD))
+		zones = append(zones, zoneD)
+		verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
+	})
+
+	ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class when there are multiple datastores with the same name under different zones across datacenters", func() {
+		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore name :%s", zoneD, localDatastore))
+		scParameters[Datastore] = localDatastore
+		zones = append(zones, zoneD)
+		verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
+	})
+
 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode", func() {
 		ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and storage policy :%s", compatPolicy))
 		scParameters[SpbmStoragePolicy] = compatPolicy