From 6b0748d3b5c5c41b6bd4849be0ed903906bd9ba4 Mon Sep 17 00:00:00 2001 From: Sandeep Sunny Date: Tue, 10 Sep 2019 21:59:41 -0700 Subject: [PATCH] Added e2e test to verify zone support across datacenters in the same vCenter --- test/e2e/storage/vsphere/vsphere_common.go | 2 + .../storage/vsphere/vsphere_zone_support.go | 76 ++++++++++++------- 2 files changed, 49 insertions(+), 29 deletions(-) diff --git a/test/e2e/storage/vsphere/vsphere_common.go b/test/e2e/storage/vsphere/vsphere_common.go index a8950928d74..81353ccfbf1 100644 --- a/test/e2e/storage/vsphere/vsphere_common.go +++ b/test/e2e/storage/vsphere/vsphere_common.go @@ -57,12 +57,14 @@ const ( const ( VCPZoneVsanDatastore1 = "VCP_ZONE_VSANDATASTORE1" VCPZoneVsanDatastore2 = "VCP_ZONE_VSANDATASTORE2" + VCPZoneLocalDatastore = "VCP_ZONE_LOCALDATASTORE" VCPZoneCompatPolicyName = "VCP_ZONE_COMPATPOLICY_NAME" VCPZoneNonCompatPolicyName = "VCP_ZONE_NONCOMPATPOLICY_NAME" VCPZoneA = "VCP_ZONE_A" VCPZoneB = "VCP_ZONE_B" VCPZoneC = "VCP_ZONE_C" VCPZoneD = "VCP_ZONE_D" + VCPInvalidZone = "VCP_INVALID_ZONE" ) func GetAndExpectStringEnvVar(varName string) string { diff --git a/test/e2e/storage/vsphere/vsphere_zone_support.go b/test/e2e/storage/vsphere/vsphere_zone_support.go index f5aa994b4cd..d6da43aa7b9 100644 --- a/test/e2e/storage/vsphere/vsphere_zone_support.go +++ b/test/e2e/storage/vsphere/vsphere_zone_support.go @@ -39,38 +39,38 @@ import ( Test to verify multi-zone support for dynamic volume provisioning in kubernetes. The test environment is illustrated below: - datacenter - --->cluster-vsan-1 (zone-a) ____________________ _________________ - --->host-1 : master | | | | - --->host-2 : node1 | vsanDatastore | | | - --->host-3 : node2 |____________________| | | - | | - | sharedVmfs-0 | - --->cluster-vsan-2 (zone-b) ____________________ | | - --->host-4 : node3 | | | | - --->host-5 : node4 | vsanDatastore (1) | | | - --->host-6 |____________________| |_________________| - - --->cluster-3 (zone-c) ________________ - --->host-7 : node5 | | - | localDatastore | - |________________| - ____________________ - --->host-8 (zone-c) : node6 | | - | localDatastore (1) | - |____________________| - + datacenter-1 + --->cluster-vsan-1 (zone-a) ____________________ _________________ + --->host-1 : master | | | | + --->host-2 : node1 ___________________ | | | | + --->host-3 (zone-c): node2 | || vsanDatastore | | | + | localDatastore || | | | + |___________________||____________________| | sharedVmfs-0 | + --->cluster-vsan-2 (zone-b) ____________________ | | + --->host-4 : node3 | | | | + --->host-5 : node4 | vsanDatastore (1) | | | + --->host-6 | | | | + |____________________| |_________________| + --->cluster-3 (zone-c) ___________________ + --->host-7 : node5 | | + | localDatastore (1)| + |___________________| + datacenter-2 + --->cluster-1 (zone-d) ___________________ + --->host-8 : node6 | | + | localDatastore | + |___________________| Testbed description : 1. cluster-vsan-1 is tagged with zone-a. So, vsanDatastore inherits zone-a since all the hosts under zone-a have vsanDatastore mounted on them. 2. cluster-vsan-2 is tagged with zone-b. So, vsanDatastore (1) inherits zone-b since all the hosts under zone-b have vsanDatastore (1) mounted on them. 3. sharedVmfs-0 inherits both zone-a and zone-b since all the hosts in both zone-a and zone-b have this datastore mounted on them. 4. cluster-3 is tagged with zone-c. cluster-3 only contains host-7. - 5. host-8 is not under any cluster and is tagged with zone-c. - 6. 
Since there are no shared datastores between host-7 under cluster-3 and host-8, no datastores in the environment inherit zone-c.
-   7. The six worker nodes are distributed among the hosts as shown in the above illustration.
-   8. Two storage policies are created on VC. One is a VSAN storage policy named as compatpolicy with hostFailuresToTolerate capability set to 1.
-      Second is a VSAN storage policy named as noncompatpolicy with hostFailuresToTolerate capability set to 4.
+   5. host-3 under cluster-vsan-1 is tagged with zone-c.
+   6. Since there are no shared datastores between host-7 under cluster-3 and host-3 under cluster-vsan-1, no datastores in the environment inherit zone-c.
+   7. host-8, which is under cluster-1 in datacenter-2, is tagged with zone-d. So, localDatastore attached to host-8 inherits zone-d.
+   8. The six worker nodes are distributed among the hosts as shown in the above illustration.
+   9. Two storage policies are created on VC. One is a VSAN storage policy named compatpolicy with hostFailuresToTolerate capability set to 1; the second is a VSAN storage policy named noncompatpolicy with hostFailuresToTolerate capability set to 4.
 
    Testsuite description :
    1. Tests to verify that zone labels are set correctly on a dynamically created PV.
@@ -79,6 +79,7 @@ import (
       datastore and VSAN capabilities.
    4. Tests to verify dynamic pv creation using availability zones fails in combination with other storage class parameters such as storage policy,
       datastore and VSAN capabilities specifications when any of the former mentioned parameters are incompatible with the rest.
+   5. Tests to verify dynamic pv creation using availability zones works across different datacenters in the same VC.
 */
 
 var _ = utils.SIGDescribe("Zone Support", func() {
@@ -90,12 +91,14 @@ var _ = utils.SIGDescribe("Zone Support", func() {
 		zones           []string
 		vsanDatastore1  string
 		vsanDatastore2  string
+		localDatastore  string
 		compatPolicy    string
 		nonCompatPolicy string
 		zoneA           string
 		zoneB           string
 		zoneC           string
 		zoneD           string
+		invalidZone     string
 	)
 	ginkgo.BeforeEach(func() {
 		framework.SkipUnlessProviderIs("vsphere")
@@ -104,12 +107,14 @@ var _ = utils.SIGDescribe("Zone Support", func() {
 		namespace = f.Namespace.Name
 		vsanDatastore1 = GetAndExpectStringEnvVar(VCPZoneVsanDatastore1)
 		vsanDatastore2 = GetAndExpectStringEnvVar(VCPZoneVsanDatastore2)
+		localDatastore = GetAndExpectStringEnvVar(VCPZoneLocalDatastore)
 		compatPolicy = GetAndExpectStringEnvVar(VCPZoneCompatPolicyName)
 		nonCompatPolicy = GetAndExpectStringEnvVar(VCPZoneNonCompatPolicyName)
 		zoneA = GetAndExpectStringEnvVar(VCPZoneA)
 		zoneB = GetAndExpectStringEnvVar(VCPZoneB)
 		zoneC = GetAndExpectStringEnvVar(VCPZoneC)
 		zoneD = GetAndExpectStringEnvVar(VCPZoneD)
+		invalidZone = GetAndExpectStringEnvVar(VCPInvalidZone)
 		scParameters = make(map[string]string)
 		zones = make([]string, 0)
 		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
@@ -132,11 +137,11 @@ var _ = utils.SIGDescribe("Zone Support", func() {
 	})
 
 	ginkgo.It("Verify PVC creation with invalid zone specified in storage class fails", func() {
-		ginkgo.By(fmt.Sprintf("Creating storage class with unknown zone : %s", zoneD))
-		zones = append(zones, zoneD)
+		ginkgo.By(fmt.Sprintf("Creating storage class with unknown zone : %s", invalidZone))
+		zones = append(zones, invalidZone)
 		err := verifyPVCCreationFails(client, namespace, nil, zones, "")
 		framework.ExpectError(err)
-		errorMsg := "Failed to find a shared datastore matching zone [" + zoneD + "]"
+		errorMsg := "Failed to find a shared datastore matching zone [" + invalidZone + "]"
 		if !strings.Contains(err.Error(), errorMsg) {
 			framework.ExpectNoError(err, errorMsg)
 		}
 	})
@@ -311,6 
+316,19 @@ var _ = utils.SIGDescribe("Zone Support", func() { verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "") }) + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones specified in storage class when the datastore under the zone is present in another datacenter", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneD)) + zones = append(zones, zoneD) + verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "") + }) + + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class when there are multiple datastores with the same name under different zones across datacenters", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore name :%s", zoneD, localDatastore)) + scParameters[Datastore] = localDatastore + zones = append(zones, zoneD) + verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "") + }) + ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode", func() { ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and storage policy :%s", compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy
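
Reviewer note (illustrative, not part of the patch): the two new zone-d tests boil down to provisioning through a StorageClass of roughly the shape sketched below. This is a minimal sketch, not code from this change: the provisioner name, the "datastore" parameter key, and the "failure-domain.beta.kubernetes.io/zone" topology key are the standard vSphere Cloud Provider (VCP) ones, while the object name and the literal "zone-d"/"localDatastore" values are stand-ins for whatever VCP_ZONE_D and VCP_ZONE_LOCALDATASTORE resolve to in a given testbed.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// zoneDStorageClass sketches the kind of StorageClass the new tests exercise:
// a vSphere volume pinned to a datastore name that exists in both datacenters,
// disambiguated only by the allowed-zone restriction.
func zoneDStorageClass() *storagev1.StorageClass {
	return &storagev1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: "zone-d-local"}, // hypothetical name
		Provisioner: "kubernetes.io/vsphere-volume",
		Parameters: map[string]string{
			// A datastore named "localDatastore" is attached to host-3
			// (datacenter-1) and to host-8 (datacenter-2); only the host-8
			// copy inherits zone-d, so the zone restriction below is what
			// disambiguates the duplicate name.
			"datastore": "localDatastore", // stand-in for VCP_ZONE_LOCALDATASTORE
		},
		AllowedTopologies: []v1.TopologySelectorTerm{{
			MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{{
				Key:    "failure-domain.beta.kubernetes.io/zone",
				Values: []string{"zone-d"}, // stand-in for VCP_ZONE_D
			}},
		}},
	}
}

func main() {
	sc := zoneDStorageClass()
	fmt.Printf("would provision via StorageClass %q restricted to zones %v\n",
		sc.Name, sc.AllowedTopologies[0].MatchLabelExpressions[0].Values)
}

When provisioning succeeds, the dynamically created PV is expected to carry the zone-d failure-domain label, and a pod using the claim should land only on node6, the sole worker node in datacenter-2.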