Merge pull request #75583 from sandeeppsunny/zone_support

Added e2e test to verify zone support across datacenters
Kubernetes Prow Robot 2019-12-10 16:04:06 -08:00 committed by GitHub
commit c0d5d11263
2 changed files with 49 additions and 29 deletions


@@ -57,12 +57,14 @@ const (
const (
VCPZoneVsanDatastore1 = "VCP_ZONE_VSANDATASTORE1"
VCPZoneVsanDatastore2 = "VCP_ZONE_VSANDATASTORE2"
VCPZoneLocalDatastore = "VCP_ZONE_LOCALDATASTORE"
VCPZoneCompatPolicyName = "VCP_ZONE_COMPATPOLICY_NAME"
VCPZoneNonCompatPolicyName = "VCP_ZONE_NONCOMPATPOLICY_NAME"
VCPZoneA = "VCP_ZONE_A"
VCPZoneB = "VCP_ZONE_B"
VCPZoneC = "VCP_ZONE_C"
VCPZoneD = "VCP_ZONE_D"
VCPInvalidZone = "VCP_INVALID_ZONE"
)
func GetAndExpectStringEnvVar(varName string) string {
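The zone-related environment variables above are read through GetAndExpectStringEnvVar, whose body is not part of this hunk. A minimal sketch of such a helper, assuming it only reads the environment variable and fails the test when it is unset:

import (
	"os"

	"github.com/onsi/gomega"
)

// Sketch only: reads an environment variable and fails the current spec if it
// is empty. The real helper lives elsewhere in the vsphere e2e package and may
// differ in detail.
func GetAndExpectStringEnvVar(varName string) string {
	varValue := os.Getenv(varName)
	gomega.Expect(varValue).NotTo(gomega.BeEmpty(), "env var %s is not set", varName)
	return varValue
}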


@@ -40,38 +40,38 @@ import (
Test to verify multi-zone support for dynamic volume provisioning in Kubernetes.
The test environment is illustrated below:
datacenter
--->cluster-vsan-1 (zone-a) ____________________ _________________
--->host-1 : master | | | |
--->host-2 : node1 | vsanDatastore | | |
--->host-3 : node2 |____________________| | |
| |
| sharedVmfs-0 |
--->cluster-vsan-2 (zone-b) ____________________ | |
--->host-4 : node3 | | | |
--->host-5 : node4 | vsanDatastore (1) | | |
--->host-6 |____________________| |_________________|
--->cluster-3 (zone-c) ________________
--->host-7 : node5 | |
| localDatastore |
|________________|
____________________
--->host-8 (zone-c) : node6 | |
| localDatastore (1) |
|____________________|
datacenter-1
--->cluster-vsan-1 (zone-a) ____________________ _________________
--->host-1 : master | | | |
--->host-2 : node1 ___________________ | | | |
--->host-3 (zone-c): node2 | || vsanDatastore | | |
| localDatastore || | | |
|___________________||____________________| | sharedVmfs-0 |
--->cluster-vsan-2 (zone-b) ____________________ | |
--->host-4 : node3 | | | |
--->host-5 : node4 | vsanDatastore (1) | | |
--->host-6 | | | |
|____________________| |_________________|
--->cluster-3 (zone-c) ___________________
--->host-7 : node5 | |
| localDatastore (1)|
|___________________|
datacenter-2
--->cluster-1 (zone-d) ___________________
--->host-8 : node6 | |
| localDatastore |
|___________________|
Testbed description :
1. cluster-vsan-1 is tagged with zone-a. So, vsanDatastore inherits zone-a since all the hosts under zone-a have vsanDatastore mounted on them.
2. cluster-vsan-2 is tagged with zone-b. So, vsanDatastore (1) inherits zone-b since all the hosts under zone-b have vsanDatastore (1) mounted on them.
3. sharedVmfs-0 inherits both zone-a and zone-b since all the hosts in both zone-a and zone-b have this datastore mounted on them.
4. cluster-3 is tagged with zone-c. cluster-3 only contains host-7.
5. host-8 is not under any cluster and is tagged with zone-c.
6. Since there are no shared datastores between host-7 under cluster-3 and host-8, no datastores in the environment inherit zone-c.
7. The six worker nodes are distributed among the hosts as shown in the above illustration.
8. Two storage policies are created on the VC. One is a VSAN storage policy named compatpolicy with the hostFailuresToTolerate capability set to 1.
The second is a VSAN storage policy named noncompatpolicy with the hostFailuresToTolerate capability set to 4.
5. host-3 under cluster-vsan-1 is tagged with zone-c.
6. Since there are no shared datastores between host-7 under cluster-3 and host-3 under cluster-vsan-1, no datastores in the environment inherit zone-c.
7. host-8 under datacenter-2 and cluster-1 is tagged with zone-d. So, localDatastore attached to host-8 inherits zone-d.
8. The six worker nodes are distributed among the hosts as shown in the above illustration.
9. Two storage policies are created on the VC. One is a VSAN storage policy named compatpolicy with the hostFailuresToTolerate capability set to 1.
Testsuite description :
1. Tests to verify that zone labels are set correctly on a dynamically created PV.
@@ -80,6 +80,7 @@ import (
datastore and VSAN capabilities.
4. Tests to verify that dynamic PV creation using availability zones fails in combination with other storage class parameters such as storage policy,
datastore and VSAN capability specifications when any of the aforementioned parameters are incompatible with the rest.
5. Tests to verify that dynamic PV creation using availability zones works across different datacenters in the same VC.
*/
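To ground the allowed-zone mechanics described in the comment above, the following sketch (not part of this PR; the helper name, parameter keys, and zone label key are assumptions for the in-tree vsphere-volume provisioner) shows roughly how a zone-restricted StorageClass can be expressed:

import (
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// zoneStorageClass is a hypothetical helper, not the suite's own, illustrating a
// StorageClass whose volumes may only be provisioned in the given zones. The
// provisioner then has to find a datastore shared by all hosts tagged with those
// zones (e.g. sharedVmfs-0 for zone-a plus zone-b, or nothing for zone-c).
func zoneStorageClass(name string, params map[string]string, zones []string) *storagev1.StorageClass {
	return &storagev1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: name},
		Provisioner: "kubernetes.io/vsphere-volume",
		Parameters:  params, // e.g. "datastore" or "storagePolicyName" entries
		AllowedTopologies: []v1.TopologySelectorTerm{{
			MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{{
				// legacy zone label used by in-tree providers at the time of this PR
				Key:    "failure-domain.beta.kubernetes.io/zone",
				Values: zones, // e.g. []string{zoneA} or []string{zoneD}
			}},
		}},
	}
}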
var _ = utils.SIGDescribe("Zone Support", func() {
@@ -91,12 +92,14 @@ var _ = utils.SIGDescribe("Zone Support", func() {
zones []string
vsanDatastore1 string
vsanDatastore2 string
localDatastore string
compatPolicy string
nonCompatPolicy string
zoneA string
zoneB string
zoneC string
zoneD string
invalidZone string
)
ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
@@ -105,12 +108,14 @@ var _ = utils.SIGDescribe("Zone Support", func() {
namespace = f.Namespace.Name
vsanDatastore1 = GetAndExpectStringEnvVar(VCPZoneVsanDatastore1)
vsanDatastore2 = GetAndExpectStringEnvVar(VCPZoneVsanDatastore2)
localDatastore = GetAndExpectStringEnvVar(VCPZoneLocalDatastore)
compatPolicy = GetAndExpectStringEnvVar(VCPZoneCompatPolicyName)
nonCompatPolicy = GetAndExpectStringEnvVar(VCPZoneNonCompatPolicyName)
zoneA = GetAndExpectStringEnvVar(VCPZoneA)
zoneB = GetAndExpectStringEnvVar(VCPZoneB)
zoneC = GetAndExpectStringEnvVar(VCPZoneC)
zoneD = GetAndExpectStringEnvVar(VCPZoneD)
invalidZone = GetAndExpectStringEnvVar(VCPInvalidZone)
scParameters = make(map[string]string)
zones = make([]string, 0)
_, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
@@ -131,11 +136,11 @@ var _ = utils.SIGDescribe("Zone Support", func() {
})
ginkgo.It("Verify PVC creation with invalid zone specified in storage class fails", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with unknown zone : %s", zoneD))
zones = append(zones, zoneD)
ginkgo.By(fmt.Sprintf("Creating storage class with unknown zone : %s", invalidZone))
zones = append(zones, invalidZone)
err := verifyPVCCreationFails(client, namespace, nil, zones, "")
framework.ExpectError(err)
errorMsg := "Failed to find a shared datastore matching zone [" + zoneD + "]"
errorMsg := "Failed to find a shared datastore matching zone [" + invalidZone + "]"
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
}
@@ -310,6 +315,19 @@ var _ = utils.SIGDescribe("Zone Support", func() {
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
})
ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones specified in storage class when the datastore under the zone is present in another datacenter", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneD))
zones = append(zones, zoneD)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
})
ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class when there are multiple datastores with the same name under different zones across datacenters", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore name :%s", zoneD, localDatastore))
scParameters[Datastore] = localDatastore
zones = append(zones, zoneD)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
})
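verifyPVCAndPodCreationSucceeds is defined elsewhere in the suite and its body is not shown in this diff. Purely as an illustration of what such a test consumes, a claim against the zone-restricted class sketched earlier could look like the following (the names and the 2Gi size are placeholders):

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// zonePVC is a hypothetical example, not the suite's helper: a PVC that requests
// storage from a zone-restricted StorageClass. With Immediate binding the
// provisioner must pick a datastore visible to every host in the requested
// zone(s) (e.g. the zone-d localDatastore in datacenter-2), and the resulting PV
// carries the matching zone labels so pods are scheduled accordingly.
func zonePVC(name, namespace, scName string) *v1.PersistentVolumeClaim {
	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			StorageClassName: &scName,
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: resource.MustParse("2Gi"),
				},
			},
		},
	}
}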
ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and storage policy :%s", compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy