[storage] [test] Remove extra zone test

We're running the ubernetes test
`should only be allowed to provision PDs in zones
where nodes exist`
on GCP and GKE. While the test is useful in exercising
the scenario of identifying an extra zone and
creating a node in it, not every Kubernetes
distribution uses the same approach to create a node.
Furthermore, even if there is an extra zone, we cannot
guarantee that the zone has enough quota. There can also
be other GCP-specific edge cases, not all of which can be
covered within this test. So we are removing the test,
as agreed upon with the storage team.
ravisantoshgudimetla 2021-06-24 13:30:21 -04:00
parent 6dd9deea3d
commit c65b80a637
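
For context: the remaining multi-AZ volume tests in this file stay gated to
GCE-family providers. A minimal sketch of that guard, assuming the usual
e2eskipper idiom (the exact BeforeEach body in the file may differ):

	ginkgo.BeforeEach(func() {
		// Skips the whole suite unless the e2e provider is GCE or GKE,
		// which is why these tests only ran on GCP and GKE.
		e2eskipper.SkipUnlessProviderIs("gce", "gke")
	})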


@@ -19,20 +19,13 @@ package storage
import (
	"context"
	"fmt"
	"strconv"

	"github.com/onsi/ginkgo"
	compute "google.golang.org/api/compute/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/uuid"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -57,130 +50,8 @@ var _ = utils.SIGDescribe("Multi-AZ Cluster Volumes", func() {
	ginkgo.It("should schedule pods in the same zones as statically provisioned PVs", func() {
		PodsUseStaticPVsOrFail(f, (2*zoneCount)+1, image)
	})
	ginkgo.It("should only be allowed to provision PDs in zones where nodes exist", func() {
		OnlyAllowNodeZones(f, zoneCount, image)
	})
})
// OnlyAllowNodeZones tests that PDs are only provisioned in zones with nodes.
func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
	gceCloud, err := gce.GetGCECloud()
	framework.ExpectNoError(err)

	// Get all the zones that the nodes are in
	expectedZones, err := gceCloud.GetAllZonesFromCloudProvider()
	framework.ExpectNoError(err)
	framework.Logf("Expected zones: %v", expectedZones)

	// Get all the zones in this current region
	region := gceCloud.Region()
	allZonesInRegion, err := gceCloud.ListZonesInRegion(region)
	framework.ExpectNoError(err)
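	// Find a zone in this region that has no cluster nodes; a bare compute
	// instance will be created there to verify that provisioning still
	// ignores zones without nodes.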
	var extraZone string
	for _, zone := range allZonesInRegion {
		if !expectedZones.Has(zone.Name) {
			extraZone = zone.Name
			break
		}
	}
	if extraZone == "" {
		e2eskipper.Skipf("All zones in region %s have compute instances, no extra zones available", region)
	}

	ginkgo.By(fmt.Sprintf("starting a compute instance in unused zone: %v\n", extraZone))
	project := framework.TestContext.CloudConfig.ProjectID
	zone := extraZone
	myuuid := string(uuid.NewUUID())
	name := "compute-" + myuuid
	imageURL := "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140606"

	rb := &compute.Instance{
		MachineType: "zones/" + zone + "/machineTypes/f1-micro",
		Disks: []*compute.AttachedDisk{
			{
				AutoDelete: true,
				Boot:       true,
				Type:       "PERSISTENT",
				InitializeParams: &compute.AttachedDiskInitializeParams{
					DiskName:    "my-root-pd-" + myuuid,
					SourceImage: imageURL,
				},
			},
		},
		NetworkInterfaces: []*compute.NetworkInterface{
			{
				AccessConfigs: []*compute.AccessConfig{
					{
						Type: "ONE_TO_ONE_NAT",
						Name: "External NAT",
					},
				},
				Network: "/global/networks/default",
			},
		},
		Name: name,
	}
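	// The instance created here is a plain GCE VM, not a Kubernetes node:
	// it occupies the extra zone without ever registering with the cluster,
	// so the provisioner should still treat that zone as node-less.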
	err = gceCloud.InsertInstance(project, zone, rb)
	framework.ExpectNoError(err)

	defer func() {
		// Teardown of the compute instance
		framework.Logf("Deleting compute resource: %v", name)
		err := gceCloud.DeleteInstance(project, zone, name)
		framework.ExpectNoError(err)
	}()

	ginkgo.By("Creating zoneCount+1 PVCs and making sure PDs are only provisioned in zones with nodes")
	// Create some (zoneCount+1) PVCs with names of form "pvc-x" where x is 1...zoneCount+1
	// This will exploit ChooseZoneForVolume in pkg/volume/util.go to provision them in all the zones it "sees"
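	// ChooseZoneForVolume spreads claims across zones using a hash of the
	// PVC name, with consecutive numeric suffixes landing in consecutive
	// zones, so zoneCount+1 sequentially named claims should cover every
	// zone the provisioner considers.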
	var pvcList []*v1.PersistentVolumeClaim
	c := f.ClientSet
	ns := f.Namespace.Name

	for index := 1; index <= zoneCount+1; index++ {
		pvc := newNamedDefaultClaim(ns, index)
		pvc, err = e2epv.CreatePVC(c, ns, pvc)
		framework.ExpectNoError(err)
		pvcList = append(pvcList, pvc)

		// Defer the cleanup
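		// Each iteration declares its own pvc variable, and the deferred
		// deletes all run at function exit, cleaning up every claim.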
		defer func() {
			framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
			err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{})
			if err != nil {
				framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
			}
		}()
	}

	// Wait for all claims bound
	for _, claim := range pvcList {
		err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
		framework.ExpectNoError(err)
	}
	pvZones := sets.NewString()
	ginkgo.By("Checking that PDs have been provisioned in only the expected zones")
	for _, claim := range pvcList {
		// Get a new copy of the claim to have all fields populated
		claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)

		// Get the related PV
		pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{})
		framework.ExpectNoError(err)
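		// v1.LabelFailureDomainBetaZone is the legacy
		// "failure-domain.beta.kubernetes.io/zone" label stamped on PVs
		// by in-tree GCE PD provisioning.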
		pvZone, ok := pv.ObjectMeta.Labels[v1.LabelFailureDomainBetaZone]
		framework.ExpectEqual(ok, true, "PV has no LabelZone to be found")
		pvZones.Insert(pvZone)
	}
	framework.ExpectEqual(pvZones.Equal(expectedZones), true, fmt.Sprintf("PDs provisioned in unwanted zones. We want zones: %v, got: %v", expectedZones, pvZones))
}
// Return the number of zones in which we have nodes in this cluster.
func getZoneCount(c clientset.Interface) (int, error) {
	zoneNames, err := e2enode.GetClusterZones(c)
@@ -261,24 +132,3 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
		framework.ExpectNoError(err)
	}
}
func newNamedDefaultClaim(ns string, index int) *v1.PersistentVolumeClaim {
	claim := v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pvc-" + strconv.Itoa(index),
			Namespace: ns,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
			},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
				},
			},
		},
	}
	return &claim
}