From 95c7b4234093aa38dbd72f5520f44d09fa6fc36d Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Thu, 22 Aug 2019 18:19:58 -0700 Subject: [PATCH 1/2] add topology e2es to storage testsuite Change-Id: I1ec4247cef5d477ae2a76136113a4ed5ce30bf44 --- test/e2e/storage/BUILD | 1 - test/e2e/storage/csi_volumes.go | 142 +------- test/e2e/storage/drivers/BUILD | 1 + test/e2e/storage/drivers/csi.go | 44 ++- test/e2e/storage/drivers/in_tree.go | 2 + test/e2e/storage/in_tree_volumes.go | 1 + test/e2e/storage/regional_pd.go | 56 +++ test/e2e/storage/testpatterns/testpattern.go | 16 + test/e2e/storage/testsuites/BUILD | 2 + test/e2e/storage/testsuites/provisioning.go | 9 - test/e2e/storage/testsuites/testdriver.go | 7 + test/e2e/storage/testsuites/topology.go | 351 +++++++++++++++++++ test/e2e/storage/volume_provisioning.go | 187 ---------- 13 files changed, 478 insertions(+), 341 deletions(-) create mode 100644 test/e2e/storage/testsuites/topology.go diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index 8b91d94fd2c..3ae280e2ae8 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -75,7 +75,6 @@ go_library( "//test/e2e/framework/testfiles:go_default_library", "//test/e2e/framework/volume:go_default_library", "//test/e2e/storage/drivers:go_default_library", - "//test/e2e/storage/testpatterns:go_default_library", "//test/e2e/storage/testsuites:go_default_library", "//test/e2e/storage/utils:go_default_library", "//test/utils/image:go_default_library", diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go index c7898a0613e..358e51286f6 100644 --- a/test/e2e/storage/csi_volumes.go +++ b/test/e2e/storage/csi_volumes.go @@ -17,20 +17,11 @@ limitations under the License. package storage import ( - v1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/test/e2e/framework" - e2epod "k8s.io/kubernetes/test/e2e/framework/pod" - e2epv "k8s.io/kubernetes/test/e2e/framework/pv" "k8s.io/kubernetes/test/e2e/storage/drivers" - "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" "github.com/onsi/ginkgo" - "github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/util/rand" ) // List of testDrivers to be executed in below loop @@ -54,6 +45,7 @@ var csiTestSuites = []func() testsuites.TestSuite{ testsuites.InitDisruptiveTestSuite, testsuites.InitVolumeExpandTestSuite, testsuites.InitVolumeLimitsTestSuite, + testsuites.InitTopologyTestSuite, } // This executes testSuites for csi volumes. @@ -65,136 +57,4 @@ var _ = utils.SIGDescribe("CSI Volumes", func() { testsuites.DefineTestSuite(curDriver, csiTestSuites) }) } - - // TODO: PD CSI driver needs to be serial because it uses a fixed name. Address as part of #71289 - ginkgo.Context("CSI Topology test using GCE PD driver [Serial]", func() { - f := framework.NewDefaultFramework("csitopology") - driver := drivers.InitGcePDCSIDriver().(testsuites.DynamicPVTestDriver) // TODO (#71289) eliminate by moving this test to common test suite. 
- var ( - config *testsuites.PerTestConfig - testCleanup func() - ) - ginkgo.BeforeEach(func() { - driver.SkipUnsupportedTest(testpatterns.TestPattern{}) - config, testCleanup = driver.PrepareTest(f) - }) - - ginkgo.AfterEach(func() { - if testCleanup != nil { - testCleanup() - } - }) - - ginkgo.It("should provision zonal PD with immediate volume binding and AllowedTopologies set and mount the volume to a pod", func() { - suffix := "topology-positive" - testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */, true /* allowedTopologies */) - }) - - ginkgo.It("should provision zonal PD with delayed volume binding and mount the volume to a pod", func() { - suffix := "delayed" - testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, false /* allowedTopologies */) - }) - - ginkgo.It("should provision zonal PD with delayed volume binding and AllowedTopologies set and mount the volume to a pod", func() { - suffix := "delayed-topology-positive" - testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, true /* allowedTopologies */) - }) - - ginkgo.It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with immediate volume binding", func() { - framework.SkipUnlessMultizone(config.Framework.ClientSet) - suffix := "topology-negative" - testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */) - }) - - ginkgo.It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with delayed volume binding", func() { - framework.SkipUnlessMultizone(config.Framework.ClientSet) - suffix := "delayed-topology-negative" - testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */) - }) - }) }) - -func testTopologyPositive(cs clientset.Interface, suffix, namespace string, delayBinding, allowedTopologies bool) { - test := createGCEPDStorageClassTest() - test.DelayBinding = delayBinding - - class := newStorageClass(test, namespace, suffix) - if allowedTopologies { - topoZone := getRandomClusterZone(cs) - addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, topoZone) - } - test.Client = cs - test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ - ClaimSize: test.ClaimSize, - StorageClassName: &(class.Name), - VolumeMode: &test.VolumeMode, - }, namespace) - test.Class = class - - if delayBinding { - _, node := test.TestBindingWaitForFirstConsumer(nil /* node selector */, false /* expect unschedulable */) - gomega.Expect(node).ToNot(gomega.BeNil(), "Unexpected nil node found") - } else { - test.TestDynamicProvisioning() - } -} - -func testTopologyNegative(cs clientset.Interface, suffix, namespace string, delayBinding bool) { - framework.SkipUnlessMultizone(cs) - - // Use different zones for pod and PV - zones, err := framework.GetClusterZones(cs) - framework.ExpectNoError(err) - gomega.Expect(zones.Len()).To(gomega.BeNumerically(">=", 2)) - zonesList := zones.UnsortedList() - podZoneIndex := rand.Intn(zones.Len()) - podZone := zonesList[podZoneIndex] - pvZone := zonesList[(podZoneIndex+1)%zones.Len()] - - test := createGCEPDStorageClassTest() - test.DelayBinding = delayBinding - nodeSelector := map[string]string{v1.LabelZoneFailureDomain: podZone} - - test.Client = cs - test.Class = newStorageClass(test, 
namespace, suffix) - addSingleCSIZoneAllowedTopologyToStorageClass(cs, test.Class, pvZone) - test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ - ClaimSize: test.ClaimSize, - StorageClassName: &(test.Class.Name), - VolumeMode: &test.VolumeMode, - }, namespace) - if delayBinding { - test.TestBindingWaitForFirstConsumer(nodeSelector, true /* expect unschedulable */) - } else { - test.PvCheck = func(claim *v1.PersistentVolumeClaim) { - // Ensure that a pod cannot be scheduled in an unsuitable zone. - pod := testsuites.StartInPodWithVolume(cs, namespace, claim.Name, "pvc-tester-unschedulable", "sleep 100000", - e2epod.NodeSelection{Selector: nodeSelector}) - defer testsuites.StopPod(cs, pod) - framework.ExpectNoError(e2epod.WaitForPodNameUnschedulableInNamespace(cs, pod.Name, pod.Namespace), "pod should be unschedulable") - } - test.TestDynamicProvisioning() - } -} - -func addSingleCSIZoneAllowedTopologyToStorageClass(c clientset.Interface, sc *storagev1.StorageClass, zone string) { - term := v1.TopologySelectorTerm{ - MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ - { - Key: drivers.GCEPDCSIZoneTopologyKey, - Values: []string{zone}, - }, - }, - } - sc.AllowedTopologies = append(sc.AllowedTopologies, term) -} - -func createGCEPDStorageClassTest() testsuites.StorageClassTest { - return testsuites.StorageClassTest{ - Name: drivers.GCEPDCSIProvisionerName, - Provisioner: drivers.GCEPDCSIProvisionerName, - Parameters: map[string]string{"type": "pd-standard"}, - ClaimSize: "5Gi", - ExpectedSize: "5Gi", - } -} diff --git a/test/e2e/storage/drivers/BUILD b/test/e2e/storage/drivers/BUILD index 9522dd56990..59704021577 100644 --- a/test/e2e/storage/drivers/BUILD +++ b/test/e2e/storage/drivers/BUILD @@ -20,6 +20,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/framework:go_default_library", diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go index fda246e1b97..d79e0d5804a 100644 --- a/test/e2e/storage/drivers/csi.go +++ b/test/e2e/storage/drivers/csi.go @@ -39,13 +39,18 @@ import ( "fmt" "math/rand" "strconv" + "time" "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" storagev1beta1 "k8s.io/api/storage/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testsuites" @@ -53,8 +58,8 @@ import ( ) const ( - // GCEPDCSIProvisionerName is the name of GCE Persistent Disk CSI provisioner - GCEPDCSIProvisionerName = "pd.csi.storage.gke.io" + // GCEPDCSIDriverName is the name of GCE Persistent Disk CSI driver + GCEPDCSIDriverName = "pd.csi.storage.gke.io" // GCEPDCSIZoneTopologyKey is the key of GCE Persistent Disk CSI zone topology GCEPDCSIZoneTopologyKey = "topology.gke.io/zone" ) @@ -372,7 +377,7 @@ var _ testsuites.DynamicPVTestDriver = &gcePDCSIDriver{} func 
InitGcePDCSIDriver() testsuites.TestDriver { return &gcePDCSIDriver{ driverInfo: testsuites.DriverInfo{ - Name: GCEPDCSIProvisionerName, + Name: GCEPDCSIDriverName, FeatureTag: "[Serial]", MaxFileSize: testpatterns.FileSizeMedium, SupportedFsType: sets.NewString( @@ -391,8 +396,10 @@ func InitGcePDCSIDriver() testsuites.TestDriver { // GCE supports volume limits, but the test creates large // number of volumes and times out test suites. testsuites.CapVolumeLimits: false, + testsuites.CapTopology: true, }, RequiredAccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, + TopologyKeys: []string{GCEPDCSIZoneTopologyKey}, }, } } @@ -459,6 +466,10 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes framework.Failf("deploying csi gce-pd driver: %v", err) } + if err = waitForCSIDriverRegistrationOnAllNodes(GCEPDCSIDriverName, f.ClientSet); err != nil { + framework.Failf("waiting for csi driver node registration on: %v", err) + } + return &testsuites.PerTestConfig{ Driver: g, Prefix: "gcepd", @@ -469,3 +480,30 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes cancelLogging() } } + +func waitForCSIDriverRegistrationOnAllNodes(driverName string, cs clientset.Interface) error { + nodes := framework.GetReadySchedulableNodesOrDie(cs) + for _, node := range nodes.Items { + if err := waitForCSIDriverRegistrationOnNode(node.Name, driverName, cs); err != nil { + return err + } + } + return nil +} + +func waitForCSIDriverRegistrationOnNode(nodeName string, driverName string, cs clientset.Interface) error { + const csiNodeRegisterTimeout = 1 * time.Minute + + return wait.PollImmediate(10*time.Second, csiNodeRegisterTimeout, func() (bool, error) { + csiNode, err := cs.StorageV1beta1().CSINodes().Get(nodeName, metav1.GetOptions{}) + if err != nil && !errors.IsNotFound(err) { + return false, err + } + for _, driver := range csiNode.Spec.Drivers { + if driver.Name == driverName { + return true, nil + } + } + return false, nil + }) +} diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index 4e71c6d2197..8e2eba506a7 100644 --- a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -1158,6 +1158,7 @@ func InitGcePdDriver() testsuites.TestDriver { MaxFileSize: testpatterns.FileSizeMedium, SupportedFsType: supportedTypes, SupportedMountOption: sets.NewString("debug", "nouid32"), + TopologyKeys: []string{v1.LabelZoneFailureDomain}, Capabilities: map[testsuites.Capability]bool{ testsuites.CapPersistence: true, testsuites.CapFsGroup: true, @@ -1169,6 +1170,7 @@ func InitGcePdDriver() testsuites.TestDriver { // GCE supports volume limits, but the test creates large // number of volumes and times out test suites. testsuites.CapVolumeLimits: false, + testsuites.CapTopology: true, }, }, } diff --git a/test/e2e/storage/in_tree_volumes.go b/test/e2e/storage/in_tree_volumes.go index a9f3f9bdd7e..249dbfa326f 100644 --- a/test/e2e/storage/in_tree_volumes.go +++ b/test/e2e/storage/in_tree_volumes.go @@ -59,6 +59,7 @@ var testSuites = []func() testsuites.TestSuite{ testsuites.InitVolumeExpandTestSuite, testsuites.InitDisruptiveTestSuite, testsuites.InitVolumeLimitsTestSuite, + testsuites.InitTopologyTestSuite, } // This executes testSuites for in-tree volumes. 
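Note on the registration wait added in csi.go above: PrepareTest now blocks until the
driver shows up in every node's CSINode object, which keeps the topology suite from
racing driver startup. For context, a sketch of the object shape the poll inspects --
the node name and NodeID value here are illustrative, not taken from this change:

	// A CSINode as kubelet publishes it once the GCE PD CSI driver has
	// registered on a node; the poll succeeds when an entry with
	// Name == GCEPDCSIDriverName appears in Spec.Drivers.
	exampleCSINode := storagev1beta1.CSINode{
		ObjectMeta: metav1.ObjectMeta{Name: "node-1"},
		Spec: storagev1beta1.CSINodeSpec{
			Drivers: []storagev1beta1.CSINodeDriver{
				{
					Name:         GCEPDCSIDriverName, // "pd.csi.storage.gke.io"
					NodeID:       "projects/example/zones/us-central1-a/instances/node-1",
					TopologyKeys: []string{GCEPDCSIZoneTopologyKey}, // "topology.gke.io/zone"
				},
			},
		},
	}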
diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go
index b60cfc2757f..cce22b459b5 100644
--- a/test/e2e/storage/regional_pd.go
+++ b/test/e2e/storage/regional_pd.go
@@ -573,3 +573,59 @@ func verifyZonesInPV(volume *v1.PersistentVolume, zones sets.String, match bool)
 
 	return fmt.Errorf("Zones in StorageClass are %v, but zones in PV are %v", zones, pvZones)
 }
+
+func checkZoneFromLabelAndAffinity(pv *v1.PersistentVolume, zone string, matchZone bool) {
+	checkZonesFromLabelAndAffinity(pv, sets.NewString(zone), matchZone)
+}
+
+// checkZonesFromLabelAndAffinity checks that the PV's LabelZoneFailureDomain label and the
+// terms with key LabelZoneFailureDomain in the PV's node affinity contain the given zones.
+// matchZones indicates whether the zones must match exactly (true) or only be contained (false).
+func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, matchZones bool) {
+	ginkgo.By("checking PV's zone label and node affinity terms match expected zone")
+	if pv == nil {
+		framework.Failf("nil pv passed")
+	}
+	pvLabel, ok := pv.Labels[v1.LabelZoneFailureDomain]
+	if !ok {
+		framework.Failf("label %s not found on PV", v1.LabelZoneFailureDomain)
+	}
+
+	zonesFromLabel, err := volumehelpers.LabelZonesToSet(pvLabel)
+	if err != nil {
+		framework.Failf("unable to parse zone labels %s: %v", pvLabel, err)
+	}
+	if matchZones && !zonesFromLabel.Equal(zones) {
+		framework.Failf("value[s] of %s label for PV: %v does not match expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones)
+	}
+	if !matchZones && !zonesFromLabel.IsSuperset(zones) {
+		framework.Failf("value[s] of %s label for PV: %v does not contain expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones)
+	}
+	if pv.Spec.NodeAffinity == nil {
+		framework.Failf("node affinity not found in PV spec %v", pv.Spec)
+	}
+	if len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) == 0 {
+		framework.Failf("node selector terms not found in PV spec %v", pv.Spec)
+	}
+
+	for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
+		keyFound := false
+		for _, r := range term.MatchExpressions {
+			if r.Key != v1.LabelZoneFailureDomain {
+				continue
+			}
+			keyFound = true
+			zonesFromNodeAffinity := sets.NewString(r.Values...)
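+			// With matchZones the affinity term must list exactly the expected
+			// zones; otherwise it only needs to contain them.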
+ if matchZones && !zonesFromNodeAffinity.Equal(zones) { + framework.Failf("zones from NodeAffinity of PV: %v does not equal expected zone[s]: %v", zonesFromNodeAffinity, zones) + } + if !matchZones && !zonesFromNodeAffinity.IsSuperset(zones) { + framework.Failf("zones from NodeAffinity of PV: %v does not contain expected zone[s]: %v", zonesFromNodeAffinity, zones) + } + break + } + if !keyFound { + framework.Failf("label %s not found in term %v", v1.LabelZoneFailureDomain, term) + } + } +} diff --git a/test/e2e/storage/testpatterns/testpattern.go b/test/e2e/storage/testpatterns/testpattern.go index e90f416a91c..80b510bc6c9 100644 --- a/test/e2e/storage/testpatterns/testpattern.go +++ b/test/e2e/storage/testpatterns/testpattern.go @@ -230,6 +230,22 @@ var ( VolMode: v1.PersistentVolumeBlock, AllowExpansion: true, } + + // Definitions for topology tests + + // TopologyImmediate is TestPattern for immediate binding + TopologyImmediate = TestPattern{ + Name: "Dynamic PV (immediate binding)", + VolType: DynamicPV, + BindingMode: storagev1.VolumeBindingImmediate, + } + + // TopologyDelayed is TestPattern for delayed binding + TopologyDelayed = TestPattern{ + Name: "Dynamic PV (delayed binding)", + VolType: DynamicPV, + BindingMode: storagev1.VolumeBindingWaitForFirstConsumer, + } ) // NewVolTypeMap creates a map with the given TestVolTypes enabled diff --git a/test/e2e/storage/testsuites/BUILD b/test/e2e/storage/testsuites/BUILD index df0abe2b042..c544a3f6ab5 100644 --- a/test/e2e/storage/testsuites/BUILD +++ b/test/e2e/storage/testsuites/BUILD @@ -12,6 +12,7 @@ go_library( "snapshottable.go", "subpath.go", "testdriver.go", + "topology.go", "volume_expand.go", "volume_io.go", "volumelimits.go", @@ -45,6 +46,7 @@ go_library( "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library", "//test/e2e/common:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/metrics:go_default_library", "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/podlogs:go_default_library", diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index 02d7587547f..5798937d4bc 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -438,15 +438,6 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai pod = nil } -// TestBindingWaitForFirstConsumer tests the binding with WaitForFirstConsumer mode -func (t StorageClassTest) TestBindingWaitForFirstConsumer(nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) { - pvs, node := t.TestBindingWaitForFirstConsumerMultiPVC([]*v1.PersistentVolumeClaim{t.Claim}, nodeSelector, expectUnschedulable) - if pvs == nil { - return nil, node - } - return pvs[0], node -} - // TestBindingWaitForFirstConsumerMultiPVC tests the binding with WaitForFirstConsumer mode func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) { var err error diff --git a/test/e2e/storage/testsuites/testdriver.go b/test/e2e/storage/testsuites/testdriver.go index abafe023e29..8351f30af8d 100644 --- a/test/e2e/storage/testsuites/testdriver.go +++ b/test/e2e/storage/testsuites/testdriver.go @@ -157,6 +157,7 @@ const ( CapNodeExpansion Capability = "nodeExpansion" // support volume expansion for node CapVolumeLimits 
Capability = "volumeLimits" // support volume limits (can be *very* slow) CapSingleNodeVolume Capability = "singleNodeVolume" // support volume that can run on single node (like hostpath) + CapTopology Capability = "topology" // support topology ) // DriverInfo represents static information about a TestDriver. @@ -183,6 +184,12 @@ type DriverInfo struct { // [Optional] List of access modes required for provisioning, defaults to // RWO if unset RequiredAccessModes []v1.PersistentVolumeAccessMode + // [Optional] List of topology keys driver supports + TopologyKeys []string + // [Optional] Number of allowed topologies the driver requires. + // Only relevant if TopologyKeys is set. Defaults to 1. + // Example: multi-zonal disk requires at least 2 allowed topologies. + NumAllowedTopologies int } // PerTestConfig represents parameters that control test execution. diff --git a/test/e2e/storage/testsuites/topology.go b/test/e2e/storage/testsuites/topology.go new file mode 100644 index 00000000000..22f93c19bf9 --- /dev/null +++ b/test/e2e/storage/testsuites/topology.go @@ -0,0 +1,351 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This suite tests volume topology + +package testsuites + +import ( + "fmt" + "math/rand" + + "github.com/onsi/ginkgo" + + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2epv "k8s.io/kubernetes/test/e2e/framework/pv" + "k8s.io/kubernetes/test/e2e/storage/testpatterns" +) + +type topologyTestSuite struct { + tsInfo TestSuiteInfo +} + +type topologyTest struct { + config *PerTestConfig + testCleanup func() + + intreeOps opCounts + migratedOps opCounts + + resource genericVolumeTestResource + pod *v1.Pod + allTopologies []topology +} + +type topology map[string]string + +var _ TestSuite = &topologyTestSuite{} + +// InitTopologyTestSuite returns topologyTestSuite that implements TestSuite interface +func InitTopologyTestSuite() TestSuite { + return &topologyTestSuite{ + tsInfo: TestSuiteInfo{ + name: "topology", + testPatterns: []testpatterns.TestPattern{ + testpatterns.TopologyImmediate, + testpatterns.TopologyDelayed, + }, + }, + } +} + +func (t *topologyTestSuite) getTestSuiteInfo() TestSuiteInfo { + return t.tsInfo +} + +func (t *topologyTestSuite) skipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) { +} + +func (t *topologyTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { + var ( + dInfo = driver.GetDriverInfo() + dDriver DynamicPVTestDriver + cs clientset.Interface + err error + ) + + ginkgo.BeforeEach(func() { + // Check preconditions. 
+ ok := false + dDriver, ok = driver.(DynamicPVTestDriver) + if !ok { + framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType) + } + + if !dInfo.Capabilities[CapTopology] { + framework.Skipf("Driver %q does not support topology - skipping", dInfo.Name) + } + + }) + + // This intentionally comes after checking the preconditions because it + // registers its own BeforeEach which creates the namespace. Beware that it + // also registers an AfterEach which renders f unusable. Any code using + // f must run inside an It or Context callback. + f := framework.NewDefaultFramework("topology") + + init := func() topologyTest { + const numTestTopologies = 2 + + l := topologyTest{} + + // Now do the more expensive test initialization. + l.config, l.testCleanup = driver.PrepareTest(f) + + l.resource = genericVolumeTestResource{ + driver: driver, + config: l.config, + pattern: pattern, + } + + // After driver is installed, check driver topologies on nodes + cs = f.ClientSet + keys := dInfo.TopologyKeys + if len(keys) == 0 { + framework.Skipf("Driver didn't provide topology keys -- skipping") + } + if dInfo.NumAllowedTopologies == 0 { + // Any plugin that supports topology defaults to 1 topology + dInfo.NumAllowedTopologies = 1 + } + // We collect 1 additional topology, if possible, for the conflicting topology test + // case, but it's not needed for the positive test + l.allTopologies, err = t.getCurrentTopologies(cs, keys, dInfo.NumAllowedTopologies+1) + framework.ExpectNoError(err, "failed to get current driver topologies") + if len(l.allTopologies) < dInfo.NumAllowedTopologies { + framework.Skipf("Not enough topologies in cluster -- skipping") + } + + l.resource.sc = dDriver.GetDynamicProvisionStorageClass(l.config, pattern.FsType) + framework.ExpectNotEqual(l.resource.sc, nil, "driver failed to provide a StorageClass") + l.resource.sc.VolumeBindingMode = &pattern.BindingMode + + claimSize := dDriver.GetClaimSize() + l.resource.pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ + ClaimSize: claimSize, + StorageClassName: &(l.resource.sc.Name), + }, l.config.Framework.Namespace.Name) + + l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName) + return l + } + + cleanup := func(l topologyTest) { + t.cleanupResources(cs, &l) + if l.testCleanup != nil { + l.testCleanup() + } + + validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps) + } + + ginkgo.It("should provision a volume and schedule a pod with AllowedTopologies", func() { + l := init() + defer func() { + cleanup(l) + }() + + // If possible, exclude one topology, otherwise allow them all + excludedIndex := -1 + if len(l.allTopologies) > dInfo.NumAllowedTopologies { + excludedIndex = rand.Intn(len(l.allTopologies)) + } + allowedTopologies := t.setAllowedTopologies(l.resource.sc, l.allTopologies, excludedIndex) + + t.createResources(cs, &l, nil) + + err = e2epod.WaitForPodRunningInNamespace(cs, l.pod) + framework.ExpectNoError(err) + + ginkgo.By("Verifying pod scheduled to correct node") + pod, err := cs.CoreV1().Pods(l.pod.Namespace).Get(l.pod.Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + + node, err := cs.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) + framework.ExpectNoError(err) + + t.verifyNodeTopology(node, allowedTopologies) + }) + + ginkgo.It("should fail to schedule a pod which has topologies that conflict with AllowedTopologies", func() { + l := init() + defer func() { + 
cleanup(l)
+		}()
+
+		if len(l.allTopologies) < dInfo.NumAllowedTopologies+1 {
+			framework.Skipf("Not enough topologies in cluster -- skipping")
+		}
+
+		// Exclude one topology
+		excludedIndex := rand.Intn(len(l.allTopologies))
+		t.setAllowedTopologies(l.resource.sc, l.allTopologies, excludedIndex)
+
+		// Set pod nodeSelector to the excluded topology
+		exprs := []v1.NodeSelectorRequirement{}
+		for k, v := range l.allTopologies[excludedIndex] {
+			exprs = append(exprs, v1.NodeSelectorRequirement{
+				Key:      k,
+				Operator: v1.NodeSelectorOpIn,
+				Values:   []string{v},
+			})
+		}
+
+		affinity := &v1.Affinity{
+			NodeAffinity: &v1.NodeAffinity{
+				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
+					NodeSelectorTerms: []v1.NodeSelectorTerm{
+						{
+							MatchExpressions: exprs,
+						},
+					},
+				},
+			},
+		}
+		t.createResources(cs, &l, affinity)
+
+		// Wait for the pod to fail scheduling.
+		// With delayed binding, the scheduler errors before provisioning.
+		// With immediate binding, the volume gets provisioned but cannot be scheduled.
+		err = e2epod.WaitForPodNameUnschedulableInNamespace(cs, l.pod.Name, l.pod.Namespace)
+		framework.ExpectNoError(err)
+	})
+}
+
+// getCurrentTopologies() goes through all Nodes and returns up to maxCount unique driver topologies
+func (t *topologyTestSuite) getCurrentTopologies(cs clientset.Interface, keys []string, maxCount int) ([]topology, error) {
+	nodes := framework.GetReadySchedulableNodesOrDie(cs)
+
+	topos := []topology{}
+
+	// TODO: scale?
+	for _, n := range nodes.Items {
+		topo := map[string]string{}
+		for _, k := range keys {
+			v, ok := n.Labels[k]
+			if !ok {
+				return nil, fmt.Errorf("node %v missing topology label %v", n.Name, k)
+			}
+			topo[k] = v
+		}
+
+		found := false
+		for _, existingTopo := range topos {
+			if topologyEqual(existingTopo, topo) {
+				found = true
+				break
+			}
+		}
+		if !found {
+			e2elog.Logf("found topology %v", topo)
+			topos = append(topos, topo)
+		}
+		if len(topos) >= maxCount {
+			break
+		}
+	}
+	return topos, nil
+}
+
+// topologyEqual reports whether t1 and t2 contain exactly the same key/value
+// pairs (a manual comparison is used instead of reflect.DeepEqual).
+func topologyEqual(t1, t2 topology) bool {
+	if len(t1) != len(t2) {
+		return false
+	}
+	for k1, v1 := range t1 {
+		if v2, ok := t2[k1]; !ok || v1 != v2 {
+			return false
+		}
+	}
+	return true
+}
+
+// setAllowedTopologies sets StorageClass.AllowedTopologies from topos, excluding the topology at excludedIndex.
+// excludedIndex can be -1 to specify that nothing should be excluded.
+// It returns the list of allowed topologies generated.
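+// For example, with topos = [{zone: a}, {zone: b}, {zone: c}] and excludedIndex = 1,
+// the class is restricted to zones a and c, and [{zone: a}, {zone: c}] is returned.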
+func (t *topologyTestSuite) setAllowedTopologies(sc *storagev1.StorageClass, topos []topology, excludedIndex int) []topology {
+	allowedTopologies := []topology{}
+	sc.AllowedTopologies = []v1.TopologySelectorTerm{}
+
+	for i := 0; i < len(topos); i++ {
+		if i != excludedIndex {
+			exprs := []v1.TopologySelectorLabelRequirement{}
+			for k, v := range topos[i] {
+				exprs = append(exprs, v1.TopologySelectorLabelRequirement{
+					Key:    k,
+					Values: []string{v},
+				})
+			}
+			sc.AllowedTopologies = append(sc.AllowedTopologies, v1.TopologySelectorTerm{MatchLabelExpressions: exprs})
+			allowedTopologies = append(allowedTopologies, topos[i])
+		}
+	}
+	return allowedTopologies
+}
+
+func (t *topologyTestSuite) verifyNodeTopology(node *v1.Node, allowedTopos []topology) {
+	for _, topo := range allowedTopos {
+		for k, v := range topo {
+			nodeV := node.Labels[k]
+			if nodeV == v {
+				return
+			}
+		}
+	}
+	e2elog.Failf("node %v topology labels %+v don't match allowed topologies %+v", node.Name, node.Labels, allowedTopos)
+}
+
+func (t *topologyTestSuite) createResources(cs clientset.Interface, l *topologyTest, affinity *v1.Affinity) {
+	var err error
+	e2elog.Logf("Creating storage class object and pvc object for driver - sc: %v, pvc: %v", l.resource.sc, l.resource.pvc)
+
+	ginkgo.By("Creating sc")
+	l.resource.sc, err = cs.StorageV1().StorageClasses().Create(l.resource.sc)
+	framework.ExpectNoError(err)
+
+	ginkgo.By("Creating pvc")
+	l.resource.pvc, err = cs.CoreV1().PersistentVolumeClaims(l.resource.pvc.Namespace).Create(l.resource.pvc)
+	framework.ExpectNoError(err)
+
+	ginkgo.By("Creating pod")
+	l.pod = e2epod.MakeSecPod(l.config.Framework.Namespace.Name,
+		[]*v1.PersistentVolumeClaim{l.resource.pvc},
+		nil,
+		false,
+		"",
+		false,
+		false,
+		e2epv.SELinuxLabel,
+		nil)
+	l.pod.Spec.Affinity = affinity
+	l.pod, err = cs.CoreV1().Pods(l.pod.Namespace).Create(l.pod)
+	framework.ExpectNoError(err)
+}
+
+func (t *topologyTestSuite) cleanupResources(cs clientset.Interface, l *topologyTest) {
+	if l.pod != nil {
+		ginkgo.By("Deleting pod")
+		err := e2epod.DeletePodWithWait(cs, l.pod)
+		framework.ExpectNoError(err, "while deleting pod")
+	}
+	l.resource.cleanupResource()
+}
diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go
index 75ff8aa22c1..844b8ee471a 100644
--- a/test/e2e/storage/volume_provisioning.go
+++ b/test/e2e/storage/volume_provisioning.go
@@ -42,7 +42,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apiserver/pkg/authentication/serviceaccount"
 	clientset "k8s.io/client-go/kubernetes"
-	volumehelpers "k8s.io/cloud-provider/volume/helpers"
 	storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/auth"
@@ -56,66 +55,8 @@ import (
 const (
 	// Plugin name of the external provisioner
 	externalPluginName = "example.com/nfs"
-	// Number of PVCs for multi PVC tests
-	multiPVCcount = 3
 )
 
-func checkZoneFromLabelAndAffinity(pv *v1.PersistentVolume, zone string, matchZone bool) {
-	checkZonesFromLabelAndAffinity(pv, sets.NewString(zone), matchZone)
-}
-
-// checkZoneLabelAndAffinity checks the LabelZoneFailureDomain label of PV and terms
-// with key LabelZoneFailureDomain in PV's node affinity contains zone
-// matchZones is used to indicate if zones should match perfectly
-func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, matchZones bool) {
-	ginkgo.By("checking PV's zone label and node affinity terms match expected zone")
-	if pv == nil {
-		
framework.Failf("nil pv passed") - } - pvLabel, ok := pv.Labels[v1.LabelZoneFailureDomain] - if !ok { - framework.Failf("label %s not found on PV", v1.LabelZoneFailureDomain) - } - - zonesFromLabel, err := volumehelpers.LabelZonesToSet(pvLabel) - if err != nil { - framework.Failf("unable to parse zone labels %s: %v", pvLabel, err) - } - if matchZones && !zonesFromLabel.Equal(zones) { - framework.Failf("value[s] of %s label for PV: %v does not match expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones) - } - if !matchZones && !zonesFromLabel.IsSuperset(zones) { - framework.Failf("value[s] of %s label for PV: %v does not contain expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones) - } - if pv.Spec.NodeAffinity == nil { - framework.Failf("node affinity not found in PV spec %v", pv.Spec) - } - if len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) == 0 { - framework.Failf("node selector terms not found in PV spec %v", pv.Spec) - } - - for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms { - keyFound := false - for _, r := range term.MatchExpressions { - if r.Key != v1.LabelZoneFailureDomain { - continue - } - keyFound = true - zonesFromNodeAffinity := sets.NewString(r.Values...) - if matchZones && !zonesFromNodeAffinity.Equal(zones) { - framework.Failf("zones from NodeAffinity of PV: %v does not equal expected zone[s]: %v", zonesFromNodeAffinity, zones) - } - if !matchZones && !zonesFromNodeAffinity.IsSuperset(zones) { - framework.Failf("zones from NodeAffinity of PV: %v does not contain expected zone[s]: %v", zonesFromNodeAffinity, zones) - } - break - } - if !keyFound { - framework.Failf("label %s not found in term %v", v1.LabelZoneFailureDomain, term) - } - } -} - // checkAWSEBS checks properties of an AWS EBS. Test framework does not // instantiate full AWS provider, therefore we need use ec2 API directly. func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool) error { @@ -184,71 +125,6 @@ func checkGCEPD(volume *v1.PersistentVolume, volumeType string) error { return nil } -func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTopology bool, pvcCount int) { - storageClassTestNameFmt := "Delayed binding %s storage class test %s" - storageClassTestNameSuffix := "" - if specifyAllowedTopology { - storageClassTestNameSuffix += " with AllowedTopologies" - } - tests := []testsuites.StorageClassTest{ - { - Name: fmt.Sprintf(storageClassTestNameFmt, "EBS", storageClassTestNameSuffix), - CloudProviders: []string{"aws"}, - Provisioner: "kubernetes.io/aws-ebs", - ClaimSize: "2Gi", - DelayBinding: true, - }, - { - Name: fmt.Sprintf(storageClassTestNameFmt, "GCE PD", storageClassTestNameSuffix), - CloudProviders: []string{"gce", "gke"}, - Provisioner: "kubernetes.io/gce-pd", - ClaimSize: "2Gi", - DelayBinding: true, - }, - } - for _, test := range tests { - if !framework.ProviderIs(test.CloudProviders...) 
{ - framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders) - continue - } - action := "creating claims with class with waitForFirstConsumer" - suffix := "delayed" - var topoZone string - test.Client = c - test.Class = newStorageClass(test, ns, suffix) - if specifyAllowedTopology { - action += " and allowedTopologies" - suffix += "-topo" - topoZone = getRandomClusterZone(c) - addSingleZoneAllowedTopologyToStorageClass(c, test.Class, topoZone) - } - ginkgo.By(action) - var claims []*v1.PersistentVolumeClaim - for i := 0; i < pvcCount; i++ { - claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ - ClaimSize: test.ClaimSize, - StorageClassName: &test.Class.Name, - VolumeMode: &test.VolumeMode, - }, ns) - claims = append(claims, claim) - } - pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */) - if node == nil { - framework.Failf("unexpected nil node found") - } - zone, ok := node.Labels[v1.LabelZoneFailureDomain] - if !ok { - framework.Failf("label %s not found on Node", v1.LabelZoneFailureDomain) - } - if specifyAllowedTopology && topoZone != zone { - framework.Failf("zone specified in allowedTopologies: %s does not match zone of node where PV got provisioned: %s", topoZone, zone) - } - for _, pv := range pvs { - checkZoneFromLabelAndAffinity(pv, zone, true) - } - } -} - var _ = utils.SIGDescribe("Dynamic Provisioning", func() { f := framework.NewDefaultFramework("volume-provisioning") @@ -964,57 +840,6 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.ExpectNoError(err) }) }) - ginkgo.Describe("DynamicProvisioner delayed binding [Slow]", func() { - ginkgo.It("should create persistent volumes in the same zone as node after a pod mounting the claims is started", func() { - testZonalDelayedBinding(c, ns, false /*specifyAllowedTopology*/, 1 /*pvcCount*/) - testZonalDelayedBinding(c, ns, false /*specifyAllowedTopology*/, 3 /*pvcCount*/) - }) - }) - ginkgo.Describe("DynamicProvisioner allowedTopologies", func() { - ginkgo.It("should create persistent volume in the zone specified in allowedTopologies of storageclass", func() { - tests := []testsuites.StorageClassTest{ - { - Name: "AllowedTopologies EBS storage class test", - CloudProviders: []string{"aws"}, - Provisioner: "kubernetes.io/aws-ebs", - ClaimSize: "2Gi", - ExpectedSize: "2Gi", - }, - { - Name: "AllowedTopologies GCE PD storage class test", - CloudProviders: []string{"gce", "gke"}, - Provisioner: "kubernetes.io/gce-pd", - ClaimSize: "2Gi", - ExpectedSize: "2Gi", - }, - } - for _, test := range tests { - if !framework.ProviderIs(test.CloudProviders...) 
{ - framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders) - continue - } - ginkgo.By("creating a claim with class with allowedTopologies set") - suffix := "topology" - test.Client = c - test.Class = newStorageClass(test, ns, suffix) - zone := getRandomClusterZone(c) - addSingleZoneAllowedTopologyToStorageClass(c, test.Class, zone) - test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ - ClaimSize: test.ClaimSize, - StorageClassName: &test.Class.Name, - VolumeMode: &test.VolumeMode, - }, ns) - pv := test.TestDynamicProvisioning() - checkZoneFromLabelAndAffinity(pv, zone, true) - } - }) - }) - ginkgo.Describe("DynamicProvisioner delayed binding with allowedTopologies [Slow]", func() { - ginkgo.It("should create persistent volumes in the same zone as specified in allowedTopologies after a pod mounting the claims is started", func() { - testZonalDelayedBinding(c, ns, true /*specifyAllowedTopology*/, 1 /*pvcCount*/) - testZonalDelayedBinding(c, ns, true /*specifyAllowedTopology*/, 3 /*pvcCount*/) - }) - }) }) func verifyDefaultStorageClass(c clientset.Interface, scName string, expectedDefault bool) { @@ -1064,18 +889,6 @@ func getDefaultPluginName() string { return "" } -func addSingleZoneAllowedTopologyToStorageClass(c clientset.Interface, sc *storagev1.StorageClass, zone string) { - term := v1.TopologySelectorTerm{ - MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ - { - Key: v1.LabelZoneFailureDomain, - Values: []string{zone}, - }, - }, - } - sc.AllowedTopologies = append(sc.AllowedTopologies, term) -} - func newStorageClass(t testsuites.StorageClassTest, ns string, suffix string) *storagev1.StorageClass { pluginName := t.Provisioner if pluginName == "" { From 8d3abb83f9d0bf5c9c538adefa82d08074b79980 Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Mon, 9 Sep 2019 18:48:43 -0700 Subject: [PATCH 2/2] Rename testCleanup to driverCleanup Change-Id: I2ee50e2c7eed82724b7a2703819dcefcf7fa915f --- test/e2e/storage/testsuites/disruptive.go | 12 ++++++------ test/e2e/storage/testsuites/ephemeral.go | 12 ++++++------ test/e2e/storage/testsuites/multivolume.go | 12 ++++++------ test/e2e/storage/testsuites/provisioning.go | 12 ++++++------ test/e2e/storage/testsuites/snapshottable.go | 4 ++-- test/e2e/storage/testsuites/subpath.go | 12 ++++++------ test/e2e/storage/testsuites/topology.go | 10 +++++----- test/e2e/storage/testsuites/volume_expand.go | 12 ++++++------ test/e2e/storage/testsuites/volume_io.go | 12 ++++++------ test/e2e/storage/testsuites/volumemode.go | 12 ++++++------ test/e2e/storage/testsuites/volumes.go | 12 ++++++------ 11 files changed, 61 insertions(+), 61 deletions(-) diff --git a/test/e2e/storage/testsuites/disruptive.go b/test/e2e/storage/testsuites/disruptive.go index d820a3639da..7201944d8bb 100644 --- a/test/e2e/storage/testsuites/disruptive.go +++ b/test/e2e/storage/testsuites/disruptive.go @@ -61,8 +61,8 @@ func (s *disruptiveTestSuite) skipRedundantSuite(driver TestDriver, pattern test func (s *disruptiveTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { type local struct { - config *PerTestConfig - testCleanup func() + config *PerTestConfig + driverCleanup func() cs clientset.Interface ns *v1.Namespace @@ -87,7 +87,7 @@ func (s *disruptiveTestSuite) defineTests(driver TestDriver, pattern testpattern l.cs = f.ClientSet // Now do the more expensive test initialization. 
- l.config, l.testCleanup = driver.PrepareTest(f) + l.config, l.driverCleanup = driver.PrepareTest(f) if pattern.VolMode == v1.PersistentVolumeBlock && !driver.GetDriverInfo().Capabilities[CapBlock] { framework.Skipf("Driver %s doesn't support %v -- skipping", driver.GetDriverInfo().Name, pattern.VolMode) @@ -109,9 +109,9 @@ func (s *disruptiveTestSuite) defineTests(driver TestDriver, pattern testpattern l.resource = nil } - if l.testCleanup != nil { - l.testCleanup() - l.testCleanup = nil + if l.driverCleanup != nil { + l.driverCleanup() + l.driverCleanup = nil } } diff --git a/test/e2e/storage/testsuites/ephemeral.go b/test/e2e/storage/testsuites/ephemeral.go index b2a400c0fae..c1a823d486a 100644 --- a/test/e2e/storage/testsuites/ephemeral.go +++ b/test/e2e/storage/testsuites/ephemeral.go @@ -65,8 +65,8 @@ func (p *ephemeralTestSuite) skipRedundantSuite(driver TestDriver, pattern testp func (p *ephemeralTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { type local struct { - config *PerTestConfig - testCleanup func() + config *PerTestConfig + driverCleanup func() testCase *EphemeralTest } @@ -94,7 +94,7 @@ func (p *ephemeralTestSuite) defineTests(driver TestDriver, pattern testpatterns l = local{} // Now do the more expensive test initialization. - l.config, l.testCleanup = driver.PrepareTest(f) + l.config, l.driverCleanup = driver.PrepareTest(f) l.testCase = &EphemeralTest{ Client: l.config.Framework.ClientSet, Namespace: f.Namespace.Name, @@ -107,9 +107,9 @@ func (p *ephemeralTestSuite) defineTests(driver TestDriver, pattern testpatterns } cleanup := func() { - if l.testCleanup != nil { - l.testCleanup() - l.testCleanup = nil + if l.driverCleanup != nil { + l.driverCleanup() + l.driverCleanup = nil } } diff --git a/test/e2e/storage/testsuites/multivolume.go b/test/e2e/storage/testsuites/multivolume.go index 9a17a864e9f..4f3aae6ba2c 100644 --- a/test/e2e/storage/testsuites/multivolume.go +++ b/test/e2e/storage/testsuites/multivolume.go @@ -63,8 +63,8 @@ func (t *multiVolumeTestSuite) skipRedundantSuite(driver TestDriver, pattern tes func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { type local struct { - config *PerTestConfig - testCleanup func() + config *PerTestConfig + driverCleanup func() cs clientset.Interface ns *v1.Namespace @@ -99,7 +99,7 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter l.driver = driver // Now do the more expensive test initialization. 
- l.config, l.testCleanup = driver.PrepareTest(f) + l.config, l.driverCleanup = driver.PrepareTest(f) l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName) } @@ -108,9 +108,9 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter resource.cleanupResource() } - if l.testCleanup != nil { - l.testCleanup() - l.testCleanup = nil + if l.driverCleanup != nil { + l.driverCleanup() + l.driverCleanup = nil } validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps) diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index 5798937d4bc..56095db8080 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -87,8 +87,8 @@ func (p *provisioningTestSuite) skipRedundantSuite(driver TestDriver, pattern te func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { type local struct { - config *PerTestConfig - testCleanup func() + config *PerTestConfig + driverCleanup func() testCase *StorageClassTest cs clientset.Interface @@ -127,7 +127,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte l = local{} // Now do the more expensive test initialization. - l.config, l.testCleanup = driver.PrepareTest(f) + l.config, l.driverCleanup = driver.PrepareTest(f) l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName) l.cs = l.config.Framework.ClientSet claimSize := dDriver.GetClaimSize() @@ -155,9 +155,9 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte } cleanup := func() { - if l.testCleanup != nil { - l.testCleanup() - l.testCleanup = nil + if l.driverCleanup != nil { + l.driverCleanup() + l.driverCleanup = nil } validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps) diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go index fc017a35588..638a73c82fd 100644 --- a/test/e2e/storage/testsuites/snapshottable.go +++ b/test/e2e/storage/testsuites/snapshottable.go @@ -101,8 +101,8 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt dc := f.DynamicClient // Now do the more expensive test initialization. - config, testCleanup := driver.PrepareTest(f) - defer testCleanup() + config, driverCleanup := driver.PrepareTest(f) + defer driverCleanup() vsc := sDriver.GetSnapshotClass(config) class := dDriver.GetDynamicProvisionStorageClass(config, "") diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index b2b5186fb92..4e77dcf5b59 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -81,8 +81,8 @@ func (s *subPathTestSuite) skipRedundantSuite(driver TestDriver, pattern testpat func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { type local struct { - config *PerTestConfig - testCleanup func() + config *PerTestConfig + driverCleanup func() resource *genericVolumeTestResource roVolSource *v1.VolumeSource @@ -109,7 +109,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T l = local{} // Now do the more expensive test initialization. 
- l.config, l.testCleanup = driver.PrepareTest(f) + l.config, l.driverCleanup = driver.PrepareTest(f) l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, driver.GetDriverInfo().InTreePluginName) l.resource = createGenericVolumeTestResource(driver, l.config, pattern) @@ -165,9 +165,9 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T l.resource = nil } - if l.testCleanup != nil { - l.testCleanup() - l.testCleanup = nil + if l.driverCleanup != nil { + l.driverCleanup() + l.driverCleanup = nil } validateMigrationVolumeOpCounts(f.ClientSet, driver.GetDriverInfo().InTreePluginName, l.intreeOps, l.migratedOps) diff --git a/test/e2e/storage/testsuites/topology.go b/test/e2e/storage/testsuites/topology.go index 22f93c19bf9..421dcafbec0 100644 --- a/test/e2e/storage/testsuites/topology.go +++ b/test/e2e/storage/testsuites/topology.go @@ -40,8 +40,8 @@ type topologyTestSuite struct { } type topologyTest struct { - config *PerTestConfig - testCleanup func() + config *PerTestConfig + driverCleanup func() intreeOps opCounts migratedOps opCounts @@ -109,7 +109,7 @@ func (t *topologyTestSuite) defineTests(driver TestDriver, pattern testpatterns. l := topologyTest{} // Now do the more expensive test initialization. - l.config, l.testCleanup = driver.PrepareTest(f) + l.config, l.driverCleanup = driver.PrepareTest(f) l.resource = genericVolumeTestResource{ driver: driver, @@ -151,8 +151,8 @@ func (t *topologyTestSuite) defineTests(driver TestDriver, pattern testpatterns. cleanup := func(l topologyTest) { t.cleanupResources(cs, &l) - if l.testCleanup != nil { - l.testCleanup() + if l.driverCleanup != nil { + l.driverCleanup() } validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps) diff --git a/test/e2e/storage/testsuites/volume_expand.go b/test/e2e/storage/testsuites/volume_expand.go index fd48dc0dc4a..97ec20759c1 100644 --- a/test/e2e/storage/testsuites/volume_expand.go +++ b/test/e2e/storage/testsuites/volume_expand.go @@ -70,8 +70,8 @@ func (v *volumeExpandTestSuite) skipRedundantSuite(driver TestDriver, pattern te func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { type local struct { - config *PerTestConfig - testCleanup func() + config *PerTestConfig + driverCleanup func() resource *genericVolumeTestResource pod *v1.Pod @@ -102,7 +102,7 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte l = local{} // Now do the more expensive test initialization. 
- l.config, l.testCleanup = driver.PrepareTest(f) + l.config, l.driverCleanup = driver.PrepareTest(f) l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, driver.GetDriverInfo().InTreePluginName) l.resource = createGenericVolumeTestResource(driver, l.config, pattern) } @@ -127,9 +127,9 @@ func (v *volumeExpandTestSuite) defineTests(driver TestDriver, pattern testpatte l.resource = nil } - if l.testCleanup != nil { - l.testCleanup() - l.testCleanup = nil + if l.driverCleanup != nil { + l.driverCleanup() + l.driverCleanup = nil } validateMigrationVolumeOpCounts(f.ClientSet, driver.GetDriverInfo().InTreePluginName, l.intreeOps, l.migratedOps) diff --git a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go index 71d29acfd5b..e789e1bac3c 100644 --- a/test/e2e/storage/testsuites/volume_io.go +++ b/test/e2e/storage/testsuites/volume_io.go @@ -84,8 +84,8 @@ func (t *volumeIOTestSuite) skipRedundantSuite(driver TestDriver, pattern testpa func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { type local struct { - config *PerTestConfig - testCleanup func() + config *PerTestConfig + driverCleanup func() resource *genericVolumeTestResource @@ -109,7 +109,7 @@ func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns. l = local{} // Now do the more expensive test initialization. - l.config, l.testCleanup = driver.PrepareTest(f) + l.config, l.driverCleanup = driver.PrepareTest(f) l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName) l.resource = createGenericVolumeTestResource(driver, l.config, pattern) @@ -125,9 +125,9 @@ func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns. l.resource = nil } - if l.testCleanup != nil { - l.testCleanup() - l.testCleanup = nil + if l.driverCleanup != nil { + l.driverCleanup() + l.driverCleanup = nil } validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps) diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index d4dbe330325..ee1a42a6624 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -73,8 +73,8 @@ func (t *volumeModeTestSuite) skipRedundantSuite(driver TestDriver, pattern test func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { type local struct { - config *PerTestConfig - testCleanup func() + config *PerTestConfig + driverCleanup func() cs clientset.Interface ns *v1.Namespace @@ -103,7 +103,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern l.cs = f.ClientSet // Now do the more expensive test initialization. 
- l.config, l.testCleanup = driver.PrepareTest(f) + l.config, l.driverCleanup = driver.PrepareTest(f) l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName) } @@ -169,9 +169,9 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern cleanup := func() { l.cleanupResource() - if l.testCleanup != nil { - l.testCleanup() - l.testCleanup = nil + if l.driverCleanup != nil { + l.driverCleanup() + l.driverCleanup = nil } validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps) diff --git a/test/e2e/storage/testsuites/volumes.go b/test/e2e/storage/testsuites/volumes.go index 67dd2b74c89..7f5fe286fdb 100644 --- a/test/e2e/storage/testsuites/volumes.go +++ b/test/e2e/storage/testsuites/volumes.go @@ -98,8 +98,8 @@ func skipBlockTest(driver TestDriver) { func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { type local struct { - config *PerTestConfig - testCleanup func() + config *PerTestConfig + driverCleanup func() resource *genericVolumeTestResource @@ -121,7 +121,7 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T l = local{} // Now do the more expensive test initialization. - l.config, l.testCleanup = driver.PrepareTest(f) + l.config, l.driverCleanup = driver.PrepareTest(f) l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName) l.resource = createGenericVolumeTestResource(driver, l.config, pattern) if l.resource.volSource == nil { @@ -135,9 +135,9 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T l.resource = nil } - if l.testCleanup != nil { - l.testCleanup() - l.testCleanup = nil + if l.driverCleanup != nil { + l.driverCleanup() + l.driverCleanup = nil } validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps)
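For reference, the DriverInfo fields added in patch 1 are all a driver needs in order
to opt into the new topology suite. A minimal sketch of a hypothetical driver
definition (the driver name and topology key below are placeholders, not part of
this change):

	// Declaring topology support so that testsuites.InitTopologyTestSuite
	// exercises this driver.
	exampleDriverInfo := testsuites.DriverInfo{
		Name:            "example.csi.vendor.io",
		MaxFileSize:     testpatterns.FileSizeMedium,
		SupportedFsType: sets.NewString(""),
		Capabilities: map[testsuites.Capability]bool{
			testsuites.CapPersistence: true,
			testsuites.CapTopology:    true, // run the topology tests
		},
		// Node-label keys the suite reads to discover distinct topologies.
		TopologyKeys: []string{"example.csi.vendor.io/zone"},
		// Minimum number of distinct topologies the driver needs; defaults to 1.
		NumAllowedTopologies: 1,
	}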