add topology e2es to storage testsuite

Change-Id: I1ec4247cef5d477ae2a76136113a4ed5ce30bf44
Author: Michelle Au
Date: 2019-08-22 18:19:58 -07:00
Commit: 95c7b42340 (parent a2760c9c13)
13 changed files with 478 additions and 341 deletions


@@ -75,7 +75,6 @@ go_library(
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/framework/volume:go_default_library",
"//test/e2e/storage/drivers:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
"//test/e2e/storage/testsuites:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/utils/image:go_default_library",


@@ -17,20 +17,11 @@ limitations under the License.
package storage
import (
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/rand"
)
// List of testDrivers to be executed in below loop
@@ -54,6 +45,7 @@ var csiTestSuites = []func() testsuites.TestSuite{
testsuites.InitDisruptiveTestSuite,
testsuites.InitVolumeExpandTestSuite,
testsuites.InitVolumeLimitsTestSuite,
testsuites.InitTopologyTestSuite,
}
// This executes testSuites for csi volumes.
@@ -65,136 +57,4 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
testsuites.DefineTestSuite(curDriver, csiTestSuites)
})
}
// TODO: PD CSI driver needs to be serial because it uses a fixed name. Address as part of #71289
ginkgo.Context("CSI Topology test using GCE PD driver [Serial]", func() {
f := framework.NewDefaultFramework("csitopology")
driver := drivers.InitGcePDCSIDriver().(testsuites.DynamicPVTestDriver) // TODO (#71289) eliminate by moving this test to common test suite.
var (
config *testsuites.PerTestConfig
testCleanup func()
)
ginkgo.BeforeEach(func() {
driver.SkipUnsupportedTest(testpatterns.TestPattern{})
config, testCleanup = driver.PrepareTest(f)
})
ginkgo.AfterEach(func() {
if testCleanup != nil {
testCleanup()
}
})
ginkgo.It("should provision zonal PD with immediate volume binding and AllowedTopologies set and mount the volume to a pod", func() {
suffix := "topology-positive"
testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */, true /* allowedTopologies */)
})
ginkgo.It("should provision zonal PD with delayed volume binding and mount the volume to a pod", func() {
suffix := "delayed"
testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, false /* allowedTopologies */)
})
ginkgo.It("should provision zonal PD with delayed volume binding and AllowedTopologies set and mount the volume to a pod", func() {
suffix := "delayed-topology-positive"
testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, true /* allowedTopologies */)
})
ginkgo.It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with immediate volume binding", func() {
framework.SkipUnlessMultizone(config.Framework.ClientSet)
suffix := "topology-negative"
testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */)
})
ginkgo.It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with delayed volume binding", func() {
framework.SkipUnlessMultizone(config.Framework.ClientSet)
suffix := "delayed-topology-negative"
testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */)
})
})
})
func testTopologyPositive(cs clientset.Interface, suffix, namespace string, delayBinding, allowedTopologies bool) {
test := createGCEPDStorageClassTest()
test.DelayBinding = delayBinding
class := newStorageClass(test, namespace, suffix)
if allowedTopologies {
topoZone := getRandomClusterZone(cs)
addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, topoZone)
}
test.Client = cs
test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &(class.Name),
VolumeMode: &test.VolumeMode,
}, namespace)
test.Class = class
if delayBinding {
_, node := test.TestBindingWaitForFirstConsumer(nil /* node selector */, false /* expect unschedulable */)
gomega.Expect(node).ToNot(gomega.BeNil(), "Unexpected nil node found")
} else {
test.TestDynamicProvisioning()
}
}
func testTopologyNegative(cs clientset.Interface, suffix, namespace string, delayBinding bool) {
framework.SkipUnlessMultizone(cs)
// Use different zones for pod and PV
zones, err := framework.GetClusterZones(cs)
framework.ExpectNoError(err)
gomega.Expect(zones.Len()).To(gomega.BeNumerically(">=", 2))
zonesList := zones.UnsortedList()
podZoneIndex := rand.Intn(zones.Len())
podZone := zonesList[podZoneIndex]
pvZone := zonesList[(podZoneIndex+1)%zones.Len()]
test := createGCEPDStorageClassTest()
test.DelayBinding = delayBinding
nodeSelector := map[string]string{v1.LabelZoneFailureDomain: podZone}
test.Client = cs
test.Class = newStorageClass(test, namespace, suffix)
addSingleCSIZoneAllowedTopologyToStorageClass(cs, test.Class, pvZone)
test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &(test.Class.Name),
VolumeMode: &test.VolumeMode,
}, namespace)
if delayBinding {
test.TestBindingWaitForFirstConsumer(nodeSelector, true /* expect unschedulable */)
} else {
test.PvCheck = func(claim *v1.PersistentVolumeClaim) {
// Ensure that a pod cannot be scheduled in an unsuitable zone.
pod := testsuites.StartInPodWithVolume(cs, namespace, claim.Name, "pvc-tester-unschedulable", "sleep 100000",
e2epod.NodeSelection{Selector: nodeSelector})
defer testsuites.StopPod(cs, pod)
framework.ExpectNoError(e2epod.WaitForPodNameUnschedulableInNamespace(cs, pod.Name, pod.Namespace), "pod should be unschedulable")
}
test.TestDynamicProvisioning()
}
}
func addSingleCSIZoneAllowedTopologyToStorageClass(c clientset.Interface, sc *storagev1.StorageClass, zone string) {
term := v1.TopologySelectorTerm{
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
{
Key: drivers.GCEPDCSIZoneTopologyKey,
Values: []string{zone},
},
},
}
sc.AllowedTopologies = append(sc.AllowedTopologies, term)
}
func createGCEPDStorageClassTest() testsuites.StorageClassTest {
return testsuites.StorageClassTest{
Name: drivers.GCEPDCSIProvisionerName,
Provisioner: drivers.GCEPDCSIProvisionerName,
Parameters: map[string]string{"type": "pd-standard"},
ClaimSize: "5Gi",
ExpectedSize: "5Gi",
}
}


@@ -20,6 +20,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",


@@ -39,13 +39,18 @@ import (
"fmt"
"math/rand"
"strconv"
"time"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -53,8 +58,8 @@ import (
)
const (
// GCEPDCSIProvisionerName is the name of GCE Persistent Disk CSI provisioner
GCEPDCSIProvisionerName = "pd.csi.storage.gke.io"
// GCEPDCSIDriverName is the name of GCE Persistent Disk CSI driver
GCEPDCSIDriverName = "pd.csi.storage.gke.io"
// GCEPDCSIZoneTopologyKey is the key of GCE Persistent Disk CSI zone topology
GCEPDCSIZoneTopologyKey = "topology.gke.io/zone"
)
@@ -372,7 +377,7 @@ var _ testsuites.DynamicPVTestDriver = &gcePDCSIDriver{}
func InitGcePDCSIDriver() testsuites.TestDriver {
return &gcePDCSIDriver{
driverInfo: testsuites.DriverInfo{
Name: GCEPDCSIProvisionerName,
Name: GCEPDCSIDriverName,
FeatureTag: "[Serial]",
MaxFileSize: testpatterns.FileSizeMedium,
SupportedFsType: sets.NewString(
@@ -391,8 +396,10 @@ func InitGcePDCSIDriver() testsuites.TestDriver {
// GCE supports volume limits, but the test creates large
// number of volumes and times out test suites.
testsuites.CapVolumeLimits: false,
testsuites.CapTopology: true,
},
RequiredAccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
TopologyKeys: []string{GCEPDCSIZoneTopologyKey},
},
}
}
@@ -459,6 +466,10 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
framework.Failf("deploying csi gce-pd driver: %v", err)
}
if err = waitForCSIDriverRegistrationOnAllNodes(GCEPDCSIDriverName, f.ClientSet); err != nil {
framework.Failf("waiting for csi driver registration on all nodes: %v", err)
}
return &testsuites.PerTestConfig{
Driver: g,
Prefix: "gcepd",
@@ -469,3 +480,30 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
cancelLogging()
}
}
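// waitForCSIDriverRegistrationOnAllNodes waits until the given CSI driver is
// registered in the CSINode object of every ready, schedulable node.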
func waitForCSIDriverRegistrationOnAllNodes(driverName string, cs clientset.Interface) error {
nodes := framework.GetReadySchedulableNodesOrDie(cs)
for _, node := range nodes.Items {
if err := waitForCSIDriverRegistrationOnNode(node.Name, driverName, cs); err != nil {
return err
}
}
return nil
}
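// waitForCSIDriverRegistrationOnNode polls the node's CSINode object until
// driverName appears in its list of registered drivers, or the one-minute
// timeout expires.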
func waitForCSIDriverRegistrationOnNode(nodeName string, driverName string, cs clientset.Interface) error {
const csiNodeRegisterTimeout = 1 * time.Minute
return wait.PollImmediate(10*time.Second, csiNodeRegisterTimeout, func() (bool, error) {
csiNode, err := cs.StorageV1beta1().CSINodes().Get(nodeName, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
return false, err
}
for _, driver := range csiNode.Spec.Drivers {
if driver.Name == driverName {
return true, nil
}
}
return false, nil
})
}


@@ -1158,6 +1158,7 @@ func InitGcePdDriver() testsuites.TestDriver {
MaxFileSize: testpatterns.FileSizeMedium,
SupportedFsType: supportedTypes,
SupportedMountOption: sets.NewString("debug", "nouid32"),
TopologyKeys: []string{v1.LabelZoneFailureDomain},
Capabilities: map[testsuites.Capability]bool{
testsuites.CapPersistence: true,
testsuites.CapFsGroup: true,
@@ -1169,6 +1170,7 @@ func InitGcePdDriver() testsuites.TestDriver {
// GCE supports volume limits, but the test creates large
// number of volumes and times out test suites.
testsuites.CapVolumeLimits: false,
testsuites.CapTopology: true,
},
},
}


@@ -59,6 +59,7 @@ var testSuites = []func() testsuites.TestSuite{
testsuites.InitVolumeExpandTestSuite,
testsuites.InitDisruptiveTestSuite,
testsuites.InitVolumeLimitsTestSuite,
testsuites.InitTopologyTestSuite,
}
// This executes testSuites for in-tree volumes.


@@ -573,3 +573,59 @@ func verifyZonesInPV(volume *v1.PersistentVolume, zones sets.String, match bool)
return fmt.Errorf("Zones in StorageClass are %v, but zones in PV are %v", zones, pvZones)
}
func checkZoneFromLabelAndAffinity(pv *v1.PersistentVolume, zone string, matchZone bool) {
checkZonesFromLabelAndAffinity(pv, sets.NewString(zone), matchZone)
}
// checkZonesFromLabelAndAffinity checks that the PV's LabelZoneFailureDomain label and the
// terms with key LabelZoneFailureDomain in the PV's node affinity contain the given zones.
// matchZones indicates whether the zones must match exactly.
func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, matchZones bool) {
ginkgo.By("checking PV's zone label and node affinity terms match expected zone")
if pv == nil {
framework.Failf("nil pv passed")
}
pvLabel, ok := pv.Labels[v1.LabelZoneFailureDomain]
if !ok {
framework.Failf("label %s not found on PV", v1.LabelZoneFailureDomain)
}
zonesFromLabel, err := volumehelpers.LabelZonesToSet(pvLabel)
if err != nil {
framework.Failf("unable to parse zone labels %s: %v", pvLabel, err)
}
if matchZones && !zonesFromLabel.Equal(zones) {
framework.Failf("value[s] of %s label for PV: %v does not match expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones)
}
if !matchZones && !zonesFromLabel.IsSuperset(zones) {
framework.Failf("value[s] of %s label for PV: %v does not contain expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones)
}
if pv.Spec.NodeAffinity == nil {
framework.Failf("node affinity not found in PV spec %v", pv.Spec)
}
if len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) == 0 {
framework.Failf("node selector terms not found in PV spec %v", pv.Spec)
}
for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
keyFound := false
for _, r := range term.MatchExpressions {
if r.Key != v1.LabelZoneFailureDomain {
continue
}
keyFound = true
zonesFromNodeAffinity := sets.NewString(r.Values...)
if matchZones && !zonesFromNodeAffinity.Equal(zones) {
framework.Failf("zones from NodeAffinity of PV: %v does not equal expected zone[s]: %v", zonesFromNodeAffinity, zones)
}
if !matchZones && !zonesFromNodeAffinity.IsSuperset(zones) {
framework.Failf("zones from NodeAffinity of PV: %v does not contain expected zone[s]: %v", zonesFromNodeAffinity, zones)
}
break
}
if !keyFound {
framework.Failf("label %s not found in term %v", v1.LabelZoneFailureDomain, term)
}
}
}


@@ -230,6 +230,22 @@ var (
VolMode: v1.PersistentVolumeBlock,
AllowExpansion: true,
}
// Definitions for topology tests
// TopologyImmediate is TestPattern for immediate binding
TopologyImmediate = TestPattern{
Name: "Dynamic PV (immediate binding)",
VolType: DynamicPV,
BindingMode: storagev1.VolumeBindingImmediate,
}
// TopologyDelayed is TestPattern for delayed binding
TopologyDelayed = TestPattern{
Name: "Dynamic PV (delayed binding)",
VolType: DynamicPV,
BindingMode: storagev1.VolumeBindingWaitForFirstConsumer,
}
)
// NewVolTypeMap creates a map with the given TestVolTypes enabled


@@ -12,6 +12,7 @@ go_library(
"snapshottable.go",
"subpath.go",
"testdriver.go",
"topology.go",
"volume_expand.go",
"volume_io.go",
"volumelimits.go",
@@ -45,6 +46,7 @@ go_library(
"//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/podlogs:go_default_library",


@@ -438,15 +438,6 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
pod = nil
}
// TestBindingWaitForFirstConsumer tests the binding with WaitForFirstConsumer mode
func (t StorageClassTest) TestBindingWaitForFirstConsumer(nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) {
pvs, node := t.TestBindingWaitForFirstConsumerMultiPVC([]*v1.PersistentVolumeClaim{t.Claim}, nodeSelector, expectUnschedulable)
if pvs == nil {
return nil, node
}
return pvs[0], node
}
// TestBindingWaitForFirstConsumerMultiPVC tests the binding with WaitForFirstConsumer mode
func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
var err error


@@ -157,6 +157,7 @@ const (
CapNodeExpansion Capability = "nodeExpansion" // support volume expansion for node
CapVolumeLimits Capability = "volumeLimits" // support volume limits (can be *very* slow)
CapSingleNodeVolume Capability = "singleNodeVolume" // support volume that can run on single node (like hostpath)
CapTopology Capability = "topology" // support topology
)
// DriverInfo represents static information about a TestDriver.
@@ -183,6 +184,12 @@ type DriverInfo struct {
// [Optional] List of access modes required for provisioning, defaults to
// RWO if unset
RequiredAccessModes []v1.PersistentVolumeAccessMode
// [Optional] List of topology keys driver supports
TopologyKeys []string
// [Optional] Number of allowed topologies the driver requires.
// Only relevant if TopologyKeys is set. Defaults to 1.
// Example: multi-zonal disk requires at least 2 allowed topologies.
NumAllowedTopologies int
}
// PerTestConfig represents parameters that control test execution.
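
As an illustration (not part of this change), a driver opts into these tests by filling in the new fields. A minimal, hypothetical sketch — the package, driver name, and topology key below are made up; only the field names come from this change:

package drivers

import (
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// exampleTopologyDriverInfo is a hypothetical DriverInfo showing the new
// topology-related fields added by this change.
var exampleTopologyDriverInfo = testsuites.DriverInfo{
	Name: "example.csi.vendor.io",
	Capabilities: map[testsuites.Capability]bool{
		// Opting in enables the topology test suite for this driver.
		testsuites.CapTopology: true,
	},
	// Node labels that carry this driver's topology information.
	TopologyKeys: []string{"topology.example.vendor.io/zone"},
	// Number of allowed topologies the tests require; defaults to 1 when unset.
	NumAllowedTopologies: 1,
}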


@@ -0,0 +1,351 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This suite tests volume topology
package testsuites
import (
"fmt"
"math/rand"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
type topologyTestSuite struct {
tsInfo TestSuiteInfo
}
type topologyTest struct {
config *PerTestConfig
testCleanup func()
intreeOps opCounts
migratedOps opCounts
resource genericVolumeTestResource
pod *v1.Pod
allTopologies []topology
}
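// topology represents a single topology segment as a map of the driver's
// topology keys to the corresponding node label values.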
type topology map[string]string
var _ TestSuite = &topologyTestSuite{}
// InitTopologyTestSuite returns topologyTestSuite that implements TestSuite interface
func InitTopologyTestSuite() TestSuite {
return &topologyTestSuite{
tsInfo: TestSuiteInfo{
name: "topology",
testPatterns: []testpatterns.TestPattern{
testpatterns.TopologyImmediate,
testpatterns.TopologyDelayed,
},
},
}
}
func (t *topologyTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}
func (t *topologyTestSuite) skipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
}
func (t *topologyTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
var (
dInfo = driver.GetDriverInfo()
dDriver DynamicPVTestDriver
cs clientset.Interface
err error
)
ginkgo.BeforeEach(func() {
// Check preconditions.
ok := false
dDriver, ok = driver.(DynamicPVTestDriver)
if !ok {
framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
}
if !dInfo.Capabilities[CapTopology] {
framework.Skipf("Driver %q does not support topology - skipping", dInfo.Name)
}
})
// This intentionally comes after checking the preconditions because it
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("topology")
init := func() topologyTest {
const numTestTopologies = 2
l := topologyTest{}
// Now do the more expensive test initialization.
l.config, l.testCleanup = driver.PrepareTest(f)
l.resource = genericVolumeTestResource{
driver: driver,
config: l.config,
pattern: pattern,
}
// After driver is installed, check driver topologies on nodes
cs = f.ClientSet
keys := dInfo.TopologyKeys
if len(keys) == 0 {
framework.Skipf("Driver didn't provide topology keys -- skipping")
}
if dInfo.NumAllowedTopologies == 0 {
// Any plugin that supports topology defaults to 1 topology
dInfo.NumAllowedTopologies = 1
}
// We collect 1 additional topology, if possible, for the conflicting topology test
// case, but it's not needed for the positive test
l.allTopologies, err = t.getCurrentTopologies(cs, keys, dInfo.NumAllowedTopologies+1)
framework.ExpectNoError(err, "failed to get current driver topologies")
if len(l.allTopologies) < dInfo.NumAllowedTopologies {
framework.Skipf("Not enough topologies in cluster -- skipping")
}
l.resource.sc = dDriver.GetDynamicProvisionStorageClass(l.config, pattern.FsType)
framework.ExpectNotEqual(l.resource.sc, nil, "driver failed to provide a StorageClass")
l.resource.sc.VolumeBindingMode = &pattern.BindingMode
claimSize := dDriver.GetClaimSize()
l.resource.pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: claimSize,
StorageClassName: &(l.resource.sc.Name),
}, l.config.Framework.Namespace.Name)
l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName)
return l
}
cleanup := func(l topologyTest) {
t.cleanupResources(cs, &l)
if l.testCleanup != nil {
l.testCleanup()
}
validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps)
}
ginkgo.It("should provision a volume and schedule a pod with AllowedTopologies", func() {
l := init()
defer func() {
cleanup(l)
}()
// If possible, exclude one topology, otherwise allow them all
excludedIndex := -1
if len(l.allTopologies) > dInfo.NumAllowedTopologies {
excludedIndex = rand.Intn(len(l.allTopologies))
}
allowedTopologies := t.setAllowedTopologies(l.resource.sc, l.allTopologies, excludedIndex)
t.createResources(cs, &l, nil)
err = e2epod.WaitForPodRunningInNamespace(cs, l.pod)
framework.ExpectNoError(err)
ginkgo.By("Verifying pod scheduled to correct node")
pod, err := cs.CoreV1().Pods(l.pod.Namespace).Get(l.pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
node, err := cs.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
t.verifyNodeTopology(node, allowedTopologies)
})
ginkgo.It("should fail to schedule a pod which has topologies that conflict with AllowedTopologies", func() {
l := init()
defer func() {
cleanup(l)
}()
if len(l.allTopologies) < dInfo.NumAllowedTopologies+1 {
framework.Skipf("Not enough topologies in cluster -- skipping")
}
// Exclude one topology
excludedIndex := rand.Intn(len(l.allTopologies))
t.setAllowedTopologies(l.resource.sc, l.allTopologies, excludedIndex)
// Set pod nodeSelector to the excluded topology
exprs := []v1.NodeSelectorRequirement{}
for k, v := range l.allTopologies[excludedIndex] {
exprs = append(exprs, v1.NodeSelectorRequirement{
Key: k,
Operator: v1.NodeSelectorOpIn,
Values: []string{v},
})
}
affinity := &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: exprs,
},
},
},
},
}
t.createResources(cs, &l, affinity)
// Wait for pod to fail scheduling
// With delayed binding, the scheduler errors before provisioning
// With immediate binding, the volume gets provisioned but cannot be scheduled
err = e2epod.WaitForPodNameUnschedulableInNamespace(cs, l.pod.Name, l.pod.Namespace)
framework.ExpectNoError(err)
})
}
// getCurrentTopologies() goes through all Nodes and returns up to maxCount unique driver topologies
func (t *topologyTestSuite) getCurrentTopologies(cs clientset.Interface, keys []string, maxCount int) ([]topology, error) {
nodes := framework.GetReadySchedulableNodesOrDie(cs)
topos := []topology{}
// TODO: scale?
for _, n := range nodes.Items {
topo := map[string]string{}
for _, k := range keys {
v, ok := n.Labels[k]
if !ok {
return nil, fmt.Errorf("node %v missing topology label %v", n.Name, k)
}
topo[k] = v
}
found := false
for _, existingTopo := range topos {
if topologyEqual(existingTopo, topo) {
found = true
break
}
}
if !found {
e2elog.Logf("found topology %v", topo)
topos = append(topos, topo)
}
if len(topos) >= maxCount {
break
}
}
return topos, nil
}
// topologyEqual reports whether two topologies contain the same key/value pairs.
// reflect.DeepEqual doesn't seem to work here.
func topologyEqual(t1, t2 topology) bool {
if len(t1) != len(t2) {
return false
}
for k1, v1 := range t1 {
if v2, ok := t2[k1]; !ok || v1 != v2 {
return false
}
}
return true
}
// setAllowedTopologies sets StorageClass.AllowedTopologies from topos while excluding the topology at excludedIndex.
// excludedIndex can be -1 to specify nothing should be excluded.
// Return the list of allowed topologies generated.
func (t *topologyTestSuite) setAllowedTopologies(sc *storagev1.StorageClass, topos []topology, excludedIndex int) []topology {
allowedTopologies := []topology{}
sc.AllowedTopologies = []v1.TopologySelectorTerm{}
for i := 0; i < len(topos); i++ {
if i != excludedIndex {
exprs := []v1.TopologySelectorLabelRequirement{}
for k, v := range topos[i] {
exprs = append(exprs, v1.TopologySelectorLabelRequirement{
Key: k,
Values: []string{v},
})
}
sc.AllowedTopologies = append(sc.AllowedTopologies, v1.TopologySelectorTerm{MatchLabelExpressions: exprs})
allowedTopologies = append(allowedTopologies, topos[i])
}
}
return allowedTopologies
}
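// verifyNodeTopology fails the test unless at least one key/value pair from
// the allowed topologies is present in the node's labels.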
func (t *topologyTestSuite) verifyNodeTopology(node *v1.Node, allowedTopos []topology) {
for _, topo := range allowedTopos {
for k, v := range topo {
nodeV, _ := node.Labels[k]
if nodeV == v {
return
}
}
}
e2elog.Failf("node %v topology labels %+v don't match allowed topologies %+v", node.Name, node.Labels, allowedTopos)
}
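// createResources creates the test's StorageClass, PVC, and a pod mounting the
// claim, applying the given node affinity (which may be nil) to the pod.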
func (t *topologyTestSuite) createResources(cs clientset.Interface, l *topologyTest, affinity *v1.Affinity) {
var err error
e2elog.Logf("Creating storage class object and pvc object for driver - sc: %v, pvc: %v", l.resource.sc, l.resource.pvc)
ginkgo.By("Creating sc")
l.resource.sc, err = cs.StorageV1().StorageClasses().Create(l.resource.sc)
framework.ExpectNoError(err)
ginkgo.By("Creating pvc")
l.resource.pvc, err = cs.CoreV1().PersistentVolumeClaims(l.resource.pvc.Namespace).Create(l.resource.pvc)
framework.ExpectNoError(err)
ginkgo.By("Creating pod")
l.pod = e2epod.MakeSecPod(l.config.Framework.Namespace.Name,
[]*v1.PersistentVolumeClaim{l.resource.pvc},
nil,
false,
"",
false,
false,
e2epv.SELinuxLabel,
nil)
l.pod.Spec.Affinity = affinity
l.pod, err = cs.CoreV1().Pods(l.pod.Namespace).Create(l.pod)
framework.ExpectNoError(err)
}
func (t *topologyTestSuite) cleanupResources(cs clientset.Interface, l *topologyTest) {
if l.pod != nil {
ginkgo.By("Deleting pod")
err := e2epod.DeletePodWithWait(cs, l.pod)
framework.ExpectNoError(err, "while deleting pod")
}
l.resource.cleanupResource()
}


@@ -42,7 +42,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
clientset "k8s.io/client-go/kubernetes"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth"
@@ -56,66 +55,8 @@ import (
const (
// Plugin name of the external provisioner
externalPluginName = "example.com/nfs"
// Number of PVCs for multi PVC tests
multiPVCcount = 3
)
func checkZoneFromLabelAndAffinity(pv *v1.PersistentVolume, zone string, matchZone bool) {
checkZonesFromLabelAndAffinity(pv, sets.NewString(zone), matchZone)
}
// checkZoneLabelAndAffinity checks the LabelZoneFailureDomain label of PV and terms
// with key LabelZoneFailureDomain in PV's node affinity contains zone
// matchZones is used to indicate if zones should match perfectly
func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, matchZones bool) {
ginkgo.By("checking PV's zone label and node affinity terms match expected zone")
if pv == nil {
framework.Failf("nil pv passed")
}
pvLabel, ok := pv.Labels[v1.LabelZoneFailureDomain]
if !ok {
framework.Failf("label %s not found on PV", v1.LabelZoneFailureDomain)
}
zonesFromLabel, err := volumehelpers.LabelZonesToSet(pvLabel)
if err != nil {
framework.Failf("unable to parse zone labels %s: %v", pvLabel, err)
}
if matchZones && !zonesFromLabel.Equal(zones) {
framework.Failf("value[s] of %s label for PV: %v does not match expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones)
}
if !matchZones && !zonesFromLabel.IsSuperset(zones) {
framework.Failf("value[s] of %s label for PV: %v does not contain expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones)
}
if pv.Spec.NodeAffinity == nil {
framework.Failf("node affinity not found in PV spec %v", pv.Spec)
}
if len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) == 0 {
framework.Failf("node selector terms not found in PV spec %v", pv.Spec)
}
for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
keyFound := false
for _, r := range term.MatchExpressions {
if r.Key != v1.LabelZoneFailureDomain {
continue
}
keyFound = true
zonesFromNodeAffinity := sets.NewString(r.Values...)
if matchZones && !zonesFromNodeAffinity.Equal(zones) {
framework.Failf("zones from NodeAffinity of PV: %v does not equal expected zone[s]: %v", zonesFromNodeAffinity, zones)
}
if !matchZones && !zonesFromNodeAffinity.IsSuperset(zones) {
framework.Failf("zones from NodeAffinity of PV: %v does not contain expected zone[s]: %v", zonesFromNodeAffinity, zones)
}
break
}
if !keyFound {
framework.Failf("label %s not found in term %v", v1.LabelZoneFailureDomain, term)
}
}
}
// checkAWSEBS checks properties of an AWS EBS. Test framework does not
// instantiate full AWS provider, therefore we need use ec2 API directly.
func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool) error {
@@ -184,71 +125,6 @@ func checkGCEPD(volume *v1.PersistentVolume, volumeType string) error {
return nil
}
func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTopology bool, pvcCount int) {
storageClassTestNameFmt := "Delayed binding %s storage class test %s"
storageClassTestNameSuffix := ""
if specifyAllowedTopology {
storageClassTestNameSuffix += " with AllowedTopologies"
}
tests := []testsuites.StorageClassTest{
{
Name: fmt.Sprintf(storageClassTestNameFmt, "EBS", storageClassTestNameSuffix),
CloudProviders: []string{"aws"},
Provisioner: "kubernetes.io/aws-ebs",
ClaimSize: "2Gi",
DelayBinding: true,
},
{
Name: fmt.Sprintf(storageClassTestNameFmt, "GCE PD", storageClassTestNameSuffix),
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
ClaimSize: "2Gi",
DelayBinding: true,
},
}
for _, test := range tests {
if !framework.ProviderIs(test.CloudProviders...) {
framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
continue
}
action := "creating claims with class with waitForFirstConsumer"
suffix := "delayed"
var topoZone string
test.Client = c
test.Class = newStorageClass(test, ns, suffix)
if specifyAllowedTopology {
action += " and allowedTopologies"
suffix += "-topo"
topoZone = getRandomClusterZone(c)
addSingleZoneAllowedTopologyToStorageClass(c, test.Class, topoZone)
}
ginkgo.By(action)
var claims []*v1.PersistentVolumeClaim
for i := 0; i < pvcCount; i++ {
claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &test.Class.Name,
VolumeMode: &test.VolumeMode,
}, ns)
claims = append(claims, claim)
}
pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
if node == nil {
framework.Failf("unexpected nil node found")
}
zone, ok := node.Labels[v1.LabelZoneFailureDomain]
if !ok {
framework.Failf("label %s not found on Node", v1.LabelZoneFailureDomain)
}
if specifyAllowedTopology && topoZone != zone {
framework.Failf("zone specified in allowedTopologies: %s does not match zone of node where PV got provisioned: %s", topoZone, zone)
}
for _, pv := range pvs {
checkZoneFromLabelAndAffinity(pv, zone, true)
}
}
}
var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
f := framework.NewDefaultFramework("volume-provisioning")
@@ -964,57 +840,6 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.ExpectNoError(err)
})
})
ginkgo.Describe("DynamicProvisioner delayed binding [Slow]", func() {
ginkgo.It("should create persistent volumes in the same zone as node after a pod mounting the claims is started", func() {
testZonalDelayedBinding(c, ns, false /*specifyAllowedTopology*/, 1 /*pvcCount*/)
testZonalDelayedBinding(c, ns, false /*specifyAllowedTopology*/, 3 /*pvcCount*/)
})
})
ginkgo.Describe("DynamicProvisioner allowedTopologies", func() {
ginkgo.It("should create persistent volume in the zone specified in allowedTopologies of storageclass", func() {
tests := []testsuites.StorageClassTest{
{
Name: "AllowedTopologies EBS storage class test",
CloudProviders: []string{"aws"},
Provisioner: "kubernetes.io/aws-ebs",
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
},
{
Name: "AllowedTopologies GCE PD storage class test",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
},
}
for _, test := range tests {
if !framework.ProviderIs(test.CloudProviders...) {
framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
continue
}
ginkgo.By("creating a claim with class with allowedTopologies set")
suffix := "topology"
test.Client = c
test.Class = newStorageClass(test, ns, suffix)
zone := getRandomClusterZone(c)
addSingleZoneAllowedTopologyToStorageClass(c, test.Class, zone)
test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
StorageClassName: &test.Class.Name,
VolumeMode: &test.VolumeMode,
}, ns)
pv := test.TestDynamicProvisioning()
checkZoneFromLabelAndAffinity(pv, zone, true)
}
})
})
ginkgo.Describe("DynamicProvisioner delayed binding with allowedTopologies [Slow]", func() {
ginkgo.It("should create persistent volumes in the same zone as specified in allowedTopologies after a pod mounting the claims is started", func() {
testZonalDelayedBinding(c, ns, true /*specifyAllowedTopology*/, 1 /*pvcCount*/)
testZonalDelayedBinding(c, ns, true /*specifyAllowedTopology*/, 3 /*pvcCount*/)
})
})
})
func verifyDefaultStorageClass(c clientset.Interface, scName string, expectedDefault bool) {
@@ -1064,18 +889,6 @@ func getDefaultPluginName() string {
return ""
}
func addSingleZoneAllowedTopologyToStorageClass(c clientset.Interface, sc *storagev1.StorageClass, zone string) {
term := v1.TopologySelectorTerm{
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
{
Key: v1.LabelZoneFailureDomain,
Values: []string{zone},
},
},
}
sc.AllowedTopologies = append(sc.AllowedTopologies, term)
}
func newStorageClass(t testsuites.StorageClassTest, ns string, suffix string) *storagev1.StorageClass {
pluginName := t.Provisioner
if pluginName == "" {