From 6a040438ea8837c5990f6b3e122ebe349fbcf409 Mon Sep 17 00:00:00 2001
From: Dan Winship
Date: Sun, 8 Sep 2019 13:19:17 -0400
Subject: [PATCH] Update test/e2e/storage for new GetReadySchedulableNodes stuff

---
 test/e2e/storage/detach_mounted.go            | 13 +++++++------
 test/e2e/storage/drivers/BUILD                |  1 +
 test/e2e/storage/drivers/csi.go               | 19 +++++++++++--------
 test/e2e/storage/drivers/in_tree.go           | 16 ++++++++--------
 test/e2e/storage/empty_dir_wrapper.go         |  9 ++++-----
 test/e2e/storage/flexvolume.go                | 16 ++++++++--------
 .../flexvolume_mounted_volume_resize.go       | 15 ++++++---------
 test/e2e/storage/flexvolume_online_resize.go  |  9 ++++-----
 test/e2e/storage/mounted_volume_resize.go     | 11 ++++-------
 .../nfs_persistent_volume-disruptive.go       |  4 ++--
 test/e2e/storage/pd.go                        |  7 +++++--
 test/e2e/storage/persistent_volumes-local.go  | 14 ++++----------
 test/e2e/storage/testsuites/BUILD             |  1 +
 test/e2e/storage/testsuites/multivolume.go    | 10 +++++++---
 test/e2e/storage/testsuites/topology.go       |  6 +++++-
 test/e2e/storage/testsuites/volumelimits.go   | 10 ++++------
 test/e2e/storage/volume_limits.go             |  7 +++----
 .../vsphere/persistent_volumes-vsphere.go     |  6 +-----
 test/e2e/storage/vsphere/vsphere_scale.go     |  5 ++++-
 test/e2e/storage/vsphere/vsphere_stress.go    |  7 +++----
 test/e2e/storage/vsphere/vsphere_utils.go     |  9 +++++----
 .../vsphere/vsphere_volume_datastore.go       |  7 +++----
 .../vsphere/vsphere_volume_master_restart.go  |  4 +++-
 .../vsphere/vsphere_volume_node_delete.go     | 12 +++++++-----
 .../vsphere/vsphere_volume_node_poweroff.go   |  5 +++--
 .../storage/vsphere/vsphere_volume_perf.go    |  4 +++-
 .../vsphere/vsphere_volume_placement.go       |  4 +++-
 .../vsphere/vsphere_volume_vpxd_restart.go    |  5 +++--
 .../vsphere/vsphere_volume_vsan_policy.go     | 12 +++--------
 .../storage/vsphere/vsphere_zone_support.go   |  7 +++----
 30 files changed, 128 insertions(+), 127 deletions(-)

diff --git a/test/e2e/storage/detach_mounted.go b/test/e2e/storage/detach_mounted.go
index 73592a73d1f..7a797f7e241 100644
--- a/test/e2e/storage/detach_mounted.go
+++ b/test/e2e/storage/detach_mounted.go
@@ -18,7 +18,6 @@ package storage
 
 import (
 	"fmt"
-	"math/rand"
 	"path"
 	"time"
 
@@ -28,6 +27,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -48,7 +48,7 @@ var _ = utils.SIGDescribe("Detaching volumes", func() {
 
 	var cs clientset.Interface
 	var ns *v1.Namespace
-	var node v1.Node
+	var node *v1.Node
 	var suffix string
 
@@ -59,8 +59,9 @@ var _ = utils.SIGDescribe("Detaching volumes", func() {
 
 		cs = f.ClientSet
 		ns = f.Namespace
-		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-		node = nodes.Items[rand.Intn(len(nodes.Items))]
+		var err error
+		node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+		framework.ExpectNoError(err)
 		suffix = ns.Name
 	})
 
@@ -71,7 +72,7 @@ var _ = utils.SIGDescribe("Detaching volumes", func() {
 		driverInstallAs := driver + "-" + suffix
 
 		ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
-		installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver))
+		installFlex(cs, node, "k8s", driverInstallAs, path.Join(driverDir, driver))
 		ginkgo.By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs))
 		installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver))
 		volumeSource := v1.VolumeSource{
@@ -118,7 +119,7 @@ var _ = utils.SIGDescribe("Detaching volumes", func() {
 		framework.ExpectNoError(err, "while waiting for volume to be removed from in-use")
 
 		ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
-		uninstallFlex(cs, &node, "k8s", driverInstallAs)
+		uninstallFlex(cs, node, "k8s", driverInstallAs)
 		ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs))
 		uninstallFlex(cs, nil, "k8s", driverInstallAs)
 	})
diff --git a/test/e2e/storage/drivers/BUILD b/test/e2e/storage/drivers/BUILD
index 59704021577..ab8543eb505 100644
--- a/test/e2e/storage/drivers/BUILD
+++ b/test/e2e/storage/drivers/BUILD
@@ -25,6 +25,7 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/auth:go_default_library",
+        "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/pv:go_default_library",
         "//test/e2e/framework/volume:go_default_library",
diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go
index d7ecd57c922..3907edf25e3 100644
--- a/test/e2e/storage/drivers/csi.go
+++ b/test/e2e/storage/drivers/csi.go
@@ -37,7 +37,6 @@ package drivers
 
 import (
 	"fmt"
-	"math/rand"
 	"strconv"
 	"time"
 
@@ -52,6 +51,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -169,13 +169,13 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.Per
 	cs := f.ClientSet
 
 	// The hostpath CSI driver only works when everything runs on the same node.
-	nodes := framework.GetReadySchedulableNodesOrDie(cs)
-	nodeName := nodes.Items[rand.Intn(len(nodes.Items))].Name
+	node, err := e2enode.GetRandomReadySchedulableNode(cs)
+	framework.ExpectNoError(err)
 	config := &testsuites.PerTestConfig{
 		Driver:         h,
 		Prefix:         "hostpath",
 		Framework:      f,
-		ClientNodeName: nodeName,
+		ClientNodeName: node.Name,
 	}
 
 	o := utils.PatchCSIOptions{
@@ -185,7 +185,7 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.Per
 		DriverContainerArguments: []string{"--drivername=" + config.GetUniqueDriverName()},
 		ProvisionerContainerName: "csi-provisioner",
 		SnapshotterContainerName: "csi-snapshotter",
-		NodeName:                 nodeName,
+		NodeName:                 node.Name,
 	}
 	cleanup, err := config.Framework.CreateFromManifests(func(item interface{}) error {
 		return utils.PatchCSIDeployment(config.Framework, o, item)
@@ -293,8 +293,8 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
 	cs := f.ClientSet
 
 	// pods should be scheduled on the node
-	nodes := framework.GetReadySchedulableNodesOrDie(cs)
-	node := nodes.Items[rand.Intn(len(nodes.Items))]
+	node, err := e2enode.GetRandomReadySchedulableNode(cs)
+	framework.ExpectNoError(err)
 	config := &testsuites.PerTestConfig{
 		Driver:    m,
 		Prefix:    "mock",
@@ -477,7 +477,10 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
 }
 
 func waitForCSIDriverRegistrationOnAllNodes(driverName string, cs clientset.Interface) error {
-	nodes := framework.GetReadySchedulableNodesOrDie(cs)
+	nodes, err := e2enode.GetReadySchedulableNodes(cs)
+	if err != nil {
+		return err
+	}
 	for _, node := range nodes.Items {
 		if err := waitForCSIDriverRegistrationOnNode(node.Name, driverName, cs); err != nil {
 			return err
diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go
index 0cd8812d08a..b2ce04e4787 100644
--- a/test/e2e/storage/drivers/in_tree.go
+++ b/test/e2e/storage/drivers/in_tree.go
@@ -37,7 +37,6 @@ package drivers
 
 import (
 	"fmt"
-	"math/rand"
 	"os/exec"
 	"strconv"
 	"strings"
@@ -55,6 +54,7 @@ import (
 	"k8s.io/apiserver/pkg/authentication/serviceaccount"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/auth"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
@@ -750,8 +750,8 @@ func (h *hostPathDriver) CreateVolume(config *testsuites.PerTestConfig, volType
 	cs := f.ClientSet
 
 	// pods should be scheduled on the node
-	nodes := framework.GetReadySchedulableNodesOrDie(cs)
-	node := nodes.Items[rand.Intn(len(nodes.Items))]
+	node, err := e2enode.GetRandomReadySchedulableNode(cs)
+	framework.ExpectNoError(err)
 	config.ClientNodeName = node.Name
 	return nil
 }
@@ -832,8 +832,8 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, v
 	volumeName := "test-volume"
 
 	// pods should be scheduled on the node
-	nodes := framework.GetReadySchedulableNodesOrDie(cs)
-	node := nodes.Items[rand.Intn(len(nodes.Items))]
+	node, err := e2enode.GetRandomReadySchedulableNode(cs)
+	framework.ExpectNoError(err)
 	config.ClientNodeName = node.Name
 
 	cmd := fmt.Sprintf("mkdir %v -m 777 && ln -s %v %v", sourcePath, sourcePath, targetPath)
@@ -1766,9 +1766,9 @@ func (l *localDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 }
 
 func (l *localDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
-	// choose a randome node to test against
-	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-	l.node = &nodes.Items[rand.Intn(len(nodes.Items))]
+	var err error
+	l.node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+	framework.ExpectNoError(err)
 
 	l.hostExec = utils.NewHostExec(f)
 	l.ltrMgr = utils.NewLocalResourceManager("local-driver", l.hostExec, "/tmp")
diff --git a/test/e2e/storage/empty_dir_wrapper.go b/test/e2e/storage/empty_dir_wrapper.go
index 323840eee8f..cb90c340cd2 100644
--- a/test/e2e/storage/empty_dir_wrapper.go
+++ b/test/e2e/storage/empty_dir_wrapper.go
@@ -25,12 +25,12 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 
 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 )
 
 const (
@@ -346,9 +346,8 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
 	const nodeHostnameLabelKey = "kubernetes.io/hostname"
 
 	rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
-	nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-	gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
-	targetNode := nodeList.Items[0]
+	targetNode, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+	framework.ExpectNoError(err)
 
 	ginkgo.By("Creating RC which spawns configmap-volume pods")
 	affinity := &v1.Affinity{
@@ -397,7 +396,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
 			},
 		},
 	}
-	_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rc)
+	_, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rc)
 	framework.ExpectNoError(err, "error creating replication controller")
 
 	defer func() {
diff --git a/test/e2e/storage/flexvolume.go b/test/e2e/storage/flexvolume.go
index 736c245d99d..1e042dee901 100644
--- a/test/e2e/storage/flexvolume.go
+++ b/test/e2e/storage/flexvolume.go
@@ -18,7 +18,6 @@ package storage
 
 import (
 	"fmt"
-	"math/rand"
 	"net"
 	"path"
 
@@ -161,7 +160,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
 
 	var cs clientset.Interface
 	var ns *v1.Namespace
-	var node v1.Node
+	var node *v1.Node
 	var config volume.TestConfig
 	var suffix string
 
@@ -173,8 +172,9 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
 
 		cs = f.ClientSet
 		ns = f.Namespace
-		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-		node = nodes.Items[rand.Intn(len(nodes.Items))]
+		var err error
+		node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+		framework.ExpectNoError(err)
 		config = volume.TestConfig{
 			Namespace: ns.Name,
 			Prefix:    "flex",
@@ -188,7 +188,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
 		driverInstallAs := driver + "-" + suffix
 
 		ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
-		installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver))
+		installFlex(cs, node, "k8s", driverInstallAs, path.Join(driverDir, driver))
 
 		testFlexVolume(driverInstallAs, cs, config, f)
 
@@ -198,7 +198,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
 		}
 
 		ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
-		uninstallFlex(cs, &node, "k8s", driverInstallAs)
+		uninstallFlex(cs, node, "k8s", driverInstallAs)
 	})
 
 	ginkgo.It("should be mountable when attachable", func() {
@@ -206,7 +206,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
 		driverInstallAs := driver + "-" + suffix
 
 		ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
-		installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver))
+		installFlex(cs, node, "k8s", driverInstallAs, path.Join(driverDir, driver))
 		ginkgo.By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs))
 		installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver))
 
@@ -221,7 +221,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
 		time.Sleep(detachTimeout)
 
 		ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
-		uninstallFlex(cs, &node, "k8s", driverInstallAs)
+		uninstallFlex(cs, node, "k8s", driverInstallAs)
 		ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs))
 		uninstallFlex(cs, nil, "k8s", driverInstallAs)
 	})
diff --git a/test/e2e/storage/flexvolume_mounted_volume_resize.go b/test/e2e/storage/flexvolume_mounted_volume_resize.go
index b574ab0ebfa..ff698d2775e 100644
--- a/test/e2e/storage/flexvolume_mounted_volume_resize.go
+++ b/test/e2e/storage/flexvolume_mounted_volume_resize.go
@@ -31,6 +31,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -49,6 +50,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
 		err               error
 		pvc               *v1.PersistentVolumeClaim
 		resizableSc       *storagev1.StorageClass
+		node              *v1.Node
 		nodeName          string
 		isNodeLabeled     bool
 		nodeKeyValueLabel map[string]string
@@ -66,12 +68,9 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
 		ns = f.Namespace.Name
 		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
 
-		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-		if len(nodeList.Items) != 0 {
-			nodeName = nodeList.Items[0].Name
-		} else {
-			framework.Failf("Unable to find ready and schedulable Node")
-		}
+		node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+		framework.ExpectNoError(err)
+		nodeName = node.Name
 
 		nodeKey = "mounted_flexvolume_expand"
 
@@ -125,10 +124,8 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
 
 	ginkgo.It("Should verify mounted flex volumes can be resized", func() {
 		driver := "dummy-attachable"
-		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-		node := nodeList.Items[0]
 		ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driver))
-		installFlex(c, &node, "k8s", driver, path.Join(driverDir, driver))
+		installFlex(c, node, "k8s", driver, path.Join(driverDir, driver))
 		ginkgo.By(fmt.Sprintf("installing flexvolume %s on (master) node %s as %s", path.Join(driverDir, driver), node.Name, driver))
 		installFlex(c, nil, "k8s", driver, path.Join(driverDir, driver))
 
diff --git a/test/e2e/storage/flexvolume_online_resize.go b/test/e2e/storage/flexvolume_online_resize.go
index 996dbfb0ffe..03bc07f874e 100644
--- a/test/e2e/storage/flexvolume_online_resize.go
+++ b/test/e2e/storage/flexvolume_online_resize.go
@@ -28,6 +28,7 @@ import (
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -59,11 +60,9 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
 		ns = f.Namespace.Name
 		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
 
-		nodeList = framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-		if len(nodeList.Items) == 0 {
-			framework.Failf("unable to find ready and schedulable Node")
-		}
-		nodeName = nodeList.Items[0].Name
+		node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+		framework.ExpectNoError(err)
+		nodeName = node.Name
 
 		nodeKey = "mounted_flexvolume_expand"
 
diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go
index 2d0b2e877b8..6f1eb2c884f 100644
--- a/test/e2e/storage/mounted_volume_resize.go
+++ b/test/e2e/storage/mounted_volume_resize.go
@@ -32,6 +32,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/conditions"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -42,7 +43,6 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
 	var (
 		c                 clientset.Interface
 		ns                string
-		err               error
 		pvc               *v1.PersistentVolumeClaim
 		resizableSc       *storagev1.StorageClass
 		nodeName          string
@@ -59,12 +59,9 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
 		ns = f.Namespace.Name
 		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
 
-		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-		if len(nodeList.Items) != 0 {
-			nodeName = nodeList.Items[0].Name
-		} else {
-			framework.Failf("Unable to find ready and schedulable Node")
-		}
+		node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+		framework.ExpectNoError(err)
+		nodeName = node.Name
 
 		nodeKey = "mounted_volume_expand"
 
diff --git a/test/e2e/storage/nfs_persistent_volume-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go
index 1eb7af4a4b8..5dfcadd61ca 100644
--- a/test/e2e/storage/nfs_persistent_volume-disruptive.go
+++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go
@@ -85,10 +85,10 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 			StorageClassName: &emptyStorageClass,
 		}
 		// Get the first ready node IP that is not hosting the NFS pod.
-		var err error
 		if clientNodeIP == "" {
 			framework.Logf("Designating test node")
-			nodes := framework.GetReadySchedulableNodesOrDie(c)
+			nodes, err := e2enode.GetReadySchedulableNodes(c)
+			framework.ExpectNoError(err)
 			for _, node := range nodes.Items {
 				if node.Name != nfsServerPod.Spec.NodeName {
 					clientNode = &node
diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go
index dddc718bc10..3edb4924fd0 100644
--- a/test/e2e/storage/pd.go
+++ b/test/e2e/storage/pd.go
@@ -77,7 +77,9 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 
 		podClient = cs.CoreV1().Pods(ns)
 		nodeClient = cs.CoreV1().Nodes()
-		nodes = framework.GetReadySchedulableNodesOrDie(cs)
+		var err error
+		nodes, err = e2enode.GetReadySchedulableNodes(cs)
+		framework.ExpectNoError(err)
 		gomega.Expect(len(nodes.Items)).To(gomega.BeNumerically(">=", minNodes), fmt.Sprintf("Requires at least %d nodes", minNodes))
 		host0Name = types.NodeName(nodes.Items[0].ObjectMeta.Name)
 		host1Name = types.NodeName(nodes.Items[1].ObjectMeta.Name)
@@ -443,7 +445,8 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 func countReadyNodes(c clientset.Interface, hostName types.NodeName) int {
 	e2enode.WaitForNodeToBeReady(c, string(hostName), nodeStatusTimeout)
 	framework.WaitForAllNodesSchedulable(c, nodeStatusTimeout)
-	nodes := framework.GetReadySchedulableNodesOrDie(c)
+	nodes, err := e2enode.GetReadySchedulableNodes(c)
+	framework.ExpectNoError(err)
 	return len(nodes.Items)
 }
 
diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go
index 9a48b23db36..8e6db3b25e1 100644
--- a/test/e2e/storage/persistent_volumes-local.go
+++ b/test/e2e/storage/persistent_volumes-local.go
@@ -39,6 +39,7 @@ import (
 	"k8s.io/apimachinery/pkg/watch"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
@@ -150,15 +151,8 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 	)
 
 	ginkgo.BeforeEach(func() {
-		// Get all the schedulable nodes
-		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-		gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero(), "No available nodes for scheduling")
-
-		// Cap max number of nodes
-		maxLen := len(nodes.Items)
-		if maxLen > maxNodes {
-			maxLen = maxNodes
-		}
+		nodes, err := e2enode.GetBoundedReadySchedulableNodes(f.ClientSet, maxNodes)
+		framework.ExpectNoError(err)
 
 		scName = fmt.Sprintf("%v-%v", testSCPrefix, f.Namespace.Name)
 		// Choose the first node
@@ -169,7 +163,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 		config = &localTestConfig{
 			ns:           f.Namespace.Name,
 			client:       f.ClientSet,
-			nodes:        nodes.Items[:maxLen],
+			nodes:        nodes.Items,
 			node0:        node0,
 			scName:       scName,
 			discoveryDir: filepath.Join(hostBase, f.Namespace.Name),
diff --git a/test/e2e/storage/testsuites/BUILD b/test/e2e/storage/testsuites/BUILD
index d4699ccc952..d419f66faed 100644
--- a/test/e2e/storage/testsuites/BUILD
+++ b/test/e2e/storage/testsuites/BUILD
@@ -48,6 +48,7 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
+        "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/podlogs:go_default_library",
         "//test/e2e/framework/pv:go_default_library",
diff --git a/test/e2e/storage/testsuites/multivolume.go b/test/e2e/storage/testsuites/multivolume.go
index c1bf976b07b..6244f263f8b 100644
--- a/test/e2e/storage/testsuites/multivolume.go
+++ b/test/e2e/storage/testsuites/multivolume.go
@@ -26,6 +26,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
@@ -170,7 +171,8 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
 		if l.driver.GetDriverInfo().Capabilities[CapSingleNodeVolume] {
 			framework.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, CapSingleNodeVolume)
 		}
-		nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
+		nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
+		framework.ExpectNoError(err)
 		if len(nodes.Items) < 2 {
 			framework.Skipf("Number of available nodes is less than 2 - skipping")
 		}
@@ -255,7 +257,8 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
 		if l.driver.GetDriverInfo().Capabilities[CapSingleNodeVolume] {
 			framework.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, CapSingleNodeVolume)
 		}
-		nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
+		nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
+		framework.ExpectNoError(err)
 		if len(nodes.Items) < 2 {
 			framework.Skipf("Number of available nodes is less than 2 - skipping")
 		}
@@ -323,7 +326,8 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
 		}
 
 		// Check different-node test requirement
-		nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
+		nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
+		framework.ExpectNoError(err)
 		if len(nodes.Items) < numPods {
 			framework.Skipf(fmt.Sprintf("Number of available nodes is less than %d - skipping", numPods))
 		}
diff --git a/test/e2e/storage/testsuites/topology.go b/test/e2e/storage/testsuites/topology.go
index ea40ba45392..b37582681d1 100644
--- a/test/e2e/storage/testsuites/topology.go
+++ b/test/e2e/storage/testsuites/topology.go
@@ -30,6 +30,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
@@ -236,7 +237,10 @@ func (t *topologyTestSuite) defineTests(driver TestDriver, pattern testpatterns.
 
 // getCurrentTopologies() goes through all Nodes and returns up to maxCount unique driver topologies
 func (t *topologyTestSuite) getCurrentTopologies(cs clientset.Interface, keys []string, maxCount int) ([]topology, error) {
-	nodes := framework.GetReadySchedulableNodesOrDie(cs)
+	nodes, err := e2enode.GetReadySchedulableNodes(cs)
+	if err != nil {
+		return nil, err
+	}
 
 	topos := []topology{}
 
diff --git a/test/e2e/storage/testsuites/volumelimits.go b/test/e2e/storage/testsuites/volumelimits.go
index ac1604dadb8..fdad915055e 100644
--- a/test/e2e/storage/testsuites/volumelimits.go
+++ b/test/e2e/storage/testsuites/volumelimits.go
@@ -34,6 +34,7 @@ import (
 	migrationplugins "k8s.io/csi-translation-lib/plugins" // volume plugin names are exported nicely there
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
@@ -125,12 +126,9 @@ func (t *volumeLimitsTestSuite) defineTests(driver TestDriver, pattern testpatte
 
 		ginkgo.By("Picking a random node")
 		var nodeName string
-		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-		if len(nodeList.Items) != 0 {
-			nodeName = nodeList.Items[0].Name
-		} else {
-			framework.Failf("Unable to find ready and schedulable Node")
-		}
+		node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+		framework.ExpectNoError(err)
+		nodeName = node.Name
 		framework.Logf("Selected node %s", nodeName)
 
 		ginkgo.By("Checking node limits")
diff --git a/test/e2e/storage/volume_limits.go b/test/e2e/storage/volume_limits.go
index 97c82b25907..6cb602f1a88 100644
--- a/test/e2e/storage/volume_limits.go
+++ b/test/e2e/storage/volume_limits.go
@@ -22,6 +22,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
 
@@ -37,10 +38,8 @@ var _ = utils.SIGDescribe("Volume limits", func() {
 	})
 
 	ginkgo.It("should verify that all nodes have volume limits", func() {
-		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-		if len(nodeList.Items) == 0 {
-			framework.Failf("Unable to find ready and schedulable Node")
-		}
+		nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+		framework.ExpectNoError(err)
 		for _, node := range nodeList.Items {
 			volumeLimits := getVolumeLimit(&node)
 			if len(volumeLimits) == 0 {
diff --git a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go
index fe34f4b0d04..52d090fab03 100644
--- a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go
+++ b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go
@@ -67,11 +67,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
 		clientPod = nil
 		pvc = nil
 		pv = nil
-		nodes := framework.GetReadySchedulableNodesOrDie(c)
-		if len(nodes.Items) < 1 {
-			framework.Skipf("Requires at least %d node", 1)
-		}
-		nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
+		nodeInfo = GetReadySchedulableRandomNodeInfo()
 
 		volLabel = labels.Set{e2epv.VolumeSelectorKey: ns}
 		selector = metav1.SetAsLabelSelector(volLabel)
diff --git a/test/e2e/storage/vsphere/vsphere_scale.go b/test/e2e/storage/vsphere/vsphere_scale.go
index 2a96e0d6813..b21c637f546 100644
--- a/test/e2e/storage/vsphere/vsphere_scale.go
+++ b/test/e2e/storage/vsphere/vsphere_scale.go
@@ -27,6 +27,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -87,7 +88,9 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
 		policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
 		datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
 
-		nodes = framework.GetReadySchedulableNodesOrDie(client)
+		var err error
+		nodes, err = e2enode.GetReadySchedulableNodes(client)
+		framework.ExpectNoError(err)
 		if len(nodes.Items) < 2 {
 			framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
 		}
diff --git a/test/e2e/storage/vsphere/vsphere_stress.go b/test/e2e/storage/vsphere/vsphere_stress.go
index 9df2189451e..90c618914fd 100644
--- a/test/e2e/storage/vsphere/vsphere_stress.go
+++ b/test/e2e/storage/vsphere/vsphere_stress.go
@@ -27,6 +27,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -51,7 +52,6 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
 		iterations    int
 		policyName    string
 		datastoreName string
-		err           error
 		scNames       = []string{storageclass1, storageclass2, storageclass3, storageclass4}
 	)
 
@@ -61,8 +61,8 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
 		client = f.ClientSet
 		namespace = f.Namespace.Name
 
-		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-		gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
+		nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+		framework.ExpectNoError(err)
 
 		// if VCP_STRESS_INSTANCES = 12 and VCP_STRESS_ITERATIONS is 10. 12 threads will run in parallel for 10 times.
 		// Resulting 120 Volumes and POD Creation. Volumes will be provisioned with each different types of Storage Class,
@@ -72,7 +72,6 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
 		gomega.Expect(instances > len(scNames)).To(gomega.BeTrue(), "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes")
 
 		iterations = GetAndExpectIntEnvVar(VCPStressIterations)
-		framework.ExpectNoError(err, "Error Parsing VCP_STRESS_ITERATIONS")
 		gomega.Expect(iterations > 0).To(gomega.BeTrue(), "VCP_STRESS_ITERATIONS should be greater than 0")
 
 		policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
diff --git a/test/e2e/storage/vsphere/vsphere_utils.go b/test/e2e/storage/vsphere/vsphere_utils.go
index 2925b8fa74a..98bb8395842 100644
--- a/test/e2e/storage/vsphere/vsphere_utils.go
+++ b/test/e2e/storage/vsphere/vsphere_utils.go
@@ -41,6 +41,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -621,8 +622,8 @@ func getVMXFilePath(vmObject *object.VirtualMachine) (vmxPath string) {
 func verifyReadyNodeCount(client clientset.Interface, expectedNodes int) bool {
 	numNodes := 0
 	for i := 0; i < 36; i++ {
-		nodeList := framework.GetReadySchedulableNodesOrDie(client)
-		gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
+		nodeList, err := e2enode.GetReadySchedulableNodes(client)
+		framework.ExpectNoError(err)
 
 		numNodes = len(nodeList.Items)
 		if numNodes == expectedNodes {
@@ -755,8 +756,8 @@ func getUUIDFromProviderID(providerID string) string {
 
 // GetAllReadySchedulableNodeInfos returns NodeInfo objects for all nodes with Ready and schedulable state
 func GetReadySchedulableNodeInfos() []*NodeInfo {
-	nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-	gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
+	nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+	framework.ExpectNoError(err)
 	var nodesInfo []*NodeInfo
 	for _, node := range nodeList.Items {
 		nodeInfo := TestContext.NodeMapper.GetNodeInfo(node.Name)
diff --git a/test/e2e/storage/vsphere/vsphere_volume_datastore.go b/test/e2e/storage/vsphere/vsphere_volume_datastore.go
index 1d3209a1634..4b0ad53917c 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_datastore.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_datastore.go
@@ -26,6 +26,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -58,10 +59,8 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]",
 		client = f.ClientSet
 		namespace = f.Namespace.Name
 		scParameters = make(map[string]string)
-		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-		if !(len(nodeList.Items) > 0) {
-			framework.Failf("Unable to find ready and schedulable Node")
-		}
+		_, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+		framework.ExpectNoError(err)
 	})
 
 	ginkgo.It("verify dynamically provisioned pv using storageclass fails on an invalid datastore", func() {
diff --git a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go
index 39342980bd0..3f90cc35903 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go
@@ -27,6 +27,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -63,7 +64,8 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
 		namespace = f.Namespace.Name
 		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
 
-		nodes := framework.GetReadySchedulableNodesOrDie(client)
+		nodes, err := e2enode.GetReadySchedulableNodes(client)
+		framework.ExpectNoError(err)
 		numNodes = len(nodes.Items)
 		if numNodes < 2 {
 			framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go
index 6c4e774584a..cb3de444062 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go
@@ -25,6 +25,7 @@ import (
 
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
 
@@ -49,7 +50,8 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
 
 	ginkgo.It("node unregister", func() {
 		ginkgo.By("Get total Ready nodes")
-		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+		nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+		framework.ExpectNoError(err)
 		gomega.Expect(len(nodeList.Items) > 1).To(gomega.BeTrue(), "At least 2 nodes are required for this test")
 
 		totalNodesCount := len(nodeList.Items)
@@ -79,8 +81,8 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
 
 		ginkgo.By("Verifying the ready node counts")
 		gomega.Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount-1)).To(gomega.BeTrue(), "Unable to verify expected ready node count")
-		nodeList = framework.GetReadySchedulableNodesOrDie(client)
-		gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
+		nodeList, err = e2enode.GetReadySchedulableNodes(client)
+		framework.ExpectNoError(err)
 
 		var nodeNameList []string
 		for _, node := range nodeList.Items {
@@ -96,8 +98,8 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
 
 		ginkgo.By("Verifying the ready node counts")
 		gomega.Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount)).To(gomega.BeTrue(), "Unable to verify expected ready node count")
-		nodeList = framework.GetReadySchedulableNodesOrDie(client)
-		gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
+		nodeList, err = e2enode.GetReadySchedulableNodes(client)
+		framework.ExpectNoError(err)
 
 		nodeNameList = nodeNameList[:0]
 		for _, node := range nodeList.Items {
diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
index 2531c1250cc..93dc4e58dfc 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
@@ -33,6 +33,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -55,8 +56,8 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
 		client = f.ClientSet
 		namespace = f.Namespace.Name
 		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
-		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-		gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
+		nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+		framework.ExpectNoError(err)
 		gomega.Expect(len(nodeList.Items) > 1).To(gomega.BeTrue(), "At least 2 nodes are required for this test")
 	})
 
diff --git a/test/e2e/storage/vsphere/vsphere_volume_perf.go b/test/e2e/storage/vsphere/vsphere_volume_perf.go
index 5b24f3b811a..1903f8eab15 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_perf.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_perf.go
@@ -26,6 +26,7 @@ import (
 	storagev1 "k8s.io/api/storage/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -76,7 +77,8 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() {
 		policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
 		datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
 
-		nodes := framework.GetReadySchedulableNodesOrDie(client)
+		nodes, err := e2enode.GetReadySchedulableNodes(client)
+		framework.ExpectNoError(err)
 		gomega.Expect(len(nodes.Items)).To(gomega.BeNumerically(">=", 1), "Requires at least %d nodes (not %d)", 2, len(nodes.Items))
 
 		msg := fmt.Sprintf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), SCSIUnitsAvailablePerNode*len(nodes.Items))
diff --git a/test/e2e/storage/vsphere/vsphere_volume_placement.go b/test/e2e/storage/vsphere/vsphere_volume_placement.go
index 832fc418a3d..87e1f0d2230 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_placement.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_placement.go
@@ -27,6 +27,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -334,7 +335,8 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
 })
 
 func testSetupVolumePlacement(client clientset.Interface, namespace string) (node1Name string, node1KeyValueLabel map[string]string, node2Name string, node2KeyValueLabel map[string]string) {
-	nodes := framework.GetReadySchedulableNodesOrDie(client)
+	nodes, err := e2enode.GetBoundedReadySchedulableNodes(client, 2)
+	framework.ExpectNoError(err)
 	if len(nodes.Items) < 2 {
 		framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
 	}
diff --git a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go
index d6ef1c1a801..c91a5b404ad 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go
@@ -29,6 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -77,9 +78,9 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
 		namespace = f.Namespace.Name
 		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
 
-		nodes := framework.GetReadySchedulableNodesOrDie(client)
+		nodes, err := e2enode.GetReadySchedulableNodes(client)
+		framework.ExpectNoError(err)
 		numNodes := len(nodes.Items)
-		gomega.Expect(numNodes).NotTo(gomega.BeZero(), "No nodes are available for testing volume access through vpxd restart")
 
 		vcNodesMap = make(map[string][]node)
 		for i := 0; i < numNodes; i++ {
diff --git a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go
index 65f3acd1801..fd65df42fb8 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go
@@ -108,16 +108,10 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 		tagPolicy = GetAndExpectStringEnvVar(SPBMTagPolicy)
 		framework.Logf("framework: %+v", f)
 		scParameters = make(map[string]string)
-		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-		if !(len(nodeList.Items) > 0) {
-			framework.Failf("Unable to find ready and schedulable Node")
-		}
+		_, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+		framework.ExpectNoError(err)
 		masternodes, _, err := e2enode.GetMasterAndWorkerNodes(client)
-		if err != nil {
-			framework.Logf("Unexpected error occurred: %v", err)
-		}
-		// TODO: write a wrapper for ExpectNoErrorWithOffset()
-		framework.ExpectNoErrorWithOffset(0, err)
+		framework.ExpectNoError(err)
 		gomega.Expect(masternodes).NotTo(gomega.BeEmpty())
 		masterNode = masternodes.List()[0]
 	})
diff --git a/test/e2e/storage/vsphere/vsphere_zone_support.go b/test/e2e/storage/vsphere/vsphere_zone_support.go
index 25c48b7cace..8f06e119f9f 100644
--- a/test/e2e/storage/vsphere/vsphere_zone_support.go
+++ b/test/e2e/storage/vsphere/vsphere_zone_support.go
@@ -30,6 +30,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	volumeevents "k8s.io/kubernetes/pkg/controller/volume/events"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -112,10 +113,8 @@ var _ = utils.SIGDescribe("Zone Support", func() {
 		zoneD = GetAndExpectStringEnvVar(VCPZoneD)
 		scParameters = make(map[string]string)
 		zones = make([]string, 0)
-		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-		if !(len(nodeList.Items) > 0) {
-			framework.Failf("Unable to find ready and schedulable Node")
-		}
+		_, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+		framework.ExpectNoError(err)
 	})
 
 	ginkgo.It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func() {