Merge pull request #83480 from danwinship/getnodes-storage

Update test/e2e/storage for new GetReadySchedulableNodes stuff
Kubernetes Prow Robot 2019-10-09 11:06:04 -07:00 committed by GitHub
commit 0a98ccbcaf
30 changed files with 128 additions and 127 deletions
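
The change is mechanical across the storage e2e tests: the panicking helper framework.GetReadySchedulableNodesOrDie is replaced by error-returning helpers from the e2enode package (GetReadySchedulableNodes, GetRandomReadySchedulableNode, GetBoundedReadySchedulableNodes), and callers assert the returned error. A minimal before/after sketch of the pattern, assembled from the hunks below rather than quoted from any single file:

// Before: the lookup panicked on failure, and each caller hand-rolled
// emptiness checks and random node selection.
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
node := nodes.Items[rand.Intn(len(nodes.Items))]

// After: the lookup returns (*v1.Node, error); test bodies assert the
// error, and since the node is already a pointer, call sites drop the &.
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
framework.ExpectNoError(err)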

View File

@@ -18,7 +18,6 @@ package storage
import (
"fmt"
- "math/rand"
"path"
"time"
@@ -28,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -48,7 +48,7 @@ var _ = utils.SIGDescribe("Detaching volumes", func() {
var cs clientset.Interface
var ns *v1.Namespace
- var node v1.Node
+ var node *v1.Node
var suffix string
ginkgo.BeforeEach(func() {
@@ -59,8 +59,9 @@ var _ = utils.SIGDescribe("Detaching volumes", func() {
cs = f.ClientSet
ns = f.Namespace
- nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- node = nodes.Items[rand.Intn(len(nodes.Items))]
+ var err error
+ node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ framework.ExpectNoError(err)
suffix = ns.Name
})
@@ -71,7 +72,7 @@ var _ = utils.SIGDescribe("Detaching volumes", func() {
driverInstallAs := driver + "-" + suffix
ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
- installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver))
+ installFlex(cs, node, "k8s", driverInstallAs, path.Join(driverDir, driver))
ginkgo.By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs))
installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver))
volumeSource := v1.VolumeSource{
@@ -118,7 +119,7 @@ var _ = utils.SIGDescribe("Detaching volumes", func() {
framework.ExpectNoError(err, "while waiting for volume to be removed from in-use")
ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
- uninstallFlex(cs, &node, "k8s", driverInstallAs)
+ uninstallFlex(cs, node, "k8s", driverInstallAs)
ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs))
uninstallFlex(cs, nil, "k8s", driverInstallAs)
})

View File

@@ -25,6 +25,7 @@ go_library(
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/auth:go_default_library",
+ "//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/framework/volume:go_default_library",

View File

@@ -37,7 +37,6 @@ package drivers
import (
"fmt"
- "math/rand"
"strconv"
"time"
@@ -52,6 +51,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -169,13 +169,13 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.Per
cs := f.ClientSet
// The hostpath CSI driver only works when everything runs on the same node.
- nodes := framework.GetReadySchedulableNodesOrDie(cs)
- nodeName := nodes.Items[rand.Intn(len(nodes.Items))].Name
+ node, err := e2enode.GetRandomReadySchedulableNode(cs)
+ framework.ExpectNoError(err)
config := &testsuites.PerTestConfig{
Driver: h,
Prefix: "hostpath",
Framework: f,
- ClientNodeName: nodeName,
+ ClientNodeName: node.Name,
}
o := utils.PatchCSIOptions{
@@ -185,7 +185,7 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.Per
DriverContainerArguments: []string{"--drivername=" + config.GetUniqueDriverName()},
ProvisionerContainerName: "csi-provisioner",
SnapshotterContainerName: "csi-snapshotter",
- NodeName: nodeName,
+ NodeName: node.Name,
}
cleanup, err := config.Framework.CreateFromManifests(func(item interface{}) error {
return utils.PatchCSIDeployment(config.Framework, o, item)
@@ -293,8 +293,8 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
cs := f.ClientSet
// pods should be scheduled on the node
- nodes := framework.GetReadySchedulableNodesOrDie(cs)
- node := nodes.Items[rand.Intn(len(nodes.Items))]
+ node, err := e2enode.GetRandomReadySchedulableNode(cs)
+ framework.ExpectNoError(err)
config := &testsuites.PerTestConfig{
Driver: m,
Prefix: "mock",
@@ -477,7 +477,10 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
}
func waitForCSIDriverRegistrationOnAllNodes(driverName string, cs clientset.Interface) error {
- nodes := framework.GetReadySchedulableNodesOrDie(cs)
+ nodes, err := e2enode.GetReadySchedulableNodes(cs)
+ if err != nil {
+ return err
+ }
for _, node := range nodes.Items {
if err := waitForCSIDriverRegistrationOnNode(node.Name, driverName, cs); err != nil {
return err
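
Note the two error-handling conventions above: Ginkgo test bodies assert the lookup error immediately, while plain helpers such as waitForCSIDriverRegistrationOnAllNodes, which already return an error, propagate it to the caller. A condensed sketch of the distinction:

// Inside a Ginkgo test body: fail the test on error.
nodes, err := e2enode.GetReadySchedulableNodes(cs)
framework.ExpectNoError(err)

// Inside a plain helper that returns an error: propagate instead.
nodes, err := e2enode.GetReadySchedulableNodes(cs)
if err != nil {
	return err
}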

View File

@@ -37,7 +37,6 @@ package drivers
import (
"fmt"
- "math/rand"
"os/exec"
"strconv"
"strings"
@@ -55,6 +54,7 @@ import (
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/framework/volume"
@@ -750,8 +750,8 @@ func (h *hostPathDriver) CreateVolume(config *testsuites.PerTestConfig, volType
cs := f.ClientSet
// pods should be scheduled on the node
- nodes := framework.GetReadySchedulableNodesOrDie(cs)
- node := nodes.Items[rand.Intn(len(nodes.Items))]
+ node, err := e2enode.GetRandomReadySchedulableNode(cs)
+ framework.ExpectNoError(err)
config.ClientNodeName = node.Name
return nil
}
@@ -832,8 +832,8 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, v
volumeName := "test-volume"
// pods should be scheduled on the node
- nodes := framework.GetReadySchedulableNodesOrDie(cs)
- node := nodes.Items[rand.Intn(len(nodes.Items))]
+ node, err := e2enode.GetRandomReadySchedulableNode(cs)
+ framework.ExpectNoError(err)
config.ClientNodeName = node.Name
cmd := fmt.Sprintf("mkdir %v -m 777 && ln -s %v %v", sourcePath, sourcePath, targetPath)
@@ -1766,9 +1766,9 @@ func (l *localDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
}
func (l *localDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
// choose a random node to test against
- nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- l.node = &nodes.Items[rand.Intn(len(nodes.Items))]
+ var err error
+ l.node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ framework.ExpectNoError(err)
l.hostExec = utils.NewHostExec(f)
l.ltrMgr = utils.NewLocalResourceManager("local-driver", l.hostExec, "/tmp")

View File

@@ -25,12 +25,12 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
- "github.com/onsi/gomega"
)
const (
@@ -346,9 +346,8 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
const nodeHostnameLabelKey = "kubernetes.io/hostname"
rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
- targetNode := nodeList.Items[0]
+ targetNode, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ framework.ExpectNoError(err)
ginkgo.By("Creating RC which spawns configmap-volume pods")
affinity := &v1.Affinity{
@@ -397,7 +396,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
},
},
}
- _, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rc)
+ _, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rc)
framework.ExpectNoError(err, "error creating replication controller")
defer func() {

View File

@@ -18,7 +18,6 @@ package storage
import (
"fmt"
- "math/rand"
"net"
"path"
@@ -161,7 +160,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
var cs clientset.Interface
var ns *v1.Namespace
- var node v1.Node
+ var node *v1.Node
var config volume.TestConfig
var suffix string
@@ -173,8 +172,9 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
cs = f.ClientSet
ns = f.Namespace
- nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- node = nodes.Items[rand.Intn(len(nodes.Items))]
+ var err error
+ node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ framework.ExpectNoError(err)
config = volume.TestConfig{
Namespace: ns.Name,
Prefix: "flex",
@@ -188,7 +188,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
driverInstallAs := driver + "-" + suffix
ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
- installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver))
+ installFlex(cs, node, "k8s", driverInstallAs, path.Join(driverDir, driver))
testFlexVolume(driverInstallAs, cs, config, f)
@@ -198,7 +198,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
}
ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
- uninstallFlex(cs, &node, "k8s", driverInstallAs)
+ uninstallFlex(cs, node, "k8s", driverInstallAs)
})
ginkgo.It("should be mountable when attachable", func() {
@@ -206,7 +206,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
driverInstallAs := driver + "-" + suffix
ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
- installFlex(cs, &node, "k8s", driverInstallAs, path.Join(driverDir, driver))
+ installFlex(cs, node, "k8s", driverInstallAs, path.Join(driverDir, driver))
ginkgo.By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs))
installFlex(cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver))
@@ -221,7 +221,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
time.Sleep(detachTimeout)
ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
- uninstallFlex(cs, &node, "k8s", driverInstallAs)
+ uninstallFlex(cs, node, "k8s", driverInstallAs)
ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs))
uninstallFlex(cs, nil, "k8s", driverInstallAs)
})

View File

@@ -31,6 +31,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -49,6 +50,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
err error
pvc *v1.PersistentVolumeClaim
resizableSc *storagev1.StorageClass
+ node *v1.Node
nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string
@@ -66,12 +68,9 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- if len(nodeList.Items) != 0 {
- nodeName = nodeList.Items[0].Name
- } else {
- framework.Failf("Unable to find ready and schedulable Node")
- }
+ node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ framework.ExpectNoError(err)
+ nodeName = node.Name
nodeKey = "mounted_flexvolume_expand"
@@ -125,10 +124,8 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
ginkgo.It("Should verify mounted flex volumes can be resized", func() {
driver := "dummy-attachable"
- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- node := nodeList.Items[0]
ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driver))
- installFlex(c, &node, "k8s", driver, path.Join(driverDir, driver))
+ installFlex(c, node, "k8s", driver, path.Join(driverDir, driver))
ginkgo.By(fmt.Sprintf("installing flexvolume %s on (master) node %s as %s", path.Join(driverDir, driver), node.Name, driver))
installFlex(c, nil, "k8s", driver, path.Join(driverDir, driver))

View File

@@ -28,6 +28,7 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -59,11 +60,9 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
- nodeList = framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- if len(nodeList.Items) == 0 {
- framework.Failf("unable to find ready and schedulable Node")
- }
- nodeName = nodeList.Items[0].Name
+ node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ framework.ExpectNoError(err)
+ nodeName = node.Name
nodeKey = "mounted_flexvolume_expand"

View File

@@ -32,6 +32,7 @@ import (
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -42,7 +43,6 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
var (
c clientset.Interface
ns string
- err error
pvc *v1.PersistentVolumeClaim
resizableSc *storagev1.StorageClass
nodeName string
@@ -59,12 +59,9 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- if len(nodeList.Items) != 0 {
- nodeName = nodeList.Items[0].Name
- } else {
- framework.Failf("Unable to find ready and schedulable Node")
- }
+ node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ framework.ExpectNoError(err)
+ nodeName = node.Name
nodeKey = "mounted_volume_expand"

View File

@@ -85,10 +85,10 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
StorageClassName: &emptyStorageClass,
}
// Get the first ready node IP that is not hosting the NFS pod.
- var err error
if clientNodeIP == "" {
framework.Logf("Designating test node")
- nodes := framework.GetReadySchedulableNodesOrDie(c)
+ nodes, err := e2enode.GetReadySchedulableNodes(c)
+ framework.ExpectNoError(err)
for _, node := range nodes.Items {
if node.Name != nfsServerPod.Spec.NodeName {
clientNode = &node

View File

@@ -77,7 +77,9 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
podClient = cs.CoreV1().Pods(ns)
nodeClient = cs.CoreV1().Nodes()
- nodes = framework.GetReadySchedulableNodesOrDie(cs)
+ var err error
+ nodes, err = e2enode.GetReadySchedulableNodes(cs)
+ framework.ExpectNoError(err)
gomega.Expect(len(nodes.Items)).To(gomega.BeNumerically(">=", minNodes), fmt.Sprintf("Requires at least %d nodes", minNodes))
host0Name = types.NodeName(nodes.Items[0].ObjectMeta.Name)
host1Name = types.NodeName(nodes.Items[1].ObjectMeta.Name)
@@ -443,7 +445,8 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
func countReadyNodes(c clientset.Interface, hostName types.NodeName) int {
e2enode.WaitForNodeToBeReady(c, string(hostName), nodeStatusTimeout)
framework.WaitForAllNodesSchedulable(c, nodeStatusTimeout)
- nodes := framework.GetReadySchedulableNodesOrDie(c)
+ nodes, err := e2enode.GetReadySchedulableNodes(c)
+ framework.ExpectNoError(err)
return len(nodes.Items)
}

View File

@@ -39,6 +39,7 @@ import (
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
@@ -150,15 +151,8 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
)
ginkgo.BeforeEach(func() {
// Get all the schedulable nodes
- nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero(), "No available nodes for scheduling")
- // Cap max number of nodes
- maxLen := len(nodes.Items)
- if maxLen > maxNodes {
- maxLen = maxNodes
- }
+ nodes, err := e2enode.GetBoundedReadySchedulableNodes(f.ClientSet, maxNodes)
+ framework.ExpectNoError(err)
scName = fmt.Sprintf("%v-%v", testSCPrefix, f.Namespace.Name)
// Choose the first node
@@ -169,7 +163,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
config = &localTestConfig{
ns: f.Namespace.Name,
client: f.ClientSet,
- nodes: nodes.Items[:maxLen],
+ nodes: nodes.Items,
node0: node0,
scName: scName,
discoveryDir: filepath.Join(hostBase, f.Namespace.Name),
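
The hunk above folds the hand-written node cap into e2enode.GetBoundedReadySchedulableNodes. Judging only from the removed lines, a sketch of the behavior the bounded helper is expected to provide (hypothetical; the real implementation may differ, for example in which nodes it keeps):

// boundedNodes mirrors the removed maxLen logic: fetch all ready,
// schedulable nodes, then truncate the list to at most maxNodes entries.
func boundedNodes(c clientset.Interface, maxNodes int) (*v1.NodeList, error) {
	nodes, err := e2enode.GetReadySchedulableNodes(c)
	if err != nil {
		return nil, err
	}
	if len(nodes.Items) > maxNodes {
		nodes.Items = nodes.Items[:maxNodes]
	}
	return nodes, nil
}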

View File

@@ -48,6 +48,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
+ "//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/podlogs:go_default_library",
"//test/e2e/framework/pv:go_default_library",

View File

@@ -26,6 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/framework/volume"
@@ -170,7 +171,8 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
if l.driver.GetDriverInfo().Capabilities[CapSingleNodeVolume] {
framework.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, CapSingleNodeVolume)
}
- nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
+ nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
+ framework.ExpectNoError(err)
if len(nodes.Items) < 2 {
framework.Skipf("Number of available nodes is less than 2 - skipping")
}
@@ -255,7 +257,8 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
if l.driver.GetDriverInfo().Capabilities[CapSingleNodeVolume] {
framework.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, CapSingleNodeVolume)
}
- nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
+ nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
+ framework.ExpectNoError(err)
if len(nodes.Items) < 2 {
framework.Skipf("Number of available nodes is less than 2 - skipping")
}
@@ -323,7 +326,8 @@ func (t *multiVolumeTestSuite) defineTests(driver TestDriver, pattern testpatter
}
// Check different-node test requirement
- nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
+ nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
+ framework.ExpectNoError(err)
if len(nodes.Items) < numPods {
framework.Skipf(fmt.Sprintf("Number of available nodes is less than %d - skipping", numPods))
}

View File

@@ -30,6 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
@@ -236,7 +237,10 @@ func (t *topologyTestSuite) defineTests(driver TestDriver, pattern testpatterns.
// getCurrentTopologies() goes through all Nodes and returns up to maxCount unique driver topologies
func (t *topologyTestSuite) getCurrentTopologies(cs clientset.Interface, keys []string, maxCount int) ([]topology, error) {
- nodes := framework.GetReadySchedulableNodesOrDie(cs)
+ nodes, err := e2enode.GetReadySchedulableNodes(cs)
+ if err != nil {
+ return nil, err
+ }
topos := []topology{}

View File

@@ -34,6 +34,7 @@ import (
migrationplugins "k8s.io/csi-translation-lib/plugins" // volume plugin names are exported nicely there
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
@@ -125,12 +126,9 @@ func (t *volumeLimitsTestSuite) defineTests(driver TestDriver, pattern testpatte
ginkgo.By("Picking a random node")
var nodeName string
- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- if len(nodeList.Items) != 0 {
- nodeName = nodeList.Items[0].Name
- } else {
- framework.Failf("Unable to find ready and schedulable Node")
- }
+ node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ framework.ExpectNoError(err)
+ nodeName = node.Name
framework.Logf("Selected node %s", nodeName)
ginkgo.By("Checking node limits")

View File

@@ -22,6 +22,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -37,10 +38,8 @@ var _ = utils.SIGDescribe("Volume limits", func() {
})
ginkgo.It("should verify that all nodes have volume limits", func() {
- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- if len(nodeList.Items) == 0 {
- framework.Failf("Unable to find ready and schedulable Node")
- }
+ nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+ framework.ExpectNoError(err)
for _, node := range nodeList.Items {
volumeLimits := getVolumeLimit(&node)
if len(volumeLimits) == 0 {

View File

@@ -67,11 +67,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
clientPod = nil
pvc = nil
pv = nil
- nodes := framework.GetReadySchedulableNodesOrDie(c)
- if len(nodes.Items) < 1 {
- framework.Skipf("Requires at least %d node", 1)
- }
- nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
+ nodeInfo = GetReadySchedulableRandomNodeInfo()
volLabel = labels.Set{e2epv.VolumeSelectorKey: ns}
selector = metav1.SetAsLabelSelector(volLabel)
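
Here five lines collapse into a single call to GetReadySchedulableRandomNodeInfo(). Its body is not shown in this diff; assuming it composes the new random-node helper with the NodeMapper lookup from the removed line, it would look roughly like the following (hypothetical sketch, not the actual vsphere utility):

// Hypothetical: pick a random ready, schedulable node and return its
// vsphere NodeInfo via the test context's NodeMapper.
func GetReadySchedulableRandomNodeInfo() *NodeInfo {
	node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
	framework.ExpectNoError(err)
	return TestContext.NodeMapper.GetNodeInfo(node.Name)
}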

View File

@@ -27,6 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -87,7 +88,9 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
- nodes = framework.GetReadySchedulableNodesOrDie(client)
+ var err error
+ nodes, err = e2enode.GetReadySchedulableNodes(client)
+ framework.ExpectNoError(err)
if len(nodes.Items) < 2 {
framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
}

View File

@@ -27,6 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -51,7 +52,6 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
iterations int
policyName string
datastoreName string
- err error
scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4}
)
@@ -61,8 +61,8 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
client = f.ClientSet
namespace = f.Namespace.Name
- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
+ nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+ framework.ExpectNoError(err)
// if VCP_STRESS_INSTANCES = 12 and VCP_STRESS_ITERATIONS is 10. 12 threads will run in parallel for 10 times.
// Resulting 120 Volumes and POD Creation. Volumes will be provisioned with each different types of Storage Class,
@@ -72,7 +72,6 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
gomega.Expect(instances > len(scNames)).To(gomega.BeTrue(), "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes")
iterations = GetAndExpectIntEnvVar(VCPStressIterations)
- framework.ExpectNoError(err, "Error Parsing VCP_STRESS_ITERATIONS")
gomega.Expect(iterations > 0).To(gomega.BeTrue(), "VCP_STRESS_ITERATIONS should be greater than 0")
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)

View File

@@ -41,6 +41,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -621,8 +622,8 @@ func getVMXFilePath(vmObject *object.VirtualMachine) (vmxPath string) {
func verifyReadyNodeCount(client clientset.Interface, expectedNodes int) bool {
numNodes := 0
for i := 0; i < 36; i++ {
- nodeList := framework.GetReadySchedulableNodesOrDie(client)
- gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
+ nodeList, err := e2enode.GetReadySchedulableNodes(client)
+ framework.ExpectNoError(err)
numNodes = len(nodeList.Items)
if numNodes == expectedNodes {
@@ -755,8 +756,8 @@ func getUUIDFromProviderID(providerID string) string {
// GetAllReadySchedulableNodeInfos returns NodeInfo objects for all nodes with Ready and schedulable state
func GetReadySchedulableNodeInfos() []*NodeInfo {
- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
+ nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+ framework.ExpectNoError(err)
var nodesInfo []*NodeInfo
for _, node := range nodeList.Items {
nodeInfo := TestContext.NodeMapper.GetNodeInfo(node.Name)

View File

@@ -26,6 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -58,10 +59,8 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]",
client = f.ClientSet
namespace = f.Namespace.Name
scParameters = make(map[string]string)
- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- if !(len(nodeList.Items) > 0) {
- framework.Failf("Unable to find ready and schedulable Node")
- }
+ _, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ framework.ExpectNoError(err)
})
ginkgo.It("verify dynamically provisioned pv using storageclass fails on an invalid datastore", func() {

View File

@@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -63,7 +64,8 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
namespace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
- nodes := framework.GetReadySchedulableNodesOrDie(client)
+ nodes, err := e2enode.GetReadySchedulableNodes(client)
+ framework.ExpectNoError(err)
numNodes = len(nodes.Items)
if numNodes < 2 {
framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))

View File

@@ -25,6 +25,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -49,7 +50,8 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
ginkgo.It("node unregister", func() {
ginkgo.By("Get total Ready nodes")
- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+ nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+ framework.ExpectNoError(err)
gomega.Expect(len(nodeList.Items) > 1).To(gomega.BeTrue(), "At least 2 nodes are required for this test")
totalNodesCount := len(nodeList.Items)
@@ -79,8 +81,8 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
ginkgo.By("Verifying the ready node counts")
gomega.Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount-1)).To(gomega.BeTrue(), "Unable to verify expected ready node count")
- nodeList = framework.GetReadySchedulableNodesOrDie(client)
- gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
+ nodeList, err = e2enode.GetReadySchedulableNodes(client)
+ framework.ExpectNoError(err)
var nodeNameList []string
for _, node := range nodeList.Items {
@@ -96,8 +98,8 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
ginkgo.By("Verifying the ready node counts")
gomega.Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount)).To(gomega.BeTrue(), "Unable to verify expected ready node count")
- nodeList = framework.GetReadySchedulableNodesOrDie(client)
- gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
+ nodeList, err = e2enode.GetReadySchedulableNodes(client)
+ framework.ExpectNoError(err)
nodeNameList = nodeNameList[:0]
for _, node := range nodeList.Items {

View File

@@ -33,6 +33,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -55,8 +56,8 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
client = f.ClientSet
namespace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node")
+ nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+ framework.ExpectNoError(err)
gomega.Expect(len(nodeList.Items) > 1).To(gomega.BeTrue(), "At least 2 nodes are required for this test")
})

View File

@@ -26,6 +26,7 @@ import (
storagev1 "k8s.io/api/storage/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -76,7 +77,8 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() {
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
- nodes := framework.GetReadySchedulableNodesOrDie(client)
+ nodes, err := e2enode.GetReadySchedulableNodes(client)
+ framework.ExpectNoError(err)
gomega.Expect(len(nodes.Items)).To(gomega.BeNumerically(">=", 1), "Requires at least %d nodes (not %d)", 2, len(nodes.Items))
msg := fmt.Sprintf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), SCSIUnitsAvailablePerNode*len(nodes.Items))

View File

@@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -334,7 +335,8 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
})
func testSetupVolumePlacement(client clientset.Interface, namespace string) (node1Name string, node1KeyValueLabel map[string]string, node2Name string, node2KeyValueLabel map[string]string) {
- nodes := framework.GetReadySchedulableNodesOrDie(client)
+ nodes, err := e2enode.GetBoundedReadySchedulableNodes(client, 2)
+ framework.ExpectNoError(err)
if len(nodes.Items) < 2 {
framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
}

View File

@@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@@ -77,9 +78,9 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
namespace = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
- nodes := framework.GetReadySchedulableNodesOrDie(client)
+ nodes, err := e2enode.GetReadySchedulableNodes(client)
+ framework.ExpectNoError(err)
numNodes := len(nodes.Items)
- gomega.Expect(numNodes).NotTo(gomega.BeZero(), "No nodes are available for testing volume access through vpxd restart")
vcNodesMap = make(map[string][]node)
for i := 0; i < numNodes; i++ {

View File

@@ -108,16 +108,10 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
tagPolicy = GetAndExpectStringEnvVar(SPBMTagPolicy)
framework.Logf("framework: %+v", f)
scParameters = make(map[string]string)
- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- if !(len(nodeList.Items) > 0) {
- framework.Failf("Unable to find ready and schedulable Node")
- }
+ _, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ framework.ExpectNoError(err)
masternodes, _, err := e2enode.GetMasterAndWorkerNodes(client)
- if err != nil {
- framework.Logf("Unexpected error occurred: %v", err)
- }
- // TODO: write a wrapper for ExpectNoErrorWithOffset()
- framework.ExpectNoErrorWithOffset(0, err)
+ framework.ExpectNoError(err)
gomega.Expect(masternodes).NotTo(gomega.BeEmpty())
masterNode = masternodes.List()[0]
})

View File

@@ -30,6 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
volumeevents "k8s.io/kubernetes/pkg/controller/volume/events"
"k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -112,10 +113,8 @@ var _ = utils.SIGDescribe("Zone Support", func() {
zoneD = GetAndExpectStringEnvVar(VCPZoneD)
scParameters = make(map[string]string)
zones = make([]string, 0)
- nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
- if !(len(nodeList.Items) > 0) {
- framework.Failf("Unable to find ready and schedulable Node")
- }
+ _, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+ framework.ExpectNoError(err)
})
ginkgo.It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func() {