From 326b2132311214b274b833584578fd7f81d6a659 Mon Sep 17 00:00:00 2001
From: Jay Vyas
Date: Thu, 5 May 2016 16:56:25 -0400
Subject: [PATCH] Consolidate node selecting tests to only use Schedulable + Running nodes.

---
 test/e2e/cluster_size_autoscaling.go | 2 +-
 test/e2e/cluster_upgrade.go | 2 +-
 test/e2e/daemon_set.go | 4 +--
 test/e2e/density.go | 2 +-
 test/e2e/es_cluster_logging.go | 2 +-
 test/e2e/example_k8petstore.go | 2 +-
 test/e2e/framework/framework.go | 2 +-
 test/e2e/framework/util.go | 37 +++++++++++++---------------
 test/e2e/kubelet.go | 2 +-
 test/e2e/kubelet_perf.go | 2 +-
 test/e2e/kubeproxy.go | 4 +--
 test/e2e/load.go | 2 +-
 test/e2e/mesos.go | 2 +-
 test/e2e/metrics_grabber_test.go | 2 +-
 test/e2e/networking.go | 9 +++----
 test/e2e/networking_perf.go | 5 ++--
 test/e2e/nodeoutofdisk.go | 6 ++---
 test/e2e/pd.go | 2 +-
 test/e2e/proxy.go | 2 +-
 test/e2e/reboot.go | 2 +-
 test/e2e/service.go | 2 +-
 21 files changed, 44 insertions(+), 51 deletions(-)

diff --git a/test/e2e/cluster_size_autoscaling.go b/test/e2e/cluster_size_autoscaling.go
index 052e0a72cd9..3ac34711e9a 100644
--- a/test/e2e/cluster_size_autoscaling.go
+++ b/test/e2e/cluster_size_autoscaling.go
@@ -47,7 +47,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Feature:ClusterSizeAut
     BeforeEach(func() {
         framework.SkipUnlessProviderIs("gce")
-        nodes := framework.ListSchedulableNodesOrDie(f.Client)
+        nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
         nodeCount = len(nodes.Items)
         Expect(nodeCount).NotTo(BeZero())
         cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]
diff --git a/test/e2e/cluster_upgrade.go b/test/e2e/cluster_upgrade.go
index e88e5038d2a..deb3cd9bbb3 100644
--- a/test/e2e/cluster_upgrade.go
+++ b/test/e2e/cluster_upgrade.go
@@ -372,7 +372,7 @@ func checkMasterVersion(c *client.Client, want string) error {
 }

 func checkNodesVersions(c *client.Client, want string) error {
-    l := framework.ListSchedulableNodesOrDie(c)
+    l := framework.GetReadySchedulableNodesOrDie(c)
     for _, n := range l.Items {
         // We do prefix trimming and then matching because:
         // want looks like: 0.19.3-815-g50e67d4
diff --git a/test/e2e/daemon_set.go b/test/e2e/daemon_set.go
index 26480c3afa8..2ea6072cd68 100644
--- a/test/e2e/daemon_set.go
+++ b/test/e2e/daemon_set.go
@@ -178,7 +178,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
         Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")

         By("Change label of node, check that daemon pod is launched.")
-        nodeList := framework.ListSchedulableNodesOrDie(f.Client)
+        nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
         Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
         newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
         Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
@@ -213,7 +213,7 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m
 }

 func clearDaemonSetNodeLabels(c *client.Client) error {
-    nodeList := framework.ListSchedulableNodesOrDie(c)
+    nodeList := framework.GetReadySchedulableNodesOrDie(c)
     for _, node := range nodeList.Items {
         _, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
         if err != nil {
diff --git a/test/e2e/density.go b/test/e2e/density.go
index 291d0e42f80..06915685e9b 100644
--- a/test/e2e/density.go
+++ b/test/e2e/density.go
@@ -158,7 +158,7 @@ var _ = framework.KubeDescribe("Density", func() {
         c = f.Client
         ns = f.Namespace.Name
-        nodes := framework.ListSchedulableNodesOrDie(c)
+        nodes := framework.GetReadySchedulableNodesOrDie(c)
         nodeCount = len(nodes.Items)
         Expect(nodeCount).NotTo(BeZero())
diff --git a/test/e2e/es_cluster_logging.go b/test/e2e/es_cluster_logging.go
index 632a3241992..b12b24c1a82 100644
--- a/test/e2e/es_cluster_logging.go
+++ b/test/e2e/es_cluster_logging.go
@@ -201,7 +201,7 @@ func ClusterLevelLoggingWithElasticsearch(f *framework.Framework) {
     }

     // Obtain a list of nodes so we can place one synthetic logger on each node.
-    nodes := framework.ListSchedulableNodesOrDie(f.Client)
+    nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
     nodeCount := len(nodes.Items)
     if nodeCount == 0 {
         framework.Failf("Failed to find any nodes")
diff --git a/test/e2e/example_k8petstore.go b/test/e2e/example_k8petstore.go
index 71c28389dc8..a3558ac4ac3 100644
--- a/test/e2e/example_k8petstore.go
+++ b/test/e2e/example_k8petstore.go
@@ -165,7 +165,7 @@ var _ = framework.KubeDescribe("Pet Store [Feature:Example]", func() {
     f := framework.NewDefaultFramework("petstore")

     It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout), func() {
-        nodes := framework.ListSchedulableNodesOrDie(f.Client)
+        nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
         nodeCount = len(nodes.Items)

         loadGenerators := nodeCount
diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go
index 9730561375e..aa99effc6c4 100644
--- a/test/e2e/framework/framework.go
+++ b/test/e2e/framework/framework.go
@@ -436,7 +436,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
 // CreatePodsPerNodeForSimpleApp Creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
 func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n api.Node) api.PodSpec, maxCount int) map[string]string {
-    nodes := ListSchedulableNodesOrDie(f.Client)
+    nodes := GetReadySchedulableNodesOrDie(f.Client)
     labels := map[string]string{
         "app": appName + "-pod",
     }
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index 6d7749806a8..8b7daebcd3a 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -18,7 +18,6 @@ package framework
 import (
     "bytes"
-    "errors"
     "fmt"
     "io"
     "io/ioutil"
@@ -2241,8 +2240,8 @@ func getNodeEvents(c *client.Client, nodeName string) []api.Event {
     return events.Items
 }

-// Convenient wrapper around listing nodes supporting retries.
-func ListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
+// waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
+func waitListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
     var nodes *api.NodeList
     var err error
     if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
@@ -2256,6 +2255,20 @@
     return nodes
 }

+// GetReadySchedulableNodesOrDie addresses the common use case of getting nodes you can do work on.
+// 1) Needs to be schedulable.
+// 2) Needs to be ready.
+// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
+func GetReadySchedulableNodesOrDie(c *client.Client) (nodes *api.NodeList) {
+    nodes = waitListSchedulableNodesOrDie(c)
+    // previous tests may have cause failures of some nodes. Let's skip
+    // 'Not Ready' nodes, just in case (there is no need to fail the test).
+    FilterNodes(nodes, func(node api.Node) bool {
+        return !node.Spec.Unschedulable && IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
+    })
+    return nodes
+}
+
 func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error {
     By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
     scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), c)
@@ -2806,7 +2819,7 @@ func NodeAddresses(nodelist *api.NodeList, addrType api.NodeAddressType) []strin
 // It returns an error if it can't find an external IP for every node, though it still returns all
 // hosts that it found in that case.
 func NodeSSHHosts(c *client.Client) ([]string, error) {
-    nodelist := ListSchedulableNodesOrDie(c)
+    nodelist := waitListSchedulableNodesOrDie(c)
     // TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462).
     hosts := NodeAddresses(nodelist, api.NodeExternalIP)
@@ -3587,22 +3600,6 @@ func CheckPodHashLabel(pods *api.PodList) error {
     return nil
 }

-// GetReadyNodes retrieves a list of schedulable nodes whose condition
-// is Ready. An error will be returned if no such nodes are found.
-func GetReadyNodes(f *Framework) (nodes *api.NodeList, err error) {
-    nodes = ListSchedulableNodesOrDie(f.Client)
-    // previous tests may have cause failures of some nodes. Let's skip
-    // 'Not Ready' nodes, just in case (there is no need to fail the test).
-    FilterNodes(nodes, func(node api.Node) bool {
-        return !node.Spec.Unschedulable && IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
-    })
-
-    if len(nodes.Items) == 0 {
-        return nil, errors.New("No Ready nodes found.")
-    }
-    return nodes, nil
-}
-
 // timeout for proxy requests.
 const proxyTimeout = 2 * time.Minute
diff --git a/test/e2e/kubelet.go b/test/e2e/kubelet.go
index d9da606ba2b..7c1e93f28a5 100644
--- a/test/e2e/kubelet.go
+++ b/test/e2e/kubelet.go
@@ -94,7 +94,7 @@ var _ = framework.KubeDescribe("kubelet", func() {
     var resourceMonitor *framework.ResourceMonitor

     BeforeEach(func() {
-        nodes := framework.ListSchedulableNodesOrDie(f.Client)
+        nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
         numNodes = len(nodes.Items)
         nodeNames = sets.NewString()
         for _, node := range nodes.Items {
diff --git a/test/e2e/kubelet_perf.go b/test/e2e/kubelet_perf.go
index afc87df2ee8..f8de34a847c 100644
--- a/test/e2e/kubelet_perf.go
+++ b/test/e2e/kubelet_perf.go
@@ -192,7 +192,7 @@ var _ = framework.KubeDescribe("Kubelet [Serial] [Slow]", func() {
     var rm *framework.ResourceMonitor

     BeforeEach(func() {
-        nodes := framework.ListSchedulableNodesOrDie(f.Client)
+        nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
         nodeNames = sets.NewString()
         for _, node := range nodes.Items {
             nodeNames.Insert(node.Name)
diff --git a/test/e2e/kubeproxy.go b/test/e2e/kubeproxy.go
index 191104bc21e..6680efcff5a 100644
--- a/test/e2e/kubeproxy.go
+++ b/test/e2e/kubeproxy.go
@@ -463,7 +463,7 @@ func (config *KubeProxyTestConfig) setup() {
     }

     By("Getting node addresses")
-    nodeList := framework.ListSchedulableNodesOrDie(config.f.Client)
+    nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client)
     config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP)
     if len(config.externalAddrs) < 2 {
         // fall back to legacy IPs
@@ -501,7 +501,7 @@ func (config *KubeProxyTestConfig) cleanup() {
 }

 func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
-    nodes := framework.ListSchedulableNodesOrDie(config.f.Client)
+    nodes := framework.GetReadySchedulableNodesOrDie(config.f.Client)
     // create pods, one for each node
     createdPods := make([]*api.Pod, 0, len(nodes.Items))
diff --git a/test/e2e/load.go b/test/e2e/load.go
index 98d0e4d2e07..b86131ea859 100644
--- a/test/e2e/load.go
+++ b/test/e2e/load.go
@@ -78,7 +78,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
         c = f.Client
         ns = f.Namespace.Name
-        nodes := framework.ListSchedulableNodesOrDie(c)
+        nodes := framework.GetReadySchedulableNodesOrDie(c)
         nodeCount = len(nodes.Items)
         Expect(nodeCount).NotTo(BeZero())
diff --git a/test/e2e/mesos.go b/test/e2e/mesos.go
index 242f893f761..9a6f9631ab2 100644
--- a/test/e2e/mesos.go
+++ b/test/e2e/mesos.go
@@ -65,7 +65,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
         client := f.Client
         framework.ExpectNoError(framework.AllNodesReady(client, wait.ForeverTestTimeout), "all nodes ready")
-        nodelist := framework.ListSchedulableNodesOrDie(f.Client)
+        nodelist := framework.GetReadySchedulableNodesOrDie(f.Client)
         const ns = "static-pods"
         numpods := int32(len(nodelist.Items))
diff --git a/test/e2e/metrics_grabber_test.go b/test/e2e/metrics_grabber_test.go
index 181bd362683..3d1f6ee4d92 100644
--- a/test/e2e/metrics_grabber_test.go
+++ b/test/e2e/metrics_grabber_test.go
@@ -102,7 +102,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() {
     It("should grab all metrics from a Kubelet.", func() {
         By("Proxying to Node through the API server")
-        nodes := framework.ListSchedulableNodesOrDie(c)
+        nodes := framework.GetReadySchedulableNodesOrDie(c)
         Expect(nodes.Items).NotTo(BeEmpty())
         response, err := grabber.GrabFromKubelet(nodes.Items[0].Name)
         framework.ExpectNoError(err)
diff --git a/test/e2e/networking.go b/test/e2e/networking.go
index 8c5294c5376..cce92358728 100644
--- a/test/e2e/networking.go
+++ b/test/e2e/networking.go
@@ -111,8 +111,7 @@ var _ = framework.KubeDescribe("Networking", func() {
         By("Creating a webserver (pending) pod on each node")
-        nodes, err := framework.GetReadyNodes(f)
-        framework.ExpectNoError(err)
+        nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
         if len(nodes.Items) == 1 {
             // in general, the test requires two nodes. But for local development, often a one node cluster
@@ -220,8 +219,7 @@ var _ = framework.KubeDescribe("Networking", func() {
     It("should function for pod communication on a single node", func() {
         By("Picking a node")
-        nodes, err := framework.GetReadyNodes(f)
-        framework.ExpectNoError(err)
+        nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
         node := nodes.Items[0]
         By("Creating a webserver pod")
@@ -238,8 +236,7 @@ var _ = framework.KubeDescribe("Networking", func() {
         podClient := f.Client.Pods(f.Namespace.Name)
         By("Picking multiple nodes")
-        nodes, err := framework.GetReadyNodes(f)
-        framework.ExpectNoError(err)
+        nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
         if len(nodes.Items) == 1 {
             framework.Skipf("The test requires two Ready nodes on %s, but found just one.", framework.TestContext.Provider)
diff --git a/test/e2e/networking_perf.go b/test/e2e/networking_perf.go
index 63791bb6ac5..775bf3bdb53 100644
--- a/test/e2e/networking_perf.go
+++ b/test/e2e/networking_perf.go
@@ -51,7 +51,7 @@ func runClientServerBandwidthMeasurement(f *framework.Framework, numClient int,
     numServer := 1

     It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() {
-        nodes := framework.ListSchedulableNodesOrDie(f.Client)
+        nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
         totalPods := len(nodes.Items)
         // for a single service, we expect to divide bandwidth between the network. Very crude estimate.
         expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods))
@@ -112,8 +112,7 @@ func runClientServerBandwidthMeasurement(f *framework.Framework, numClient int,
         // Calculate expected number of clients based on total nodes.
         expectedCli := func() int {
-            nodes, err := framework.GetReadyNodes(f)
-            framework.ExpectNoError(err)
+            nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
             return int(math.Min(float64(len(nodes.Items)), float64(numClient)))
         }()
diff --git a/test/e2e/nodeoutofdisk.go b/test/e2e/nodeoutofdisk.go
index acbc480c583..0f04fc540d0 100644
--- a/test/e2e/nodeoutofdisk.go
+++ b/test/e2e/nodeoutofdisk.go
@@ -73,7 +73,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
     BeforeEach(func() {
         c = f.Client
-        nodelist := framework.ListSchedulableNodesOrDie(c)
+        nodelist := framework.GetReadySchedulableNodesOrDie(c)
         // Skip this test on small clusters. No need to fail since it is not a use
         // case that any cluster of small size needs to support.
@@ -87,7 +87,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
     AfterEach(func() {
-        nodelist := framework.ListSchedulableNodesOrDie(c)
+        nodelist := framework.GetReadySchedulableNodesOrDie(c)
         Expect(len(nodelist.Items)).ToNot(BeZero())
         for _, node := range nodelist.Items {
             if unfilledNodeName == node.Name || recoveredNodeName == node.Name {
@@ -150,7 +150,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
             }
         })
-        nodelist := framework.ListSchedulableNodesOrDie(c)
+        nodelist := framework.GetReadySchedulableNodesOrDie(c)
         Expect(len(nodelist.Items)).To(BeNumerically(">", 1))
         nodeToRecover := nodelist.Items[1]
diff --git a/test/e2e/pd.go b/test/e2e/pd.go
index 6e8e40bf05c..c739eb1dbc4 100644
--- a/test/e2e/pd.go
+++ b/test/e2e/pd.go
@@ -58,7 +58,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
         framework.SkipUnlessNodeCountIsAtLeast(2)
         podClient = f.Client.Pods(f.Namespace.Name)
-        nodes := framework.ListSchedulableNodesOrDie(f.Client)
+        nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
         Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")
diff --git a/test/e2e/proxy.go b/test/e2e/proxy.go
index 48267ca95a5..82738bf6011 100644
--- a/test/e2e/proxy.go
+++ b/test/e2e/proxy.go
@@ -277,7 +277,7 @@ func truncate(b []byte, maxLen int) []byte {
 func pickNode(c *client.Client) (string, error) {
     // TODO: investigate why it doesn't work on master Node.
-    nodes := framework.ListSchedulableNodesOrDie(c)
+    nodes := framework.GetReadySchedulableNodesOrDie(c)
     if len(nodes.Items) == 0 {
         return "", fmt.Errorf("no nodes exist, can't test node proxy")
     }
diff --git a/test/e2e/reboot.go b/test/e2e/reboot.go
index af794770c8b..d048b6ee59d 100644
--- a/test/e2e/reboot.go
+++ b/test/e2e/reboot.go
@@ -128,7 +128,7 @@ var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 func testReboot(c *client.Client, rebootCmd string) {
     // Get all nodes, and kick off the test on each.
-    nodelist := framework.ListSchedulableNodesOrDie(c)
+    nodelist := framework.GetReadySchedulableNodesOrDie(c)
     result := make([]bool, len(nodelist.Items))
     wg := sync.WaitGroup{}
     wg.Add(len(nodelist.Items))
diff --git a/test/e2e/service.go b/test/e2e/service.go
index b7f9ac2c8b0..c6fb79f0e2e 100644
--- a/test/e2e/service.go
+++ b/test/e2e/service.go
@@ -1112,7 +1112,7 @@ func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []st
 }

 func getNodePublicIps(c *client.Client) ([]string, error) {
-    nodes := framework.ListSchedulableNodesOrDie(c)
+    nodes := framework.GetReadySchedulableNodesOrDie(c)
     ips := collectAddresses(nodes, api.NodeExternalIP)
     if len(ips) == 0 {
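
For reference, below is a minimal sketch of how an e2e test consumes the new helper after this change. It is illustrative only: the Describe/It names, the "example" framework name, and the surrounding wiring are assumptions rather than part of this patch; only the GetReadySchedulableNodesOrDie call and its ready-plus-schedulable filtering behavior come from the diff above.

package e2e

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = framework.KubeDescribe("Example [Illustrative]", func() {
	f := framework.NewDefaultFramework("example")
	var nodeCount int

	BeforeEach(func() {
		// Returns only nodes that are schedulable (Spec.Unschedulable is false)
		// and have the NodeReady condition set to true; the OrDie suffix means
		// the helper fails the test on error instead of returning one to the caller.
		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
		nodeCount = len(nodes.Items)
		Expect(nodeCount).NotTo(BeZero())
	})

	It("runs against only ready, schedulable nodes", func() {
		framework.Logf("found %d usable nodes", nodeCount)
	})
})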