Consolidate node selecting tests to only use Schedulable + Running nodes.
Jay Vyas 2016-05-05 16:56:25 -04:00
parent 32256d53aa
commit 326b213231
21 changed files with 44 additions and 51 deletions
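
The consolidation collapses two overlapping helpers into one: ListSchedulableNodesOrDie (returned schedulable nodes without checking readiness) and GetReadyNodes (filtered for readiness but returned an error) are both replaced by GetReadySchedulableNodesOrDie. A minimal before/after sketch of a typical call site, assuming the usual e2e scaffolding around it (f, Expect, and the Ginkgo setup are context, not part of this commit):

	// Before: two patterns with inconsistent readiness guarantees.
	//	nodes := framework.ListSchedulableNodesOrDie(f.Client)
	// or:
	//	nodes, err := framework.GetReadyNodes(f)
	//	framework.ExpectNoError(err)

	// After: one pattern; every returned node is schedulable and Ready,
	// and the test dies instead of returning an error.
	nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
	Expect(len(nodes.Items)).NotTo(BeZero())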

View File

@@ -47,7 +47,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Feature:ClusterSizeAut
 	BeforeEach(func() {
 		framework.SkipUnlessProviderIs("gce")
-		nodes := framework.ListSchedulableNodesOrDie(f.Client)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		nodeCount = len(nodes.Items)
 		Expect(nodeCount).NotTo(BeZero())
 		cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]

View File

@@ -372,7 +372,7 @@ func checkMasterVersion(c *client.Client, want string) error {
 }
 func checkNodesVersions(c *client.Client, want string) error {
-	l := framework.ListSchedulableNodesOrDie(c)
+	l := framework.GetReadySchedulableNodesOrDie(c)
 	for _, n := range l.Items {
 		// We do prefix trimming and then matching because:
 		// want looks like: 0.19.3-815-g50e67d4
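
The hunk above touches checkNodesVersions, which compares the desired version string against what each node reports. As the comment notes, node-reported versions may carry a prefix, so the check trims before matching; a hedged sketch of that idea (nodeVersion and the exact error text are illustrative, not from this commit):

	// Illustrative only: a node may report "v0.19.3-815-g...", while want
	// has no leading "v", so strip the prefix before comparing.
	got := strings.TrimPrefix(nodeVersion, "v")
	if !strings.HasPrefix(got, want) {
		return fmt.Errorf("node version %q does not match desired version %q", nodeVersion, want)
	}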

View File

@@ -178,7 +178,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
 		By("Change label of node, check that daemon pod is launched.")
-		nodeList := framework.ListSchedulableNodesOrDie(f.Client)
+		nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
 		Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
 		newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
 		Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
@@ -213,7 +213,7 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m
 }
 func clearDaemonSetNodeLabels(c *client.Client) error {
-	nodeList := framework.ListSchedulableNodesOrDie(c)
+	nodeList := framework.GetReadySchedulableNodesOrDie(c)
 	for _, node := range nodeList.Items {
 		_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
 		if err != nil {

View File

@@ -158,7 +158,7 @@ var _ = framework.KubeDescribe("Density", func() {
 		c = f.Client
 		ns = f.Namespace.Name
-		nodes := framework.ListSchedulableNodesOrDie(c)
+		nodes := framework.GetReadySchedulableNodesOrDie(c)
 		nodeCount = len(nodes.Items)
 		Expect(nodeCount).NotTo(BeZero())

View File

@@ -201,7 +201,7 @@ func ClusterLevelLoggingWithElasticsearch(f *framework.Framework) {
 	}
 	// Obtain a list of nodes so we can place one synthetic logger on each node.
-	nodes := framework.ListSchedulableNodesOrDie(f.Client)
+	nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 	nodeCount := len(nodes.Items)
 	if nodeCount == 0 {
 		framework.Failf("Failed to find any nodes")

View File

@@ -165,7 +165,7 @@ var _ = framework.KubeDescribe("Pet Store [Feature:Example]", func() {
 	f := framework.NewDefaultFramework("petstore")
 	It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout), func() {
-		nodes := framework.ListSchedulableNodesOrDie(f.Client)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		nodeCount = len(nodes.Items)
 		loadGenerators := nodeCount

View File

@@ -436,7 +436,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
 // CreatePodsPerNodeForSimpleApp Creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
 func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n api.Node) api.PodSpec, maxCount int) map[string]string {
-	nodes := ListSchedulableNodesOrDie(f.Client)
+	nodes := GetReadySchedulableNodesOrDie(f.Client)
 	labels := map[string]string{
 		"app": appName + "-pod",
 	}

View File

@@ -18,7 +18,6 @@ package framework
 import (
 	"bytes"
-	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -2241,8 +2240,8 @@ func getNodeEvents(c *client.Client, nodeName string) []api.Event {
 	return events.Items
 }
-// Convenient wrapper around listing nodes supporting retries.
-func ListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
+// waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
+func waitListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
 	var nodes *api.NodeList
 	var err error
 	if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
@@ -2256,6 +2255,20 @@ func ListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
 	return nodes
 }
+// GetReadySchedulableNodesOrDie addresses the common use case of getting nodes you can do work on.
+// 1) Needs to be schedulable.
+// 2) Needs to be ready.
+// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
+func GetReadySchedulableNodesOrDie(c *client.Client) (nodes *api.NodeList) {
+	nodes = waitListSchedulableNodesOrDie(c)
+	// previous tests may have caused failures of some nodes. Let's skip
+	// 'Not Ready' nodes, just in case (there is no need to fail the test).
+	FilterNodes(nodes, func(node api.Node) bool {
+		return !node.Spec.Unschedulable && IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
+	})
+	return nodes
+}
 func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error {
 	By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
 	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), c)
@@ -2806,7 +2819,7 @@ func NodeAddresses(nodelist *api.NodeList, addrType api.NodeAddressType) []strin
 // It returns an error if it can't find an external IP for every node, though it still returns all
 // hosts that it found in that case.
 func NodeSSHHosts(c *client.Client) ([]string, error) {
-	nodelist := ListSchedulableNodesOrDie(c)
+	nodelist := waitListSchedulableNodesOrDie(c)
 	// TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462).
 	hosts := NodeAddresses(nodelist, api.NodeExternalIP)
@@ -3587,22 +3600,6 @@ func CheckPodHashLabel(pods *api.PodList) error {
 	return nil
 }
-// GetReadyNodes retrieves a list of schedulable nodes whose condition
-// is Ready. An error will be returned if no such nodes are found.
-func GetReadyNodes(f *Framework) (nodes *api.NodeList, err error) {
-	nodes = ListSchedulableNodesOrDie(f.Client)
-	// previous tests may have cause failures of some nodes. Let's skip
-	// 'Not Ready' nodes, just in case (there is no need to fail the test).
-	FilterNodes(nodes, func(node api.Node) bool {
-		return !node.Spec.Unschedulable && IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
-	})
-	if len(nodes.Items) == 0 {
-		return nil, errors.New("No Ready nodes found.")
-	}
-	return nodes, nil
-}
 // timeout for proxy requests.
 const proxyTimeout = 2 * time.Minute
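
The new GetReadySchedulableNodesOrDie delegates the actual filtering to FilterNodes, which this diff does not show. A sketch of its assumed behavior, with the signature taken from the call above; the in-place mutation is an assumption, not confirmed by this commit:

	// FilterNodes keeps only the nodes for which fn returns true,
	// mutating the list in place (assumed semantics, for illustration).
	func FilterNodes(nodeList *api.NodeList, fn func(node api.Node) bool) {
		var filtered []api.Node
		for _, node := range nodeList.Items {
			if fn(node) {
				filtered = append(filtered, node)
			}
		}
		nodeList.Items = filtered
	}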

View File

@@ -94,7 +94,7 @@ var _ = framework.KubeDescribe("kubelet", func() {
 	var resourceMonitor *framework.ResourceMonitor
 	BeforeEach(func() {
-		nodes := framework.ListSchedulableNodesOrDie(f.Client)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		numNodes = len(nodes.Items)
 		nodeNames = sets.NewString()
 		for _, node := range nodes.Items {

View File

@@ -192,7 +192,7 @@ var _ = framework.KubeDescribe("Kubelet [Serial] [Slow]", func() {
 	var rm *framework.ResourceMonitor
 	BeforeEach(func() {
-		nodes := framework.ListSchedulableNodesOrDie(f.Client)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		nodeNames = sets.NewString()
 		for _, node := range nodes.Items {
 			nodeNames.Insert(node.Name)

View File

@@ -463,7 +463,7 @@ func (config *KubeProxyTestConfig) setup() {
 	}
 	By("Getting node addresses")
-	nodeList := framework.ListSchedulableNodesOrDie(config.f.Client)
+	nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client)
 	config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP)
 	if len(config.externalAddrs) < 2 {
 		// fall back to legacy IPs
@@ -501,7 +501,7 @@ func (config *KubeProxyTestConfig) cleanup() {
 }
 func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
-	nodes := framework.ListSchedulableNodesOrDie(config.f.Client)
+	nodes := framework.GetReadySchedulableNodesOrDie(config.f.Client)
 	// create pods, one for each node
 	createdPods := make([]*api.Pod, 0, len(nodes.Items))

View File

@@ -78,7 +78,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 		c = f.Client
 		ns = f.Namespace.Name
-		nodes := framework.ListSchedulableNodesOrDie(c)
+		nodes := framework.GetReadySchedulableNodesOrDie(c)
 		nodeCount = len(nodes.Items)
 		Expect(nodeCount).NotTo(BeZero())

View File

@@ -65,7 +65,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
 		client := f.Client
 		framework.ExpectNoError(framework.AllNodesReady(client, wait.ForeverTestTimeout), "all nodes ready")
-		nodelist := framework.ListSchedulableNodesOrDie(f.Client)
+		nodelist := framework.GetReadySchedulableNodesOrDie(f.Client)
 		const ns = "static-pods"
 		numpods := int32(len(nodelist.Items))

View File

@@ -102,7 +102,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() {
 	It("should grab all metrics from a Kubelet.", func() {
 		By("Proxying to Node through the API server")
-		nodes := framework.ListSchedulableNodesOrDie(c)
+		nodes := framework.GetReadySchedulableNodesOrDie(c)
 		Expect(nodes.Items).NotTo(BeEmpty())
 		response, err := grabber.GrabFromKubelet(nodes.Items[0].Name)
 		framework.ExpectNoError(err)

View File

@@ -111,8 +111,7 @@ var _ = framework.KubeDescribe("Networking", func() {
 		By("Creating a webserver (pending) pod on each node")
-		nodes, err := framework.GetReadyNodes(f)
-		framework.ExpectNoError(err)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		if len(nodes.Items) == 1 {
 			// in general, the test requires two nodes. But for local development, often a one node cluster
@@ -220,8 +219,7 @@ var _ = framework.KubeDescribe("Networking", func() {
 	It("should function for pod communication on a single node", func() {
 		By("Picking a node")
-		nodes, err := framework.GetReadyNodes(f)
-		framework.ExpectNoError(err)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		node := nodes.Items[0]
 		By("Creating a webserver pod")
@@ -238,8 +236,7 @@ var _ = framework.KubeDescribe("Networking", func() {
 		podClient := f.Client.Pods(f.Namespace.Name)
 		By("Picking multiple nodes")
-		nodes, err := framework.GetReadyNodes(f)
-		framework.ExpectNoError(err)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		if len(nodes.Items) == 1 {
 			framework.Skipf("The test requires two Ready nodes on %s, but found just one.", framework.TestContext.Provider)

View File

@@ -51,7 +51,7 @@ func runClientServerBandwidthMeasurement(f *framework.Framework, numClient int,
 	numServer := 1
 	It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() {
-		nodes := framework.ListSchedulableNodesOrDie(f.Client)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		totalPods := len(nodes.Items)
 		// for a single service, we expect to divide bandwidth between the network. Very crude estimate.
 		expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods))
@@ -112,8 +112,7 @@ func runClientServerBandwidthMeasurement(f *framework.Framework, numClient int,
 	// Calculate expected number of clients based on total nodes.
 	expectedCli := func() int {
-		nodes, err := framework.GetReadyNodes(f)
-		framework.ExpectNoError(err)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		return int(math.Min(float64(len(nodes.Items)), float64(numClient)))
 	}()

View File

@@ -73,7 +73,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
 	BeforeEach(func() {
 		c = f.Client
-		nodelist := framework.ListSchedulableNodesOrDie(c)
+		nodelist := framework.GetReadySchedulableNodesOrDie(c)
 		// Skip this test on small clusters. No need to fail since it is not a use
 		// case that any cluster of small size needs to support.
@@ -87,7 +87,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
 	AfterEach(func() {
-		nodelist := framework.ListSchedulableNodesOrDie(c)
+		nodelist := framework.GetReadySchedulableNodesOrDie(c)
 		Expect(len(nodelist.Items)).ToNot(BeZero())
 		for _, node := range nodelist.Items {
 			if unfilledNodeName == node.Name || recoveredNodeName == node.Name {
@@ -150,7 +150,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
 		}
 	})
-	nodelist := framework.ListSchedulableNodesOrDie(c)
+	nodelist := framework.GetReadySchedulableNodesOrDie(c)
 	Expect(len(nodelist.Items)).To(BeNumerically(">", 1))
 	nodeToRecover := nodelist.Items[1]

View File

@@ -58,7 +58,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
 		framework.SkipUnlessNodeCountIsAtLeast(2)
 		podClient = f.Client.Pods(f.Namespace.Name)
-		nodes := framework.ListSchedulableNodesOrDie(f.Client)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")

View File

@@ -277,7 +277,7 @@ func truncate(b []byte, maxLen int) []byte {
 func pickNode(c *client.Client) (string, error) {
 	// TODO: investigate why it doesn't work on master Node.
-	nodes := framework.ListSchedulableNodesOrDie(c)
+	nodes := framework.GetReadySchedulableNodesOrDie(c)
 	if len(nodes.Items) == 0 {
 		return "", fmt.Errorf("no nodes exist, can't test node proxy")
 	}

View File

@@ -128,7 +128,7 @@ var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 func testReboot(c *client.Client, rebootCmd string) {
 	// Get all nodes, and kick off the test on each.
-	nodelist := framework.ListSchedulableNodesOrDie(c)
+	nodelist := framework.GetReadySchedulableNodesOrDie(c)
 	result := make([]bool, len(nodelist.Items))
 	wg := sync.WaitGroup{}
 	wg.Add(len(nodelist.Items))

View File

@@ -1112,7 +1112,7 @@ func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []st
 }
 func getNodePublicIps(c *client.Client) ([]string, error) {
-	nodes := framework.ListSchedulableNodesOrDie(c)
+	nodes := framework.GetReadySchedulableNodesOrDie(c)
 	ips := collectAddresses(nodes, api.NodeExternalIP)
 	if len(ips) == 0 {