Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-22 19:31:44 +00:00
Consolidate node selecting tests to only use Schedulable + Running nodes.

This commit is contained in:
parent 32256d53aa
commit 326b213231
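The practical effect of the change is that the e2e helpers stop handing tests nodes that are cordoned or NotReady. As a rough illustration of that selection rule, here is a standalone sketch using a simplified node type rather than the framework's api.Node (names here are illustrative only); a node is kept only when it is schedulable and its Ready condition is true:

package main

import "fmt"

// node mirrors only the fields the selection rule cares about; the real helper
// filters api.NodeList values from the Kubernetes client (simplified here).
type node struct {
	name          string
	unschedulable bool
	ready         bool
}

// readySchedulable keeps a node only when it is schedulable AND Ready,
// mirroring the predicate GetReadySchedulableNodesOrDie passes to FilterNodes.
func readySchedulable(nodes []node) []node {
	var kept []node
	for _, n := range nodes {
		if !n.unschedulable && n.ready {
			kept = append(kept, n)
		}
	}
	return kept
}

func main() {
	nodes := []node{
		{name: "node-a", ready: true},
		{name: "node-b", ready: false},                     // NotReady: excluded
		{name: "node-c", ready: true, unschedulable: true}, // cordoned: excluded
	}
	for _, n := range readySchedulable(nodes) {
		fmt.Println(n.name) // prints only "node-a"
	}
}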
@@ -47,7 +47,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Feature:ClusterSizeAut
 	BeforeEach(func() {
 		framework.SkipUnlessProviderIs("gce")
 
-		nodes := framework.ListSchedulableNodesOrDie(f.Client)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		nodeCount = len(nodes.Items)
 		Expect(nodeCount).NotTo(BeZero())
 		cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]
@@ -372,7 +372,7 @@ func checkMasterVersion(c *client.Client, want string) error {
 }
 
 func checkNodesVersions(c *client.Client, want string) error {
-	l := framework.ListSchedulableNodesOrDie(c)
+	l := framework.GetReadySchedulableNodesOrDie(c)
 	for _, n := range l.Items {
 		// We do prefix trimming and then matching because:
 		// want looks like: 0.19.3-815-g50e67d4
@@ -178,7 +178,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
 		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
 
 		By("Change label of node, check that daemon pod is launched.")
-		nodeList := framework.ListSchedulableNodesOrDie(f.Client)
+		nodeList := framework.GetReadySchedulableNodesOrDie(f.Client)
 		Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
 		newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
 		Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
@@ -213,7 +213,7 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m
 }
 
 func clearDaemonSetNodeLabels(c *client.Client) error {
-	nodeList := framework.ListSchedulableNodesOrDie(c)
+	nodeList := framework.GetReadySchedulableNodesOrDie(c)
 	for _, node := range nodeList.Items {
 		_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
 		if err != nil {
@@ -158,7 +158,7 @@ var _ = framework.KubeDescribe("Density", func() {
 		c = f.Client
 		ns = f.Namespace.Name
 
-		nodes := framework.ListSchedulableNodesOrDie(c)
+		nodes := framework.GetReadySchedulableNodesOrDie(c)
 		nodeCount = len(nodes.Items)
 		Expect(nodeCount).NotTo(BeZero())
 
@@ -201,7 +201,7 @@ func ClusterLevelLoggingWithElasticsearch(f *framework.Framework) {
 	}
 
 	// Obtain a list of nodes so we can place one synthetic logger on each node.
-	nodes := framework.ListSchedulableNodesOrDie(f.Client)
+	nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 	nodeCount := len(nodes.Items)
 	if nodeCount == 0 {
 		framework.Failf("Failed to find any nodes")
@@ -165,7 +165,7 @@ var _ = framework.KubeDescribe("Pet Store [Feature:Example]", func() {
 	f := framework.NewDefaultFramework("petstore")
 
 	It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout), func() {
-		nodes := framework.ListSchedulableNodesOrDie(f.Client)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		nodeCount = len(nodes.Items)
 
 		loadGenerators := nodeCount
@@ -436,7 +436,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
 
 // CreatePodsPerNodeForSimpleApp Creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
 func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n api.Node) api.PodSpec, maxCount int) map[string]string {
-	nodes := ListSchedulableNodesOrDie(f.Client)
+	nodes := GetReadySchedulableNodesOrDie(f.Client)
 	labels := map[string]string{
 		"app": appName + "-pod",
 	}
@@ -18,7 +18,6 @@ package framework
 
 import (
 	"bytes"
-	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -2241,8 +2240,8 @@ func getNodeEvents(c *client.Client, nodeName string) []api.Event {
 	return events.Items
 }
 
-// Convenient wrapper around listing nodes supporting retries.
-func ListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
+// waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
+func waitListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
 	var nodes *api.NodeList
 	var err error
 	if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
@@ -2256,6 +2255,20 @@ func ListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
 	return nodes
 }
 
+// GetReadySchedulableNodesOrDie addresses the common use case of getting nodes you can do work on.
+// 1) Needs to be schedulable.
+// 2) Needs to be ready.
+// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
+func GetReadySchedulableNodesOrDie(c *client.Client) (nodes *api.NodeList) {
+	nodes = waitListSchedulableNodesOrDie(c)
+	// previous tests may have cause failures of some nodes. Let's skip
+	// 'Not Ready' nodes, just in case (there is no need to fail the test).
+	FilterNodes(nodes, func(node api.Node) bool {
+		return !node.Spec.Unschedulable && IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
+	})
+	return nodes
+}
+
 func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error {
 	By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
 	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), c)
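A note on call sites: because the new helper follows the framework's OrDie convention and fails the test itself instead of returning an error, callers collapse from the two-line pattern `nodes, err := framework.GetReadyNodes(f)` / `framework.ExpectNoError(err)` to the single line `nodes := framework.GetReadySchedulableNodesOrDie(f.Client)`, as the Networking and bandwidth-measurement hunks below show.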
@@ -2806,7 +2819,7 @@ func NodeAddresses(nodelist *api.NodeList, addrType api.NodeAddressType) []strin
 // It returns an error if it can't find an external IP for every node, though it still returns all
 // hosts that it found in that case.
 func NodeSSHHosts(c *client.Client) ([]string, error) {
-	nodelist := ListSchedulableNodesOrDie(c)
+	nodelist := waitListSchedulableNodesOrDie(c)
 
 	// TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462).
 	hosts := NodeAddresses(nodelist, api.NodeExternalIP)
@@ -3587,22 +3600,6 @@ func CheckPodHashLabel(pods *api.PodList) error {
 	return nil
 }
 
-// GetReadyNodes retrieves a list of schedulable nodes whose condition
-// is Ready. An error will be returned if no such nodes are found.
-func GetReadyNodes(f *Framework) (nodes *api.NodeList, err error) {
-	nodes = ListSchedulableNodesOrDie(f.Client)
-	// previous tests may have cause failures of some nodes. Let's skip
-	// 'Not Ready' nodes, just in case (there is no need to fail the test).
-	FilterNodes(nodes, func(node api.Node) bool {
-		return !node.Spec.Unschedulable && IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
-	})
-
-	if len(nodes.Items) == 0 {
-		return nil, errors.New("No Ready nodes found.")
-	}
-	return nodes, nil
-}
-
 // timeout for proxy requests.
 const proxyTimeout = 2 * time.Minute
 
@@ -94,7 +94,7 @@ var _ = framework.KubeDescribe("kubelet", func() {
 	var resourceMonitor *framework.ResourceMonitor
 
 	BeforeEach(func() {
-		nodes := framework.ListSchedulableNodesOrDie(f.Client)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		numNodes = len(nodes.Items)
 		nodeNames = sets.NewString()
 		for _, node := range nodes.Items {
@@ -192,7 +192,7 @@ var _ = framework.KubeDescribe("Kubelet [Serial] [Slow]", func() {
 	var rm *framework.ResourceMonitor
 
 	BeforeEach(func() {
-		nodes := framework.ListSchedulableNodesOrDie(f.Client)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		nodeNames = sets.NewString()
 		for _, node := range nodes.Items {
 			nodeNames.Insert(node.Name)
@@ -463,7 +463,7 @@ func (config *KubeProxyTestConfig) setup() {
 	}
 
 	By("Getting node addresses")
-	nodeList := framework.ListSchedulableNodesOrDie(config.f.Client)
+	nodeList := framework.GetReadySchedulableNodesOrDie(config.f.Client)
 	config.externalAddrs = framework.NodeAddresses(nodeList, api.NodeExternalIP)
 	if len(config.externalAddrs) < 2 {
 		// fall back to legacy IPs
@@ -501,7 +501,7 @@ func (config *KubeProxyTestConfig) cleanup() {
 }
 
 func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
-	nodes := framework.ListSchedulableNodesOrDie(config.f.Client)
+	nodes := framework.GetReadySchedulableNodesOrDie(config.f.Client)
 
 	// create pods, one for each node
 	createdPods := make([]*api.Pod, 0, len(nodes.Items))
@@ -78,7 +78,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 		c = f.Client
 
 		ns = f.Namespace.Name
-		nodes := framework.ListSchedulableNodesOrDie(c)
+		nodes := framework.GetReadySchedulableNodesOrDie(c)
 		nodeCount = len(nodes.Items)
 		Expect(nodeCount).NotTo(BeZero())
 
@@ -65,7 +65,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
 		client := f.Client
 		framework.ExpectNoError(framework.AllNodesReady(client, wait.ForeverTestTimeout), "all nodes ready")
 
-		nodelist := framework.ListSchedulableNodesOrDie(f.Client)
+		nodelist := framework.GetReadySchedulableNodesOrDie(f.Client)
 
 		const ns = "static-pods"
 		numpods := int32(len(nodelist.Items))
@@ -102,7 +102,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() {
 
 	It("should grab all metrics from a Kubelet.", func() {
 		By("Proxying to Node through the API server")
-		nodes := framework.ListSchedulableNodesOrDie(c)
+		nodes := framework.GetReadySchedulableNodesOrDie(c)
 		Expect(nodes.Items).NotTo(BeEmpty())
 		response, err := grabber.GrabFromKubelet(nodes.Items[0].Name)
 		framework.ExpectNoError(err)
@@ -111,8 +111,7 @@ var _ = framework.KubeDescribe("Networking", func() {
 
 		By("Creating a webserver (pending) pod on each node")
 
-		nodes, err := framework.GetReadyNodes(f)
-		framework.ExpectNoError(err)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 
 		if len(nodes.Items) == 1 {
 			// in general, the test requires two nodes. But for local development, often a one node cluster
@@ -220,8 +219,7 @@ var _ = framework.KubeDescribe("Networking", func() {
 	It("should function for pod communication on a single node", func() {
 
 		By("Picking a node")
-		nodes, err := framework.GetReadyNodes(f)
-		framework.ExpectNoError(err)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		node := nodes.Items[0]
 
 		By("Creating a webserver pod")
@@ -238,8 +236,7 @@ var _ = framework.KubeDescribe("Networking", func() {
 		podClient := f.Client.Pods(f.Namespace.Name)
 
 		By("Picking multiple nodes")
-		nodes, err := framework.GetReadyNodes(f)
-		framework.ExpectNoError(err)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 
 		if len(nodes.Items) == 1 {
 			framework.Skipf("The test requires two Ready nodes on %s, but found just one.", framework.TestContext.Provider)
@@ -51,7 +51,7 @@ func runClientServerBandwidthMeasurement(f *framework.Framework, numClient int,
 	numServer := 1
 
 	It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() {
-		nodes := framework.ListSchedulableNodesOrDie(f.Client)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		totalPods := len(nodes.Items)
 		// for a single service, we expect to divide bandwidth between the network. Very crude estimate.
 		expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods))
@@ -112,8 +112,7 @@ func runClientServerBandwidthMeasurement(f *framework.Framework, numClient int,
 
 	// Calculate expected number of clients based on total nodes.
 	expectedCli := func() int {
-		nodes, err := framework.GetReadyNodes(f)
-		framework.ExpectNoError(err)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 		return int(math.Min(float64(len(nodes.Items)), float64(numClient)))
 	}()
 
@@ -73,7 +73,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
 	BeforeEach(func() {
 		c = f.Client
 
-		nodelist := framework.ListSchedulableNodesOrDie(c)
+		nodelist := framework.GetReadySchedulableNodesOrDie(c)
 
 		// Skip this test on small clusters. No need to fail since it is not a use
 		// case that any cluster of small size needs to support.
@@ -87,7 +87,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
 
 	AfterEach(func() {
 
-		nodelist := framework.ListSchedulableNodesOrDie(c)
+		nodelist := framework.GetReadySchedulableNodesOrDie(c)
 		Expect(len(nodelist.Items)).ToNot(BeZero())
 		for _, node := range nodelist.Items {
 			if unfilledNodeName == node.Name || recoveredNodeName == node.Name {
@@ -150,7 +150,7 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
 			}
 		})
 
-		nodelist := framework.ListSchedulableNodesOrDie(c)
+		nodelist := framework.GetReadySchedulableNodesOrDie(c)
 		Expect(len(nodelist.Items)).To(BeNumerically(">", 1))
 
 		nodeToRecover := nodelist.Items[1]
@@ -58,7 +58,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
 		framework.SkipUnlessNodeCountIsAtLeast(2)
 
 		podClient = f.Client.Pods(f.Namespace.Name)
-		nodes := framework.ListSchedulableNodesOrDie(f.Client)
+		nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 
 		Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")
 
@@ -277,7 +277,7 @@ func truncate(b []byte, maxLen int) []byte {
 
 func pickNode(c *client.Client) (string, error) {
 	// TODO: investigate why it doesn't work on master Node.
-	nodes := framework.ListSchedulableNodesOrDie(c)
+	nodes := framework.GetReadySchedulableNodesOrDie(c)
 	if len(nodes.Items) == 0 {
 		return "", fmt.Errorf("no nodes exist, can't test node proxy")
 	}
@@ -128,7 +128,7 @@ var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 
 func testReboot(c *client.Client, rebootCmd string) {
 	// Get all nodes, and kick off the test on each.
-	nodelist := framework.ListSchedulableNodesOrDie(c)
+	nodelist := framework.GetReadySchedulableNodesOrDie(c)
 	result := make([]bool, len(nodelist.Items))
 	wg := sync.WaitGroup{}
 	wg.Add(len(nodelist.Items))
@@ -1112,7 +1112,7 @@ func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []st
 }
 
 func getNodePublicIps(c *client.Client) ([]string, error) {
-	nodes := framework.ListSchedulableNodesOrDie(c)
+	nodes := framework.GetReadySchedulableNodesOrDie(c)
 
 	ips := collectAddresses(nodes, api.NodeExternalIP)
 	if len(ips) == 0 {