Add e2enode.GetRandomReadySchedulableNode, replace some uses of framework.GetReadySchedulableNodesOrDie

For tests that want a single ready, schedulable node
Dan Winship 2019-09-08 13:19:17 -04:00
parent 3c445b2ad0
commit ec4c1a1c05
11 changed files with 50 additions and 42 deletions
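The helper collapses the repeated "list nodes, assert non-empty, take Items[0]" boilerplate at the call sites below. A minimal sketch of the before/after pattern (illustrative only, not taken verbatim from any one file; `f` is the usual `*framework.Framework`, and the final Logf line is a hypothetical follow-up, not from this commit):

// Before (the pattern being replaced):
//   nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
//   gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero())
//   node := &nodes.Items[0]
// After (this commit): one call returns a single random ready, schedulable *v1.Node or an error.
node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
framework.ExpectNoError(err)
framework.Logf("using node %q", node.Name) // real callers pass node.Name to their own helpers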


@@ -160,9 +160,9 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes")
ginkgo.By("Change node label to blue, check that daemon pod is launched.")
-nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
-newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
+node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+framework.ExpectNoError(err)
+newNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector)
framework.ExpectNoError(err, "error setting labels on node")
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
framework.ExpectEqual(len(daemonSetLabels), 1)
@@ -173,7 +173,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled")
nodeSelector[daemonsetColorLabel] = "green"
-greenNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
+greenNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector)
framework.ExpectNoError(err, "error removing labels on node")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
@@ -223,9 +223,9 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes")
ginkgo.By("Change node label to blue, check that daemon pod is launched.")
-nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
-newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
+node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+framework.ExpectNoError(err)
+newNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector)
framework.ExpectNoError(err, "error setting labels on node")
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
framework.ExpectEqual(len(daemonSetLabels), 1)
@@ -235,7 +235,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err)
ginkgo.By("Remove the node label and wait for daemons to be unscheduled")
-_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
+_, err = setDaemonSetNodeLabels(c, node.Name, map[string]string{})
framework.ExpectNoError(err, "error removing labels on node")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")


@@ -34,6 +34,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/test/e2e/framework"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
@@ -679,8 +680,8 @@ var _ = SIGDescribe("StatefulSet", func() {
podName := "test-pod"
statefulPodName := ssName + "-0"
ginkgo.By("Looking for a node to schedule stateful set and pod")
-nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-node := nodes.Items[0]
+node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+framework.ExpectNoError(err)
ginkgo.By("Creating pod with conflicting port in namespace " + f.Namespace.Name)
conflictingPort := v1.ContainerPort{HostPort: 21017, ContainerPort: 21017, Name: "conflict"}
@@ -699,7 +700,7 @@ var _ = SIGDescribe("StatefulSet", func() {
NodeName: node.Name,
},
}
-pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err)
ginkgo.By("Creating statefulset with conflicting port in namespace " + f.Namespace.Name)


@@ -43,8 +43,8 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
ginkgo.It("should be deleted on API server if it doesn't exist in the cloud provider", func() {
ginkgo.By("deleting a node on the cloud provider")
-nodeDeleteCandidates := framework.GetReadySchedulableNodesOrDie(c)
-nodeToDelete := nodeDeleteCandidates.Items[0]
+nodeToDelete, err := e2enode.GetRandomReadySchedulableNode(c)
+framework.ExpectNoError(err)
origNodes, err := e2enode.GetReadyNodesIncludingTainted(c)
if err != nil {
@@ -55,7 +55,7 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
framework.Logf("Original number of ready nodes: %d", len(origNodes.Items))
-err = framework.DeleteNodeOnCloudProvider(&nodeToDelete)
+err = framework.DeleteNodeOnCloudProvider(nodeToDelete)
if err != nil {
framework.Failf("failed to delete node %q, err: %q", nodeToDelete.Name, err)
}


@@ -41,9 +41,9 @@ var _ = framework.KubeDescribe("NodeLease", func() {
f := framework.NewDefaultFramework("node-lease-test")
ginkgo.BeforeEach(func() {
-nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero())
-nodeName = nodes.Items[0].ObjectMeta.Name
+node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+framework.ExpectNoError(err)
+nodeName = node.Name
})
ginkgo.Context("when the NodeLease feature is enabled", func() {


@@ -371,6 +371,16 @@ func GetBoundedReadySchedulableNodes(c clientset.Interface, maxNodes int) (nodes
return nodes, nil
}
+// GetRandomReadySchedulableNode gets a single randomly-selected node which is available for
+// running pods on. If there are no available nodes it will return an error.
+func GetRandomReadySchedulableNode(c clientset.Interface) (*v1.Node, error) {
+	nodes, err := GetReadySchedulableNodes(c)
+	if err != nil {
+		return nil, err
+	}
+	return &nodes.Items[rand.Intn(len(nodes.Items))], nil
+}
// GetReadyNodesIncludingTainted returns all ready nodes, even those which are tainted.
// There are cases when we care about tainted nodes
// E.g. in tests related to nodes with gpu we care about nodes despite
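Because GetRandomReadySchedulableNode (added above) is a thin wrapper around GetReadySchedulableNodes, it returns that function's error when no ready, schedulable node exists. A hypothetical caller that prefers to skip rather than fail could handle it as follows (illustrative only; the tests in this commit all use framework.ExpectNoError):

node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
if err != nil {
	// No ready, schedulable nodes in the cluster; skip instead of failing (illustrative handling only).
	framework.Skipf("no ready schedulable node available: %v", err)
}
framework.Logf("selected node %q", node.Name)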


@@ -39,6 +39,7 @@ go_library(
"//test/e2e/framework/config:go_default_library",
"//test/e2e/framework/gpu:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
+"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/scheduling:go_default_library",


@@ -23,6 +23,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/metrics"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
gin "github.com/onsi/ginkgo"
@@ -51,9 +52,9 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
gin.It("should grab all metrics from a Kubelet.", func() {
gin.By("Proxying to Node through the API server")
-nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-gom.Expect(nodes.Items).NotTo(gom.BeEmpty())
-response, err := grabber.GrabFromKubelet(nodes.Items[0].Name)
+node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+framework.ExpectNoError(err)
+response, err := grabber.GrabFromKubelet(node.Name)
framework.ExpectNoError(err)
gom.Expect(response).NotTo(gom.BeEmpty())
})


@@ -39,9 +39,9 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
var nodeName string
ginkgo.BeforeEach(func() {
-nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero())
-node = &nodes.Items[0]
+var err error
+node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+framework.ExpectNoError(err)
nodeName = node.Name
})


@@ -33,6 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/net"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -287,23 +288,16 @@ func truncate(b []byte, maxLen int) []byte {
return b2
}
-func pickNode(cs clientset.Interface) (string, error) {
-// TODO: investigate why it doesn't work on master Node.
-nodes := framework.GetReadySchedulableNodesOrDie(cs)
-if len(nodes.Items) == 0 {
-return "", fmt.Errorf("no nodes exist, can't test node proxy")
-}
-return nodes.Items[0].Name, nil
-}
func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) {
-node, err := pickNode(f.ClientSet)
+// TODO: investigate why it doesn't work on master Node.
+node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
framework.ExpectNoError(err)
// TODO: Change it to test whether all requests succeeded when requests
// not reaching Kubelet issue is debugged.
serviceUnavailableErrors := 0
for i := 0; i < proxyAttempts; i++ {
-_, status, d, err := doProxy(f, prefix+node+nodeDest, i)
+_, status, d, err := doProxy(f, prefix+node.Name+nodeDest, i)
if status == http.StatusServiceUnavailable {
framework.Logf("ginkgo.Failed proxying node logs due to service unavailable: %v", err)
time.Sleep(time.Second)


@@ -23,6 +23,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -85,9 +86,8 @@ var _ = SIGDescribe("Mount propagation", func() {
// propagated to the right places.
// Pick a node where all pods will run.
-nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-framework.ExpectNotEqual(len(nodes.Items), 0, "No available nodes for scheduling")
-node := &nodes.Items[0]
+node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+framework.ExpectNoError(err)
// Fail the test if the namespace is not set. We expect that the
// namespace is unique and we might delete user data if it's not.
@@ -139,7 +139,7 @@ var _ = SIGDescribe("Mount propagation", func() {
// The host mounts one tmpfs to testdir/host and puts a file there so we
// can check mount propagation from the host to pods.
cmd := fmt.Sprintf("sudo mkdir %[1]q/host; sudo mount -t tmpfs e2e-mount-propagation-host %[1]q/host; echo host > %[1]q/host/file", hostDir)
-err := e2essh.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
+err = e2essh.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
framework.ExpectNoError(err)
defer func() {


@@ -59,6 +59,7 @@ import (
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/test/e2e/framework"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"k8s.io/kubernetes/test/e2e/framework/timer"
testutils "k8s.io/kubernetes/test/utils"
@@ -158,14 +159,14 @@ var _ = SIGDescribe("Load capacity", func() {
clientset = f.ClientSet
ns = f.Namespace.Name
-nodes := framework.GetReadySchedulableNodesOrDie(clientset)
-nodeCount = len(nodes.Items)
-gomega.Expect(nodeCount).NotTo(gomega.BeZero())
+_, err := e2enode.GetRandomReadySchedulableNode(clientset)
+framework.ExpectNoError(err)
// Terminating a namespace (deleting the remaining objects from it - which
// generally means events) can affect the current run. Thus we wait for all
// terminating namespace to be finally deleted before starting this test.
-err := framework.CheckTestingNSDeletedExcept(clientset, ns)
+err = framework.CheckTestingNSDeletedExcept(clientset, ns)
framework.ExpectNoError(err)
framework.ExpectNoError(e2emetrics.ResetMetrics(clientset))