Add e2enode.GetRandomReadySchedulableNode, replace some uses of framework.GetReadySchedulableNodesOrDie
For tests that want a single ready, schedulable node
commit ec4c1a1c05 (parent 3c445b2ad0)
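Usage pattern introduced by this commit, as a minimal sketch for reviewers. The Describe/It scaffolding, names, and log line below are illustrative only and not part of the change; the helper's import path and signature are taken from the diff.

package example

import (
	"github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

var _ = ginkgo.Describe("single-node example", func() {
	f := framework.NewDefaultFramework("single-node-example")

	ginkgo.It("runs against one ready, schedulable node", func() {
		// Pick one node at random from the ready, schedulable set.
		// Unlike GetReadySchedulableNodesOrDie, this returns an error
		// instead of failing internally when no node is available.
		node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
		framework.ExpectNoError(err)

		// Hypothetical use of the selected node.
		framework.Logf("running against node %q", node.Name)
	})
})

Letting the helper return an error keeps the failure decision with the caller, which in these tests is expressed through framework.ExpectNoError.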
@@ -160,9 +160,9 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes")

 ginkgo.By("Change node label to blue, check that daemon pod is launched.")
-nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
-newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
+node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+framework.ExpectNoError(err)
+newNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector)
 framework.ExpectNoError(err, "error setting labels on node")
 daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
 framework.ExpectEqual(len(daemonSetLabels), 1)
@@ -173,7 +173,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

 ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled")
 nodeSelector[daemonsetColorLabel] = "green"
-greenNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
+greenNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector)
 framework.ExpectNoError(err, "error removing labels on node")
 err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
 framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
@@ -223,9 +223,9 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes")

 ginkgo.By("Change node label to blue, check that daemon pod is launched.")
-nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
-newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
+node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+framework.ExpectNoError(err)
+newNode, err := setDaemonSetNodeLabels(c, node.Name, nodeSelector)
 framework.ExpectNoError(err, "error setting labels on node")
 daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
 framework.ExpectEqual(len(daemonSetLabels), 1)
@@ -235,7 +235,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 framework.ExpectNoError(err)

 ginkgo.By("Remove the node label and wait for daemons to be unscheduled")
-_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
+_, err = setDaemonSetNodeLabels(c, node.Name, map[string]string{})
 framework.ExpectNoError(err, "error removing labels on node")
 err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
 framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
@@ -34,6 +34,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 watchtools "k8s.io/client-go/tools/watch"
 "k8s.io/kubernetes/test/e2e/framework"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
@@ -679,8 +680,8 @@ var _ = SIGDescribe("StatefulSet", func() {
 podName := "test-pod"
 statefulPodName := ssName + "-0"
 ginkgo.By("Looking for a node to schedule stateful set and pod")
-nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-node := nodes.Items[0]
+node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+framework.ExpectNoError(err)

 ginkgo.By("Creating pod with conflicting port in namespace " + f.Namespace.Name)
 conflictingPort := v1.ContainerPort{HostPort: 21017, ContainerPort: 21017, Name: "conflict"}
@@ -699,7 +700,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 		NodeName: node.Name,
 	},
 }
-pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 framework.ExpectNoError(err)

 ginkgo.By("Creating statefulset with conflicting port in namespace " + f.Namespace.Name)
@@ -43,8 +43,8 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
 ginkgo.It("should be deleted on API server if it doesn't exist in the cloud provider", func() {
 	ginkgo.By("deleting a node on the cloud provider")

-	nodeDeleteCandidates := framework.GetReadySchedulableNodesOrDie(c)
-	nodeToDelete := nodeDeleteCandidates.Items[0]
+	nodeToDelete, err := e2enode.GetRandomReadySchedulableNode(c)
+	framework.ExpectNoError(err)

 	origNodes, err := e2enode.GetReadyNodesIncludingTainted(c)
 	if err != nil {
@@ -55,7 +55,7 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {

 framework.Logf("Original number of ready nodes: %d", len(origNodes.Items))

-err = framework.DeleteNodeOnCloudProvider(&nodeToDelete)
+err = framework.DeleteNodeOnCloudProvider(nodeToDelete)
 if err != nil {
 	framework.Failf("failed to delete node %q, err: %q", nodeToDelete.Name, err)
 }
@@ -41,9 +41,9 @@ var _ = framework.KubeDescribe("NodeLease", func() {
 f := framework.NewDefaultFramework("node-lease-test")

 ginkgo.BeforeEach(func() {
-	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-	gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero())
-	nodeName = nodes.Items[0].ObjectMeta.Name
+	node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+	framework.ExpectNoError(err)
+	nodeName = node.Name
 })

 ginkgo.Context("when the NodeLease feature is enabled", func() {
@@ -371,6 +371,16 @@ func GetBoundedReadySchedulableNodes(c clientset.Interface, maxNodes int) (nodes
 	return nodes, nil
 }

+// GetRandomReadySchedulableNode gets a single randomly-selected node which is available for
+// running pods on. If there are no available nodes it will return an error.
+func GetRandomReadySchedulableNode(c clientset.Interface) (*v1.Node, error) {
+	nodes, err := GetReadySchedulableNodes(c)
+	if err != nil {
+		return nil, err
+	}
+	return &nodes.Items[rand.Intn(len(nodes.Items))], nil
+}
+
 // GetReadyNodesIncludingTainted returns all ready nodes, even those which are tainted.
 // There are cases when we care about tainted nodes
 // E.g. in tests related to nodes with gpu we care about nodes despite
@@ -39,6 +39,7 @@ go_library(
 "//test/e2e/framework/config:go_default_library",
 "//test/e2e/framework/gpu:go_default_library",
 "//test/e2e/framework/metrics:go_default_library",
+"//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/instrumentation/common:go_default_library",
 "//test/e2e/scheduling:go_default_library",
@@ -23,6 +23,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/framework/metrics"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"

 gin "github.com/onsi/ginkgo"
@@ -51,9 +52,9 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {

 gin.It("should grab all metrics from a Kubelet.", func() {
 	gin.By("Proxying to Node through the API server")
-	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-	gom.Expect(nodes.Items).NotTo(gom.BeEmpty())
-	response, err := grabber.GrabFromKubelet(nodes.Items[0].Name)
+	node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+	framework.ExpectNoError(err)
+	response, err := grabber.GrabFromKubelet(node.Name)
 	framework.ExpectNoError(err)
 	gom.Expect(response).NotTo(gom.BeEmpty())
 })
@@ -39,9 +39,9 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
 var nodeName string

 ginkgo.BeforeEach(func() {
-	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-	gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero())
-	node = &nodes.Items[0]
+	var err error
+	node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+	framework.ExpectNoError(err)
 	nodeName = node.Name
 })

@@ -33,6 +33,7 @@ import (
 "k8s.io/apimachinery/pkg/util/net"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"

@@ -287,23 +288,16 @@ func truncate(b []byte, maxLen int) []byte {
 	return b2
 }

-func pickNode(cs clientset.Interface) (string, error) {
-	// TODO: investigate why it doesn't work on master Node.
-	nodes := framework.GetReadySchedulableNodesOrDie(cs)
-	if len(nodes.Items) == 0 {
-		return "", fmt.Errorf("no nodes exist, can't test node proxy")
-	}
-	return nodes.Items[0].Name, nil
-}
-
 func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) {
-	node, err := pickNode(f.ClientSet)
+	// TODO: investigate why it doesn't work on master Node.
+	node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
 	framework.ExpectNoError(err)
+
 	// TODO: Change it to test whether all requests succeeded when requests
 	// not reaching Kubelet issue is debugged.
 	serviceUnavailableErrors := 0
 	for i := 0; i < proxyAttempts; i++ {
-		_, status, d, err := doProxy(f, prefix+node+nodeDest, i)
+		_, status, d, err := doProxy(f, prefix+node.Name+nodeDest, i)
 		if status == http.StatusServiceUnavailable {
 			framework.Logf("ginkgo.Failed proxying node logs due to service unavailable: %v", err)
 			time.Sleep(time.Second)
@@ -23,6 +23,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/kubernetes/test/e2e/framework"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 imageutils "k8s.io/kubernetes/test/utils/image"

@@ -85,9 +86,8 @@ var _ = SIGDescribe("Mount propagation", func() {
 // propagated to the right places.

 // Pick a node where all pods will run.
-nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-framework.ExpectNotEqual(len(nodes.Items), 0, "No available nodes for scheduling")
-node := &nodes.Items[0]
+node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+framework.ExpectNoError(err)

 // Fail the test if the namespace is not set. We expect that the
 // namespace is unique and we might delete user data if it's not.
@@ -139,7 +139,7 @@ var _ = SIGDescribe("Mount propagation", func() {
 // The host mounts one tmpfs to testdir/host and puts a file there so we
 // can check mount propagation from the host to pods.
 cmd := fmt.Sprintf("sudo mkdir %[1]q/host; sudo mount -t tmpfs e2e-mount-propagation-host %[1]q/host; echo host > %[1]q/host/file", hostDir)
-err := e2essh.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
+err = e2essh.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
 framework.ExpectNoError(err)

 defer func() {
@@ -59,6 +59,7 @@ import (
 "k8s.io/kubernetes/pkg/apis/extensions"
 "k8s.io/kubernetes/test/e2e/framework"
 e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 "k8s.io/kubernetes/test/e2e/framework/timer"
 testutils "k8s.io/kubernetes/test/utils"

@@ -158,14 +159,14 @@ var _ = SIGDescribe("Load capacity", func() {
 clientset = f.ClientSet

 ns = f.Namespace.Name
-nodes := framework.GetReadySchedulableNodesOrDie(clientset)
-nodeCount = len(nodes.Items)
-gomega.Expect(nodeCount).NotTo(gomega.BeZero())
+
+_, err := e2enode.GetRandomReadySchedulableNode(clientset)
+framework.ExpectNoError(err)

 // Terminating a namespace (deleting the remaining objects from it - which
 // generally means events) can affect the current run. Thus we wait for all
 // terminating namespace to be finally deleted before starting this test.
-err := framework.CheckTestingNSDeletedExcept(clientset, ns)
+err = framework.CheckTestingNSDeletedExcept(clientset, ns)
 framework.ExpectNoError(err)

 framework.ExpectNoError(e2emetrics.ResetMetrics(clientset))