diff --git a/test/e2e/cadvisor.go b/test/e2e/cadvisor.go
index 95783e0f1d2..4cddade31ca 100644
--- a/test/e2e/cadvisor.go
+++ b/test/e2e/cadvisor.go
@@ -47,6 +47,7 @@ var _ = Describe("Cadvisor", func() {
 })
 
 func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
+	// It should be OK to list unschedulable Nodes here.
 	By("getting list of nodes")
 	nodeList, err := c.Nodes().List(api.ListOptions{})
 	expectNoError(err)
diff --git a/test/e2e/cluster_size_autoscaling.go b/test/e2e/cluster_size_autoscaling.go
index 5e2a25f1156..667aa41f31c 100644
--- a/test/e2e/cluster_size_autoscaling.go
+++ b/test/e2e/cluster_size_autoscaling.go
@@ -41,8 +41,7 @@ var _ = Describe("[Autoscaling] [Skipped]", func() {
 	BeforeEach(func() {
 		SkipUnlessProviderIs("gce")
 
-		nodes, err := f.Client.Nodes().List(api.ListOptions{})
-		expectNoError(err)
+		nodes := ListSchedulableNodesOrDie(f.Client)
 		nodeCount = len(nodes.Items)
 		Expect(nodeCount).NotTo(BeZero())
 		cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]
diff --git a/test/e2e/cluster_upgrade.go b/test/e2e/cluster_upgrade.go
index 1f6edca76f1..38d120849f8 100644
--- a/test/e2e/cluster_upgrade.go
+++ b/test/e2e/cluster_upgrade.go
@@ -30,8 +30,6 @@ import (
 
 	"k8s.io/kubernetes/pkg/api"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/fields"
-	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/wait"
 
@@ -354,10 +352,7 @@ func testNodeUpgrade(f *Framework, nUp func(f *Framework, n int, v string) error
 }
 
 func checkNodesVersions(c *client.Client, want string) error {
-	l, err := listNodes(c, labels.Everything(), fields.Everything())
-	if err != nil {
-		return fmt.Errorf("checkNodesVersions() failed to list nodes: %v", err)
-	}
+	l := ListSchedulableNodesOrDie(c)
 	for _, n := range l.Items {
 		// We do prefix trimming and then matching because:
 		// want looks like: 0.19.3-815-g50e67d4
diff --git a/test/e2e/daemon_set.go b/test/e2e/daemon_set.go
index ce06434719a..c6ac0b2406f 100644
--- a/test/e2e/daemon_set.go
+++ b/test/e2e/daemon_set.go
@@ -160,8 +160,7 @@ var _ = Describe("Daemon set", func() {
 		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
 
 		By("Change label of node, check that daemon pod is launched.")
-		nodeClient := c.Nodes()
-		nodeList, err := nodeClient.List(api.ListOptions{})
+		nodeList := ListSchedulableNodesOrDie(f.Client)
 		Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
 		newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
 		Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
@@ -196,11 +195,7 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m
 }
 
 func clearDaemonSetNodeLabels(c *client.Client) error {
-	nodeClient := c.Nodes()
-	nodeList, err := nodeClient.List(api.ListOptions{})
-	if err != nil {
-		return err
-	}
+	nodeList := ListSchedulableNodesOrDie(c)
 	for _, node := range nodeList.Items {
 		_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
 		if err != nil {
@@ -282,10 +277,7 @@ func checkDaemonPodOnNodes(f *Framework, selector map[string]string, nodeNames [
 
 func checkRunningOnAllNodes(f *Framework, selector map[string]string) func() (bool, error) {
 	return func() (bool, error) {
-		nodeList, err := f.Client.Nodes().List(api.ListOptions{})
-		if err != nil {
-			return false, nil
-		}
+		nodeList := ListSchedulableNodesOrDie(f.Client)
 		nodeNames := make([]string, 0)
 		for _, node := range nodeList.Items {
 			nodeNames = append(nodeNames, node.Name)
diff --git a/test/e2e/density.go b/test/e2e/density.go
index 1436599f39f..a02b382feaf 100644
--- a/test/e2e/density.go
+++ b/test/e2e/density.go
@@ -158,8 +158,7 @@ var _ = Describe("Density [Skipped]", func() {
 		ns = framework.Namespace.Name
 		var err error
 
-		nodes, err := c.Nodes().List(api.ListOptions{})
-		expectNoError(err)
+		nodes := ListSchedulableNodesOrDie(c)
 		nodeCount = len(nodes.Items)
 		Expect(nodeCount).NotTo(BeZero())
 
diff --git a/test/e2e/es_cluster_logging.go b/test/e2e/es_cluster_logging.go
index 21b359d2ae9..df778c82f0e 100644
--- a/test/e2e/es_cluster_logging.go
+++ b/test/e2e/es_cluster_logging.go
@@ -192,10 +192,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 	}
 
 	// Obtain a list of nodes so we can place one synthetic logger on each node.
-	nodes, err := f.Client.Nodes().List(api.ListOptions{})
-	if err != nil {
-		Failf("Failed to list nodes: %v", err)
-	}
+	nodes := ListSchedulableNodesOrDie(f.Client)
 	nodeCount := len(nodes.Items)
 	if nodeCount == 0 {
 		Failf("Failed to find any nodes")
diff --git a/test/e2e/example_k8petstore.go b/test/e2e/example_k8petstore.go
index 9bae8978f81..7161311f164 100644
--- a/test/e2e/example_k8petstore.go
+++ b/test/e2e/example_k8petstore.go
@@ -18,10 +18,6 @@ package e2e
 
 import (
 	"fmt"
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-	"k8s.io/kubernetes/pkg/api"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"log"
 	"os"
 	"os/exec"
@@ -29,6 +25,11 @@ import (
 	"strconv"
 	"syscall"
 	"time"
+
+	client "k8s.io/kubernetes/pkg/client/unversioned"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
 )
 
 const (
@@ -151,17 +152,16 @@ T:
 var _ = Describe("[Example] Pet Store [Skipped]", func() {
 
-	// The number of minions dictates total number of generators/transaction expectations.
-	var minionCount int
+	// The number of nodes dictates total number of generators/transaction expectations.
+	var nodeCount int
 	f := NewFramework("petstore")
 
 	It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestTransactions, k8bpsSmokeTestTimeout), func() {
 
-		minions, err := f.Client.Nodes().List(api.ListOptions{})
-		Expect(err).NotTo(HaveOccurred())
-		minionCount = len(minions.Items)
+		nodes := ListSchedulableNodesOrDie(f.Client)
+		nodeCount = len(nodes.Items)
 
-		loadGenerators := minionCount
-		restServers := minionCount
+		loadGenerators := nodeCount
+		restServers := nodeCount
 		fmt.Printf("load generators / rest servers [ %v / %v ] ", loadGenerators, restServers)
 		runK8petstore(restServers, loadGenerators, f.Client, f.Namespace.Name, k8bpsSmokeTestTransactions, k8bpsSmokeTestTimeout)
 	})
diff --git a/test/e2e/kubectl.go b/test/e2e/kubectl.go
index faca3edf531..da5b95918bb 100644
--- a/test/e2e/kubectl.go
+++ b/test/e2e/kubectl.go
@@ -529,6 +529,7 @@ var _ = Describe("Kubectl client", func() {
 			checkOutput(output, requiredStrings)
 
 			// Node
+			// It should be OK to list unschedulable Nodes here.
 			nodes, err := c.Nodes().List(api.ListOptions{})
 			Expect(err).NotTo(HaveOccurred())
 			node := nodes.Items[0]
diff --git a/test/e2e/kubelet.go b/test/e2e/kubelet.go
index a03ee0822f9..9e59c2bc4c6 100644
--- a/test/e2e/kubelet.go
+++ b/test/e2e/kubelet.go
@@ -21,7 +21,6 @@ import (
 	"strings"
 	"time"
 
-	"k8s.io/kubernetes/pkg/api"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/sets"
@@ -94,8 +93,7 @@ var _ = Describe("kubelet", func() {
 		var resourceMonitor *resourceMonitor
 
 		BeforeEach(func() {
-			nodes, err := framework.Client.Nodes().List(api.ListOptions{})
-			expectNoError(err)
+			nodes := ListSchedulableNodesOrDie(framework.Client)
 			numNodes = len(nodes.Items)
 			nodeNames = sets.NewString()
 			for _, node := range nodes.Items {
diff --git a/test/e2e/kubelet_perf.go b/test/e2e/kubelet_perf.go
index b7f69260eab..43ddfa19507 100644
--- a/test/e2e/kubelet_perf.go
+++ b/test/e2e/kubelet_perf.go
@@ -144,6 +144,7 @@ var _ = Describe("Kubelet", func() {
 	var rm *resourceMonitor
 
 	BeforeEach(func() {
+		// It should be OK to list unschedulable Nodes here.
 		nodes, err := framework.Client.Nodes().List(api.ListOptions{})
 		expectNoError(err)
 		nodeNames = sets.NewString()
diff --git a/test/e2e/kubelet_stats.go b/test/e2e/kubelet_stats.go
index 22ed9275201..485cc143ee2 100644
--- a/test/e2e/kubelet_stats.go
+++ b/test/e2e/kubelet_stats.go
@@ -690,6 +690,7 @@ func newResourceMonitor(c *client.Client, containerNames []string, pollingInterv
 }
 
 func (r *resourceMonitor) Start() {
+	// It should be OK to monitor unschedulable Nodes
 	nodes, err := r.client.Nodes().List(api.ListOptions{})
 	if err != nil {
 		Failf("resourceMonitor: unable to get list of nodes: %v", err)
diff --git a/test/e2e/kubeproxy.go b/test/e2e/kubeproxy.go
index fc822e71eb9..934e4d07edf 100644
--- a/test/e2e/kubeproxy.go
+++ b/test/e2e/kubeproxy.go
@@ -429,8 +429,7 @@ func (config *KubeProxyTestConfig) setup() {
 	}
 
 	By("Getting node addresses")
-	nodeList, err := config.f.Client.Nodes().List(api.ListOptions{})
-	Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get node list: %v", err))
+	nodeList := ListSchedulableNodesOrDie(config.f.Client)
 	config.externalAddrs = NodeAddresses(nodeList, api.NodeExternalIP)
 	if len(config.externalAddrs) < 2 {
 		// fall back to legacy IPs
@@ -468,8 +467,7 @@ func (config *KubeProxyTestConfig) cleanup() {
 }
 
 func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
-	nodes, err := config.f.Client.Nodes().List(api.ListOptions{})
-	Expect(err).NotTo(HaveOccurred())
+	nodes := ListSchedulableNodesOrDie(config.f.Client)
 
 	// create pods, one for each node
 	createdPods := make([]*api.Pod, 0, len(nodes.Items))
diff --git a/test/e2e/latency.go b/test/e2e/latency.go
index 4971ebde545..7bb96a7aadf 100644
--- a/test/e2e/latency.go
+++ b/test/e2e/latency.go
@@ -67,10 +67,8 @@ var _ = Describe("Latency [Skipped]", func() {
 	BeforeEach(func() {
 		c = framework.Client
 		ns = framework.Namespace.Name
-		var err error
 
-		nodes, err := c.Nodes().List(api.ListOptions{})
-		expectNoError(err)
+		nodes := ListSchedulableNodesOrDie(framework.Client)
 		nodeCount = len(nodes.Items)
 		Expect(nodeCount).NotTo(BeZero())
 
diff --git a/test/e2e/load.go b/test/e2e/load.go
index c53b5df1e0b..db5b911d461 100644
--- a/test/e2e/load.go
+++ b/test/e2e/load.go
@@ -72,15 +72,14 @@ var _ = Describe("Load capacity [Skipped]", func() {
 	BeforeEach(func() {
 		c = framework.Client
 		ns = framework.Namespace.Name
-		nodes, err := c.Nodes().List(api.ListOptions{})
-		expectNoError(err)
+		nodes := ListSchedulableNodesOrDie(c)
 		nodeCount = len(nodes.Items)
 		Expect(nodeCount).NotTo(BeZero())
 
 		// Terminating a namespace (deleting the remaining objects from it - which
 		// generally means events) can affect the current run. Thus we wait for all
 		// terminating namespace to be finally deleted before starting this test.
-		err = checkTestingNSDeletedExcept(c, ns)
+		err := checkTestingNSDeletedExcept(c, ns)
 		expectNoError(err)
 
 		expectNoError(resetMetrics(c))
diff --git a/test/e2e/mesos.go b/test/e2e/mesos.go
index 6843ce0c8b0..3335026e71e 100644
--- a/test/e2e/mesos.go
+++ b/test/e2e/mesos.go
@@ -64,8 +64,7 @@ var _ = Describe("Mesos", func() {
 		client := framework.Client
 		expectNoError(allNodesReady(client, util.ForeverTestTimeout), "all nodes ready")
 
-		nodelist, err := client.Nodes().List(api.ListOptions{})
-		expectNoError(err, "nodes fetched from apiserver")
+		nodelist := ListSchedulableNodesOrDie(framework.Client)
 
 		const ns = "static-pods"
 		numpods := len(nodelist.Items)
diff --git a/test/e2e/monitor_resources.go b/test/e2e/monitor_resources.go
index 8031c26e68b..f10ae414aa6 100644
--- a/test/e2e/monitor_resources.go
+++ b/test/e2e/monitor_resources.go
@@ -82,6 +82,7 @@ var _ = Describe("Resource usage of system containers", func() {
 
 	It("should not exceed expected amount.", func() {
 		By("Getting ResourceConsumption on all nodes")
+		// It should be OK to list unschedulable Nodes here.
 		nodeList, err := c.Nodes().List(api.ListOptions{})
 		expectNoError(err)
 
diff --git a/test/e2e/monitoring.go b/test/e2e/monitoring.go
index 8c7d96bc296..aaaf7b48705 100644
--- a/test/e2e/monitoring.go
+++ b/test/e2e/monitoring.go
@@ -123,6 +123,7 @@ func expectedServicesExist(c *client.Client) error {
 }
 
 func getAllNodesInCluster(c *client.Client) ([]string, error) {
+	// It should be OK to list unschedulable Nodes here.
 	nodeList, err := c.Nodes().List(api.ListOptions{})
 	if err != nil {
 		return nil, err
diff --git a/test/e2e/networking.go b/test/e2e/networking.go
index ab0353a0b89..6d7268e2a7a 100644
--- a/test/e2e/networking.go
+++ b/test/e2e/networking.go
@@ -136,10 +136,7 @@ var _ = Describe("Networking", func() {
 
 		By("Creating a webserver (pending) pod on each node")
 
-		nodes, err := f.Client.Nodes().List(api.ListOptions{})
-		if err != nil {
-			Failf("Failed to list nodes: %v", err)
-		}
+		nodes := ListSchedulableNodesOrDie(f.Client)
 		// previous tests may have cause failures of some nodes. Let's skip
 		// 'Not Ready' nodes, just in case (there is no need to fail the test).
 		filterNodes(nodes, func(node api.Node) bool {
diff --git a/test/e2e/nodeoutofdisk.go b/test/e2e/nodeoutofdisk.go
index c690f16890f..6a9cca5e64c 100644
--- a/test/e2e/nodeoutofdisk.go
+++ b/test/e2e/nodeoutofdisk.go
@@ -26,7 +26,6 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/fields"
-	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/util/wait"
 
 	. "github.com/onsi/ginkgo"
@@ -73,8 +72,7 @@ var _ = Describe("NodeOutOfDisk", func() {
 		framework.beforeEach()
 		c = framework.Client
 
-		nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
-		expectNoError(err, "Error retrieving nodes")
+		nodelist := ListSchedulableNodesOrDie(c)
 		Expect(len(nodelist.Items)).To(BeNumerically(">", 1))
 
 		unfilledNodeName = nodelist.Items[0].Name
@@ -86,8 +84,7 @@ var _ = Describe("NodeOutOfDisk", func() {
 	AfterEach(func() {
 		defer framework.afterEach()
 
-		nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
-		expectNoError(err, "Error retrieving nodes")
+		nodelist := ListSchedulableNodesOrDie(c)
 		Expect(len(nodelist.Items)).ToNot(BeZero())
 		for _, node := range nodelist.Items {
 			if unfilledNodeName == node.Name || recoveredNodeName == node.Name {
@@ -150,8 +147,7 @@ var _ = Describe("NodeOutOfDisk", func() {
 			}
 		})
 
-		nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
-		expectNoError(err, "Error retrieving nodes")
+		nodelist := ListSchedulableNodesOrDie(c)
 		Expect(len(nodelist.Items)).To(BeNumerically(">", 1))
 
 		nodeToRecover := nodelist.Items[1]
diff --git a/test/e2e/pd.go b/test/e2e/pd.go
index 521c577f716..0c0c9f981bb 100644
--- a/test/e2e/pd.go
+++ b/test/e2e/pd.go
@@ -53,9 +53,7 @@ var _ = Describe("Pod Disks", func() {
 		SkipUnlessNodeCountIsAtLeast(2)
 
 		podClient = framework.Client.Pods(framework.Namespace.Name)
-
-		nodes, err := framework.Client.Nodes().List(api.ListOptions{})
-		expectNoError(err, "Failed to list nodes for e2e cluster.")
+		nodes := ListSchedulableNodesOrDie(framework.Client)
 
 		Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")
 
diff --git a/test/e2e/proxy.go b/test/e2e/proxy.go
index 8c7e76db42b..7cc5d4308cc 100644
--- a/test/e2e/proxy.go
+++ b/test/e2e/proxy.go
@@ -250,10 +250,8 @@ func truncate(b []byte, maxLen int) []byte {
 }
 
 func pickNode(c *client.Client) (string, error) {
-	nodes, err := c.Nodes().List(api.ListOptions{})
-	if err != nil {
-		return "", err
-	}
+	// TODO: investigate why it doesn't work on master Node.
+	nodes := ListSchedulableNodesOrDie(c)
 	if len(nodes.Items) == 0 {
 		return "", fmt.Errorf("no nodes exist, can't test node proxy")
 	}
diff --git a/test/e2e/reboot.go b/test/e2e/reboot.go
index 6a856ead8fc..c4927b15e02 100644
--- a/test/e2e/reboot.go
+++ b/test/e2e/reboot.go
@@ -115,10 +115,7 @@ var _ = Describe("Reboot", func() {
 
 func testReboot(c *client.Client, rebootCmd string) {
 	// Get all nodes, and kick off the test on each.
-	nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
-	if err != nil {
-		Failf("Error getting nodes: %v", err)
-	}
+	nodelist := ListSchedulableNodesOrDie(c)
 	result := make([]bool, len(nodelist.Items))
 	wg := sync.WaitGroup{}
 	wg.Add(len(nodelist.Items))
diff --git a/test/e2e/restart.go b/test/e2e/restart.go
index 7be5d9df366..62570186ca7 100644
--- a/test/e2e/restart.go
+++ b/test/e2e/restart.go
@@ -161,11 +161,11 @@ func checkNodesReady(c *client.Client, nt time.Duration, expect int) ([]string,
 	var errLast error
 	start := time.Now()
 	found := wait.Poll(poll, nt, func() (bool, error) {
-		// Even though listNodes(...) has its own retries, a rolling-update
-		// (GCE/GKE implementation of restart) can complete before the apiserver
+		// A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver
 		// knows about all of the nodes. Thus, we retry the list nodes call
 		// until we get the expected number of nodes.
-		nodeList, errLast = listNodes(c, labels.Everything(), fields.Everything())
+		nodeList, errLast = c.Nodes().List(api.ListOptions{
+			FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector()})
 		if errLast != nil {
 			return false, nil
 		}
diff --git a/test/e2e/scheduler_predicates.go b/test/e2e/scheduler_predicates.go
index dbf9d3f9e75..a2d72bbf88c 100644
--- a/test/e2e/scheduler_predicates.go
+++ b/test/e2e/scheduler_predicates.go
@@ -194,9 +194,7 @@ var _ = Describe("SchedulerPredicates", func() {
 	BeforeEach(func() {
 		c = framework.Client
 		ns = framework.Namespace.Name
-		var err error
-		nodeList, err = c.Nodes().List(api.ListOptions{})
-		expectNoError(err)
+		nodeList = ListSchedulableNodesOrDie(c)
 	})
 
 	// This test verifies that max-pods flag works as advertised. It assumes that cluster add-on pods stay stable
diff --git a/test/e2e/service.go b/test/e2e/service.go
index 937270898e5..51bcfe4d5ef 100644
--- a/test/e2e/service.go
+++ b/test/e2e/service.go
@@ -1102,10 +1102,7 @@ func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []st
 }
 
 func getNodePublicIps(c *client.Client) ([]string, error) {
-	nodes, err := c.Nodes().List(api.ListOptions{})
-	if err != nil {
-		return nil, err
-	}
+	nodes := ListSchedulableNodesOrDie(c)
 
 	ips := collectAddresses(nodes, api.NodeExternalIP)
 	if len(ips) == 0 {
diff --git a/test/e2e/util.go b/test/e2e/util.go
index 6cfc72090c8..f6869c7c1e1 100644
--- a/test/e2e/util.go
+++ b/test/e2e/util.go
@@ -1661,6 +1661,7 @@ func dumpAllPodInfo(c *client.Client) {
 }
 
 func dumpAllNodeInfo(c *client.Client) {
+	// It should be OK to list unschedulable Nodes here.
 	nodes, err := c.Nodes().List(api.ListOptions{})
 	if err != nil {
 		Logf("unable to fetch node list: %v", err)
@@ -1724,6 +1725,21 @@ func getNodeEvents(c *client.Client, nodeName string) []api.Event {
 	return events.Items
 }
 
+// Convenient wrapper around listing nodes supporting retries.
+func ListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
+	var nodes *api.NodeList
+	var err error
+	if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
+		nodes, err = c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
+			"spec.unschedulable": "false",
+		}.AsSelector()})
+		return err == nil, nil
+	}) != nil {
+		expectNoError(err, "Timed out while listing nodes for e2e cluster.")
+	}
+	return nodes
+}
+
 func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error {
 	By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
 	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), c)
@@ -1910,23 +1926,6 @@ func waitForEvents(c *client.Client, ns string, objOrRef runtime.Object, desired
 	})
 }
 
-// Convenient wrapper around listing nodes supporting retries.
-func listNodes(c *client.Client, label labels.Selector, field fields.Selector) (*api.NodeList, error) {
-	var nodes *api.NodeList
-	var errLast error
-	if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
-		options := api.ListOptions{
-			LabelSelector: label,
-			FieldSelector: field,
-		}
-		nodes, errLast = c.Nodes().List(options)
-		return errLast == nil, nil
-	}) != nil {
-		return nil, fmt.Errorf("listNodes() failed with last error: %v", errLast)
-	}
-	return nodes, nil
-}
-
 // FailedContainers inspects all containers in a pod and returns failure
 // information for containers that have failed or been restarted.
 // A map is returned where the key is the containerID and the value is a
@@ -2008,6 +2007,7 @@ func NodeAddresses(nodelist *api.NodeList, addrType api.NodeAddressType) []strin
 // if it can't find an external IP for every node, though it still returns all
 // hosts that it found in that case.
 func NodeSSHHosts(c *client.Client) ([]string, error) {
+	// It should be OK to list unschedulable Nodes here.
 	nodelist, err := c.Nodes().List(api.ListOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("error getting nodes: %v", err)
@@ -2256,6 +2256,7 @@ func allNodesReady(c *client.Client, timeout time.Duration) error {
 	var notReady []api.Node
 	err := wait.PollImmediate(poll, timeout, func() (bool, error) {
 		notReady = nil
+		// It should be OK to list unschedulable Nodes here.
 		nodes, err := c.Nodes().List(api.ListOptions{})
 		if err != nil {
 			return false, err
@@ -2373,9 +2374,12 @@ func waitForApiserverUp(c *client.Client) error {
 }
 
 // waitForClusterSize waits until the cluster has desired size and there is no not-ready nodes in it.
+// By cluster size we mean number of Nodes excluding Master Node.
 func waitForClusterSize(c *client.Client, size int, timeout time.Duration) error {
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
-		nodes, err := c.Nodes().List(api.ListOptions{})
+		nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
+			"spec.unschedulable": "false",
+		}.AsSelector()})
 		if err != nil {
 			Logf("Failed to list nodes: %v", err)
 			continue
@@ -2569,6 +2573,7 @@ func getNodePortURL(client *client.Client, ns, name string, svcPort int) (string
 	if err != nil {
 		return "", err
 	}
+	// It should be OK to list unschedulable Node here.
 	nodes, err := client.Nodes().List(api.ListOptions{})
 	if err != nil {
 		return "", err