Merge pull request #18509 from gmarek/fix-e2e-pd

Auto commit by PR queue bot
k8s-merge-robot 2015-12-17 02:06:23 -08:00
commit 4a9922c900
26 changed files with 67 additions and 101 deletions

View File

@ -47,6 +47,7 @@ var _ = Describe("Cadvisor", func() {
}) })
func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) { func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
// It should be OK to list unschedulable Nodes here.
By("getting list of nodes") By("getting list of nodes")
nodeList, err := c.Nodes().List(api.ListOptions{}) nodeList, err := c.Nodes().List(api.ListOptions{})
expectNoError(err) expectNoError(err)

View File

@ -41,8 +41,7 @@ var _ = Describe("[Autoscaling] [Skipped]", func() {
BeforeEach(func() { BeforeEach(func() {
SkipUnlessProviderIs("gce") SkipUnlessProviderIs("gce")
nodes, err := f.Client.Nodes().List(api.ListOptions{}) nodes := ListSchedulableNodesOrDie(f.Client)
expectNoError(err)
nodeCount = len(nodes.Items) nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero()) Expect(nodeCount).NotTo(BeZero())
cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU] cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]

View File

@@ -30,8 +30,6 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/fields"
-	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -354,10 +352,7 @@ func testNodeUpgrade(f *Framework, nUp func(f *Framework, n int, v string) error
 }

 func checkNodesVersions(c *client.Client, want string) error {
-	l, err := listNodes(c, labels.Everything(), fields.Everything())
-	if err != nil {
-		return fmt.Errorf("checkNodesVersions() failed to list nodes: %v", err)
-	}
+	l := ListSchedulableNodesOrDie(c)
 	for _, n := range l.Items {
 		// We do prefix trimming and then matching because:
 		// want looks like: 0.19.3-815-g50e67d4

View File

@ -160,8 +160,7 @@ var _ = Describe("Daemon set", func() {
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes") Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
By("Change label of node, check that daemon pod is launched.") By("Change label of node, check that daemon pod is launched.")
nodeClient := c.Nodes() nodeList := ListSchedulableNodesOrDie(f.Client)
nodeList, err := nodeClient.List(api.ListOptions{})
Expect(len(nodeList.Items)).To(BeNumerically(">", 0)) Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector) newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
Expect(err).NotTo(HaveOccurred(), "error setting labels on node") Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
@ -196,11 +195,7 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m
} }
func clearDaemonSetNodeLabels(c *client.Client) error { func clearDaemonSetNodeLabels(c *client.Client) error {
nodeClient := c.Nodes() nodeList := ListSchedulableNodesOrDie(c)
nodeList, err := nodeClient.List(api.ListOptions{})
if err != nil {
return err
}
for _, node := range nodeList.Items { for _, node := range nodeList.Items {
_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{}) _, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
if err != nil { if err != nil {
@ -282,10 +277,7 @@ func checkDaemonPodOnNodes(f *Framework, selector map[string]string, nodeNames [
func checkRunningOnAllNodes(f *Framework, selector map[string]string) func() (bool, error) { func checkRunningOnAllNodes(f *Framework, selector map[string]string) func() (bool, error) {
return func() (bool, error) { return func() (bool, error) {
nodeList, err := f.Client.Nodes().List(api.ListOptions{}) nodeList := ListSchedulableNodesOrDie(f.Client)
if err != nil {
return false, nil
}
nodeNames := make([]string, 0) nodeNames := make([]string, 0)
for _, node := range nodeList.Items { for _, node := range nodeList.Items {
nodeNames = append(nodeNames, node.Name) nodeNames = append(nodeNames, node.Name)

View File

@ -158,8 +158,7 @@ var _ = Describe("Density [Skipped]", func() {
ns = framework.Namespace.Name ns = framework.Namespace.Name
var err error var err error
nodes, err := c.Nodes().List(api.ListOptions{}) nodes := ListSchedulableNodesOrDie(c)
expectNoError(err)
nodeCount = len(nodes.Items) nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero()) Expect(nodeCount).NotTo(BeZero())

View File

@@ -192,10 +192,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 	}

 	// Obtain a list of nodes so we can place one synthetic logger on each node.
-	nodes, err := f.Client.Nodes().List(api.ListOptions{})
-	if err != nil {
-		Failf("Failed to list nodes: %v", err)
-	}
+	nodes := ListSchedulableNodesOrDie(f.Client)
 	nodeCount := len(nodes.Items)
 	if nodeCount == 0 {
 		Failf("Failed to find any nodes")

View File

@@ -18,10 +18,6 @@ package e2e
 import (
 	"fmt"
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-	"k8s.io/kubernetes/pkg/api"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"log"
 	"os"
 	"os/exec"
@@ -29,6 +25,11 @@ import (
 	"strconv"
 	"syscall"
 	"time"
+
+	client "k8s.io/kubernetes/pkg/client/unversioned"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
 )

 const (
@@ -151,17 +152,16 @@ T:
 var _ = Describe("[Example] Pet Store [Skipped]", func() {

-	// The number of minions dictates total number of generators/transaction expectations.
-	var minionCount int
+	// The number of nodes dictates total number of generators/transaction expectations.
+	var nodeCount int
 	f := NewFramework("petstore")

 	It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestTransactions, k8bpsSmokeTestTimeout), func() {
-		minions, err := f.Client.Nodes().List(api.ListOptions{})
-		Expect(err).NotTo(HaveOccurred())
-		minionCount = len(minions.Items)
+		nodes := ListSchedulableNodesOrDie(f.Client)
+		nodeCount = len(nodes.Items)

-		loadGenerators := minionCount
-		restServers := minionCount
+		loadGenerators := nodeCount
+		restServers := nodeCount
 		fmt.Printf("load generators / rest servers [ %v / %v ] ", loadGenerators, restServers)
 		runK8petstore(restServers, loadGenerators, f.Client, f.Namespace.Name, k8bpsSmokeTestTransactions, k8bpsSmokeTestTimeout)
 	})

View File

@ -529,6 +529,7 @@ var _ = Describe("Kubectl client", func() {
checkOutput(output, requiredStrings) checkOutput(output, requiredStrings)
// Node // Node
// It should be OK to list unschedulable Nodes here.
nodes, err := c.Nodes().List(api.ListOptions{}) nodes, err := c.Nodes().List(api.ListOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
node := nodes.Items[0] node := nodes.Items[0]

View File

@@ -21,7 +21,6 @@ import (
 	"strings"
 	"time"

-	"k8s.io/kubernetes/pkg/api"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/sets"
@@ -94,8 +93,7 @@ var _ = Describe("kubelet", func() {
 	var resourceMonitor *resourceMonitor

 	BeforeEach(func() {
-		nodes, err := framework.Client.Nodes().List(api.ListOptions{})
-		expectNoError(err)
+		nodes := ListSchedulableNodesOrDie(framework.Client)
 		numNodes = len(nodes.Items)
 		nodeNames = sets.NewString()
 		for _, node := range nodes.Items {

View File

@ -144,6 +144,7 @@ var _ = Describe("Kubelet", func() {
var rm *resourceMonitor var rm *resourceMonitor
BeforeEach(func() { BeforeEach(func() {
// It should be OK to list unschedulable Nodes here.
nodes, err := framework.Client.Nodes().List(api.ListOptions{}) nodes, err := framework.Client.Nodes().List(api.ListOptions{})
expectNoError(err) expectNoError(err)
nodeNames = sets.NewString() nodeNames = sets.NewString()

View File

@@ -690,6 +690,7 @@ func newResourceMonitor(c *client.Client, containerNames []string, pollingInterv
 }

 func (r *resourceMonitor) Start() {
+	// It should be OK to monitor unschedulable Nodes
 	nodes, err := r.client.Nodes().List(api.ListOptions{})
 	if err != nil {
 		Failf("resourceMonitor: unable to get list of nodes: %v", err)

View File

@@ -429,8 +429,7 @@ func (config *KubeProxyTestConfig) setup() {
 	}

 	By("Getting node addresses")
-	nodeList, err := config.f.Client.Nodes().List(api.ListOptions{})
-	Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get node list: %v", err))
+	nodeList := ListSchedulableNodesOrDie(config.f.Client)
 	config.externalAddrs = NodeAddresses(nodeList, api.NodeExternalIP)
 	if len(config.externalAddrs) < 2 {
 		// fall back to legacy IPs
@@ -468,8 +467,7 @@ func (config *KubeProxyTestConfig) cleanup() {
 }

 func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
-	nodes, err := config.f.Client.Nodes().List(api.ListOptions{})
-	Expect(err).NotTo(HaveOccurred())
+	nodes := ListSchedulableNodesOrDie(config.f.Client)

 	// create pods, one for each node
 	createdPods := make([]*api.Pod, 0, len(nodes.Items))

View File

@ -67,10 +67,8 @@ var _ = Describe("Latency [Skipped]", func() {
BeforeEach(func() { BeforeEach(func() {
c = framework.Client c = framework.Client
ns = framework.Namespace.Name ns = framework.Namespace.Name
var err error
nodes, err := c.Nodes().List(api.ListOptions{}) nodes := ListSchedulableNodesOrDie(framework.Client)
expectNoError(err)
nodeCount = len(nodes.Items) nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero()) Expect(nodeCount).NotTo(BeZero())

View File

@ -72,15 +72,14 @@ var _ = Describe("Load capacity [Skipped]", func() {
BeforeEach(func() { BeforeEach(func() {
c = framework.Client c = framework.Client
ns = framework.Namespace.Name ns = framework.Namespace.Name
nodes, err := c.Nodes().List(api.ListOptions{}) nodes := ListSchedulableNodesOrDie(c)
expectNoError(err)
nodeCount = len(nodes.Items) nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero()) Expect(nodeCount).NotTo(BeZero())
// Terminating a namespace (deleting the remaining objects from it - which // Terminating a namespace (deleting the remaining objects from it - which
// generally means events) can affect the current run. Thus we wait for all // generally means events) can affect the current run. Thus we wait for all
// terminating namespace to be finally deleted before starting this test. // terminating namespace to be finally deleted before starting this test.
err = checkTestingNSDeletedExcept(c, ns) err := checkTestingNSDeletedExcept(c, ns)
expectNoError(err) expectNoError(err)
expectNoError(resetMetrics(c)) expectNoError(resetMetrics(c))

View File

@ -64,8 +64,7 @@ var _ = Describe("Mesos", func() {
client := framework.Client client := framework.Client
expectNoError(allNodesReady(client, util.ForeverTestTimeout), "all nodes ready") expectNoError(allNodesReady(client, util.ForeverTestTimeout), "all nodes ready")
nodelist, err := client.Nodes().List(api.ListOptions{}) nodelist := ListSchedulableNodesOrDie(framework.Client)
expectNoError(err, "nodes fetched from apiserver")
const ns = "static-pods" const ns = "static-pods"
numpods := len(nodelist.Items) numpods := len(nodelist.Items)

View File

@ -82,6 +82,7 @@ var _ = Describe("Resource usage of system containers", func() {
It("should not exceed expected amount.", func() { It("should not exceed expected amount.", func() {
By("Getting ResourceConsumption on all nodes") By("Getting ResourceConsumption on all nodes")
// It should be OK to list unschedulable Nodes here.
nodeList, err := c.Nodes().List(api.ListOptions{}) nodeList, err := c.Nodes().List(api.ListOptions{})
expectNoError(err) expectNoError(err)

View File

@@ -123,6 +123,7 @@ func expectedServicesExist(c *client.Client) error {
 }

 func getAllNodesInCluster(c *client.Client) ([]string, error) {
+	// It should be OK to list unschedulable Nodes here.
 	nodeList, err := c.Nodes().List(api.ListOptions{})
 	if err != nil {
 		return nil, err

View File

@ -136,10 +136,7 @@ var _ = Describe("Networking", func() {
By("Creating a webserver (pending) pod on each node") By("Creating a webserver (pending) pod on each node")
nodes, err := f.Client.Nodes().List(api.ListOptions{}) nodes := ListSchedulableNodesOrDie(f.Client)
if err != nil {
Failf("Failed to list nodes: %v", err)
}
// previous tests may have cause failures of some nodes. Let's skip // previous tests may have cause failures of some nodes. Let's skip
// 'Not Ready' nodes, just in case (there is no need to fail the test). // 'Not Ready' nodes, just in case (there is no need to fail the test).
filterNodes(nodes, func(node api.Node) bool { filterNodes(nodes, func(node api.Node) bool {

View File

@@ -26,7 +26,6 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/fields"
-	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/util/wait"

 	. "github.com/onsi/ginkgo"
@@ -73,8 +72,7 @@ var _ = Describe("NodeOutOfDisk", func() {
 		framework.beforeEach()
 		c = framework.Client

-		nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
-		expectNoError(err, "Error retrieving nodes")
+		nodelist := ListSchedulableNodesOrDie(c)
 		Expect(len(nodelist.Items)).To(BeNumerically(">", 1))

 		unfilledNodeName = nodelist.Items[0].Name
@@ -86,8 +84,7 @@ var _ = Describe("NodeOutOfDisk", func() {
 	AfterEach(func() {
 		defer framework.afterEach()

-		nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
-		expectNoError(err, "Error retrieving nodes")
+		nodelist := ListSchedulableNodesOrDie(c)
 		Expect(len(nodelist.Items)).ToNot(BeZero())
 		for _, node := range nodelist.Items {
 			if unfilledNodeName == node.Name || recoveredNodeName == node.Name {
@@ -150,8 +147,7 @@ var _ = Describe("NodeOutOfDisk", func() {
 			}
 		})

-		nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
-		expectNoError(err, "Error retrieving nodes")
+		nodelist := ListSchedulableNodesOrDie(c)
 		Expect(len(nodelist.Items)).To(BeNumerically(">", 1))

 		nodeToRecover := nodelist.Items[1]

View File

@ -53,9 +53,7 @@ var _ = Describe("Pod Disks", func() {
SkipUnlessNodeCountIsAtLeast(2) SkipUnlessNodeCountIsAtLeast(2)
podClient = framework.Client.Pods(framework.Namespace.Name) podClient = framework.Client.Pods(framework.Namespace.Name)
nodes := ListSchedulableNodesOrDie(framework.Client)
nodes, err := framework.Client.Nodes().List(api.ListOptions{})
expectNoError(err, "Failed to list nodes for e2e cluster.")
Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes") Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")

View File

@@ -250,10 +250,8 @@ func truncate(b []byte, maxLen int) []byte {
 }

 func pickNode(c *client.Client) (string, error) {
-	nodes, err := c.Nodes().List(api.ListOptions{})
-	if err != nil {
-		return "", err
-	}
+	// TODO: investigate why it doesn't work on master Node.
+	nodes := ListSchedulableNodesOrDie(c)
 	if len(nodes.Items) == 0 {
 		return "", fmt.Errorf("no nodes exist, can't test node proxy")
 	}

View File

@ -115,10 +115,7 @@ var _ = Describe("Reboot", func() {
func testReboot(c *client.Client, rebootCmd string) { func testReboot(c *client.Client, rebootCmd string) {
// Get all nodes, and kick off the test on each. // Get all nodes, and kick off the test on each.
nodelist, err := listNodes(c, labels.Everything(), fields.Everything()) nodelist := ListSchedulableNodesOrDie(c)
if err != nil {
Failf("Error getting nodes: %v", err)
}
result := make([]bool, len(nodelist.Items)) result := make([]bool, len(nodelist.Items))
wg := sync.WaitGroup{} wg := sync.WaitGroup{}
wg.Add(len(nodelist.Items)) wg.Add(len(nodelist.Items))

View File

@@ -161,11 +161,11 @@ func checkNodesReady(c *client.Client, nt time.Duration, expect int) ([]string,
 	var errLast error
 	start := time.Now()
 	found := wait.Poll(poll, nt, func() (bool, error) {
-		// Even though listNodes(...) has its own retries, a rolling-update
-		// (GCE/GKE implementation of restart) can complete before the apiserver
+		// A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver
 		// knows about all of the nodes. Thus, we retry the list nodes call
 		// until we get the expected number of nodes.
-		nodeList, errLast = listNodes(c, labels.Everything(), fields.Everything())
+		nodeList, errLast = c.Nodes().List(api.ListOptions{
+			FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector()})
 		if errLast != nil {
 			return false, nil
 		}
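The hunk above inlines the schedulable-node field selector into checkNodesReady's retry loop. A minimal standalone sketch of the same retry-until-expected-count pattern, assuming the package-level poll interval used throughout these e2e utilities (the helper name waitForSchedulableNodeCount is illustrative and not part of this PR):

func waitForSchedulableNodeCount(c *client.Client, expect int, timeout time.Duration) error {
	// Poll until the apiserver reports the expected number of schedulable nodes.
	return wait.Poll(poll, timeout, func() (bool, error) {
		nodes, err := c.Nodes().List(api.ListOptions{
			FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector(),
		})
		if err != nil {
			// Treat listing errors as transient and keep polling.
			return false, nil
		}
		return len(nodes.Items) == expect, nil
	})
}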

View File

@ -194,9 +194,7 @@ var _ = Describe("SchedulerPredicates", func() {
BeforeEach(func() { BeforeEach(func() {
c = framework.Client c = framework.Client
ns = framework.Namespace.Name ns = framework.Namespace.Name
var err error nodeList = ListSchedulableNodesOrDie(c)
nodeList, err = c.Nodes().List(api.ListOptions{})
expectNoError(err)
}) })
// This test verifies that max-pods flag works as advertised. It assumes that cluster add-on pods stay stable // This test verifies that max-pods flag works as advertised. It assumes that cluster add-on pods stay stable

View File

@@ -1102,10 +1102,7 @@ func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []st
 }

 func getNodePublicIps(c *client.Client) ([]string, error) {
-	nodes, err := c.Nodes().List(api.ListOptions{})
-	if err != nil {
-		return nil, err
-	}
+	nodes := ListSchedulableNodesOrDie(c)

 	ips := collectAddresses(nodes, api.NodeExternalIP)
 	if len(ips) == 0 {

View File

@@ -1661,6 +1661,7 @@ func dumpAllPodInfo(c *client.Client) {
 }

 func dumpAllNodeInfo(c *client.Client) {
+	// It should be OK to list unschedulable Nodes here.
 	nodes, err := c.Nodes().List(api.ListOptions{})
 	if err != nil {
 		Logf("unable to fetch node list: %v", err)
@@ -1724,6 +1725,21 @@ func getNodeEvents(c *client.Client, nodeName string) []api.Event {
 	return events.Items
 }

+// Convenient wrapper around listing nodes supporting retries.
+func ListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
+	var nodes *api.NodeList
+	var err error
+	if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
+		nodes, err = c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
+			"spec.unschedulable": "false",
+		}.AsSelector()})
+		return err == nil, nil
+	}) != nil {
+		expectNoError(err, "Timed out while listing nodes for e2e cluster.")
+	}
+	return nodes
+}
+
 func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error {
 	By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
 	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), c)
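The helper added above is what the call sites throughout this PR switch to. As a minimal illustration of the intended usage inside a Ginkgo test (the "example" framework name and the surrounding Describe block are hypothetical, not part of the diff):

var _ = Describe("Example schedulable-node count", func() {
	f := NewFramework("example")

	It("should see at least one schedulable node", func() {
		// Excludes nodes marked spec.unschedulable (typically the master),
		// which is the count most capacity and per-node tests actually want.
		nodes := ListSchedulableNodesOrDie(f.Client)
		Expect(len(nodes.Items)).NotTo(BeZero())
	})
})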
@@ -1910,23 +1926,6 @@ func waitForEvents(c *client.Client, ns string, objOrRef runtime.Object, desired
 	})
 }

-// Convenient wrapper around listing nodes supporting retries.
-func listNodes(c *client.Client, label labels.Selector, field fields.Selector) (*api.NodeList, error) {
-	var nodes *api.NodeList
-	var errLast error
-	if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
-		options := api.ListOptions{
-			LabelSelector: label,
-			FieldSelector: field,
-		}
-		nodes, errLast = c.Nodes().List(options)
-		return errLast == nil, nil
-	}) != nil {
-		return nil, fmt.Errorf("listNodes() failed with last error: %v", errLast)
-	}
-	return nodes, nil
-}
-
 // FailedContainers inspects all containers in a pod and returns failure
 // information for containers that have failed or been restarted.
 // A map is returned where the key is the containerID and the value is a
@@ -2008,6 +2007,7 @@ func NodeAddresses(nodelist *api.NodeList, addrType api.NodeAddressType) []strin
 // if it can't find an external IP for every node, though it still returns all
 // hosts that it found in that case.
 func NodeSSHHosts(c *client.Client) ([]string, error) {
+	// It should be OK to list unschedulable Nodes here.
 	nodelist, err := c.Nodes().List(api.ListOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("error getting nodes: %v", err)
@@ -2256,6 +2256,7 @@ func allNodesReady(c *client.Client, timeout time.Duration) error {
 	var notReady []api.Node
 	err := wait.PollImmediate(poll, timeout, func() (bool, error) {
 		notReady = nil
+		// It should be OK to list unschedulable Nodes here.
 		nodes, err := c.Nodes().List(api.ListOptions{})
 		if err != nil {
 			return false, err
@@ -2373,9 +2374,12 @@ func waitForApiserverUp(c *client.Client) error {
 }

 // waitForClusterSize waits until the cluster has desired size and there is no not-ready nodes in it.
+// By cluster size we mean number of Nodes excluding Master Node.
 func waitForClusterSize(c *client.Client, size int, timeout time.Duration) error {
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
-		nodes, err := c.Nodes().List(api.ListOptions{})
+		nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
+			"spec.unschedulable": "false",
+		}.AsSelector()})
 		if err != nil {
 			Logf("Failed to list nodes: %v", err)
 			continue
@@ -2569,6 +2573,7 @@ func getNodePortURL(client *client.Client, ns, name string, svcPort int) (string
 	if err != nil {
 		return "", err
 	}
+	// It should be OK to list unschedulable Node here.
 	nodes, err := client.Nodes().List(api.ListOptions{})
 	if err != nil {
 		return "", err