Merge pull request #18509 from gmarek/fix-e2e-pd

Auto commit by PR queue bot
k8s-merge-robot 2015-12-17 02:06:23 -08:00
commit 4a9922c900
26 changed files with 67 additions and 101 deletions
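Not part of the commit message, an editorial summary of the pattern: across these files, direct node listings of the form c.Nodes().List(api.ListOptions{}) plus per-call error handling are replaced by a new ListSchedulableNodesOrDie helper (added to the e2e util file near the end of this diff) that retries the list and filters on spec.unschedulable=false; the old listNodes helper is deleted, and call sites that deliberately want every node gain an "It should be OK to list unschedulable Nodes here." comment instead. A minimal sketch of the resulting call-site pattern, assuming the e2e framework helpers shown in the hunks below (f is a Framework from NewFramework); illustrative only, not an excerpt from the diff:

// Lists only schedulable nodes, retrying internally and failing the test on timeout.
nodes := ListSchedulableNodesOrDie(f.Client)
Expect(len(nodes.Items)).NotTo(BeZero())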

View File

@ -47,6 +47,7 @@ var _ = Describe("Cadvisor", func() {
})
func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
+ // It should be OK to list unschedulable Nodes here.
By("getting list of nodes")
nodeList, err := c.Nodes().List(api.ListOptions{})
expectNoError(err)

View File

@ -41,8 +41,7 @@ var _ = Describe("[Autoscaling] [Skipped]", func() {
BeforeEach(func() {
SkipUnlessProviderIs("gce")
- nodes, err := f.Client.Nodes().List(api.ListOptions{})
- expectNoError(err)
+ nodes := ListSchedulableNodesOrDie(f.Client)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]

View File

@ -30,8 +30,6 @@ import (
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/wait"
@ -354,10 +352,7 @@ func testNodeUpgrade(f *Framework, nUp func(f *Framework, n int, v string) error
}
func checkNodesVersions(c *client.Client, want string) error {
- l, err := listNodes(c, labels.Everything(), fields.Everything())
- if err != nil {
- return fmt.Errorf("checkNodesVersions() failed to list nodes: %v", err)
- }
+ l := ListSchedulableNodesOrDie(c)
for _, n := range l.Items {
// We do prefix trimming and then matching because:
// want looks like: 0.19.3-815-g50e67d4

View File

@ -160,8 +160,7 @@ var _ = Describe("Daemon set", func() {
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
By("Change label of node, check that daemon pod is launched.")
- nodeClient := c.Nodes()
- nodeList, err := nodeClient.List(api.ListOptions{})
+ nodeList := ListSchedulableNodesOrDie(f.Client)
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
@ -196,11 +195,7 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m
}
func clearDaemonSetNodeLabels(c *client.Client) error {
- nodeClient := c.Nodes()
- nodeList, err := nodeClient.List(api.ListOptions{})
- if err != nil {
- return err
- }
+ nodeList := ListSchedulableNodesOrDie(c)
for _, node := range nodeList.Items {
_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
if err != nil {
@ -282,10 +277,7 @@ func checkDaemonPodOnNodes(f *Framework, selector map[string]string, nodeNames [
func checkRunningOnAllNodes(f *Framework, selector map[string]string) func() (bool, error) {
return func() (bool, error) {
- nodeList, err := f.Client.Nodes().List(api.ListOptions{})
- if err != nil {
- return false, nil
- }
+ nodeList := ListSchedulableNodesOrDie(f.Client)
nodeNames := make([]string, 0)
for _, node := range nodeList.Items {
nodeNames = append(nodeNames, node.Name)

View File

@ -158,8 +158,7 @@ var _ = Describe("Density [Skipped]", func() {
ns = framework.Namespace.Name
var err error
- nodes, err := c.Nodes().List(api.ListOptions{})
- expectNoError(err)
+ nodes := ListSchedulableNodesOrDie(c)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())

View File

@ -192,10 +192,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
}
// Obtain a list of nodes so we can place one synthetic logger on each node.
- nodes, err := f.Client.Nodes().List(api.ListOptions{})
- if err != nil {
- Failf("Failed to list nodes: %v", err)
- }
+ nodes := ListSchedulableNodesOrDie(f.Client)
nodeCount := len(nodes.Items)
if nodeCount == 0 {
Failf("Failed to find any nodes")

View File

@ -18,10 +18,6 @@ package e2e
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"log"
"os"
"os/exec"
@ -29,6 +25,11 @@ import (
"strconv"
"syscall"
"time"
client "k8s.io/kubernetes/pkg/client/unversioned"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
@ -151,17 +152,16 @@ T:
var _ = Describe("[Example] Pet Store [Skipped]", func() {
- // The number of minions dictates total number of generators/transaction expectations.
- var minionCount int
+ // The number of nodes dictates total number of generators/transaction expectations.
+ var nodeCount int
f := NewFramework("petstore")
It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestTransactions, k8bpsSmokeTestTimeout), func() {
- minions, err := f.Client.Nodes().List(api.ListOptions{})
- Expect(err).NotTo(HaveOccurred())
- minionCount = len(minions.Items)
+ nodes := ListSchedulableNodesOrDie(f.Client)
+ nodeCount = len(nodes.Items)
- loadGenerators := minionCount
- restServers := minionCount
+ loadGenerators := nodeCount
+ restServers := nodeCount
fmt.Printf("load generators / rest servers [ %v / %v ] ", loadGenerators, restServers)
runK8petstore(restServers, loadGenerators, f.Client, f.Namespace.Name, k8bpsSmokeTestTransactions, k8bpsSmokeTestTimeout)
})

View File

@ -529,6 +529,7 @@ var _ = Describe("Kubectl client", func() {
checkOutput(output, requiredStrings)
// Node
+ // It should be OK to list unschedulable Nodes here.
nodes, err := c.Nodes().List(api.ListOptions{})
Expect(err).NotTo(HaveOccurred())
node := nodes.Items[0]

View File

@ -21,7 +21,6 @@ import (
"strings"
"time"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/sets"
@ -94,8 +93,7 @@ var _ = Describe("kubelet", func() {
var resourceMonitor *resourceMonitor
BeforeEach(func() {
- nodes, err := framework.Client.Nodes().List(api.ListOptions{})
- expectNoError(err)
+ nodes := ListSchedulableNodesOrDie(framework.Client)
numNodes = len(nodes.Items)
nodeNames = sets.NewString()
for _, node := range nodes.Items {

View File

@ -144,6 +144,7 @@ var _ = Describe("Kubelet", func() {
var rm *resourceMonitor
BeforeEach(func() {
+ // It should be OK to list unschedulable Nodes here.
nodes, err := framework.Client.Nodes().List(api.ListOptions{})
expectNoError(err)
nodeNames = sets.NewString()

View File

@ -690,6 +690,7 @@ func newResourceMonitor(c *client.Client, containerNames []string, pollingInterv
}
func (r *resourceMonitor) Start() {
+ // It should be OK to monitor unschedulable Nodes
nodes, err := r.client.Nodes().List(api.ListOptions{})
if err != nil {
Failf("resourceMonitor: unable to get list of nodes: %v", err)

View File

@ -429,8 +429,7 @@ func (config *KubeProxyTestConfig) setup() {
}
By("Getting node addresses")
- nodeList, err := config.f.Client.Nodes().List(api.ListOptions{})
- Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to get node list: %v", err))
+ nodeList := ListSchedulableNodesOrDie(config.f.Client)
config.externalAddrs = NodeAddresses(nodeList, api.NodeExternalIP)
if len(config.externalAddrs) < 2 {
// fall back to legacy IPs
@ -468,8 +467,7 @@ func (config *KubeProxyTestConfig) cleanup() {
}
func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
- nodes, err := config.f.Client.Nodes().List(api.ListOptions{})
- Expect(err).NotTo(HaveOccurred())
+ nodes := ListSchedulableNodesOrDie(config.f.Client)
// create pods, one for each node
createdPods := make([]*api.Pod, 0, len(nodes.Items))

View File

@ -67,10 +67,8 @@ var _ = Describe("Latency [Skipped]", func() {
BeforeEach(func() {
c = framework.Client
ns = framework.Namespace.Name
- var err error
- nodes, err := c.Nodes().List(api.ListOptions{})
- expectNoError(err)
+ nodes := ListSchedulableNodesOrDie(framework.Client)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())

View File

@ -72,15 +72,14 @@ var _ = Describe("Load capacity [Skipped]", func() {
BeforeEach(func() {
c = framework.Client
ns = framework.Namespace.Name
- nodes, err := c.Nodes().List(api.ListOptions{})
- expectNoError(err)
+ nodes := ListSchedulableNodesOrDie(c)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
// Terminating a namespace (deleting the remaining objects from it - which
// generally means events) can affect the current run. Thus we wait for all
// terminating namespace to be finally deleted before starting this test.
- err = checkTestingNSDeletedExcept(c, ns)
+ err := checkTestingNSDeletedExcept(c, ns)
expectNoError(err)
expectNoError(resetMetrics(c))

View File

@ -64,8 +64,7 @@ var _ = Describe("Mesos", func() {
client := framework.Client
expectNoError(allNodesReady(client, util.ForeverTestTimeout), "all nodes ready")
- nodelist, err := client.Nodes().List(api.ListOptions{})
- expectNoError(err, "nodes fetched from apiserver")
+ nodelist := ListSchedulableNodesOrDie(framework.Client)
const ns = "static-pods"
numpods := len(nodelist.Items)

View File

@ -82,6 +82,7 @@ var _ = Describe("Resource usage of system containers", func() {
It("should not exceed expected amount.", func() {
By("Getting ResourceConsumption on all nodes")
+ // It should be OK to list unschedulable Nodes here.
nodeList, err := c.Nodes().List(api.ListOptions{})
expectNoError(err)

View File

@ -123,6 +123,7 @@ func expectedServicesExist(c *client.Client) error {
}
func getAllNodesInCluster(c *client.Client) ([]string, error) {
+ // It should be OK to list unschedulable Nodes here.
nodeList, err := c.Nodes().List(api.ListOptions{})
if err != nil {
return nil, err

View File

@ -136,10 +136,7 @@ var _ = Describe("Networking", func() {
By("Creating a webserver (pending) pod on each node")
- nodes, err := f.Client.Nodes().List(api.ListOptions{})
- if err != nil {
- Failf("Failed to list nodes: %v", err)
- }
+ nodes := ListSchedulableNodesOrDie(f.Client)
// previous tests may have cause failures of some nodes. Let's skip
// 'Not Ready' nodes, just in case (there is no need to fail the test).
filterNodes(nodes, func(node api.Node) bool {

View File

@ -26,7 +26,6 @@ import (
"k8s.io/kubernetes/pkg/api/resource"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait"
. "github.com/onsi/ginkgo"
@ -73,8 +72,7 @@ var _ = Describe("NodeOutOfDisk", func() {
framework.beforeEach()
c = framework.Client
- nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
- expectNoError(err, "Error retrieving nodes")
+ nodelist := ListSchedulableNodesOrDie(c)
Expect(len(nodelist.Items)).To(BeNumerically(">", 1))
unfilledNodeName = nodelist.Items[0].Name
@ -86,8 +84,7 @@ var _ = Describe("NodeOutOfDisk", func() {
AfterEach(func() {
defer framework.afterEach()
- nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
- expectNoError(err, "Error retrieving nodes")
+ nodelist := ListSchedulableNodesOrDie(c)
Expect(len(nodelist.Items)).ToNot(BeZero())
for _, node := range nodelist.Items {
if unfilledNodeName == node.Name || recoveredNodeName == node.Name {
@ -150,8 +147,7 @@ var _ = Describe("NodeOutOfDisk", func() {
}
})
- nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
- expectNoError(err, "Error retrieving nodes")
+ nodelist := ListSchedulableNodesOrDie(c)
Expect(len(nodelist.Items)).To(BeNumerically(">", 1))
nodeToRecover := nodelist.Items[1]

View File

@ -53,9 +53,7 @@ var _ = Describe("Pod Disks", func() {
SkipUnlessNodeCountIsAtLeast(2)
podClient = framework.Client.Pods(framework.Namespace.Name)
- nodes, err := framework.Client.Nodes().List(api.ListOptions{})
- expectNoError(err, "Failed to list nodes for e2e cluster.")
+ nodes := ListSchedulableNodesOrDie(framework.Client)
Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")

View File

@ -250,10 +250,8 @@ func truncate(b []byte, maxLen int) []byte {
}
func pickNode(c *client.Client) (string, error) {
- nodes, err := c.Nodes().List(api.ListOptions{})
- if err != nil {
- return "", err
- }
+ // TODO: investigate why it doesn't work on master Node.
+ nodes := ListSchedulableNodesOrDie(c)
if len(nodes.Items) == 0 {
return "", fmt.Errorf("no nodes exist, can't test node proxy")
}

View File

@ -115,10 +115,7 @@ var _ = Describe("Reboot", func() {
func testReboot(c *client.Client, rebootCmd string) {
// Get all nodes, and kick off the test on each.
- nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
- if err != nil {
- Failf("Error getting nodes: %v", err)
- }
+ nodelist := ListSchedulableNodesOrDie(c)
result := make([]bool, len(nodelist.Items))
wg := sync.WaitGroup{}
wg.Add(len(nodelist.Items))

View File

@ -161,11 +161,11 @@ func checkNodesReady(c *client.Client, nt time.Duration, expect int) ([]string,
var errLast error
start := time.Now()
found := wait.Poll(poll, nt, func() (bool, error) {
- // Even though listNodes(...) has its own retries, a rolling-update
- // (GCE/GKE implementation of restart) can complete before the apiserver
+ // A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver
// knows about all of the nodes. Thus, we retry the list nodes call
// until we get the expected number of nodes.
- nodeList, errLast = listNodes(c, labels.Everything(), fields.Everything())
+ nodeList, errLast = c.Nodes().List(api.ListOptions{
+ FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector()})
if errLast != nil {
return false, nil
}

View File

@ -194,9 +194,7 @@ var _ = Describe("SchedulerPredicates", func() {
BeforeEach(func() {
c = framework.Client
ns = framework.Namespace.Name
- var err error
- nodeList, err = c.Nodes().List(api.ListOptions{})
- expectNoError(err)
+ nodeList = ListSchedulableNodesOrDie(c)
})
// This test verifies that max-pods flag works as advertised. It assumes that cluster add-on pods stay stable

View File

@ -1102,10 +1102,7 @@ func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []st
}
func getNodePublicIps(c *client.Client) ([]string, error) {
- nodes, err := c.Nodes().List(api.ListOptions{})
- if err != nil {
- return nil, err
- }
+ nodes := ListSchedulableNodesOrDie(c)
ips := collectAddresses(nodes, api.NodeExternalIP)
if len(ips) == 0 {

View File

@ -1661,6 +1661,7 @@ func dumpAllPodInfo(c *client.Client) {
}
func dumpAllNodeInfo(c *client.Client) {
+ // It should be OK to list unschedulable Nodes here.
nodes, err := c.Nodes().List(api.ListOptions{})
if err != nil {
Logf("unable to fetch node list: %v", err)
@ -1724,6 +1725,21 @@ func getNodeEvents(c *client.Client, nodeName string) []api.Event {
return events.Items
}
+ // Convenient wrapper around listing nodes supporting retries.
+ func ListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
+ var nodes *api.NodeList
+ var err error
+ if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
+ nodes, err = c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
+ "spec.unschedulable": "false",
+ }.AsSelector()})
+ return err == nil, nil
+ }) != nil {
+ expectNoError(err, "Timed out while listing nodes for e2e cluster.")
+ }
+ return nodes
+ }
func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error {
By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), c)
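Not part of the diff, a note on the two helpers in this file: ListSchedulableNodesOrDie above retries for up to singleCallTimeout and then fails the test via expectNoError, whereas the listNodes helper removed in the next hunk returned the error to its caller. The poll loops that handle list errors themselves, checkNodesReady in the restart test earlier in this diff and waitForClusterSize further down in this file, inline the same spec.unschedulable=false field selector rather than calling the helper. A minimal sketch of that inline pattern, assuming the e2e package's client, api, fields, and Logf shown elsewhere in this diff; names are illustrative:

// List only schedulable nodes, keeping the error so the surrounding poll loop can retry.
opts := api.ListOptions{FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector()}
nodes, err := c.Nodes().List(opts)
if err != nil {
Logf("Failed to list schedulable nodes, will retry: %v", err)
}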
@ -1910,23 +1926,6 @@ func waitForEvents(c *client.Client, ns string, objOrRef runtime.Object, desired
})
}
- // Convenient wrapper around listing nodes supporting retries.
- func listNodes(c *client.Client, label labels.Selector, field fields.Selector) (*api.NodeList, error) {
- var nodes *api.NodeList
- var errLast error
- if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
- options := api.ListOptions{
- LabelSelector: label,
- FieldSelector: field,
- }
- nodes, errLast = c.Nodes().List(options)
- return errLast == nil, nil
- }) != nil {
- return nil, fmt.Errorf("listNodes() failed with last error: %v", errLast)
- }
- return nodes, nil
- }
// FailedContainers inspects all containers in a pod and returns failure
// information for containers that have failed or been restarted.
// A map is returned where the key is the containerID and the value is a
@ -2008,6 +2007,7 @@ func NodeAddresses(nodelist *api.NodeList, addrType api.NodeAddressType) []strin
// if it can't find an external IP for every node, though it still returns all
// hosts that it found in that case.
func NodeSSHHosts(c *client.Client) ([]string, error) {
+ // It should be OK to list unschedulable Nodes here.
nodelist, err := c.Nodes().List(api.ListOptions{})
if err != nil {
return nil, fmt.Errorf("error getting nodes: %v", err)
@ -2256,6 +2256,7 @@ func allNodesReady(c *client.Client, timeout time.Duration) error {
var notReady []api.Node
err := wait.PollImmediate(poll, timeout, func() (bool, error) {
notReady = nil
+ // It should be OK to list unschedulable Nodes here.
nodes, err := c.Nodes().List(api.ListOptions{})
if err != nil {
return false, err
@ -2373,9 +2374,12 @@ func waitForApiserverUp(c *client.Client) error {
}
// waitForClusterSize waits until the cluster has desired size and there is no not-ready nodes in it.
+ // By cluster size we mean number of Nodes excluding Master Node.
func waitForClusterSize(c *client.Client, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
- nodes, err := c.Nodes().List(api.ListOptions{})
+ nodes, err := c.Nodes().List(api.ListOptions{FieldSelector: fields.Set{
+ "spec.unschedulable": "false",
+ }.AsSelector()})
if err != nil {
Logf("Failed to list nodes: %v", err)
continue
@ -2569,6 +2573,7 @@ func getNodePortURL(client *client.Client, ns, name string, svcPort int) (string
if err != nil {
return "", err
}
+ // It should be OK to list unschedulable Node here.
nodes, err := client.Nodes().List(api.ListOptions{})
if err != nil {
return "", err