diff --git a/test/e2e/BUILD b/test/e2e/BUILD index 1e8b63d999d..1d43bcb7c33 100644 --- a/test/e2e/BUILD +++ b/test/e2e/BUILD @@ -66,6 +66,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/auth:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/node:go_default_library", "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/providers/aws:go_default_library", "//test/e2e/framework/providers/azure:go_default_library", diff --git a/test/e2e/apimachinery/BUILD b/test/e2e/apimachinery/BUILD index ff38893fa69..8524e30780f 100644 --- a/test/e2e/apimachinery/BUILD +++ b/test/e2e/apimachinery/BUILD @@ -84,6 +84,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/deployment:go_default_library", "//test/e2e/framework/metrics:go_default_library", + "//test/e2e/framework/node:go_default_library", "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/ssh:go_default_library", "//test/utils:go_default_library", diff --git a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go index 336256536eb..4fc340cc300 100644 --- a/test/e2e/apimachinery/garbage_collector.go +++ b/test/e2e/apimachinery/garbage_collector.go @@ -39,6 +39,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" "github.com/onsi/ginkgo" imageutils "k8s.io/kubernetes/test/utils/image" @@ -48,8 +49,11 @@ import ( // with some wiggle room, to prevent pods being unable to schedule due // to max pod constraints. func estimateMaximumPods(c clientset.Interface, min, max int32) int32 { + nodes, err := e2enode.GetReadySchedulableNodes(c) + framework.ExpectNoError(err) + availablePods := int32(0) - for _, node := range framework.GetReadySchedulableNodesOrDie(c).Items { + for _, node := range nodes.Items { if q, ok := node.Status.Allocatable["pods"]; ok { if num, ok := q.AsInt64(); ok { availablePods += int32(num) diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index 508d93dbb79..ef00c1c6ed3 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/pkg/controller/daemon" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -381,13 +382,14 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { rollback of updates to a DaemonSet. 
*/ framework.ConformanceIt("should rollback without unnecessary restarts", func() { - schedulableNodes := framework.GetReadySchedulableNodesOrDie(c) + schedulableNodes, err := e2enode.GetReadySchedulableNodes(c) + framework.ExpectNoError(err) gomega.Expect(len(schedulableNodes.Items)).To(gomega.BeNumerically(">", 1), "Conformance test suite needs a cluster with at least 2 nodes.") framework.Logf("Create a RollingUpdate DaemonSet") label := map[string]string{daemonsetNameLabel: dsName} ds := newDaemonSet(dsName, image, label) ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType} - ds, err := c.AppsV1().DaemonSets(ns).Create(ds) + ds, err = c.AppsV1().DaemonSets(ns).Create(ds) framework.ExpectNoError(err) framework.Logf("Check that daemon pods launch on every node of the cluster") @@ -420,7 +422,8 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { framework.Failf("unexpected pod found, image = %s", image) } } - schedulableNodes = framework.GetReadySchedulableNodesOrDie(c) + schedulableNodes, err = e2enode.GetReadySchedulableNodes(c) + framework.ExpectNoError(err) if len(schedulableNodes.Items) < 2 { framework.ExpectEqual(len(existingPods), 0) } else { @@ -505,7 +508,10 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m } func clearDaemonSetNodeLabels(c clientset.Interface) error { - nodeList := framework.GetReadySchedulableNodesOrDie(c) + nodeList, err := e2enode.GetReadySchedulableNodes(c) + if err != nil { + return err + } for _, node := range nodeList.Items { _, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{}) if err != nil { diff --git a/test/e2e/apps/network_partition.go b/test/e2e/apps/network_partition.go index 16a07b3efdb..6040f3d67b3 100644 --- a/test/e2e/apps/network_partition.go +++ b/test/e2e/apps/network_partition.go @@ -492,7 +492,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { framework.SkipUnlessSSHKeyPresent() ginkgo.By("choose a node - we will block all network traffic on this node") var podOpts metav1.ListOptions - nodes := framework.GetReadySchedulableNodesOrDie(c) + nodes, err := e2enode.GetReadySchedulableNodes(c) + framework.ExpectNoError(err) e2enode.Filter(nodes, func(node v1.Node) bool { if !e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true) { return false diff --git a/test/e2e/autoscaling/autoscaling_timer.go b/test/e2e/autoscaling/autoscaling_timer.go index f6c47d62dd2..10c45c8e4fb 100644 --- a/test/e2e/autoscaling/autoscaling_timer.go +++ b/test/e2e/autoscaling/autoscaling_timer.go @@ -62,7 +62,8 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling" } // Make sure all nodes are schedulable, otherwise we are in some kind of a problem state. 
- nodes = framework.GetReadySchedulableNodesOrDie(f.ClientSet) + nodes, err = e2enode.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err) schedulableCount := len(nodes.Items) framework.ExpectEqual(schedulableCount, nodeGroupSize, "not all nodes are schedulable") }) diff --git a/test/e2e/autoscaling/cluster_autoscaler_scalability.go b/test/e2e/autoscaling/cluster_autoscaler_scalability.go index a3200e5e2c5..4963fdce078 100644 --- a/test/e2e/autoscaling/cluster_autoscaler_scalability.go +++ b/test/e2e/autoscaling/cluster_autoscaler_scalability.go @@ -36,7 +36,6 @@ import ( imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" - "github.com/onsi/gomega" ) const ( @@ -90,9 +89,9 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun framework.ExpectNoError(e2enode.WaitForReadyNodes(c, sum, scaleUpTimeout)) - nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err) nodeCount = len(nodes.Items) - gomega.Expect(nodeCount).NotTo(gomega.BeZero()) cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU] mem := nodes.Items[0].Status.Capacity[v1.ResourceMemory] coresPerNode = int((&cpu).MilliValue() / 1000) @@ -324,7 +323,8 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun time.Sleep(scaleDownTimeout) ginkgo.By("Checking if the number of nodes is as expected") - nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err) klog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes) framework.ExpectEqual(len(nodes.Items), totalNodes) }) diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index 00a46bb8bfc..73f7885c232 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -111,7 +111,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { // Give instances time to spin up framework.ExpectNoError(e2enode.WaitForReadyNodes(c, sum, scaleUpTimeout)) - nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err) nodeCount = len(nodes.Items) coreCount = 0 for _, node := range nodes.Items { @@ -363,7 +364,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectEqual(status.timestamp.Add(freshStatusLimit).Before(time.Now()), false) framework.ExpectEqual(status.status, caNoScaleUpStatus) framework.ExpectEqual(status.ready, status.target) - framework.ExpectEqual(len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items), status.target+unmanagedNodes) + nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err) + framework.ExpectEqual(len(nodes.Items), status.target+unmanagedNodes) }) ginkgo.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() { @@ -723,7 +726,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { runDrainTest(f, originalSizes, f.Namespace.Name, 1, 0, func(increasedSize int) { ginkgo.By("No nodes should be removed") time.Sleep(scaleDownTimeout) - nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + 
framework.ExpectNoError(err) framework.ExpectEqual(len(nodes.Items), increasedSize) }) }) @@ -919,7 +923,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout) defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") time.Sleep(scaleUpTimeout) - currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + currentNodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err) framework.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount) framework.ExpectEqual(len(currentNodes.Items), len(nodes.Items)-nodesToBreakCount) status, err := getClusterwideStatus(c) @@ -1271,7 +1276,8 @@ func getPoolInitialSize(poolName string) int { func getPoolSize(f *framework.Framework, poolName string) int { size := 0 - nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err) for _, node := range nodeList.Items { if node.Labels[gkeNodepoolNameKey] == poolName { size++ diff --git a/test/e2e/autoscaling/dns_autoscaling.go b/test/e2e/autoscaling/dns_autoscaling.go index 6162b080555..f58aafe6794 100644 --- a/test/e2e/autoscaling/dns_autoscaling.go +++ b/test/e2e/autoscaling/dns_autoscaling.go @@ -33,7 +33,6 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "github.com/onsi/ginkgo" - "github.com/onsi/gomega" ) // Constants used in dns-autoscaling test. @@ -57,11 +56,11 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() { framework.SkipUnlessProviderIs("gce", "gke") c = f.ClientSet - nodeCount := len(framework.GetReadySchedulableNodesOrDie(c).Items) - gomega.Expect(nodeCount).NotTo(gomega.BeZero()) + nodes, err := e2enode.GetReadySchedulableNodes(c) + framework.ExpectNoError(err) + nodeCount := len(nodes.Items) ginkgo.By("Collecting original replicas count and DNS scaling params") - var err error originDNSReplicasCount, err = getDNSReplicas(c) framework.ExpectNoError(err) @@ -236,12 +235,13 @@ func getExpectReplicasFuncLinear(c clientset.Interface, params *DNSParamsLinear) return func(c clientset.Interface) int { var replicasFromNodes float64 var replicasFromCores float64 - nodes := framework.GetReadySchedulableNodesOrDie(c).Items + nodes, err := e2enode.GetReadySchedulableNodes(c) + framework.ExpectNoError(err) if params.nodesPerReplica > 0 { - replicasFromNodes = math.Ceil(float64(len(nodes)) / params.nodesPerReplica) + replicasFromNodes = math.Ceil(float64(len(nodes.Items)) / params.nodesPerReplica) } if params.coresPerReplica > 0 { - replicasFromCores = math.Ceil(float64(getScheduableCores(nodes)) / params.coresPerReplica) + replicasFromCores = math.Ceil(float64(getScheduableCores(nodes.Items)) / params.coresPerReplica) } return int(math.Max(1.0, math.Max(replicasFromNodes, replicasFromCores))) } diff --git a/test/e2e/framework/lifecycle/BUILD b/test/e2e/framework/lifecycle/BUILD index b08d8a0d50b..72c4a4664fa 100644 --- a/test/e2e/framework/lifecycle/BUILD +++ b/test/e2e/framework/lifecycle/BUILD @@ -10,6 +10,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/version:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/node:go_default_library", ], ) diff --git a/test/e2e/framework/lifecycle/upgrade.go 
b/test/e2e/framework/lifecycle/upgrade.go
index 776ac9d97d7..027bb6ab462 100644
--- a/test/e2e/framework/lifecycle/upgrade.go
+++ b/test/e2e/framework/lifecycle/upgrade.go
@@ -27,6 +27,7 @@ import (
 	"k8s.io/apimachinery/pkg/version"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 )
 // RealVersion turns a version constants into a version string deployable on
@@ -86,7 +87,10 @@ func CheckMasterVersion(c clientset.Interface, want string) error {
 // CheckNodesVersions validates the nodes versions
 func CheckNodesVersions(cs clientset.Interface, want string) error {
-	l := framework.GetReadySchedulableNodesOrDie(cs)
+	l, err := e2enode.GetReadySchedulableNodes(cs)
+	if err != nil {
+		return err
+	}
 	for _, n := range l.Items {
 		// We do prefix trimming and then matching because:
 		// want looks like: 0.19.3-815-g50e67d4
diff --git a/test/e2e/framework/networking_utils.go b/test/e2e/framework/networking_utils.go
index 82d93289ac7..bc83f048bca 100644
--- a/test/e2e/framework/networking_utils.go
+++ b/test/e2e/framework/networking_utils.go
@@ -591,7 +591,8 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
 	ginkgo.By("Getting node addresses")
 	ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
-	nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
+	nodeList, err := e2enode.GetReadySchedulableNodes(config.f.ClientSet)
+	ExpectNoError(err)
 	config.ExternalAddrs = e2enode.FirstAddress(nodeList, v1.NodeExternalIP)
 	SkipUnlessNodeCountIsAtLeast(2)
diff --git a/test/e2e/framework/node/resource.go b/test/e2e/framework/node/resource.go
index adf1cbaf9e1..ef2b570f433 100644
--- a/test/e2e/framework/node/resource.go
+++ b/test/e2e/framework/node/resource.go
@@ -317,7 +317,7 @@ func PickIP(c clientset.Interface) (string, error) {
 // GetPublicIps returns a public IP list of nodes.
 func GetPublicIps(c clientset.Interface) ([]string, error) {
-	nodes, err := GetReadySchedulableNodesOrDie(c)
+	nodes, err := GetReadySchedulableNodes(c)
 	if err != nil {
 		return nil, fmt.Errorf("get schedulable and ready nodes error: %s", err)
 	}
@@ -329,22 +329,23 @@ func GetPublicIps(c clientset.Interface) ([]string, error) {
 	return ips, nil
 }
-// GetReadySchedulableNodesOrDie addresses the common use case of getting nodes you can do work on.
+// GetReadySchedulableNodes addresses the common use case of getting nodes you can do work on.
 // 1) Needs to be schedulable.
 // 2) Needs to be ready.
 // If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
+// If there are no nodes that are both ready and schedulable, this will return an error.
 // TODO: remove references in framework/util.go.
-// TODO: remove "OrDie" suffix.
-func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList, err error) {
+func GetReadySchedulableNodes(c clientset.Interface) (nodes *v1.NodeList, err error) {
 	nodes, err = checkWaitListSchedulableNodes(c)
 	if err != nil {
 		return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
 	}
-	// previous tests may have cause failures of some nodes. Let's skip
-	// 'Not Ready' nodes, just in case (there is no need to fail the test).
 	Filter(nodes, func(node v1.Node) bool {
 		return IsNodeSchedulable(&node) && IsNodeUntainted(&node)
 	})
+	if len(nodes.Items) == 0 {
+		return nil, fmt.Errorf("there are currently no ready, schedulable nodes in the cluster")
+	}
 	return nodes, nil
 }
diff --git a/test/e2e/framework/nodes_util.go b/test/e2e/framework/nodes_util.go
index a970864bf20..bb8b62ac875 100644
--- a/test/e2e/framework/nodes_util.go
+++ b/test/e2e/framework/nodes_util.go
@@ -338,7 +338,8 @@ func (k *NodeKiller) Run(stopCh <-chan struct{}) {
 }
 func (k *NodeKiller) pickNodes() []v1.Node {
-	nodes := GetReadySchedulableNodesOrDie(k.client)
+	nodes, err := e2enode.GetReadySchedulableNodes(k.client)
+	ExpectNoError(err)
 	numNodes := int(k.config.FailureRatio * float64(len(nodes.Items)))
 	shuffledNodes := shuffleNodes(nodes.Items)
 	if len(shuffledNodes) > numNodes {
diff --git a/test/e2e/framework/providers/gce/gce.go b/test/e2e/framework/providers/gce/gce.go
index 3bb5a032bc6..df91f8a899c 100644
--- a/test/e2e/framework/providers/gce/gce.go
+++ b/test/e2e/framework/providers/gce/gce.go
@@ -353,16 +353,6 @@ func SetInstanceTags(cloudConfig framework.CloudConfig, instanceName, zone strin
 	return resTags.Items
 }
-// GetNodeTags gets k8s node tag from one of the nodes
-func GetNodeTags(c clientset.Interface, cloudConfig framework.CloudConfig) []string {
-	nodes := framework.GetReadySchedulableNodesOrDie(c)
-	if len(nodes.Items) == 0 {
-		framework.Logf("GetNodeTags: Found 0 node.")
-		return []string{}
-	}
-	return GetInstanceTags(cloudConfig, nodes.Items[0].Name).Items
-}
-
 // IsGoogleAPIHTTPErrorCode returns true if the error is a google api
 // error matching the corresponding HTTP error code.
 func IsGoogleAPIHTTPErrorCode(err error, code int) bool {
diff --git a/test/e2e/framework/service/resource.go b/test/e2e/framework/service/resource.go
index 611ebd28160..ed52f6dbb1a 100644
--- a/test/e2e/framework/service/resource.go
+++ b/test/e2e/framework/service/resource.go
@@ -26,6 +26,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 )
 // GetServicesProxyRequest returns a request for a service proxy.
@@ -110,7 +111,9 @@ func DescribeSvc(ns string) {
 // GetServiceLoadBalancerCreationTimeout returns a timeout value for creating a load balancer of a service.
func GetServiceLoadBalancerCreationTimeout(cs clientset.Interface) time.Duration { - if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > LargeClusterMinNodesNumber { + nodes, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + if len(nodes.Items) > LargeClusterMinNodesNumber { return LoadBalancerCreateTimeoutLarge } return LoadBalancerCreateTimeoutDefault diff --git a/test/e2e/framework/suites.go b/test/e2e/framework/suites.go index e3ed593b435..b4630f3efde 100644 --- a/test/e2e/framework/suites.go +++ b/test/e2e/framework/suites.go @@ -29,6 +29,7 @@ import ( "k8s.io/component-base/version" e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" ) @@ -80,7 +81,9 @@ func SetupSuite() { // If NumNodes is not specified then auto-detect how many are scheduleable and not tainted if TestContext.CloudConfig.NumNodes == DefaultNumNodes { - TestContext.CloudConfig.NumNodes = len(GetReadySchedulableNodesOrDie(c).Items) + nodes, err := e2enode.GetReadySchedulableNodes(c) + ExpectNoError(err) + TestContext.CloudConfig.NumNodes = len(nodes.Items) } // Ensure all pods are running and ready before starting tests (otherwise, diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 7dbce9669b4..8d57486066b 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -2897,7 +2897,10 @@ func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testut // PrepareNodes prepares nodes in the cluster. func (p *E2ETestNodePreparer) PrepareNodes() error { - nodes := GetReadySchedulableNodesOrDie(p.client) + nodes, err := e2enode.GetReadySchedulableNodes(p.client) + if err != nil { + return err + } numTemplates := 0 for _, v := range p.countToStrategy { numTemplates += v.Count @@ -2923,9 +2926,11 @@ func (p *E2ETestNodePreparer) PrepareNodes() error { // CleanupNodes cleanups nodes in the cluster. func (p *E2ETestNodePreparer) CleanupNodes() error { var encounteredError error - nodes := GetReadySchedulableNodesOrDie(p.client) + nodes, err := e2enode.GetReadySchedulableNodes(p.client) + if err != nil { + return err + } for i := range nodes.Items { - var err error name := nodes.Items[i].Name strategy, found := p.nodeToAppliedStrategy[name] if found { diff --git a/test/e2e/gke_node_pools.go b/test/e2e/gke_node_pools.go index 18ac2b1b8f6..02bbdfd8cfc 100644 --- a/test/e2e/gke_node_pools.go +++ b/test/e2e/gke_node_pools.go @@ -21,6 +21,7 @@ import ( "os/exec" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" "github.com/onsi/ginkgo" ) @@ -98,7 +99,8 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) { // label with the given node pool name. 
func nodesWithPoolLabel(f *framework.Framework, poolName string) int { nodeCount := 0 - nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err) for _, node := range nodeList.Items { if poolLabel := node.Labels["cloud.google.com/gke-nodepool"]; poolLabel == poolName { nodeCount++ diff --git a/test/e2e/instrumentation/logging/BUILD b/test/e2e/instrumentation/logging/BUILD index c5976022ec1..bda535982b4 100644 --- a/test/e2e/instrumentation/logging/BUILD +++ b/test/e2e/instrumentation/logging/BUILD @@ -16,6 +16,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/config:go_default_library", + "//test/e2e/framework/node:go_default_library", "//test/e2e/instrumentation/common:go_default_library", "//test/e2e/instrumentation/logging/elasticsearch:go_default_library", "//test/e2e/instrumentation/logging/stackdriver:go_default_library", diff --git a/test/e2e/instrumentation/logging/generic_soak.go b/test/e2e/instrumentation/logging/generic_soak.go index caa33b537ed..c97cc707e7e 100644 --- a/test/e2e/instrumentation/logging/generic_soak.go +++ b/test/e2e/instrumentation/logging/generic_soak.go @@ -27,6 +27,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/config" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -76,7 +77,8 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti // was produced in each and every pod at least once. The final arg is the timeout for the test to verify all the pods got logs. 
func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname string, timeout time.Duration) { - nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err) totalPods := len(nodes.Items) framework.ExpectNotEqual(totalPods, 0) diff --git a/test/e2e/instrumentation/logging/stackdriver/BUILD b/test/e2e/instrumentation/logging/stackdriver/BUILD index effafb418da..b6076a80fe7 100644 --- a/test/e2e/instrumentation/logging/stackdriver/BUILD +++ b/test/e2e/instrumentation/logging/stackdriver/BUILD @@ -18,6 +18,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/node:go_default_library", "//test/e2e/instrumentation/common:go_default_library", "//test/e2e/instrumentation/logging/utils:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", diff --git a/test/e2e/instrumentation/logging/stackdriver/soak.go b/test/e2e/instrumentation/logging/stackdriver/soak.go index 14df92bdcc1..43bcd36c359 100644 --- a/test/e2e/instrumentation/logging/stackdriver/soak.go +++ b/test/e2e/instrumentation/logging/stackdriver/soak.go @@ -22,6 +22,7 @@ import ( "time" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" "k8s.io/kubernetes/test/e2e/instrumentation/logging/utils" @@ -45,7 +46,8 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd ginkgo.It("should ingest logs from applications running for a prolonged amount of time", func() { withLogProviderForScope(f, podsScope, func(p *sdLogProvider) { - nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items + nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err) maxPodCount := 10 jobDuration := 30 * time.Minute linesPerPodPerSecond := 100 @@ -68,7 +70,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd podsByRun := [][]utils.FiniteLoggingPod{} for runIdx := 0; runIdx < podRunCount; runIdx++ { podsInRun := []utils.FiniteLoggingPod{} - for nodeIdx, node := range nodes { + for nodeIdx, node := range nodes.Items { podName := fmt.Sprintf("job-logs-generator-%d-%d-%d-%d", maxPodCount, linesPerPod, runIdx, nodeIdx) pod := utils.NewLoadLoggingPod(podName, node.Name, linesPerPod, jobDuration) pods = append(pods, pod) @@ -93,7 +95,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd }() checker := utils.NewFullIngestionPodLogChecker(p, maxAllowedLostFraction, pods...) 
- err := utils.WaitForLogs(checker, ingestionInterval, ingestionTimeout) + err = utils.WaitForLogs(checker, ingestionInterval, ingestionTimeout) framework.ExpectNoError(err) utils.EnsureLoggingAgentRestartsCount(f, p.LoggingAgentName(), allowedRestarts) diff --git a/test/e2e/instrumentation/logging/utils/BUILD b/test/e2e/instrumentation/logging/utils/BUILD index 201f36accb3..e424e646223 100644 --- a/test/e2e/instrumentation/logging/utils/BUILD +++ b/test/e2e/instrumentation/logging/utils/BUILD @@ -25,6 +25,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/node:go_default_library", "//test/e2e/framework/pod:go_default_library", "//test/utils/image:go_default_library", "//vendor/k8s.io/utils/integer:go_default_library", diff --git a/test/e2e/instrumentation/logging/utils/logging_agent.go b/test/e2e/instrumentation/logging/utils/logging_agent.go index 761a9cbc60c..77655c401b8 100644 --- a/test/e2e/instrumentation/logging/utils/logging_agent.go +++ b/test/e2e/instrumentation/logging/utils/logging_agent.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/labels" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" "k8s.io/utils/integer" ) @@ -40,7 +41,10 @@ func EnsureLoggingAgentDeployment(f *framework.Framework, appName string) error agentPerNode[pod.Spec.NodeName]++ } - nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + if err != nil { + return fmt.Errorf("failed to get nodes: %v", err) + } for _, node := range nodeList.Items { agentPodsCount, ok := agentPerNode[node.Name] diff --git a/test/e2e/instrumentation/logging/utils/misc.go b/test/e2e/instrumentation/logging/utils/misc.go index 76b44a048fd..474497aa2a8 100644 --- a/test/e2e/instrumentation/logging/utils/misc.go +++ b/test/e2e/instrumentation/logging/utils/misc.go @@ -19,11 +19,13 @@ package utils import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" ) // GetNodeIds returns the list of node names and panics in case of failure. 
func GetNodeIds(cs clientset.Interface) []string { - nodes := framework.GetReadySchedulableNodesOrDie(cs) + nodes, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) nodeIds := []string{} for _, n := range nodes.Items { nodeIds = append(nodeIds, n.Name) diff --git a/test/e2e/lifecycle/node_lease.go b/test/e2e/lifecycle/node_lease.go index 82c0ef6da86..bf9e3df7a1b 100644 --- a/test/e2e/lifecycle/node_lease.go +++ b/test/e2e/lifecycle/node_lease.go @@ -103,7 +103,8 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() { gomega.Expect(err).To(gomega.BeNil()) ginkgo.By("verify node lease exists for every nodes") - originalNodes := framework.GetReadySchedulableNodesOrDie(c) + originalNodes, err := e2enode.GetReadySchedulableNodes(c) + framework.ExpectNoError(err) framework.ExpectEqual(len(originalNodes.Items), framework.TestContext.CloudConfig.NumNodes) gomega.Eventually(func() error { @@ -128,7 +129,8 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() { gomega.Expect(err).To(gomega.BeNil()) err = e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes-1, 10*time.Minute) gomega.Expect(err).To(gomega.BeNil()) - targetNodes := framework.GetReadySchedulableNodesOrDie(c) + targetNodes, err := e2enode.GetReadySchedulableNodes(c) + framework.ExpectNoError(err) framework.ExpectEqual(len(targetNodes.Items), int(targetNumNodes)) ginkgo.By("verify node lease is deleted for the deleted node") diff --git a/test/e2e/lifecycle/reboot.go b/test/e2e/lifecycle/reboot.go index d738bb8f479..1ef9bcd6fbe 100644 --- a/test/e2e/lifecycle/reboot.go +++ b/test/e2e/lifecycle/reboot.go @@ -134,7 +134,8 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) { // Get all nodes, and kick off the test on each. 
- nodelist := framework.GetReadySchedulableNodesOrDie(c) + nodelist, err := e2enode.GetReadySchedulableNodes(c) + framework.ExpectNoError(err, "failed to list nodes") if hook != nil { defer func() { framework.Logf("Executing termination hook on nodes") diff --git a/test/e2e/network/dual_stack.go b/test/e2e/network/dual_stack.go index f5598dc83ef..718b08b9255 100644 --- a/test/e2e/network/dual_stack.go +++ b/test/e2e/network/dual_stack.go @@ -47,7 +47,8 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() { ginkgo.It("should have ipv4 and ipv6 internal node ip", func() { // TODO (aramase) can switch to new function to get all nodes - nodeList := framework.GetReadySchedulableNodesOrDie(cs) + nodeList, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) for _, node := range nodeList.Items { // get all internal ips for node @@ -61,7 +62,8 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() { ginkgo.It("should have ipv4 and ipv6 node podCIDRs", func() { // TODO (aramase) can switch to new function to get all nodes - nodeList := framework.GetReadySchedulableNodesOrDie(cs) + nodeList, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) for _, node := range nodeList.Items { framework.ExpectEqual(len(node.Spec.PodCIDRs), 2) @@ -121,12 +123,8 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() { // get all schedulable nodes to determine the number of replicas for pods // this is to ensure connectivity from all nodes on cluster - nodeList := framework.GetReadySchedulableNodesOrDie(cs) - gomega.Expect(nodeList).NotTo(gomega.BeNil()) - - if len(nodeList.Items) < 1 { - framework.Failf("Expect at least 1 node, got %v", len(nodeList.Items)) - } + nodeList, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) replicas := int32(len(nodeList.Items)) diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index 7db789d3c3c..b25a2fddc2d 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -188,10 +188,8 @@ var _ = SIGDescribe("Firewall rule", func() { }) ginkgo.It("should have correct firewall rules for e2e cluster", func() { - nodes := framework.GetReadySchedulableNodesOrDie(cs) - if len(nodes.Items) <= 0 { - framework.Failf("Expect at least 1 node, got: %v", len(nodes.Items)) - } + nodes, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) ginkgo.By("Checking if e2e firewall rules are correct") for _, expFw := range gce.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) { diff --git a/test/e2e/network/networking_perf.go b/test/e2e/network/networking_perf.go index 203710eed78..aec9bfb3e97 100644 --- a/test/e2e/network/networking_perf.go +++ b/test/e2e/network/networking_perf.go @@ -19,12 +19,12 @@ package network // Tests network performance using iperf or other containers. 
import ( "fmt" - "math" "time" "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -53,13 +53,13 @@ func networkingIPerfTest(isIPv6 bool) { } ginkgo.It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() { - nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err) totalPods := len(nodes.Items) // for a single service, we expect to divide bandwidth between the network. Very crude estimate. expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods)) - framework.ExpectNotEqual(totalPods, 0) appName := "iperf-e2e" - _, err := f.CreateServiceForSimpleAppWithPods( + _, err = f.CreateServiceForSimpleAppWithPods( 8001, 8002, appName, @@ -108,16 +108,14 @@ func networkingIPerfTest(isIPv6 bool) { }, numClient, ) + expectedCli := numClient + if len(nodes.Items) < expectedCli { + expectedCli = len(nodes.Items) + } framework.Logf("Reading all perf results to stdout.") framework.Logf("date,cli,cliPort,server,serverPort,id,interval,transferBits,bandwidthBits") - // Calculate expected number of clients based on total nodes. - expectedCli := func() int { - nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - return int(math.Min(float64(len(nodes.Items)), float64(numClient))) - }() - // Extra 1/10 second per client. iperfTimeout := smallClusterTimeout + (time.Duration(expectedCli/10) * time.Second) iperfResults := &IPerfResults{} diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 1fd882c1760..66b6c06fe4e 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -566,7 +566,9 @@ var _ = SIGDescribe("Services", func() { loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS } loadBalancerCreateTimeout := e2eservice.LoadBalancerCreateTimeoutDefault - if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber { + nodes, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + if len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber { loadBalancerCreateTimeout = e2eservice.LoadBalancerCreateTimeoutLarge } @@ -1522,7 +1524,9 @@ var _ = SIGDescribe("Services", func() { loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS } loadBalancerCreateTimeout := e2eservice.LoadBalancerCreateTimeoutDefault - if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber { + nodes, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + if len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber { loadBalancerCreateTimeout = e2eservice.LoadBalancerCreateTimeoutLarge } @@ -1540,7 +1544,6 @@ var _ = SIGDescribe("Services", func() { // This container is an nginx container listening on port 80 // See kubernetes/contrib/ingress/echoheaders/nginx.conf for content of response jig.RunOrFail(namespace, nil) - var err error // Make sure acceptPod is running. There are certain chances that pod might be teminated due to unexpected reasons. 
acceptPod, err = cs.CoreV1().Pods(namespace).Get(acceptPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "Unable to get pod %s", acceptPod.Name) @@ -1598,7 +1601,9 @@ var _ = SIGDescribe("Services", func() { framework.SkipUnlessProviderIs("azure", "gke", "gce") createTimeout := e2eservice.LoadBalancerCreateTimeoutDefault - if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber { + nodes, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + if len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber { createTimeout = e2eservice.LoadBalancerCreateTimeoutLarge } @@ -2076,7 +2081,9 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { framework.SkipUnlessProviderIs("gce", "gke") cs = f.ClientSet - if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber { + nodes, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) + if len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber { loadBalancerCreateTimeout = e2eservice.LoadBalancerCreateTimeoutLarge } }) @@ -2450,7 +2457,8 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns) var svcIP string if serviceType == v1.ServiceTypeNodePort { - nodes := framework.GetReadySchedulableNodesOrDie(cs) + nodes, err := e2enode.GetReadySchedulableNodes(cs) + framework.ExpectNoError(err) addrs := e2enode.CollectAddresses(nodes, v1.NodeInternalIP) gomega.Expect(len(addrs)).To(gomega.BeNumerically(">", 0), "ginkgo.Failed to get Node internal IP") svcIP = addrs[0] diff --git a/test/e2e/node/kubelet_perf.go b/test/e2e/node/kubelet_perf.go index de7d238bbee..a0c9f69e080 100644 --- a/test/e2e/node/kubelet_perf.go +++ b/test/e2e/node/kubelet_perf.go @@ -27,6 +27,7 @@ import ( kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" "k8s.io/kubernetes/test/e2e/framework" e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2eperf "k8s.io/kubernetes/test/e2e/framework/perf" "k8s.io/kubernetes/test/e2e/perftype" testutils "k8s.io/kubernetes/test/utils" @@ -200,7 +201,8 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() { var rm *e2ekubelet.ResourceMonitor ginkgo.BeforeEach(func() { - nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err) nodeNames = sets.NewString() for _, node := range nodes.Items { nodeNames.Insert(node.Name) diff --git a/test/e2e/node/node_problem_detector.go b/test/e2e/node/node_problem_detector.go index 914b3f2f75e..6c673ca8eee 100644 --- a/test/e2e/node/node_problem_detector.go +++ b/test/e2e/node/node_problem_detector.go @@ -58,8 +58,8 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() { framework.SkipUnlessSSHKeyPresent() ginkgo.By("Getting all nodes and their SSH-able IP addresses") - nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - framework.ExpectNotEqual(len(nodes.Items), 0) + nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err) hosts := []string{} for _, node := range nodes.Items { for _, addr := range node.Status.Addresses { diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go index 5973db504c5..231f06a11e0 100644 --- 
a/test/e2e/scheduling/predicates.go +++ b/test/e2e/scheduling/predicates.go @@ -95,7 +95,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { if err != nil { framework.Logf("Unexpected error occurred: %v", err) } - nodeList, err = e2enode.GetReadySchedulableNodesOrDie(cs) + nodeList, err = e2enode.GetReadySchedulableNodes(cs) if err != nil { framework.Logf("Unexpected error occurred: %v", err) } diff --git a/test/e2e/upgrades/BUILD b/test/e2e/upgrades/BUILD index 9a534754f6d..0a684122b26 100644 --- a/test/e2e/upgrades/BUILD +++ b/test/e2e/upgrades/BUILD @@ -37,6 +37,7 @@ go_library( "//test/e2e/common:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/job:go_default_library", + "//test/e2e/framework/node:go_default_library", "//test/e2e/framework/service:go_default_library", "//test/e2e/framework/statefulset:go_default_library", "//test/e2e/framework/testfiles:go_default_library", diff --git a/test/e2e/upgrades/kube_proxy_migration.go b/test/e2e/upgrades/kube_proxy_migration.go index 4a73a41b819..47e6fdefb09 100644 --- a/test/e2e/upgrades/kube_proxy_migration.go +++ b/test/e2e/upgrades/kube_proxy_migration.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" "github.com/onsi/ginkgo" ) @@ -118,7 +119,13 @@ func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error { return false, nil } - numberSchedulableNodes := len(framework.GetReadySchedulableNodesOrDie(c).Items) + nodes, err := e2enode.GetReadySchedulableNodes(c) + if err != nil { + framework.Logf("Failed to get nodes: %v", err) + return false, nil + } + + numberSchedulableNodes := len(nodes.Items) numberkubeProxyPods := 0 for _, pod := range pods.Items { if pod.Status.Phase == v1.PodRunning { @@ -176,7 +183,13 @@ func waitForKubeProxyDaemonSetRunning(c clientset.Interface) error { return false, nil } - numberSchedulableNodes := len(framework.GetReadySchedulableNodesOrDie(c).Items) + nodes, err := e2enode.GetReadySchedulableNodes(c) + if err != nil { + framework.Logf("Failed to get nodes: %v", err) + return false, nil + } + + numberSchedulableNodes := len(nodes.Items) numberkubeProxyPods := int(daemonSets.Items[0].Status.NumberAvailable) if numberkubeProxyPods != numberSchedulableNodes { framework.Logf("Expect %v kube-proxy DaemonSet pods running, got %v", numberSchedulableNodes, numberkubeProxyPods) diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD index 2f95d11fe41..92f21edf97f 100644 --- a/test/e2e_node/BUILD +++ b/test/e2e_node/BUILD @@ -47,6 +47,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/gpu:go_default_library", "//test/e2e/framework/metrics:go_default_library", + "//test/e2e/framework/node:go_default_library", "//test/utils/image:go_default_library", "//vendor/github.com/blang/semver:go_default_library", "//vendor/github.com/coreos/go-systemd/util:go_default_library", diff --git a/test/e2e_node/cpu_manager_test.go b/test/e2e_node/cpu_manager_test.go index 2953f452060..1825f1885c8 100644 --- a/test/e2e_node/cpu_manager_test.go +++ b/test/e2e_node/cpu_manager_test.go @@ -33,6 +33,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -179,8 +180,9 @@ func disableCPUManagerInKubelet(f *framework.Framework) 
(oldCfg *kubeletconfig.K // Wait for the Kubelet to be ready. gomega.Eventually(func() bool { - nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - return len(nodeList.Items) == 1 + nodes, err := e2enode.TotalReady(f.ClientSet) + framework.ExpectNoError(err) + return nodes == 1 }, time.Minute, time.Second).Should(gomega.BeTrue()) return oldCfg @@ -231,8 +233,9 @@ func enableCPUManagerInKubelet(f *framework.Framework, cleanStateFile bool) (old // Wait for the Kubelet to be ready. gomega.Eventually(func() bool { - nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - return len(nodeList.Items) == 1 + nodes, err := e2enode.TotalReady(f.ClientSet) + framework.ExpectNoError(err) + return nodes == 1 }, time.Minute, time.Second).Should(gomega.BeTrue()) return oldCfg diff --git a/test/e2e_node/node_perf_test.go b/test/e2e_node/node_perf_test.go index cde7eb66a18..ce26b6a0ccc 100644 --- a/test/e2e_node/node_perf_test.go +++ b/test/e2e_node/node_perf_test.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e_node/perf/workloads" @@ -48,8 +49,9 @@ func setKubeletConfig(f *framework.Framework, cfg *kubeletconfig.KubeletConfigur // Wait for the Kubelet to be ready. gomega.Eventually(func() bool { - nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - return len(nodeList.Items) == 1 + nodes, err := e2enode.TotalReady(f.ClientSet) + framework.ExpectNoError(err) + return nodes == 1 }, time.Minute, time.Second).Should(gomega.BeTrue()) } diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index d1452bf47e8..e6562937877 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -51,7 +51,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/test/e2e/framework" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" - frameworkmetrics "k8s.io/kubernetes/test/e2e/framework/metrics" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -345,7 +345,8 @@ func logNodeEvents(f *framework.Framework) { } func getLocalNode(f *framework.Framework) *v1.Node { - nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet) + framework.ExpectNoError(err) framework.ExpectEqual(len(nodeList.Items), 1, "Unexpected number of node objects for node e2e. 
Expects only one node.") return &nodeList.Items[0] } @@ -358,7 +359,7 @@ func logKubeletLatencyMetrics(metricNames ...string) { for _, key := range metricNames { metricSet.Insert(kubeletmetrics.KubeletSubsystem + "_" + key) } - metric, err := frameworkmetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics") + metric, err := e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics") if err != nil { framework.Logf("Error getting kubelet metrics: %v", err) } else { @@ -367,14 +368,14 @@ func logKubeletLatencyMetrics(metricNames ...string) { } // returns config related metrics from the local kubelet, filtered to the filterMetricNames passed in -func getKubeletMetrics(filterMetricNames sets.String) (frameworkmetrics.KubeletMetrics, error) { +func getKubeletMetrics(filterMetricNames sets.String) (e2emetrics.KubeletMetrics, error) { // grab Kubelet metrics - ms, err := frameworkmetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics") + ms, err := e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics") if err != nil { return nil, err } - filtered := frameworkmetrics.NewKubeletMetrics() + filtered := e2emetrics.NewKubeletMetrics() for name := range ms { if !filterMetricNames.Has(name) { continue diff --git a/test/integration/framework/BUILD b/test/integration/framework/BUILD index 999a2d7708d..ec03d0bc696 100644 --- a/test/integration/framework/BUILD +++ b/test/integration/framework/BUILD @@ -67,7 +67,7 @@ go_library( "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//staging/src/k8s.io/component-base/version:go_default_library", - "//test/e2e/framework:go_default_library", + "//test/e2e/framework/node:go_default_library", "//test/utils:go_default_library", "//vendor/github.com/coreos/etcd/clientv3:go_default_library", "//vendor/github.com/go-openapi/spec:go_default_library", diff --git a/test/integration/framework/perf_utils.go b/test/integration/framework/perf_utils.go index 8730855d246..1e2c3bc2909 100644 --- a/test/integration/framework/perf_utils.go +++ b/test/integration/framework/perf_utils.go @@ -21,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" - e2eframework "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" testutils "k8s.io/kubernetes/test/utils" "k8s.io/klog" @@ -84,7 +84,10 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error { } } - nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client) + nodes, err := e2enode.GetReadySchedulableNodes(p.client) + if err != nil { + klog.Fatalf("Error listing nodes: %v", err) + } index := 0 sum := 0 for _, v := range p.countToStrategy { @@ -101,7 +104,10 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error { // CleanupNodes deletes existing test nodes. func (p *IntegrationTestNodePreparer) CleanupNodes() error { - nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client) + nodes, err := e2enode.GetReadySchedulableNodes(p.client) + if err != nil { + klog.Fatalf("Error listing nodes: %v", err) + } for i := range nodes.Items { if err := p.client.CoreV1().Nodes().Delete(nodes.Items[i].Name, &metav1.DeleteOptions{}); err != nil { klog.Errorf("Error while deleting Node: %v", err)