Fix up e2enode.GetReadySchedulableNodes, replace many uses of framework.GetReadySchedulableNodesOrDie

Remove the "OrDie" from the name (since it doesn't "or die") and add
an extra check that there is at least 1 node available, since many
callers already did that themselves, and many others should have.
Dan Winship 2019-09-03 15:00:00 -04:00
parent 512eccac1f
commit 71b02dd422
43 changed files with 185 additions and 107 deletions
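As a rough sketch of the calling convention the changed tests move to (countSchedulableNodes below is an illustrative wrapper, not part of this commit; only framework.ExpectNoError and e2enode.GetReadySchedulableNodes are taken from the diff):

package example

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// countSchedulableNodes shows the post-rename pattern used by the test
// callers below: the helper returns an error instead of "dying", and it
// now guarantees at least one ready, schedulable node, so callers can
// drop their own emptiness checks.
func countSchedulableNodes(c clientset.Interface) int {
	nodes, err := e2enode.GetReadySchedulableNodes(c)
	framework.ExpectNoError(err)
	return len(nodes.Items)
}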


@ -66,6 +66,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/auth:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/providers/aws:go_default_library",
"//test/e2e/framework/providers/azure:go_default_library",


@ -84,6 +84,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/utils:go_default_library",


@ -39,6 +39,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"github.com/onsi/ginkgo"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -48,8 +49,11 @@ import (
// with some wiggle room, to prevent pods being unable to schedule due
// to max pod constraints.
func estimateMaximumPods(c clientset.Interface, min, max int32) int32 {
nodes, err := e2enode.GetReadySchedulableNodes(c)
framework.ExpectNoError(err)
availablePods := int32(0)
for _, node := range framework.GetReadySchedulableNodesOrDie(c).Items {
for _, node := range nodes.Items {
if q, ok := node.Status.Allocatable["pods"]; ok {
if num, ok := q.AsInt64(); ok {
availablePods += int32(num)


@ -37,6 +37,7 @@ import (
"k8s.io/kubernetes/pkg/controller/daemon"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@ -381,13 +382,14 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
rollback of updates to a DaemonSet.
*/
framework.ConformanceIt("should rollback without unnecessary restarts", func() {
schedulableNodes := framework.GetReadySchedulableNodesOrDie(c)
schedulableNodes, err := e2enode.GetReadySchedulableNodes(c)
framework.ExpectNoError(err)
gomega.Expect(len(schedulableNodes.Items)).To(gomega.BeNumerically(">", 1), "Conformance test suite needs a cluster with at least 2 nodes.")
framework.Logf("Create a RollingUpdate DaemonSet")
label := map[string]string{daemonsetNameLabel: dsName}
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
ds, err = c.AppsV1().DaemonSets(ns).Create(ds)
framework.ExpectNoError(err)
framework.Logf("Check that daemon pods launch on every node of the cluster")
@ -420,7 +422,8 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.Failf("unexpected pod found, image = %s", image)
}
}
schedulableNodes = framework.GetReadySchedulableNodesOrDie(c)
schedulableNodes, err = e2enode.GetReadySchedulableNodes(c)
framework.ExpectNoError(err)
if len(schedulableNodes.Items) < 2 {
framework.ExpectEqual(len(existingPods), 0)
} else {
@ -505,7 +508,10 @@ func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, m
}
func clearDaemonSetNodeLabels(c clientset.Interface) error {
nodeList := framework.GetReadySchedulableNodesOrDie(c)
nodeList, err := e2enode.GetReadySchedulableNodes(c)
if err != nil {
return err
}
for _, node := range nodeList.Items {
_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
if err != nil {


@ -492,7 +492,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.SkipUnlessSSHKeyPresent()
ginkgo.By("choose a node - we will block all network traffic on this node")
var podOpts metav1.ListOptions
nodes := framework.GetReadySchedulableNodesOrDie(c)
nodes, err := e2enode.GetReadySchedulableNodes(c)
framework.ExpectNoError(err)
e2enode.Filter(nodes, func(node v1.Node) bool {
if !e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true) {
return false


@ -62,7 +62,8 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
}
// Make sure all nodes are schedulable, otherwise we are in some kind of a problem state.
nodes = framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodes, err = e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
schedulableCount := len(nodes.Items)
framework.ExpectEqual(schedulableCount, nodeGroupSize, "not all nodes are schedulable")
})


@ -36,7 +36,6 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
@ -90,9 +89,9 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, sum, scaleUpTimeout))
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
nodeCount = len(nodes.Items)
gomega.Expect(nodeCount).NotTo(gomega.BeZero())
cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
mem := nodes.Items[0].Status.Capacity[v1.ResourceMemory]
coresPerNode = int((&cpu).MilliValue() / 1000)
@ -324,7 +323,8 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
time.Sleep(scaleDownTimeout)
ginkgo.By("Checking if the number of nodes is as expected")
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
klog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes)
framework.ExpectEqual(len(nodes.Items), totalNodes)
})


@ -111,7 +111,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
// Give instances time to spin up
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, sum, scaleUpTimeout))
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
nodeCount = len(nodes.Items)
coreCount = 0
for _, node := range nodes.Items {
@ -363,7 +364,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectEqual(status.timestamp.Add(freshStatusLimit).Before(time.Now()), false)
framework.ExpectEqual(status.status, caNoScaleUpStatus)
framework.ExpectEqual(status.ready, status.target)
framework.ExpectEqual(len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items), status.target+unmanagedNodes)
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
framework.ExpectEqual(len(nodes.Items), status.target+unmanagedNodes)
})
ginkgo.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@ -723,7 +726,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
runDrainTest(f, originalSizes, f.Namespace.Name, 1, 0, func(increasedSize int) {
ginkgo.By("No nodes should be removed")
time.Sleep(scaleDownTimeout)
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
framework.ExpectEqual(len(nodes.Items), increasedSize)
})
})
@ -919,7 +923,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
time.Sleep(scaleUpTimeout)
currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
currentNodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
framework.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
framework.ExpectEqual(len(currentNodes.Items), len(nodes.Items)-nodesToBreakCount)
status, err := getClusterwideStatus(c)
@ -1271,7 +1276,8 @@ func getPoolInitialSize(poolName string) int {
func getPoolSize(f *framework.Framework, poolName string) int {
size := 0
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
for _, node := range nodeList.Items {
if node.Labels[gkeNodepoolNameKey] == poolName {
size++


@ -33,7 +33,6 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// Constants used in dns-autoscaling test.
@ -57,11 +56,11 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
framework.SkipUnlessProviderIs("gce", "gke")
c = f.ClientSet
nodeCount := len(framework.GetReadySchedulableNodesOrDie(c).Items)
gomega.Expect(nodeCount).NotTo(gomega.BeZero())
nodes, err := e2enode.GetReadySchedulableNodes(c)
framework.ExpectNoError(err)
nodeCount := len(nodes.Items)
ginkgo.By("Collecting original replicas count and DNS scaling params")
var err error
originDNSReplicasCount, err = getDNSReplicas(c)
framework.ExpectNoError(err)
@ -236,12 +235,13 @@ func getExpectReplicasFuncLinear(c clientset.Interface, params *DNSParamsLinear)
return func(c clientset.Interface) int {
var replicasFromNodes float64
var replicasFromCores float64
nodes := framework.GetReadySchedulableNodesOrDie(c).Items
nodes, err := e2enode.GetReadySchedulableNodes(c)
framework.ExpectNoError(err)
if params.nodesPerReplica > 0 {
replicasFromNodes = math.Ceil(float64(len(nodes)) / params.nodesPerReplica)
replicasFromNodes = math.Ceil(float64(len(nodes.Items)) / params.nodesPerReplica)
}
if params.coresPerReplica > 0 {
replicasFromCores = math.Ceil(float64(getScheduableCores(nodes)) / params.coresPerReplica)
replicasFromCores = math.Ceil(float64(getScheduableCores(nodes.Items)) / params.coresPerReplica)
}
return int(math.Max(1.0, math.Max(replicasFromNodes, replicasFromCores)))
}


@ -10,6 +10,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/version:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/node:go_default_library",
],
)


@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/version"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
// RealVersion turns a version constants into a version string deployable on
@ -86,7 +87,10 @@ func CheckMasterVersion(c clientset.Interface, want string) error {
// CheckNodesVersions validates the nodes versions
func CheckNodesVersions(cs clientset.Interface, want string) error {
l := framework.GetReadySchedulableNodesOrDie(cs)
l, err := e2enode.GetReadySchedulableNodes(cs)
if err != nil {
return err
}
for _, n := range l.Items {
// We do prefix trimming and then matching because:
// want looks like: 0.19.3-815-g50e67d4


@ -591,7 +591,8 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
ginkgo.By("Getting node addresses")
ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
nodeList, err := e2enode.GetReadySchedulableNodes(config.f.ClientSet)
ExpectNoError(err)
config.ExternalAddrs = e2enode.FirstAddress(nodeList, v1.NodeExternalIP)
SkipUnlessNodeCountIsAtLeast(2)


@ -317,7 +317,7 @@ func PickIP(c clientset.Interface) (string, error) {
// GetPublicIps returns a public IP list of nodes.
func GetPublicIps(c clientset.Interface) ([]string, error) {
nodes, err := GetReadySchedulableNodesOrDie(c)
nodes, err := GetReadySchedulableNodes(c)
if err != nil {
return nil, fmt.Errorf("get schedulable and ready nodes error: %s", err)
}
@ -329,22 +329,23 @@ func GetPublicIps(c clientset.Interface) ([]string, error) {
return ips, nil
}
// GetReadySchedulableNodesOrDie addresses the common use case of getting nodes you can do work on.
// GetReadySchedulableNodes addresses the common use case of getting nodes you can do work on.
// 1) Needs to be schedulable.
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
// If there are no nodes that are both ready and schedulable, this will return an error.
// TODO: remove references in framework/util.go.
// TODO: remove "OrDie" suffix.
func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList, err error) {
func GetReadySchedulableNodes(c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = checkWaitListSchedulableNodes(c)
if err != nil {
return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
}
// previous tests may have cause failures of some nodes. Let's skip
// 'Not Ready' nodes, just in case (there is no need to fail the test).
Filter(nodes, func(node v1.Node) bool {
return IsNodeSchedulable(&node) && IsNodeUntainted(&node)
})
if len(nodes.Items) == 0 {
return nil, fmt.Errorf("there are currently no ready, schedulable nodes in the cluster")
}
return nodes, nil
}
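Library-style callers in this commit (for example CheckNodesVersions and clearDaemonSetNodeLabels) propagate the new error instead of asserting on it; a minimal sketch under that assumption, reusing the imports from the earlier example (listReadyNodeNames is hypothetical):

// listReadyNodeNames is an illustrative non-test caller: it returns the
// error from GetReadySchedulableNodes to its own caller rather than
// failing the test in place.
func listReadyNodeNames(c clientset.Interface) ([]string, error) {
	nodes, err := e2enode.GetReadySchedulableNodes(c)
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(nodes.Items))
	for _, node := range nodes.Items {
		names = append(names, node.Name)
	}
	return names, nil
}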


@ -338,7 +338,8 @@ func (k *NodeKiller) Run(stopCh <-chan struct{}) {
}
func (k *NodeKiller) pickNodes() []v1.Node {
nodes := GetReadySchedulableNodesOrDie(k.client)
nodes, err := e2enode.GetReadySchedulableNodes(k.client)
ExpectNoError(err)
numNodes := int(k.config.FailureRatio * float64(len(nodes.Items)))
shuffledNodes := shuffleNodes(nodes.Items)
if len(shuffledNodes) > numNodes {


@ -353,16 +353,6 @@ func SetInstanceTags(cloudConfig framework.CloudConfig, instanceName, zone strin
return resTags.Items
}
// GetNodeTags gets k8s node tag from one of the nodes
func GetNodeTags(c clientset.Interface, cloudConfig framework.CloudConfig) []string {
nodes := framework.GetReadySchedulableNodesOrDie(c)
if len(nodes.Items) == 0 {
framework.Logf("GetNodeTags: Found 0 node.")
return []string{}
}
return GetInstanceTags(cloudConfig, nodes.Items[0].Name).Items
}
// IsGoogleAPIHTTPErrorCode returns true if the error is a google api
// error matching the corresponding HTTP error code.
func IsGoogleAPIHTTPErrorCode(err error, code int) bool {


@ -26,6 +26,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
// GetServicesProxyRequest returns a request for a service proxy.
@ -110,7 +111,9 @@ func DescribeSvc(ns string) {
// GetServiceLoadBalancerCreationTimeout returns a timeout value for creating a load balancer of a service.
func GetServiceLoadBalancerCreationTimeout(cs clientset.Interface) time.Duration {
if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > LargeClusterMinNodesNumber {
nodes, err := e2enode.GetReadySchedulableNodes(cs)
framework.ExpectNoError(err)
if len(nodes.Items) > LargeClusterMinNodesNumber {
return LoadBalancerCreateTimeoutLarge
}
return LoadBalancerCreateTimeoutDefault


@ -29,6 +29,7 @@ import (
"k8s.io/component-base/version"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
@ -80,7 +81,9 @@ func SetupSuite() {
// If NumNodes is not specified then auto-detect how many are scheduleable and not tainted
if TestContext.CloudConfig.NumNodes == DefaultNumNodes {
TestContext.CloudConfig.NumNodes = len(GetReadySchedulableNodesOrDie(c).Items)
nodes, err := e2enode.GetReadySchedulableNodes(c)
ExpectNoError(err)
TestContext.CloudConfig.NumNodes = len(nodes.Items)
}
// Ensure all pods are running and ready before starting tests (otherwise,


@ -2897,7 +2897,10 @@ func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testut
// PrepareNodes prepares nodes in the cluster.
func (p *E2ETestNodePreparer) PrepareNodes() error {
nodes := GetReadySchedulableNodesOrDie(p.client)
nodes, err := e2enode.GetReadySchedulableNodes(p.client)
if err != nil {
return err
}
numTemplates := 0
for _, v := range p.countToStrategy {
numTemplates += v.Count
@ -2923,9 +2926,11 @@ func (p *E2ETestNodePreparer) PrepareNodes() error {
// CleanupNodes cleanups nodes in the cluster.
func (p *E2ETestNodePreparer) CleanupNodes() error {
var encounteredError error
nodes := GetReadySchedulableNodesOrDie(p.client)
nodes, err := e2enode.GetReadySchedulableNodes(p.client)
if err != nil {
return err
}
for i := range nodes.Items {
var err error
name := nodes.Items[i].Name
strategy, found := p.nodeToAppliedStrategy[name]
if found {


@ -21,6 +21,7 @@ import (
"os/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"github.com/onsi/ginkgo"
)
@ -98,7 +99,8 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) {
// label with the given node pool name.
func nodesWithPoolLabel(f *framework.Framework, poolName string) int {
nodeCount := 0
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
for _, node := range nodeList.Items {
if poolLabel := node.Labels["cloud.google.com/gke-nodepool"]; poolLabel == poolName {
nodeCount++


@ -16,6 +16,7 @@ go_library(
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/config:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/instrumentation/logging/elasticsearch:go_default_library",
"//test/e2e/instrumentation/logging/stackdriver:go_default_library",


@ -27,6 +27,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/config"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -76,7 +77,8 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti
// was produced in each and every pod at least once. The final arg is the timeout for the test to verify all the pods got logs.
func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname string, timeout time.Duration) {
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
totalPods := len(nodes.Items)
framework.ExpectNotEqual(totalPods, 0)


@ -18,6 +18,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/instrumentation/logging/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",


@ -22,6 +22,7 @@ import (
"time"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
@ -45,7 +46,8 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
ginkgo.It("should ingest logs from applications running for a prolonged amount of time", func() {
withLogProviderForScope(f, podsScope, func(p *sdLogProvider) {
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
maxPodCount := 10
jobDuration := 30 * time.Minute
linesPerPodPerSecond := 100
@ -68,7 +70,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
podsByRun := [][]utils.FiniteLoggingPod{}
for runIdx := 0; runIdx < podRunCount; runIdx++ {
podsInRun := []utils.FiniteLoggingPod{}
for nodeIdx, node := range nodes {
for nodeIdx, node := range nodes.Items {
podName := fmt.Sprintf("job-logs-generator-%d-%d-%d-%d", maxPodCount, linesPerPod, runIdx, nodeIdx)
pod := utils.NewLoadLoggingPod(podName, node.Name, linesPerPod, jobDuration)
pods = append(pods, pod)
@ -93,7 +95,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
}()
checker := utils.NewFullIngestionPodLogChecker(p, maxAllowedLostFraction, pods...)
err := utils.WaitForLogs(checker, ingestionInterval, ingestionTimeout)
err = utils.WaitForLogs(checker, ingestionInterval, ingestionTimeout)
framework.ExpectNoError(err)
utils.EnsureLoggingAgentRestartsCount(f, p.LoggingAgentName(), allowedRestarts)


@ -25,6 +25,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/k8s.io/utils/integer:go_default_library",


@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"k8s.io/utils/integer"
)
@ -40,7 +41,10 @@ func EnsureLoggingAgentDeployment(f *framework.Framework, appName string) error
agentPerNode[pod.Spec.NodeName]++
}
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
if err != nil {
return fmt.Errorf("failed to get nodes: %v", err)
}
for _, node := range nodeList.Items {
agentPodsCount, ok := agentPerNode[node.Name]


@ -19,11 +19,13 @@ package utils
import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
// GetNodeIds returns the list of node names and panics in case of failure.
func GetNodeIds(cs clientset.Interface) []string {
nodes := framework.GetReadySchedulableNodesOrDie(cs)
nodes, err := e2enode.GetReadySchedulableNodes(cs)
framework.ExpectNoError(err)
nodeIds := []string{}
for _, n := range nodes.Items {
nodeIds = append(nodeIds, n.Name)


@ -103,7 +103,8 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
gomega.Expect(err).To(gomega.BeNil())
ginkgo.By("verify node lease exists for every nodes")
originalNodes := framework.GetReadySchedulableNodesOrDie(c)
originalNodes, err := e2enode.GetReadySchedulableNodes(c)
framework.ExpectNoError(err)
framework.ExpectEqual(len(originalNodes.Items), framework.TestContext.CloudConfig.NumNodes)
gomega.Eventually(func() error {
@ -128,7 +129,8 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
gomega.Expect(err).To(gomega.BeNil())
err = e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes-1, 10*time.Minute)
gomega.Expect(err).To(gomega.BeNil())
targetNodes := framework.GetReadySchedulableNodesOrDie(c)
targetNodes, err := e2enode.GetReadySchedulableNodes(c)
framework.ExpectNoError(err)
framework.ExpectEqual(len(targetNodes.Items), int(targetNumNodes))
ginkgo.By("verify node lease is deleted for the deleted node")


@ -134,7 +134,8 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
// Get all nodes, and kick off the test on each.
nodelist := framework.GetReadySchedulableNodesOrDie(c)
nodelist, err := e2enode.GetReadySchedulableNodes(c)
framework.ExpectNoError(err, "failed to list nodes")
if hook != nil {
defer func() {
framework.Logf("Executing termination hook on nodes")


@ -47,7 +47,8 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
ginkgo.It("should have ipv4 and ipv6 internal node ip", func() {
// TODO (aramase) can switch to new function to get all nodes
nodeList := framework.GetReadySchedulableNodesOrDie(cs)
nodeList, err := e2enode.GetReadySchedulableNodes(cs)
framework.ExpectNoError(err)
for _, node := range nodeList.Items {
// get all internal ips for node
@ -61,7 +62,8 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
ginkgo.It("should have ipv4 and ipv6 node podCIDRs", func() {
// TODO (aramase) can switch to new function to get all nodes
nodeList := framework.GetReadySchedulableNodesOrDie(cs)
nodeList, err := e2enode.GetReadySchedulableNodes(cs)
framework.ExpectNoError(err)
for _, node := range nodeList.Items {
framework.ExpectEqual(len(node.Spec.PodCIDRs), 2)
@ -121,12 +123,8 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
// get all schedulable nodes to determine the number of replicas for pods
// this is to ensure connectivity from all nodes on cluster
nodeList := framework.GetReadySchedulableNodesOrDie(cs)
gomega.Expect(nodeList).NotTo(gomega.BeNil())
if len(nodeList.Items) < 1 {
framework.Failf("Expect at least 1 node, got %v", len(nodeList.Items))
}
nodeList, err := e2enode.GetReadySchedulableNodes(cs)
framework.ExpectNoError(err)
replicas := int32(len(nodeList.Items))


@ -188,10 +188,8 @@ var _ = SIGDescribe("Firewall rule", func() {
})
ginkgo.It("should have correct firewall rules for e2e cluster", func() {
nodes := framework.GetReadySchedulableNodesOrDie(cs)
if len(nodes.Items) <= 0 {
framework.Failf("Expect at least 1 node, got: %v", len(nodes.Items))
}
nodes, err := e2enode.GetReadySchedulableNodes(cs)
framework.ExpectNoError(err)
ginkgo.By("Checking if e2e firewall rules are correct")
for _, expFw := range gce.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) {


@ -19,12 +19,12 @@ package network
// Tests network performance using iperf or other containers.
import (
"fmt"
"math"
"time"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -53,13 +53,13 @@ func networkingIPerfTest(isIPv6 bool) {
}
ginkgo.It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() {
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
totalPods := len(nodes.Items)
// for a single service, we expect to divide bandwidth between the network. Very crude estimate.
expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods))
framework.ExpectNotEqual(totalPods, 0)
appName := "iperf-e2e"
_, err := f.CreateServiceForSimpleAppWithPods(
_, err = f.CreateServiceForSimpleAppWithPods(
8001,
8002,
appName,
@ -108,16 +108,14 @@ func networkingIPerfTest(isIPv6 bool) {
},
numClient,
)
expectedCli := numClient
if len(nodes.Items) < expectedCli {
expectedCli = len(nodes.Items)
}
framework.Logf("Reading all perf results to stdout.")
framework.Logf("date,cli,cliPort,server,serverPort,id,interval,transferBits,bandwidthBits")
// Calculate expected number of clients based on total nodes.
expectedCli := func() int {
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
return int(math.Min(float64(len(nodes.Items)), float64(numClient)))
}()
// Extra 1/10 second per client.
iperfTimeout := smallClusterTimeout + (time.Duration(expectedCli/10) * time.Second)
iperfResults := &IPerfResults{}


@ -566,7 +566,9 @@ var _ = SIGDescribe("Services", func() {
loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS
}
loadBalancerCreateTimeout := e2eservice.LoadBalancerCreateTimeoutDefault
if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber {
nodes, err := e2enode.GetReadySchedulableNodes(cs)
framework.ExpectNoError(err)
if len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber {
loadBalancerCreateTimeout = e2eservice.LoadBalancerCreateTimeoutLarge
}
@ -1522,7 +1524,9 @@ var _ = SIGDescribe("Services", func() {
loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS
}
loadBalancerCreateTimeout := e2eservice.LoadBalancerCreateTimeoutDefault
if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber {
nodes, err := e2enode.GetReadySchedulableNodes(cs)
framework.ExpectNoError(err)
if len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber {
loadBalancerCreateTimeout = e2eservice.LoadBalancerCreateTimeoutLarge
}
@ -1540,7 +1544,6 @@ var _ = SIGDescribe("Services", func() {
// This container is an nginx container listening on port 80
// See kubernetes/contrib/ingress/echoheaders/nginx.conf for content of response
jig.RunOrFail(namespace, nil)
var err error
// Make sure acceptPod is running. There are certain chances that pod might be teminated due to unexpected reasons.
acceptPod, err = cs.CoreV1().Pods(namespace).Get(acceptPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Unable to get pod %s", acceptPod.Name)
@ -1598,7 +1601,9 @@ var _ = SIGDescribe("Services", func() {
framework.SkipUnlessProviderIs("azure", "gke", "gce")
createTimeout := e2eservice.LoadBalancerCreateTimeoutDefault
if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber {
nodes, err := e2enode.GetReadySchedulableNodes(cs)
framework.ExpectNoError(err)
if len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber {
createTimeout = e2eservice.LoadBalancerCreateTimeoutLarge
}
@ -2076,7 +2081,9 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
framework.SkipUnlessProviderIs("gce", "gke")
cs = f.ClientSet
if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber {
nodes, err := e2enode.GetReadySchedulableNodes(cs)
framework.ExpectNoError(err)
if len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber {
loadBalancerCreateTimeout = e2eservice.LoadBalancerCreateTimeoutLarge
}
})
@ -2450,7 +2457,8 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns)
var svcIP string
if serviceType == v1.ServiceTypeNodePort {
nodes := framework.GetReadySchedulableNodesOrDie(cs)
nodes, err := e2enode.GetReadySchedulableNodes(cs)
framework.ExpectNoError(err)
addrs := e2enode.CollectAddresses(nodes, v1.NodeInternalIP)
gomega.Expect(len(addrs)).To(gomega.BeNumerically(">", 0), "ginkgo.Failed to get Node internal IP")
svcIP = addrs[0]


@ -27,6 +27,7 @@ import (
kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
"k8s.io/kubernetes/test/e2e/perftype"
testutils "k8s.io/kubernetes/test/utils"
@ -200,7 +201,8 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
var rm *e2ekubelet.ResourceMonitor
ginkgo.BeforeEach(func() {
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
nodeNames = sets.NewString()
for _, node := range nodes.Items {
nodeNames.Insert(node.Name)


@ -58,8 +58,8 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
framework.SkipUnlessSSHKeyPresent()
ginkgo.By("Getting all nodes and their SSH-able IP addresses")
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
framework.ExpectNotEqual(len(nodes.Items), 0)
nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
hosts := []string{}
for _, node := range nodes.Items {
for _, addr := range node.Status.Addresses {


@ -95,7 +95,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
if err != nil {
framework.Logf("Unexpected error occurred: %v", err)
}
nodeList, err = e2enode.GetReadySchedulableNodesOrDie(cs)
nodeList, err = e2enode.GetReadySchedulableNodes(cs)
if err != nil {
framework.Logf("Unexpected error occurred: %v", err)
}


@ -37,6 +37,7 @@ go_library(
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/service:go_default_library",
"//test/e2e/framework/statefulset:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",


@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"github.com/onsi/ginkgo"
)
@ -118,7 +119,13 @@ func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error {
return false, nil
}
numberSchedulableNodes := len(framework.GetReadySchedulableNodesOrDie(c).Items)
nodes, err := e2enode.GetReadySchedulableNodes(c)
if err != nil {
framework.Logf("Failed to get nodes: %v", err)
return false, nil
}
numberSchedulableNodes := len(nodes.Items)
numberkubeProxyPods := 0
for _, pod := range pods.Items {
if pod.Status.Phase == v1.PodRunning {
@ -176,7 +183,13 @@ func waitForKubeProxyDaemonSetRunning(c clientset.Interface) error {
return false, nil
}
numberSchedulableNodes := len(framework.GetReadySchedulableNodesOrDie(c).Items)
nodes, err := e2enode.GetReadySchedulableNodes(c)
if err != nil {
framework.Logf("Failed to get nodes: %v", err)
return false, nil
}
numberSchedulableNodes := len(nodes.Items)
numberkubeProxyPods := int(daemonSets.Items[0].Status.NumberAvailable)
if numberkubeProxyPods != numberSchedulableNodes {
framework.Logf("Expect %v kube-proxy DaemonSet pods running, got %v", numberSchedulableNodes, numberkubeProxyPods)


@ -47,6 +47,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/gpu:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/blang/semver:go_default_library",
"//vendor/github.com/coreos/go-systemd/util:go_default_library",


@ -33,6 +33,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@ -179,8 +180,9 @@ func disableCPUManagerInKubelet(f *framework.Framework) (oldCfg *kubeletconfig.K
// Wait for the Kubelet to be ready.
gomega.Eventually(func() bool {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
return len(nodeList.Items) == 1
nodes, err := e2enode.TotalReady(f.ClientSet)
framework.ExpectNoError(err)
return nodes == 1
}, time.Minute, time.Second).Should(gomega.BeTrue())
return oldCfg
@ -231,8 +233,9 @@ func enableCPUManagerInKubelet(f *framework.Framework, cleanStateFile bool) (old
// Wait for the Kubelet to be ready.
gomega.Eventually(func() bool {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
return len(nodeList.Items) == 1
nodes, err := e2enode.TotalReady(f.ClientSet)
framework.ExpectNoError(err)
return nodes == 1
}, time.Minute, time.Second).Should(gomega.BeTrue())
return oldCfg


@ -24,6 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e_node/perf/workloads"
@ -48,8 +49,9 @@ func setKubeletConfig(f *framework.Framework, cfg *kubeletconfig.KubeletConfigur
// Wait for the Kubelet to be ready.
gomega.Eventually(func() bool {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
return len(nodeList.Items) == 1
nodes, err := e2enode.TotalReady(f.ClientSet)
framework.ExpectNoError(err)
return nodes == 1
}, time.Minute, time.Second).Should(gomega.BeTrue())
}


@ -51,7 +51,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/test/e2e/framework"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
frameworkmetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
@ -345,7 +345,8 @@ func logNodeEvents(f *framework.Framework) {
}
func getLocalNode(f *framework.Framework) *v1.Node {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
framework.ExpectEqual(len(nodeList.Items), 1, "Unexpected number of node objects for node e2e. Expects only one node.")
return &nodeList.Items[0]
}
@ -358,7 +359,7 @@ func logKubeletLatencyMetrics(metricNames ...string) {
for _, key := range metricNames {
metricSet.Insert(kubeletmetrics.KubeletSubsystem + "_" + key)
}
metric, err := frameworkmetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
metric, err := e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
if err != nil {
framework.Logf("Error getting kubelet metrics: %v", err)
} else {
@ -367,14 +368,14 @@ func logKubeletLatencyMetrics(metricNames ...string) {
}
// returns config related metrics from the local kubelet, filtered to the filterMetricNames passed in
func getKubeletMetrics(filterMetricNames sets.String) (frameworkmetrics.KubeletMetrics, error) {
func getKubeletMetrics(filterMetricNames sets.String) (e2emetrics.KubeletMetrics, error) {
// grab Kubelet metrics
ms, err := frameworkmetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
ms, err := e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
if err != nil {
return nil, err
}
filtered := frameworkmetrics.NewKubeletMetrics()
filtered := e2emetrics.NewKubeletMetrics()
for name := range ms {
if !filterMetricNames.Has(name) {
continue


@ -67,7 +67,7 @@ go_library(
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
"//staging/src/k8s.io/component-base/version:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/github.com/go-openapi/spec:go_default_library",


@ -21,7 +21,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
e2eframework "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
testutils "k8s.io/kubernetes/test/utils"
"k8s.io/klog"
@ -84,7 +84,10 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
}
}
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
nodes, err := e2enode.GetReadySchedulableNodes(p.client)
if err != nil {
klog.Fatalf("Error listing nodes: %v", err)
}
index := 0
sum := 0
for _, v := range p.countToStrategy {
@ -101,7 +104,10 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
// CleanupNodes deletes existing test nodes.
func (p *IntegrationTestNodePreparer) CleanupNodes() error {
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
nodes, err := e2enode.GetReadySchedulableNodes(p.client)
if err != nil {
klog.Fatalf("Error listing nodes: %v", err)
}
for i := range nodes.Items {
if err := p.client.CoreV1().Nodes().Delete(nodes.Items[i].Name, &metav1.DeleteOptions{}); err != nil {
klog.Errorf("Error while deleting Node: %v", err)