Merge pull request #78282 from jiatongw/e2e/framework/utilNode

Move node-related methods to the framework/node package
Commit eaf89cfbb4 by Kubernetes Prow Robot, 2019-06-17 18:20:14 -07:00, committed by GitHub.
53 changed files with 721 additions and 527 deletions
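The change is mechanical: node helpers move out of the monolithic framework package into the new framework/node package (imported as e2enode), dropping the now-redundant Node prefix from their names, e.g. framework.IsNodeConditionSetAsExpected becomes e2enode.IsConditionSetAsExpected and framework.NumberOfRegisteredNodes becomes e2enode.TotalRegistered. A minimal sketch of a migrated call site, assuming c is a clientset.Interface; checkNodeReady is our own illustrative wrapper, not part of the PR:

package migrated

import (
	"time"

	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// checkNodeReady is a hypothetical wrapper: before this PR the call was
// framework.WaitForNodeToBeReady(c, name, timeout); after it, the same
// helper lives in the e2enode package under the same name.
func checkNodeReady(c clientset.Interface, name string) bool {
	return e2enode.WaitForNodeToBeReady(c, name, 5*time.Minute)
}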


@ -65,6 +65,7 @@ go_library(
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/replicaset:go_default_library",
"//test/e2e/framework/ssh:go_default_library",


@ -38,6 +38,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
@ -59,7 +60,7 @@ func expectNodeReadiness(isReady bool, newNode chan *v1.Node) {
for !expected && !timeout {
select {
case n := <-newNode:
if framework.IsNodeConditionSetAsExpected(n, v1.NodeReady, isReady) {
if e2enode.IsConditionSetAsExpected(n, v1.NodeReady, isReady) {
expected = true
} else {
e2elog.Logf("Observed node ready status is NOT %v as expected", isReady)
@ -142,8 +143,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
nodeOpts := metav1.ListOptions{}
nodes, err := c.CoreV1().Nodes().List(nodeOpts)
framework.ExpectNoError(err)
framework.FilterNodes(nodes, func(node v1.Node) bool {
if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
e2enode.Filter(nodes, func(node v1.Node) bool {
if !e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true) {
return false
}
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
@ -199,7 +200,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
go controller.Run(stopCh)
ginkgo.By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
host, err := framework.GetNodeExternalIP(&node)
host, err := e2enode.GetExternalIP(&node)
framework.ExpectNoError(err)
masterAddresses := framework.GetAllMasterAddresses(c)
defer func() {
@ -240,7 +241,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-net"
common.NewSVCByName(c, ns, name)
numNodes, err := framework.NumberOfRegisteredNodes(f.ClientSet)
numNodes, err := e2enode.TotalRegistered(f.ClientSet)
framework.ExpectNoError(err)
replicas := int32(numNodes)
common.NewRCByName(c, ns, name, replicas, nil)
@ -274,7 +275,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
})
e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
@ -307,7 +308,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
gracePeriod := int64(30)
common.NewSVCByName(c, ns, name)
numNodes, err := framework.NumberOfRegisteredNodes(f.ClientSet)
numNodes, err := e2enode.TotalRegistered(f.ClientSet)
framework.ExpectNoError(err)
replicas := int32(numNodes)
common.NewRCByName(c, ns, name, replicas, &gracePeriod)
@ -341,7 +342,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
})
e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
})
@ -382,9 +383,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
pst := framework.NewStatefulSetTester(c)
nn, err := framework.NumberOfRegisteredNodes(f.ClientSet)
nn, err := e2enode.TotalRegistered(f.ClientSet)
framework.ExpectNoError(err)
nodes, err := framework.CheckNodesReady(f.ClientSet, nn, framework.NodeReadyInitialTimeout)
nodes, err := e2enode.CheckReady(f.ClientSet, nn, framework.NodeReadyInitialTimeout)
framework.ExpectNoError(err)
common.RestartNodes(f.ClientSet, nodes)
@ -414,7 +415,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
})
e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
@ -462,7 +463,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
})
e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
})
@ -485,8 +486,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
ginkgo.By("choose a node - we will block all network traffic on this node")
var podOpts metav1.ListOptions
nodes := framework.GetReadySchedulableNodesOrDie(c)
framework.FilterNodes(nodes, func(node v1.Node) bool {
if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
e2enode.Filter(nodes, func(node v1.Node) bool {
if !e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true) {
return false
}
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
@ -581,7 +582,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
go controller.Run(stopCh)
ginkgo.By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
host, err := framework.GetNodeExternalIP(&node)
host, err := e2enode.GetExternalIP(&node)
framework.ExpectNoError(err)
masterAddresses := framework.GetAllMasterAddresses(c)
defer func() {


@ -57,6 +57,7 @@ go_library(
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",


@ -27,6 +27,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
@ -42,9 +43,9 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
gomega.Expect(len(nodeList.Items)).NotTo(gomega.BeZero())
pickedNode := nodeList.Items[0]
nodeIPs = framework.GetNodeAddresses(&pickedNode, v1.NodeExternalIP)
nodeIPs = e2enode.GetAddresses(&pickedNode, v1.NodeExternalIP)
// The pods running in the cluster can see the internal addresses.
nodeIPs = append(nodeIPs, framework.GetNodeAddresses(&pickedNode, v1.NodeInternalIP)...)
nodeIPs = append(nodeIPs, e2enode.GetAddresses(&pickedNode, v1.NodeInternalIP)...)
// make sure ServiceAccount admission controller is enabled, so secret generation on SA creation works
saName := "default"


@ -41,6 +41,7 @@ go_library(
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/instrumentation/monitoring:go_default_library",
"//test/e2e/scheduling:go_default_library",


@ -24,6 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@ -73,7 +74,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
if len(nodeGroupName) > 0 {
// Scale down back to only 'nodesNum' nodes, as expected at the start of the test.
framework.ExpectNoError(framework.ResizeGroup(nodeGroupName, nodesNum))
framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, nodesNum, 15*time.Minute))
framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, nodesNum, 15*time.Minute))
}
})


@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/strategicpatch"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -87,7 +88,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
}
}
framework.ExpectNoError(framework.WaitForReadyNodes(c, sum, scaleUpTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, sum, scaleUpTimeout))
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)
@ -112,7 +113,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
ginkgo.AfterEach(func() {
ginkgo.By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount, scaleDownTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount, scaleDownTimeout))
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
s := time.Now()
@ -214,7 +215,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
anyKey(originalSizes): totalNodes,
}
setMigSizes(newSizes)
framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
// run replicas
rcConfig := reserveMemoryRCConfig(f, "some-pod", replicas, replicas*perNodeReservation, largeScaleUpTimeout)
@ -248,7 +249,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
}
setMigSizes(newSizes)
framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
// annotate all nodes with no-scale-down
ScaleDownDisabledKey := "cluster-autoscaler.kubernetes.io/scale-down-disabled"
@ -302,7 +303,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
anyKey(originalSizes): totalNodes,
}
setMigSizes(newSizes)
framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
divider := int(float64(totalNodes) * 0.7)
fullNodesCount := divider
underutilizedNodesCount := totalNodes - fullNodesCount
@ -348,7 +349,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name)
// Ensure that no new nodes have been added so far.
gomega.Expect(framework.NumberOfReadyNodes(f.ClientSet)).To(gomega.Equal(nodeCount))
gomega.Expect(e2enode.TotalReady(f.ClientSet)).To(gomega.Equal(nodeCount))
// Start a number of schedulable pods to ensure CA reacts.
additionalNodes := maxNodes - nodeCount
@ -385,7 +386,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= minExpectedNodeCount }, scaleUpTimeout))
} else {
framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, config.expectedResult.nodes, scaleUpTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, config.expectedResult.nodes, scaleUpTimeout))
}
klog.Infof("cluster is increased")
if tolerateMissingPodCount > 0 {


@ -44,6 +44,7 @@ import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -108,7 +109,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
sum += size
}
// Give instances time to spin up
framework.ExpectNoError(framework.WaitForReadyNodes(c, sum, scaleUpTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, sum, scaleUpTimeout))
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)
@ -142,7 +143,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
for _, size := range originalSizes {
expectedNodes += size
}
framework.ExpectNoError(framework.WaitForReadyNodes(c, expectedNodes, scaleDownTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, expectedNodes, scaleDownTimeout))
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
@ -373,7 +374,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
// We wait for nodes to become schedulable to make sure the new nodes
// will be returned by getPoolNodes below.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, resizeTimeout))
@ -407,7 +408,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2))
})
@ -437,7 +438,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
ginkgo.It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@ -458,7 +459,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
ginkgo.It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@ -530,7 +531,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}()
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
ginkgo.It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@ -641,7 +642,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
defer disableAutoscaler(extraPoolName, 1, 2)
@ -655,7 +656,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
// resetting all the timers in scale down code. Adding 5 extra minutes to work around
// this issue.
// TODO: Remove the extra time when GKE restart is fixed.
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes+1, scaleUpTimeout+5*time.Minute))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes+1, scaleUpTimeout+5*time.Minute))
})
simpleScaleDownTest := func(unready int) {
@ -766,7 +767,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
}
framework.ExpectNoError(framework.ResizeGroup(minMig, int32(0)))
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount-minSize, resizeTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount-minSize, resizeTimeout))
}
ginkgo.By("Make remaining nodes unschedulable")
@ -812,7 +813,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 0, 1))
defer disableAutoscaler(extraPoolName, 0, 1)
@ -845,7 +846,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
}
framework.ExpectNoError(framework.ResizeGroup(minMig, int32(1)))
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount-minSize+1, resizeTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount-minSize+1, resizeTimeout))
ngNodes, err := framework.GetGroupNodes(minMig)
framework.ExpectNoError(err)
gomega.Expect(len(ngNodes) == 1).To(gomega.BeTrue())
@ -926,7 +927,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
testFunction()
// Give nodes time to recover from network failure
framework.ExpectNoError(framework.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout))
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout))
})
ginkgo.It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@ -1339,8 +1340,8 @@ func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int)
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
framework.FilterNodes(nodes, func(node v1.Node) bool {
return framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
e2enode.Filter(nodes, func(node v1.Node) bool {
return e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)


@ -30,6 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"github.com/onsi/ginkgo"
@ -102,7 +103,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
// This test is separated because it is slow and needs to run serially.
// It takes around 5 minutes to run on a 4-node cluster.
ginkgo.It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func() {
numNodes, err := framework.NumberOfRegisteredNodes(c)
numNodes, err := e2enode.TotalRegistered(c)
framework.ExpectNoError(err)
ginkgo.By("Replace the dns autoscaling parameters with testing parameters")
@ -157,7 +158,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
ginkgo.By("Restoring cluster size")
setMigSizes(originalSizes)
err = framework.WaitForReadyNodes(c, numNodes, scaleDownTimeout)
err = e2enode.WaitForReadyNodes(c, numNodes, scaleDownTimeout)
framework.ExpectNoError(err)
ginkgo.By("Wait for kube-dns scaled to expected number")


@ -14,6 +14,7 @@ go_library(
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
],


@ -24,6 +24,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@ -54,7 +55,7 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
framework.Failf("failed to delete node %q, err: %q", nodeToDelete.Name, err)
}
newNodes, err := framework.CheckNodesReady(c, len(origNodes.Items)-1, 5*time.Minute)
newNodes, err := e2enode.CheckReady(c, len(origNodes.Items)-1, 5*time.Minute)
gomega.Expect(err).To(gomega.BeNil())
gomega.Expect(len(newNodes)).To(gomega.Equal(len(origNodes.Items) - 1))


@ -78,6 +78,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/replicaset:go_default_library",
"//test/e2e/framework/volume:go_default_library",


@ -29,6 +29,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
testutils "k8s.io/kubernetes/test/utils"
"github.com/onsi/ginkgo"
@ -86,7 +87,7 @@ var _ = framework.KubeDescribe("NodeLease", func() {
ginkgo.It("the kubelet should report node status infrequently", func() {
ginkgo.By("wait until node is ready")
framework.WaitForNodeToBeReady(f.ClientSet, nodeName, 5*time.Minute)
e2enode.WaitForNodeToBeReady(f.ClientSet, nodeName, 5*time.Minute)
ginkgo.By("wait until there is node lease")
var err error


@ -41,7 +41,6 @@ go_library(
"//pkg/apis/storage/v1/util:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/controller/service:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis/config:go_default_library",
@ -108,6 +107,7 @@ go_library(
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
@ -150,6 +150,7 @@ filegroup(
"//test/e2e/framework/lifecycle:all-srcs",
"//test/e2e/framework/log:all-srcs",
"//test/e2e/framework/metrics:all-srcs",
"//test/e2e/framework/node:all-srcs",
"//test/e2e/framework/pod:all-srcs",
"//test/e2e/framework/podlogs:all-srcs",
"//test/e2e/framework/providers/aws:all-srcs",


@ -19,6 +19,7 @@ go_library(
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/utils:go_default_library",


@ -50,6 +50,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/manifest"
testutils "k8s.io/kubernetes/test/utils"
@ -744,7 +745,7 @@ func (j *TestJig) VerifyURL(route, host string, iterations int, interval time.Du
func (j *TestJig) pollServiceNodePort(ns, name string, port int) error {
// TODO: Curl all nodes?
u, err := framework.GetNodePortURL(j.Client, ns, name, port)
u, err := e2enode.GetPortURL(j.Client, ns, name, port)
if err != nil {
return err
}


@ -40,6 +40,7 @@ import (
"k8s.io/kubernetes/pkg/master/ports"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/metrics"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"github.com/prometheus/common/model"
)
@ -499,7 +500,7 @@ type usageDataPerContainer struct {
// GetKubeletHeapStats returns stats of kubelet heap.
func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error) {
client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap", ports.KubeletPort)
client, err := e2enode.ProxyRequest(c, nodeName, "debug/pprof/heap", ports.KubeletPort)
if err != nil {
return "", err
}


@ -39,6 +39,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -580,7 +581,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
ginkgo.By("Getting node addresses")
ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
config.ExternalAddrs = NodeAddresses(nodeList, v1.NodeExternalIP)
config.ExternalAddrs = e2enode.FirstAddress(nodeList, v1.NodeExternalIP)
SkipUnlessNodeCountIsAtLeast(2)
config.Nodes = nodeList.Items
@ -603,7 +604,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
if len(config.ExternalAddrs) != 0 {
config.NodeIP = config.ExternalAddrs[0]
} else {
internalAddrs := NodeAddresses(nodeList, v1.NodeInternalIP)
internalAddrs := e2enode.FirstAddress(nodeList, v1.NodeInternalIP)
config.NodeIP = internalAddrs[0]
}
}
@ -1048,7 +1049,7 @@ func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout
// This function executes commands on a node so it will work only for some
// environments.
func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1.Node, testFunc func()) {
host, err := GetNodeExternalIP(node)
host, err := e2enode.GetExternalIP(node)
if err != nil {
Failf("Error getting node external ip : %v", err)
}
@ -1066,7 +1067,7 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
}()
e2elog.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
if !WaitForNodeToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
for _, masterAddress := range masterAddresses {
@ -1074,7 +1075,7 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
}
e2elog.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
if !WaitForNodeToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {
if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {
Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
}


@ -0,0 +1,37 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"resource.go",
"wait.go",
],
importpath = "k8s.io/kubernetes/test/e2e/framework/node",
visibility = ["//visibility:public"],
deps = [
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/util/system:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/utils:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -0,0 +1,317 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
"net"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
)
const (
// poll is how often to poll pods, nodes, and claims.
poll = 2 * time.Second
// singleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
// transient failures from failing tests.
// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
singleCallTimeout = 5 * time.Minute
// sshPort is the port used for SSH connections to nodes.
sshPort = "22"
// timeout for proxy requests.
proxyTimeout = 2 * time.Minute
)
// FirstAddress returns the first address of the given type from each node in the list.
// TODO: Use return type string instead of []string
func FirstAddress(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string {
hosts := []string{}
for _, n := range nodelist.Items {
for _, addr := range n.Status.Addresses {
if addr.Type == addrType && addr.Address != "" {
hosts = append(hosts, addr.Address)
break
}
}
}
return hosts
}
// TODO: rename this to something easier to read.
func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue, silent bool) bool {
// Check the node readiness condition (logging all).
for _, cond := range node.Status.Conditions {
// Ensure that the condition type and the status matches as desired.
if cond.Type == conditionType {
// For NodeReady condition we need to check Taints as well
if cond.Type == v1.NodeReady {
hasNodeControllerTaints := false
taints := node.Spec.Taints
for _, taint := range taints {
if taint.MatchTaint(nodectlr.UnreachableTaintTemplate) || taint.MatchTaint(nodectlr.NotReadyTaintTemplate) {
hasNodeControllerTaints = true
break
}
}
if wantTrue {
if (cond.Status == v1.ConditionTrue) && !hasNodeControllerTaints {
return true
}
msg := ""
if !hasNodeControllerTaints {
msg = fmt.Sprintf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
msg = fmt.Sprintf("Condition %s of node %s is %v, but Node is tainted by NodeController with %v. Failure",
conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
if !silent {
e2elog.Logf(msg)
}
return false
}
// TODO: check if the Node is tainted once we enable NC notReady/unreachable taints by default
if cond.Status != v1.ConditionTrue {
return true
}
if !silent {
e2elog.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false
}
if (wantTrue && (cond.Status == v1.ConditionTrue)) || (!wantTrue && (cond.Status != v1.ConditionTrue)) {
return true
}
if !silent {
e2elog.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false
}
}
if !silent {
e2elog.Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
}
return false
}
// IsConditionSetAsExpected returns true if the node's condition of the given type matches wantTrue; otherwise it returns false and logs the mismatch in detail.
func IsConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, false)
}
// IsConditionSetAsExpectedSilent is like IsConditionSetAsExpected, but without logging.
func IsConditionSetAsExpectedSilent(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, true)
}
// IsConditionUnset returns true if the node has no condition of the given type, otherwise false.
func IsConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool {
for _, cond := range node.Status.Conditions {
if cond.Type == conditionType {
return false
}
}
return true
}
// Filter filters nodes in NodeList in place, removing nodes that do not
// satisfy the given condition
// TODO: consider merging with pkg/client/cache.NodeLister
func Filter(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
var l []v1.Node
for _, node := range nodeList.Items {
if fn(node) {
l = append(l, node)
}
}
nodeList.Items = l
}
// TotalRegistered returns the number of registered Nodes, excluding the master node.
func TotalRegistered(c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
e2elog.Logf("Failed to list nodes: %v", err)
return 0, err
}
return len(nodes.Items), nil
}
// TotalReady returns the number of ready Nodes, excluding the master node.
func TotalReady(c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
e2elog.Logf("Failed to list nodes: %v", err)
return 0, err
}
// Filter out not-ready nodes.
Filter(nodes, func(node v1.Node) bool {
return IsConditionSetAsExpected(&node, v1.NodeReady, true)
})
return len(nodes.Items), nil
}
// getSvcNodePort returns the node port for the given service:port.
func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) {
svc, err := client.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
if err != nil {
return 0, err
}
for _, p := range svc.Spec.Ports {
if p.Port == int32(svcPort) {
if p.NodePort != 0 {
return int(p.NodePort), nil
}
}
}
return 0, fmt.Errorf(
"No node port found for service %v, port %v", name, svcPort)
}
// GetPortURL returns an HTTP URL that reaches the given Service's NodePort via a node's external IP.
func GetPortURL(client clientset.Interface, ns, name string, svcPort int) (string, error) {
nodePort, err := getSvcNodePort(client, ns, name, svcPort)
if err != nil {
return "", err
}
// This list of nodes must not include the master, which is marked
// unschedulable, since the master doesn't run kube-proxy. Without
// kube-proxy NodePorts won't work.
var nodes *v1.NodeList
if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
nodes, err = client.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
return true, nil
}) != nil {
return "", err
}
if len(nodes.Items) == 0 {
return "", fmt.Errorf("Unable to list nodes in cluster")
}
for _, node := range nodes.Items {
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
return fmt.Sprintf("http://%v:%v", address.Address, nodePort), nil
}
}
}
}
return "", fmt.Errorf("Failed to find external address for service %v", name)
}
// ProxyRequest performs a GET against the given endpoint on a node's proxy subresource and returns the REST result.
func ProxyRequest(c clientset.Interface, node, endpoint string, port int) (restclient.Result, error) {
// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
// This will leak a goroutine if proxy hangs. #22165
var result restclient.Result
finished := make(chan struct{})
go func() {
result = c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, port)).
Suffix(endpoint).
Do()
finished <- struct{}{}
}()
select {
case <-finished:
return result, nil
case <-time.After(proxyTimeout):
return restclient.Result{}, nil
}
}
// GetExternalIP returns the node's external IP address concatenated with the SSH port,
// e.g. 1.2.3.4:22
func GetExternalIP(node *v1.Node) (string, error) {
e2elog.Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP && a.Address != "" {
host = net.JoinHostPort(a.Address, sshPort)
break
}
}
if host == "" {
return "", fmt.Errorf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
return host, nil
}
// GetInternalIP returns the node's internal IP address, concatenated with the SSH port (e.g. 10.0.0.1:22).
func GetInternalIP(node *v1.Node) (string, error) {
host := ""
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeInternalIP {
if address.Address != "" {
host = net.JoinHostPort(address.Address, sshPort)
break
}
}
}
if host == "" {
return "", fmt.Errorf("Couldn't get the internal IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
return host, nil
}
// GetAddresses returns a list of addresses of the given addressType for the given node
func GetAddresses(node *v1.Node, addressType v1.NodeAddressType) (ips []string) {
for j := range node.Status.Addresses {
nodeAddress := &node.Status.Addresses[j]
if nodeAddress.Type == addressType && nodeAddress.Address != "" {
ips = append(ips, nodeAddress.Address)
}
}
return
}
// CollectAddresses returns a list of addresses of the given addressType for the given list of nodes
func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []string {
ips := []string{}
for i := range nodes.Items {
ips = append(ips, GetAddresses(&nodes.Items[i], addressType)...)
}
return ips
}
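A brief usage sketch for the address helpers above, from a caller's perspective (a hedged example, not part of the PR: node is assumed to be a *v1.Node fetched elsewhere, imports elided, error handling abbreviated):

// Prefer the external address; fall back to the internal one.
host, err := e2enode.GetExternalIP(node) // e.g. "1.2.3.4:22", ready for SSH
if err != nil {
	host, err = e2enode.GetInternalIP(node)
}
// All external IPs of the node, without the SSH port appended.
ips := e2enode.GetAddresses(node, v1.NodeExternalIP)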


@ -0,0 +1,199 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
"regexp"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/system"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
)
const sleepTime = 20 * time.Second
var requiredPerNodePods = []*regexp.Regexp{
regexp.MustCompile(".*kube-proxy.*"),
regexp.MustCompile(".*fluentd-elasticsearch.*"),
regexp.MustCompile(".*node-problem-detector.*"),
}
// WaitForReadyNodes waits up to timeout for the cluster to reach the desired
// size with no not-ready nodes in it. Cluster size means the number of Nodes,
// excluding the master node.
func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) error {
_, err := CheckReady(c, size, timeout)
return err
}
// WaitForTotalHealthy checks whether all registered nodes are ready and all required Pods are running on them.
func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
e2elog.Logf("Waiting up to %v for all nodes to be ready", timeout)
var notReady []v1.Node
var missingPodsPerNode map[string][]string
err := wait.PollImmediate(poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for _, node := range nodes.Items {
if !IsConditionSetAsExpected(&node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
return false, err
}
systemPodsPerNode := make(map[string][]string)
for _, pod := range pods.Items {
if pod.Namespace == metav1.NamespaceSystem && pod.Status.Phase == v1.PodRunning {
if pod.Spec.NodeName != "" {
systemPodsPerNode[pod.Spec.NodeName] = append(systemPodsPerNode[pod.Spec.NodeName], pod.Name)
}
}
}
missingPodsPerNode = make(map[string][]string)
for _, node := range nodes.Items {
if !system.IsMasterNode(node.Name) {
for _, requiredPod := range requiredPerNodePods {
foundRequired := false
for _, presentPod := range systemPodsPerNode[node.Name] {
if requiredPod.MatchString(presentPod) {
foundRequired = true
break
}
}
if !foundRequired {
missingPodsPerNode[node.Name] = append(missingPodsPerNode[node.Name], requiredPod.String())
}
}
}
}
return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
})
if err != nil && err != wait.ErrWaitTimeout {
return err
}
if len(notReady) > 0 {
return fmt.Errorf("Not ready nodes: %v", notReady)
}
if len(missingPodsPerNode) > 0 {
return fmt.Errorf("Not running system Pods: %v", missingPodsPerNode)
}
return nil
}
// WaitConditionToBe returns whether the named node's condition state matches wantTrue
// within timeout. If wantTrue is true, it will ensure the node condition status
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
e2elog.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
e2elog.Logf("Couldn't get node %s", name)
continue
}
if IsConditionSetAsExpected(node, conditionType, wantTrue) {
return true
}
}
e2elog.Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
return false
}
// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the
// readiness condition is anything but ready, e.g. false or unknown) within
// timeout.
func WaitForNodeToBeNotReady(c clientset.Interface, name string, timeout time.Duration) bool {
return WaitConditionToBe(c, name, v1.NodeReady, false, timeout)
}
// WaitForNodeToBeReady returns whether node name is ready within timeout.
func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool {
return WaitConditionToBe(c, name, v1.NodeReady, true, timeout)
}
// CheckReady waits up to timeout for the cluster to reach the desired size
// (number of Nodes excluding the master) with every node ready, and returns
// the list of ready nodes.
func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(sleepTime) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
e2elog.Logf("Failed to list nodes: %v", err)
continue
}
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
Filter(nodes, func(node v1.Node) bool {
nodeReady := IsConditionSetAsExpected(&node, v1.NodeReady, true)
networkReady := IsConditionUnset(&node, v1.NodeNetworkUnavailable) || IsConditionSetAsExpected(&node, v1.NodeNetworkUnavailable, false)
return nodeReady && networkReady
})
numReady := len(nodes.Items)
if numNodes == size && numReady == size {
e2elog.Logf("Cluster has reached the desired number of ready nodes %d", size)
return nodes.Items, nil
}
e2elog.Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady)
}
return nil, fmt.Errorf("timeout waiting %v for number of ready nodes to be %d", timeout, size)
}
// waitListSchedulableNodes is a wrapper around listing nodes supporting retries.
func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
var nodes *v1.NodeList
var err error
if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
nodes, err = c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
return true, nil
}) != nil {
return nodes, err
}
return nodes, nil
}
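And a sketch of the wait helpers in a test body, mirroring the call sites updated elsewhere in this PR (f is assumed to be a *framework.Framework; imports elided):

numNodes, err := e2enode.TotalRegistered(f.ClientSet)
framework.ExpectNoError(err)
// Block until every registered node is Ready (and free of NodeController
// taints), or fail after the timeout.
framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, numNodes, 10*time.Minute))
// CheckReady additionally returns the ready nodes for later inspection.
nodes, err := e2enode.CheckReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
framework.ExpectNoError(err)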


@ -25,10 +25,11 @@ import (
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
@ -202,12 +203,12 @@ func waitForNodesReadyAfterUpgrade(f *Framework) error {
//
// TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
// GKE; the operation shouldn't return until they all are.
numNodes, err := NumberOfRegisteredNodes(f.ClientSet)
numNodes, err := e2enode.TotalRegistered(f.ClientSet)
if err != nil {
return fmt.Errorf("couldn't detect number of nodes")
}
e2elog.Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout, numNodes)
if _, err := CheckNodesReady(f.ClientSet, numNodes, RestartNodeReadyAgainTimeout); err != nil {
if _, err := e2enode.CheckReady(f.ClientSet, numNodes, RestartNodeReadyAgainTimeout); err != nil {
return err
}
return nil


@ -24,6 +24,7 @@ go_library(
"//staging/src/k8s.io/legacy-cloud-providers/gce:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",


@ -28,6 +28,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
)
@ -49,9 +50,9 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
var err error
numNodes, err := framework.NumberOfRegisteredNodes(f.ClientSet)
numNodes, err := e2enode.TotalRegistered(f.ClientSet)
framework.ExpectNoError(err)
originalNodes, err = framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
originalNodes, err = e2enode.CheckReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
framework.ExpectNoError(err)
e2elog.Logf("Got the following nodes before recreate %v", nodeNames(originalNodes))
@ -104,7 +105,7 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace
framework.Failf("Test failed; failed to recreate at least one node in %v.", framework.RecreateNodeReadyAgainTimeout)
}
nodesAfter, err := framework.CheckNodesReady(c, len(nodes), framework.RestartNodeReadyAgainTimeout)
nodesAfter, err := e2enode.CheckReady(c, len(nodes), framework.RestartNodeReadyAgainTimeout)
framework.ExpectNoError(err)
e2elog.Logf("Got the following nodes after recreate: %v", nodeNames(nodesAfter))


@ -40,6 +40,7 @@ import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
testutils "k8s.io/kubernetes/test/utils"
@ -324,34 +325,14 @@ func (j *ServiceTestJig) CreateLoadBalancerService(namespace, serviceName string
return svc
}
// GetNodeAddresses returns a list of addresses of the given addressType for the given node
func GetNodeAddresses(node *v1.Node, addressType v1.NodeAddressType) (ips []string) {
for j := range node.Status.Addresses {
nodeAddress := &node.Status.Addresses[j]
if nodeAddress.Type == addressType && nodeAddress.Address != "" {
ips = append(ips, nodeAddress.Address)
}
}
return
}
// CollectAddresses returns a list of addresses of the given addressType for the given list of nodes
func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []string {
ips := []string{}
for i := range nodes.Items {
ips = append(ips, GetNodeAddresses(&nodes.Items[i], addressType)...)
}
return ips
}
// GetNodePublicIps returns a public IP list of nodes.
func GetNodePublicIps(c clientset.Interface) ([]string, error) {
nodes := GetReadySchedulableNodesOrDie(c)
ips := CollectAddresses(nodes, v1.NodeExternalIP)
ips := e2enode.CollectAddresses(nodes, v1.NodeExternalIP)
if len(ips) == 0 {
// If ExternalIP isn't set, assume the test programs can reach the InternalIP
ips = CollectAddresses(nodes, v1.NodeInternalIP)
ips = e2enode.CollectAddresses(nodes, v1.NodeInternalIP)
}
return ips, nil
}
@ -408,7 +389,7 @@ func (j *ServiceTestJig) GetEndpointNodes(svc *v1.Service) map[string][]string {
nodeMap := map[string][]string{}
for _, n := range nodes.Items {
if epNodes.Has(n.Name) {
nodeMap[n.Name] = GetNodeAddresses(&n, v1.NodeExternalIP)
nodeMap[n.Name] = e2enode.GetAddresses(&n, v1.NodeExternalIP)
}
}
return nodeMap


@ -80,7 +80,6 @@ import (
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/pkg/controller"
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
"k8s.io/kubernetes/pkg/controller/service"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/master/ports"
@ -90,6 +89,7 @@ import (
taintutils "k8s.io/kubernetes/pkg/util/taints"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
testutils "k8s.io/kubernetes/test/utils"
@ -133,8 +133,6 @@ const (
// PollShortTimeout is the short timeout value in polling.
PollShortTimeout = 1 * time.Minute
// PollLongTimeout is the long timeout value in polling.
PollLongTimeout = 5 * time.Minute
// ServiceAccountProvisionTimeout is how long to wait for a service account to be provisioned.
// service accounts are provisioned after namespace creation
@ -1914,9 +1912,9 @@ func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList {
// 2) its Ready condition is set to true
// 3) doesn't have NetworkUnavailable condition set to true
func isNodeSchedulable(node *v1.Node) bool {
nodeReady := IsNodeConditionSetAsExpected(node, v1.NodeReady, true)
networkReady := IsNodeConditionUnset(node, v1.NodeNetworkUnavailable) ||
IsNodeConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false)
nodeReady := e2enode.IsConditionSetAsExpected(node, v1.NodeReady, true)
networkReady := e2enode.IsConditionUnset(node, v1.NodeNetworkUnavailable) ||
e2enode.IsConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false)
return !node.Spec.Unschedulable && nodeReady && networkReady
}
@ -1958,7 +1956,7 @@ func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList) {
nodes = waitListSchedulableNodesOrDie(c)
// previous tests may have caused failures of some nodes. Let's skip
// 'Not Ready' nodes, just in case (there is no need to fail the test).
FilterNodes(nodes, func(node v1.Node) bool {
e2enode.Filter(nodes, func(node v1.Node) bool {
return isNodeSchedulable(&node) && isNodeUntainted(&node)
})
return nodes
@ -1970,7 +1968,7 @@ func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList) {
// presence of nvidia.com/gpu=present:NoSchedule taint
func GetReadyNodesIncludingTaintedOrDie(c clientset.Interface) (nodes *v1.NodeList) {
nodes = waitListSchedulableNodesOrDie(c)
FilterNodes(nodes, func(node v1.Node) bool {
e2enode.Filter(nodes, func(node v1.Node) bool {
return isNodeSchedulable(&node)
})
return nodes
@ -2024,8 +2022,8 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
for i := range notSchedulable {
e2elog.Logf("-> %s Ready=%t Network=%t Taints=%v",
notSchedulable[i].Name,
IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true),
IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false),
e2enode.IsConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true),
e2enode.IsConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false),
notSchedulable[i].Spec.Taints)
}
e2elog.Logf("================================")
@ -2410,20 +2408,6 @@ func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, a
return ds, pollErr
}
// NodeAddresses returns the first address of the given type of each node.
func NodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string {
hosts := []string{}
for _, n := range nodelist.Items {
for _, addr := range n.Status.Addresses {
if addr.Type == addrType && addr.Address != "" {
hosts = append(hosts, addr.Address)
break
}
}
}
return hosts
}
// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {
@ -2456,118 +2440,6 @@ func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration
}
}
// WaitForNodeToBeReady returns whether node name is ready within timeout.
func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool {
return WaitForNodeToBe(c, name, v1.NodeReady, true, timeout)
}
// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the
// readiness condition is anything but ready, e.g false or unknown) within
// timeout.
func WaitForNodeToBeNotReady(c clientset.Interface, name string, timeout time.Duration) bool {
return WaitForNodeToBe(c, name, v1.NodeReady, false, timeout)
}
func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue, silent bool) bool {
// Check the node readiness condition (logging all).
for _, cond := range node.Status.Conditions {
// Ensure that the condition type and the status matches as desired.
if cond.Type == conditionType {
// For NodeReady condition we need to check Taints as well
if cond.Type == v1.NodeReady {
hasNodeControllerTaints := false
// For NodeReady we need to check if Taints are gone as well
taints := node.Spec.Taints
for _, taint := range taints {
if taint.MatchTaint(nodectlr.UnreachableTaintTemplate) || taint.MatchTaint(nodectlr.NotReadyTaintTemplate) {
hasNodeControllerTaints = true
break
}
}
if wantTrue {
if (cond.Status == v1.ConditionTrue) && !hasNodeControllerTaints {
return true
}
msg := ""
if !hasNodeControllerTaints {
msg = fmt.Sprintf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
msg = fmt.Sprintf("Condition %s of node %s is %v, but Node is tainted by NodeController with %v. Failure",
conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
if !silent {
e2elog.Logf(msg)
}
return false
}
// TODO: check if the Node is tainted once we enable NC notReady/unreachable taints by default
if cond.Status != v1.ConditionTrue {
return true
}
if !silent {
e2elog.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false
}
if (wantTrue && (cond.Status == v1.ConditionTrue)) || (!wantTrue && (cond.Status != v1.ConditionTrue)) {
return true
}
if !silent {
e2elog.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false
}
}
if !silent {
e2elog.Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
}
return false
}
// IsNodeConditionSetAsExpected returns true if the node's condition of the
// given conditionType matches wantTrue; otherwise it returns false and logs
// the mismatch in detail.
func IsNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, false)
}
// IsNodeConditionSetAsExpectedSilent behaves like IsNodeConditionSetAsExpected
// but suppresses the detailed logging.
func IsNodeConditionSetAsExpectedSilent(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, true)
}
// IsNodeConditionUnset returns true if the given node has no condition of the
// given conditionType, otherwise false.
func IsNodeConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool {
for _, cond := range node.Status.Conditions {
if cond.Type == conditionType {
return false
}
}
return true
}
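// Usage sketch, from CheckNodesReady below: a node's network is treated as
// ready when NodeNetworkUnavailable is either unset or explicitly not true:
//
//	networkReady := IsNodeConditionUnset(&node, v1.NodeNetworkUnavailable) ||
//		IsNodeConditionSetAsExpected(&node, v1.NodeNetworkUnavailable, false)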
// WaitForNodeToBe returns whether node "name's" condition state matches wantTrue
// within timeout. If wantTrue is true, it will ensure the node condition status
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func WaitForNodeToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
e2elog.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
e2elog.Logf("Couldn't get node %s", name)
continue
}
if IsNodeConditionSetAsExpected(node, conditionType, wantTrue) {
return true
}
}
e2elog.Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
return false
}
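// Usage sketch, mirroring the reboot test in this commit (those are its
// timeouts; WaitForNodeToBeReady/WaitForNodeToBeNotReady above are thin
// wrappers over this function):
//
//	if !WaitForNodeToBeNotReady(c, name, rebootNodeNotReadyTimeout) {
//		return false
//	}
//	if !WaitForNodeToBeReady(c, name, rebootNodeReadyAgainTimeout) {
//		return false
//	}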
// AllNodesReady checks whether all registered nodes are ready.
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
@ -2588,7 +2460,7 @@ func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !IsNodeConditionSetAsExpected(node, v1.NodeReady, true) {
if !e2enode.IsConditionSetAsExpected(node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
@ -2614,88 +2486,6 @@ func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
return nil
}
// WaitForAllNodesHealthy checks whether all registered nodes are ready and all required Pods are running on them.
func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error {
e2elog.Logf("Waiting up to %v for all nodes to be ready", timeout)
var notReady []v1.Node
var missingPodsPerNode map[string][]string
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for _, node := range nodes.Items {
if !IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
return false, err
}
systemPodsPerNode := make(map[string][]string)
for _, pod := range pods.Items {
if pod.Namespace == metav1.NamespaceSystem && pod.Status.Phase == v1.PodRunning {
if pod.Spec.NodeName != "" {
systemPodsPerNode[pod.Spec.NodeName] = append(systemPodsPerNode[pod.Spec.NodeName], pod.Name)
}
}
}
missingPodsPerNode = make(map[string][]string)
for _, node := range nodes.Items {
if !system.IsMasterNode(node.Name) {
for _, requiredPod := range requiredPerNodePods {
foundRequired := false
for _, presentPod := range systemPodsPerNode[node.Name] {
if requiredPod.MatchString(presentPod) {
foundRequired = true
break
}
}
if !foundRequired {
missingPodsPerNode[node.Name] = append(missingPodsPerNode[node.Name], requiredPod.String())
}
}
}
}
return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
})
if err != nil && err != wait.ErrWaitTimeout {
return err
}
if len(notReady) > 0 {
return fmt.Errorf("Not ready nodes: %v", notReady)
}
if len(missingPodsPerNode) > 0 {
return fmt.Errorf("Not running system Pods: %v", missingPodsPerNode)
}
return nil
}
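// Usage sketch, as the DNS performance and scheduling suites in this commit
// call the moved e2enode.WaitForTotalHealthy from their BeforeEach:
//
//	ExpectNoError(WaitForAllNodesSchedulable(f.ClientSet, TestContext.NodeSchedulableTimeout))
//	WaitForAllNodesHealthy(f.ClientSet, time.Minute)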
// FilterNodes filters nodes in NodeList in place, removing nodes that do not
// satisfy the given condition
// TODO: consider merging with pkg/client/cache.NodeLister
func FilterNodes(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
var l []v1.Node
for _, node := range nodeList.Items {
if fn(node) {
l = append(l, node)
}
}
nodeList.Items = l
}
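// Usage sketch, from WaitForMasters later in this file: filter in place to
// keep only Ready nodes (nodeList.Items is rewritten):
//
//	FilterNodes(nodes, func(node v1.Node) bool {
//		return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
//	})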
// ParseKVLines parses output that looks like lines containing "<key>: <val>"
// and returns <val> if <key> is found. Otherwise, it returns the empty string.
func ParseKVLines(output, key string) string {
@ -2962,68 +2752,6 @@ func CheckForControllerManagerHealthy(duration time.Duration) error {
return nil
}
// NumberOfRegisteredNodes returns number of registered Nodes excluding Master Node.
func NumberOfRegisteredNodes(c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
e2elog.Logf("Failed to list nodes: %v", err)
return 0, err
}
return len(nodes.Items), nil
}
// NumberOfReadyNodes returns number of ready Nodes excluding Master Node.
func NumberOfReadyNodes(c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
e2elog.Logf("Failed to list nodes: %v", err)
return 0, err
}
// Filter out not-ready nodes.
FilterNodes(nodes, func(node v1.Node) bool {
return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
return len(nodes.Items), nil
}
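// Usage sketch, from the node resize tests in this commit: size a replication
// controller to one replica per registered node (common.NewRCByName lives in
// test/e2e/common and is shown only for context):
//
//	numNodes, err := NumberOfRegisteredNodes(c)
//	ExpectNoError(err)
//	replicas := int32(numNodes)
//	common.NewRCByName(c, ns, name, replicas, nil)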
// CheckNodesReady waits up to timeout for the cluster to reach the desired
// size and for all nodes in it to be ready. By cluster size we mean the
// number of Nodes excluding the Master Node.
func CheckNodesReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
e2elog.Logf("Failed to list nodes: %v", err)
continue
}
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
FilterNodes(nodes, func(node v1.Node) bool {
nodeReady := IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
networkReady := IsNodeConditionUnset(&node, v1.NodeNetworkUnavailable) || IsNodeConditionSetAsExpected(&node, v1.NodeNetworkUnavailable, false)
return nodeReady && networkReady
})
numReady := len(nodes.Items)
if numNodes == size && numReady == size {
e2elog.Logf("Cluster has reached the desired number of ready nodes %d", size)
return nodes.Items, nil
}
e2elog.Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady)
}
return nil, fmt.Errorf("timeout waiting %v for number of ready nodes to be %d", timeout, size)
}
// WaitForReadyNodes waits up to timeout for the cluster to reach the desired
// size and for all nodes in it to be ready. By cluster size we mean the
// number of Nodes excluding the Master Node.
func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) error {
_, err := CheckNodesReady(c, size, timeout)
return err
}
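// Usage sketch, restoring the original cluster size after a resize, as the
// NodeLease and Nodes [Disruptive] suites in this commit do:
//
//	if err := WaitForReadyNodes(c, TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
//		Failf("Couldn't restore the original cluster size: %v", err)
//	}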
// GenerateMasterRegexp returns a regex for matching master node name.
func GenerateMasterRegexp(prefix string) string {
return prefix + "(-...)?"
@ -3039,7 +2767,7 @@ func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeou
}
// Filter out nodes that are not master replicas
FilterNodes(nodes, func(node v1.Node) bool {
e2enode.Filter(nodes, func(node v1.Node) bool {
res, err := regexp.Match(GenerateMasterRegexp(masterPrefix), ([]byte)(node.Name))
if err != nil {
e2elog.Logf("Failed to match regexp to node name: %v", err)
@ -3051,8 +2779,8 @@ func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeou
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
FilterNodes(nodes, func(node v1.Node) bool {
return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
e2enode.Filter(nodes, func(node v1.Node) bool {
return e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
@ -3152,62 +2880,6 @@ func LookForStringInFile(ns, podName, container, file, expectedString string, ti
})
}
// getSvcNodePort returns the node port for the given service:port.
func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) {
svc, err := client.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
if err != nil {
return 0, err
}
for _, p := range svc.Spec.Ports {
if p.Port == int32(svcPort) {
if p.NodePort != 0 {
return int(p.NodePort), nil
}
}
}
return 0, fmt.Errorf(
"No node port found for service %v, port %v", name, svcPort)
}
// GetNodePortURL returns the url to a nodeport Service.
func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (string, error) {
nodePort, err := getSvcNodePort(client, ns, name, svcPort)
if err != nil {
return "", err
}
// This list of nodes must not include the master, which is marked
// unschedulable, since the master doesn't run kube-proxy. Without
// kube-proxy NodePorts won't work.
var nodes *v1.NodeList
if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
nodes, err = client.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
return true, nil
}) != nil {
return "", err
}
if len(nodes.Items) == 0 {
return "", fmt.Errorf("Unable to list nodes in cluster")
}
for _, node := range nodes.Items {
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
return fmt.Sprintf("http://%v:%v", address.Address, nodePort), nil
}
}
}
}
return "", fmt.Errorf("Failed to find external address for service %v", name)
}
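// Usage sketch (the service name and port are illustrative, not from this
// commit; SimpleGET is defined later in this file):
//
//	url, err := GetNodePortURL(c, ns, "my-service", 80)
//	ExpectNoError(err)
//	body, err := SimpleGET(http.DefaultClient, url, "")
//	ExpectNoError(err)
//	e2elog.Logf("GET %s returned: %s", url, body)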
// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
@ -3270,33 +2942,6 @@ func UnblockNetwork(from string, to string) {
}
}
// timeout for proxy requests.
const proxyTimeout = 2 * time.Minute
// NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client.
func NodeProxyRequest(c clientset.Interface, node, endpoint string, port int) (restclient.Result, error) {
// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
// This will leak a goroutine if proxy hangs. #22165
var result restclient.Result
finished := make(chan struct{})
go func() {
result = c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, port)).
Suffix(endpoint).
Do()
finished <- struct{}{}
}()
select {
case <-finished:
return result, nil
case <-time.After(proxyTimeout):
return restclient.Result{}, nil
}
}
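// Usage sketch, from the PreStop test in this commit: read the kubelet's pod
// list through the API server proxy:
//
//	client, err := NodeProxyRequest(c, nodeName, "pods", ports.KubeletPort)
//	ExpectNoError(err, "failed to get the pods of the node")
//	result := &v1.PodList{}
//	ExpectNoError(client.Into(result), "failed to parse the pods of the node")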
// GetKubeletPods retrieves the list of pods on the kubelet.
// TODO(alejandrox1): move to pod subpkg once node methods have been refactored.
func GetKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
@ -3315,7 +2960,7 @@ func GetKubeletRunningPods(c clientset.Interface, node string) (*v1.PodList, err
// refactored.
func getKubeletPods(c clientset.Interface, node, resource string) (*v1.PodList, error) {
result := &v1.PodList{}
client, err := NodeProxyRequest(c, node, resource, ports.KubeletPort)
client, err := e2enode.ProxyRequest(c, node, resource, ports.KubeletPort)
if err != nil {
return &v1.PodList{}, err
}
@ -3637,40 +3282,6 @@ func GetAllMasterAddresses(c clientset.Interface) []string {
return ips.List()
}
// GetNodeExternalIP returns node external IP concatenated with port 22 for ssh
// e.g. 1.2.3.4:22
func GetNodeExternalIP(node *v1.Node) (string, error) {
e2elog.Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP && a.Address != "" {
host = net.JoinHostPort(a.Address, sshPort)
break
}
}
if host == "" {
return "", fmt.Errorf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
return host, nil
}
// GetNodeInternalIP returns node internal IP
func GetNodeInternalIP(node *v1.Node) (string, error) {
host := ""
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeInternalIP {
if address.Address != "" {
host = net.JoinHostPort(address.Address, sshPort)
break
}
}
}
if host == "" {
return "", fmt.Errorf("Couldn't get the internal IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
return host, nil
}
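// Usage sketch, from the TaintBasedEvictions and flexvolume tests in this
// commit: prefer the external IP and fall back to the internal one:
//
//	host, err := GetNodeExternalIP(&node)
//	if err != nil {
//		host, err = GetNodeInternalIP(&node)
//	}
//	ExpectNoError(err)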
// SimpleGET executes a get on the given url, returns error if non-200 returned.
func SimpleGET(c *http.Client, url, host string) (string, error) {
req, err := http.NewRequest("GET", url, nil)

View File

@ -36,6 +36,7 @@ go_library(
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/framework/lifecycle:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/upgrades:go_default_library",

View File

@ -28,6 +28,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
@ -45,7 +46,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
// make sure kubelet readonly (10255) and cadvisor (4194) ports are disabled via API server proxy
ginkgo.It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func() {
result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort)
result, err := e2enode.ProxyRequest(f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort)
framework.ExpectNoError(err)
var statusCode int
@ -53,7 +54,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
gomega.Expect(statusCode).NotTo(gomega.Equal(http.StatusOK))
})
ginkgo.It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func() {
result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "containers/", 4194)
result, err := e2enode.ProxyRequest(f.ClientSet, nodeName, "containers/", 4194)
framework.ExpectNoError(err)
var statusCode int
@ -72,7 +73,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
// checks whether the target port is closed
func portClosedTest(f *framework.Framework, pickNode *v1.Node, port int) {
nodeAddrs := framework.GetNodeAddresses(pickNode, v1.NodeExternalIP)
nodeAddrs := e2enode.GetAddresses(pickNode, v1.NodeExternalIP)
gomega.Expect(len(nodeAddrs)).NotTo(gomega.BeZero())
for _, addr := range nodeAddrs {

View File

@ -26,6 +26,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"github.com/onsi/ginkgo"
@ -87,7 +88,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}
if err := framework.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
if err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
framework.Failf("Couldn't restore the original cluster size: %v", err)
}
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
@ -99,7 +100,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
ginkgo.It("node lease should be deleted when corresponding node is deleted", func() {
leaseClient := c.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease)
err := framework.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute)
err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute)
gomega.Expect(err).To(gomega.BeNil())
ginkgo.By("verify node lease exists for every nodes")
@ -126,7 +127,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
gomega.Expect(err).To(gomega.BeNil())
err = framework.WaitForGroupSize(group, targetNumNodes)
gomega.Expect(err).To(gomega.BeNil())
err = framework.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes-1, 10*time.Minute)
err = e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes-1, 10*time.Minute)
gomega.Expect(err).To(gomega.BeNil())
targetNodes := framework.GetReadySchedulableNodesOrDie(c)
gomega.Expect(len(targetNodes.Items)).To(gomega.Equal(int(targetNumNodes)))

View File

@ -31,6 +31,7 @@ import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
testutils "k8s.io/kubernetes/test/utils"
@ -237,7 +238,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
}
// Node sanity check: ensure it is "ready".
if !framework.WaitForNodeToBeReady(c, name, framework.NodeReadyInitialTimeout) {
if !e2enode.WaitForNodeToBeReady(c, name, framework.NodeReadyInitialTimeout) {
return false
}
@ -273,12 +274,12 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
}
// Wait for some kind of "not ready" status.
if !framework.WaitForNodeToBeNotReady(c, name, rebootNodeNotReadyTimeout) {
if !e2enode.WaitForNodeToBeNotReady(c, name, rebootNodeNotReadyTimeout) {
return false
}
// Wait for some kind of "ready" status.
if !framework.WaitForNodeToBeReady(c, name, rebootNodeReadyAgainTimeout) {
if !e2enode.WaitForNodeToBeReady(c, name, rebootNodeReadyAgainTimeout) {
return false
}

View File

@ -25,6 +25,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"github.com/onsi/ginkgo"
@ -97,7 +98,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}
if err := framework.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil {
if err := e2enode.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil {
framework.Failf("Couldn't restore the original cluster size: %v", err)
}
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
@ -111,7 +112,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
// Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-delete-node"
numNodes, err := framework.NumberOfRegisteredNodes(c)
numNodes, err := e2enode.TotalRegistered(c)
framework.ExpectNoError(err)
originalNodeCount = int32(numNodes)
common.NewRCByName(c, ns, name, originalNodeCount, nil)
@ -124,7 +125,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
framework.ExpectNoError(err)
err = framework.WaitForGroupSize(group, targetNumNodes)
framework.ExpectNoError(err)
err = framework.WaitForReadyNodes(c, int(originalNodeCount-1), 10*time.Minute)
err = e2enode.WaitForReadyNodes(c, int(originalNodeCount-1), 10*time.Minute)
framework.ExpectNoError(err)
ginkgo.By("waiting 1 minute for the watch in the podGC to catch up, remove any pods scheduled on " +
@ -142,7 +143,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-add-node"
common.NewSVCByName(c, ns, name)
numNodes, err := framework.NumberOfRegisteredNodes(c)
numNodes, err := e2enode.TotalRegistered(c)
framework.ExpectNoError(err)
originalNodeCount = int32(numNodes)
common.NewRCByName(c, ns, name, originalNodeCount, nil)
@ -155,7 +156,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
framework.ExpectNoError(err)
err = framework.WaitForGroupSize(group, targetNumNodes)
framework.ExpectNoError(err)
err = framework.WaitForReadyNodes(c, int(originalNodeCount+1), 10*time.Minute)
err = e2enode.WaitForReadyNodes(c, int(originalNodeCount+1), 10*time.Minute)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", originalNodeCount+1))

View File

@ -26,6 +26,7 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
@ -55,12 +56,12 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
var err error
ps, err = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything())
framework.ExpectNoError(err)
numNodes, err = framework.NumberOfRegisteredNodes(f.ClientSet)
numNodes, err = e2enode.TotalRegistered(f.ClientSet)
framework.ExpectNoError(err)
systemNamespace = metav1.NamespaceSystem
ginkgo.By("ensuring all nodes are ready")
originalNodes, err = framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
originalNodes, err = e2enode.CheckReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
framework.ExpectNoError(err)
e2elog.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))
@ -90,7 +91,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
framework.ExpectNoError(err)
ginkgo.By("ensuring all nodes are ready after the restart")
nodesAfter, err := framework.CheckNodesReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
nodesAfter, err := e2enode.CheckReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
framework.ExpectNoError(err)
e2elog.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))

View File

@ -62,6 +62,7 @@ go_library(
"//test/e2e/framework/endpoints:go_default_library",
"//test/e2e/framework/ingress:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
"//test/e2e/framework/ssh:go_default_library",

View File

@ -28,6 +28,7 @@ import (
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
testutils "k8s.io/kubernetes/test/utils"
"github.com/onsi/ginkgo"
@ -45,7 +46,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
ginkgo.BeforeEach(func() {
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout))
framework.WaitForAllNodesHealthy(f.ClientSet, time.Minute)
e2enode.WaitForTotalHealthy(f.ClientSet, time.Minute)
err := framework.CheckTestingNSDeletedExcept(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)

View File

@ -27,6 +27,7 @@ import (
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
gcecloud "k8s.io/legacy-cloud-providers/gce"
@ -188,7 +189,7 @@ var _ = SIGDescribe("Firewall rule", func() {
}
ginkgo.By("Checking well known ports on master and nodes are not exposed externally")
nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP)
nodeAddrs := e2enode.FirstAddress(nodes, v1.NodeExternalIP)
if len(nodeAddrs) == 0 {
framework.Failf("did not find any node addresses")
}

View File

@ -29,6 +29,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/images/agnhost/net/nat"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -51,7 +52,7 @@ var _ = SIGDescribe("Network", func() {
ginkgo.It("should set TCP CLOSE_WAIT timeout", func() {
nodes := framework.GetReadySchedulableNodesOrDie(fr.ClientSet)
ips := framework.CollectAddresses(nodes, v1.NodeInternalIP)
ips := e2enode.CollectAddresses(nodes, v1.NodeInternalIP)
if len(nodes.Items) < 2 {
framework.Skipf(

View File

@ -40,6 +40,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
@ -2083,7 +2084,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
framework.Failf("Service HealthCheck NodePort was not allocated")
}
ips := framework.CollectAddresses(nodes, v1.NodeExternalIP)
ips := e2enode.CollectAddresses(nodes, v1.NodeExternalIP)
ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
svcTCPPort := int(svc.Spec.Ports[0].Port)
@ -2206,7 +2207,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
if _, ok := endpointNodeMap[n.Name]; ok {
continue
}
noEndpointNodeMap[n.Name] = framework.GetNodeAddresses(&n, v1.NodeExternalIP)
noEndpointNodeMap[n.Name] = e2enode.GetAddresses(&n, v1.NodeExternalIP)
}
svcTCPPort := int(svc.Spec.Ports[0].Port)
@ -2354,7 +2355,7 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
var svcIP string
if serviceType == v1.ServiceTypeNodePort {
nodes := framework.GetReadySchedulableNodesOrDie(cs)
addrs := framework.CollectAddresses(nodes, v1.NodeInternalIP)
addrs := e2enode.CollectAddresses(nodes, v1.NodeInternalIP)
gomega.Expect(len(addrs)).To(gomega.BeNumerically(">", 0), "Failed to get Node internal IP")
svcIP = addrs[0]
servicePort = int(svc.Spec.Ports[0].NodePort)

View File

@ -38,6 +38,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/volume:go_default_library",

View File

@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
testutils "k8s.io/kubernetes/test/utils"
@ -50,7 +51,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
framework.SkipUnlessProviderIs("gce", "gke")
framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu")
framework.WaitForAllNodesHealthy(f.ClientSet, time.Minute)
e2enode.WaitForTotalHealthy(f.ClientSet, time.Minute)
})
ginkgo.It("should run without error", func() {

View File

@ -30,6 +30,7 @@ import (
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -204,7 +205,7 @@ var _ = SIGDescribe("PreStop", func() {
ginkgo.By("verifying the pod running state after graceful termination")
result := &v1.PodList{}
err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) {
client, err := framework.NodeProxyRequest(f.ClientSet, pod.Spec.NodeName, "pods", ports.KubeletPort)
client, err := e2enode.ProxyRequest(f.ClientSet, pod.Spec.NodeName, "pods", ports.KubeletPort)
framework.ExpectNoError(err, "failed to get the pods of the node")
err = client.Into(result)
framework.ExpectNoError(err, "failed to parse the pods of the node")

View File

@ -47,6 +47,7 @@ go_library(
"//test/e2e/framework/gpu:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
"//test/e2e/framework/replicaset:go_default_library",

View File

@ -28,6 +28,7 @@ import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -54,7 +55,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
cs = f.ClientSet
ns = f.Namespace.Name
framework.WaitForAllNodesHealthy(cs, time.Minute)
e2enode.WaitForTotalHealthy(cs, time.Minute)
masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
framework.ExpectNoError(framework.CheckTestingNSDeletedExcept(cs, ns))

View File

@ -30,6 +30,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework/gpu"
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -209,9 +210,9 @@ func testNvidiaGPUsJob(f *framework.Framework) {
err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, 1)
framework.ExpectNoError(err)
numNodes, err := framework.NumberOfRegisteredNodes(f.ClientSet)
numNodes, err := e2enode.TotalRegistered(f.ClientSet)
framework.ExpectNoError(err)
nodes, err := framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
nodes, err := e2enode.CheckReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
framework.ExpectNoError(err)
ginkgo.By("Recreating nodes")

View File

@ -35,6 +35,7 @@ import (
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/replicaset"
@ -80,7 +81,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.Equal(true))
}
framework.WaitForAllNodesHealthy(cs, time.Minute)
e2enode.WaitForTotalHealthy(cs, time.Minute)
masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
err := framework.CheckTestingNSDeletedExcept(cs, ns)

View File

@ -24,6 +24,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
// ensure libs have a chance to initialize
_ "github.com/stretchr/testify/assert"
@ -37,6 +38,7 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -77,7 +79,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
ns = f.Namespace.Name
nodeList = &v1.NodeList{}
framework.WaitForAllNodesHealthy(cs, time.Minute)
e2enode.WaitForTotalHealthy(cs, time.Minute)
_, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
err := framework.CheckTestingNSDeletedExcept(cs, ns)

View File

@ -27,6 +27,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"github.com/onsi/ginkgo"
@ -123,9 +124,9 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
node := nodeList.Items[0]
ginkgo.By(fmt.Sprintf("Blocking traffic from node %s to the master", nodeName))
host, err := framework.GetNodeExternalIP(&node)
host, err := e2enode.GetExternalIP(&node)
if err != nil {
host, err = framework.GetNodeInternalIP(&node)
host, err = e2enode.GetInternalIP(&node)
}
framework.ExpectNoError(err)
masterAddresses := framework.GetAllMasterAddresses(cs)
@ -143,7 +144,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
}
ginkgo.By(fmt.Sprintf("Expecting to see node %q becomes Ready", nodeName))
framework.WaitForNodeToBeReady(cs, nodeName, time.Minute*1)
e2enode.WaitForNodeToBeReady(cs, nodeName, time.Minute*1)
ginkgo.By("Expecting to see unreachable=:NoExecute taint is taken off")
err := framework.WaitForNodeHasTaintOrNot(cs, nodeName, taint, false, time.Second*30)
framework.ExpectNoError(err)
@ -154,7 +155,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
}
ginkgo.By(fmt.Sprintf("Expecting to see node %q becomes NotReady", nodeName))
if !framework.WaitForNodeToBeNotReady(cs, nodeName, time.Minute*3) {
if !e2enode.WaitForNodeToBeNotReady(cs, nodeName, time.Minute*3) {
framework.Failf("node %q doesn't turn to NotReady after 3 minutes", nodeName)
}
ginkgo.By("Expecting to see unreachable=:NoExecute taint is applied")

View File

@ -28,6 +28,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -163,7 +164,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
cs = f.ClientSet
ns = f.Namespace.Name
framework.WaitForAllNodesHealthy(cs, time.Minute)
e2enode.WaitForTotalHealthy(cs, time.Minute)
err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
@ -337,7 +338,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
cs = f.ClientSet
ns = f.Namespace.Name
framework.WaitForAllNodesHealthy(cs, time.Minute)
e2enode.WaitForTotalHealthy(cs, time.Minute)
err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)

View File

@ -68,6 +68,7 @@ go_library(
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
"//test/e2e/framework/ssh:go_default_library",

View File

@ -29,6 +29,7 @@ import (
apierrs "k8s.io/apimachinery/pkg/api/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/framework/volume"
@ -77,9 +78,9 @@ func installFlex(c clientset.Interface, node *v1.Node, vendor, driver, filePath
host := ""
var err error
if node != nil {
host, err = framework.GetNodeExternalIP(node)
host, err = e2enode.GetExternalIP(node)
if err != nil {
host, err = framework.GetNodeInternalIP(node)
host, err = e2enode.GetInternalIP(node)
}
} else {
masterHostWithPort := framework.GetMasterHost()
@ -106,9 +107,9 @@ func uninstallFlex(c clientset.Interface, node *v1.Node, vendor, driver string)
host := ""
var err error
if node != nil {
host, err = framework.GetNodeExternalIP(node)
host, err = e2enode.GetExternalIP(node)
if err != nil {
host, err = framework.GetNodeInternalIP(node)
host, err = e2enode.GetInternalIP(node)
}
} else {
masterHostWithPort := framework.GetMasterHost()

View File

@ -30,6 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils"
@ -91,7 +92,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
for _, node := range nodes.Items {
if node.Name != nfsServerPod.Spec.NodeName {
clientNode = &node
clientNodeIP, err = framework.GetNodeExternalIP(clientNode)
clientNodeIP, err = e2enode.GetExternalIP(clientNode)
framework.ExpectNoError(err)
break
}

View File

@ -40,6 +40,7 @@ import (
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -442,7 +443,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
})
func countReadyNodes(c clientset.Interface, hostName types.NodeName) int {
framework.WaitForNodeToBeReady(c, string(hostName), nodeStatusTimeout)
e2enode.WaitForNodeToBeReady(c, string(hostName), nodeStatusTimeout)
framework.WaitForAllNodesSchedulable(c, nodeStatusTimeout)
nodes := framework.GetReadySchedulableNodesOrDie(c)
return len(nodes.Items)

View File

@ -27,6 +27,7 @@ go_library(
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/utils/image:go_default_library",

View File

@ -36,6 +36,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -148,7 +149,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)
if kOp == KStop {
if ok := framework.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
}
}
@ -168,7 +169,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
}
if kOp == KStart || kOp == KRestart {
// For kubelet start and restart operations, Wait until Node becomes Ready
if ok := framework.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
}
}