Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-27 13:37:30 +00:00
Merge pull request #78282 from jiatongw/e2e/framework/utilNode

Move node related methods to framework/node package

This commit is contained in: commit eaf89cfbb4
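The change is mechanical: helpers that previously lived directly on the framework package move to test/e2e/framework/node, imported as e2enode, and every caller is updated to the shorter names. As a quick orientation before the hunks below, here is a minimal sketch of the before/after call sites; it is not code from the PR, and the function and variable names (migratedCalls, c, node) are illustrative only.

package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// migratedCalls shows the renamed helpers used throughout this PR:
//   framework.NumberOfRegisteredNodes      -> e2enode.TotalRegistered
//   framework.WaitForReadyNodes            -> e2enode.WaitForReadyNodes
//   framework.IsNodeConditionSetAsExpected -> e2enode.IsConditionSetAsExpected
//   framework.GetNodeExternalIP            -> e2enode.GetExternalIP
func migratedCalls(c clientset.Interface, node *v1.Node) error {
	// Count registered, non-master nodes.
	numNodes, err := e2enode.TotalRegistered(c)
	if err != nil {
		return err
	}

	// Wait until that many nodes report Ready (the timeout value is illustrative).
	if err := e2enode.WaitForReadyNodes(c, numNodes, 5*time.Minute); err != nil {
		return err
	}

	// Per-node checks keep the same shape as before; only the package changed.
	_ = e2enode.IsConditionSetAsExpected(node, v1.NodeReady, true)
	_, err = e2enode.GetExternalIP(node)
	return err
}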
@@ -65,6 +65,7 @@ go_library(
 "//test/e2e/framework/deployment:go_default_library",
 "//test/e2e/framework/job:go_default_library",
 "//test/e2e/framework/log:go_default_library",
+"//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/replicaset:go_default_library",
 "//test/e2e/framework/ssh:go_default_library",
@@ -38,6 +38,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 jobutil "k8s.io/kubernetes/test/e2e/framework/job"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 testutils "k8s.io/kubernetes/test/utils"
 
@@ -59,7 +60,7 @@ func expectNodeReadiness(isReady bool, newNode chan *v1.Node) {
 for !expected && !timeout {
 select {
 case n := <-newNode:
-if framework.IsNodeConditionSetAsExpected(n, v1.NodeReady, isReady) {
+if e2enode.IsConditionSetAsExpected(n, v1.NodeReady, isReady) {
 expected = true
 } else {
 e2elog.Logf("Observed node ready status is NOT %v as expected", isReady)
@@ -142,8 +143,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 nodeOpts := metav1.ListOptions{}
 nodes, err := c.CoreV1().Nodes().List(nodeOpts)
 framework.ExpectNoError(err)
-framework.FilterNodes(nodes, func(node v1.Node) bool {
-if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
+e2enode.Filter(nodes, func(node v1.Node) bool {
+if !e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true) {
 return false
 }
 podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
@@ -199,7 +200,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 go controller.Run(stopCh)
 
 ginkgo.By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
-host, err := framework.GetNodeExternalIP(&node)
+host, err := e2enode.GetExternalIP(&node)
 framework.ExpectNoError(err)
 masterAddresses := framework.GetAllMasterAddresses(c)
 defer func() {
@@ -240,7 +241,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
 name := "my-hostname-net"
 common.NewSVCByName(c, ns, name)
-numNodes, err := framework.NumberOfRegisteredNodes(f.ClientSet)
+numNodes, err := e2enode.TotalRegistered(f.ClientSet)
 framework.ExpectNoError(err)
 replicas := int32(numNodes)
 common.NewRCByName(c, ns, name, replicas, nil)
@@ -274,7 +275,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 })
 
 e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
-if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
+if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
 framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
 }
 
@@ -307,7 +308,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 gracePeriod := int64(30)
 
 common.NewSVCByName(c, ns, name)
-numNodes, err := framework.NumberOfRegisteredNodes(f.ClientSet)
+numNodes, err := e2enode.TotalRegistered(f.ClientSet)
 framework.ExpectNoError(err)
 replicas := int32(numNodes)
 common.NewRCByName(c, ns, name, replicas, &gracePeriod)
@@ -341,7 +342,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 })
 
 e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
-if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
+if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
 framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
 }
 })
@@ -382,9 +383,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 
 pst := framework.NewStatefulSetTester(c)
 
-nn, err := framework.NumberOfRegisteredNodes(f.ClientSet)
+nn, err := e2enode.TotalRegistered(f.ClientSet)
 framework.ExpectNoError(err)
-nodes, err := framework.CheckNodesReady(f.ClientSet, nn, framework.NodeReadyInitialTimeout)
+nodes, err := e2enode.CheckReady(f.ClientSet, nn, framework.NodeReadyInitialTimeout)
 framework.ExpectNoError(err)
 common.RestartNodes(f.ClientSet, nodes)
 
@@ -414,7 +415,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 })
 
 e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
-if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
+if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
 framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
 }
 
@@ -462,7 +463,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 })
 
 e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
-if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
+if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
 framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
 }
 })
@@ -485,8 +486,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 ginkgo.By("choose a node - we will block all network traffic on this node")
 var podOpts metav1.ListOptions
 nodes := framework.GetReadySchedulableNodesOrDie(c)
-framework.FilterNodes(nodes, func(node v1.Node) bool {
-if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
+e2enode.Filter(nodes, func(node v1.Node) bool {
+if !e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true) {
 return false
 }
 podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
@@ -581,7 +582,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 go controller.Run(stopCh)
 
 ginkgo.By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
-host, err := framework.GetNodeExternalIP(&node)
+host, err := e2enode.GetExternalIP(&node)
 framework.ExpectNoError(err)
 masterAddresses := framework.GetAllMasterAddresses(c)
 defer func() {
@@ -57,6 +57,7 @@ go_library(
 "//test/e2e/framework/deployment:go_default_library",
 "//test/e2e/framework/job:go_default_library",
 "//test/e2e/framework/log:go_default_library",
+"//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/utils:go_default_library",
 "//test/utils/image:go_default_library",
@@ -27,6 +27,7 @@ import (
 
 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 )
 
 var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
@@ -42,9 +43,9 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
 gomega.Expect(len(nodeList.Items)).NotTo(gomega.BeZero())
 
 pickedNode := nodeList.Items[0]
-nodeIPs = framework.GetNodeAddresses(&pickedNode, v1.NodeExternalIP)
+nodeIPs = e2enode.GetAddresses(&pickedNode, v1.NodeExternalIP)
 // The pods running in the cluster can see the internal addresses.
-nodeIPs = append(nodeIPs, framework.GetNodeAddresses(&pickedNode, v1.NodeInternalIP)...)
+nodeIPs = append(nodeIPs, e2enode.GetAddresses(&pickedNode, v1.NodeInternalIP)...)
 
 // make sure ServiceAccount admission controller is enabled, so secret generation on SA creation works
 saName := "default"
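The NodeAuthenticator hunk above switches from framework.GetNodeAddresses to e2enode.GetAddresses. A small, self-contained sketch of that helper in use (not part of the PR; reachableNodeIPs and its fallback behaviour are illustrative):

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"

	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// reachableNodeIPs returns the external IPs of the first listed node and,
// as the in-cluster fallback used by the test above, its internal IPs.
func reachableNodeIPs(c clientset.Interface) ([]string, error) {
	nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	if len(nodeList.Items) == 0 {
		return nil, nil
	}
	pickedNode := nodeList.Items[0]
	ips := e2enode.GetAddresses(&pickedNode, v1.NodeExternalIP)
	ips = append(ips, e2enode.GetAddresses(&pickedNode, v1.NodeInternalIP)...)
	return ips, nil
}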
@@ -41,6 +41,7 @@ go_library(
 "//test/e2e/common:go_default_library",
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/log:go_default_library",
+"//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/instrumentation/monitoring:go_default_library",
 "//test/e2e/scheduling:go_default_library",
@@ -24,6 +24,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 
 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
@@ -73,7 +74,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 if len(nodeGroupName) > 0 {
 // Scale down back to only 'nodesNum' nodes, as expected at the start of the test.
 framework.ExpectNoError(framework.ResizeGroup(nodeGroupName, nodesNum))
-framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, nodesNum, 15*time.Minute))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, nodesNum, 15*time.Minute))
 }
 })
 
@@ -30,6 +30,7 @@ import (
 "k8s.io/apimachinery/pkg/util/strategicpatch"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
 
@@ -87,7 +88,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 }
 }
 
-framework.ExpectNoError(framework.WaitForReadyNodes(c, sum, scaleUpTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(c, sum, scaleUpTimeout))
 
 nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 nodeCount = len(nodes.Items)
@@ -112,7 +113,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 ginkgo.AfterEach(func() {
 ginkgo.By(fmt.Sprintf("Restoring initial size of the cluster"))
 setMigSizes(originalSizes)
-framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount, scaleDownTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount, scaleDownTimeout))
 nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
 framework.ExpectNoError(err)
 s := time.Now()
@@ -214,7 +215,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 anyKey(originalSizes): totalNodes,
 }
 setMigSizes(newSizes)
-framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
 
 // run replicas
 rcConfig := reserveMemoryRCConfig(f, "some-pod", replicas, replicas*perNodeReservation, largeScaleUpTimeout)
@@ -248,7 +249,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 }
 setMigSizes(newSizes)
 
-framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
 
 // annotate all nodes with no-scale-down
 ScaleDownDisabledKey := "cluster-autoscaler.kubernetes.io/scale-down-disabled"
@@ -302,7 +303,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 anyKey(originalSizes): totalNodes,
 }
 setMigSizes(newSizes)
-framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
 divider := int(float64(totalNodes) * 0.7)
 fullNodesCount := divider
 underutilizedNodesCount := totalNodes - fullNodesCount
@@ -348,7 +349,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name)
 
 // Ensure that no new nodes have been added so far.
-gomega.Expect(framework.NumberOfReadyNodes(f.ClientSet)).To(gomega.Equal(nodeCount))
+gomega.Expect(e2enode.TotalReady(f.ClientSet)).To(gomega.Equal(nodeCount))
 
 // Start a number of schedulable pods to ensure CA reacts.
 additionalNodes := maxNodes - nodeCount
@@ -385,7 +386,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC
 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size >= minExpectedNodeCount }, scaleUpTimeout))
 } else {
-framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, config.expectedResult.nodes, scaleUpTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(f.ClientSet, config.expectedResult.nodes, scaleUpTimeout))
 }
 klog.Infof("cluster is increased")
 if tolerateMissingPodCount > 0 {
@@ -44,6 +44,7 @@ import (
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 "k8s.io/kubernetes/test/e2e/scheduling"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -108,7 +109,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 sum += size
 }
 // Give instances time to spin up
-framework.ExpectNoError(framework.WaitForReadyNodes(c, sum, scaleUpTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(c, sum, scaleUpTimeout))
 
 nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 nodeCount = len(nodes.Items)
@@ -142,7 +143,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 for _, size := range originalSizes {
 expectedNodes += size
 }
-framework.ExpectNoError(framework.WaitForReadyNodes(c, expectedNodes, scaleDownTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(c, expectedNodes, scaleDownTimeout))
 nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
 framework.ExpectNoError(err)
 
@@ -373,7 +374,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 addNodePool(extraPoolName, "n1-standard-4", 1)
 defer deleteNodePool(extraPoolName)
 extraNodes := getPoolInitialSize(extraPoolName)
-framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
 // We wait for nodes to become schedulable to make sure the new nodes
 // will be returned by getPoolNodes below.
 framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, resizeTimeout))
@@ -407,7 +408,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 addNodePool(extraPoolName, "n1-standard-4", 1)
 defer deleteNodePool(extraPoolName)
 extraNodes := getPoolInitialSize(extraPoolName)
-framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
 framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
 framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2))
 })
@@ -437,7 +438,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
 
 framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
-framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
 })
 
 ginkgo.It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -458,7 +459,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
 
 framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
-framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
 })
 
 ginkgo.It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -530,7 +531,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 }()
 
 framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
-framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
 })
 
 ginkgo.It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -641,7 +642,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 addNodePool(extraPoolName, "n1-standard-4", 1)
 defer deleteNodePool(extraPoolName)
 extraNodes := getPoolInitialSize(extraPoolName)
-framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
 framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
 defer disableAutoscaler(extraPoolName, 1, 2)
 
@@ -655,7 +656,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 // reseting all the timers in scale down code. Adding 5 extra minutes to workaround
 // this issue.
 // TODO: Remove the extra time when GKE restart is fixed.
-framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes+1, scaleUpTimeout+5*time.Minute))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes+1, scaleUpTimeout+5*time.Minute))
 })
 
 simpleScaleDownTest := func(unready int) {
@@ -766,7 +767,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 }
 }
 framework.ExpectNoError(framework.ResizeGroup(minMig, int32(0)))
-framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount-minSize, resizeTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount-minSize, resizeTimeout))
 }
 
 ginkgo.By("Make remaining nodes unschedulable")
@@ -812,7 +813,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 addNodePool(extraPoolName, "n1-standard-4", 1)
 defer deleteNodePool(extraPoolName)
 extraNodes := getPoolInitialSize(extraPoolName)
-framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
 framework.ExpectNoError(enableAutoscaler(extraPoolName, 0, 1))
 defer disableAutoscaler(extraPoolName, 0, 1)
 
@@ -845,7 +846,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 }
 }
 framework.ExpectNoError(framework.ResizeGroup(minMig, int32(1)))
-framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount-minSize+1, resizeTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount-minSize+1, resizeTimeout))
 ngNodes, err := framework.GetGroupNodes(minMig)
 framework.ExpectNoError(err)
 gomega.Expect(len(ngNodes) == 1).To(gomega.BeTrue())
@@ -926,7 +927,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 }
 testFunction()
 // Give nodes time to recover from network failure
-framework.ExpectNoError(framework.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout))
+framework.ExpectNoError(e2enode.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout))
 })
 
 ginkgo.It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -1339,8 +1340,8 @@ func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int)
 numNodes := len(nodes.Items)
 
 // Filter out not-ready nodes.
-framework.FilterNodes(nodes, func(node v1.Node) bool {
-return framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
+e2enode.Filter(nodes, func(node v1.Node) bool {
+return e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true)
 })
 numReady := len(nodes.Items)
 
@@ -30,6 +30,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 
 "github.com/onsi/ginkgo"
@@ -102,7 +103,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
 // This test is separated because it is slow and need to run serially.
 // Will take around 5 minutes to run on a 4 nodes cluster.
 ginkgo.It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func() {
-numNodes, err := framework.NumberOfRegisteredNodes(c)
+numNodes, err := e2enode.TotalRegistered(c)
 framework.ExpectNoError(err)
 
 ginkgo.By("Replace the dns autoscaling parameters with testing parameters")
@@ -157,7 +158,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
 
 ginkgo.By("Restoring cluster size")
 setMigSizes(originalSizes)
-err = framework.WaitForReadyNodes(c, numNodes, scaleDownTimeout)
+err = e2enode.WaitForReadyNodes(c, numNodes, scaleDownTimeout)
 framework.ExpectNoError(err)
 
 ginkgo.By("Wait for kube-dns scaled to expected number")
@@ -14,6 +14,7 @@ go_library(
 "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/log:go_default_library",
+"//test/e2e/framework/node:go_default_library",
 "//vendor/github.com/onsi/ginkgo:go_default_library",
 "//vendor/github.com/onsi/gomega:go_default_library",
 ],
@@ -24,6 +24,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 
 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
@@ -54,7 +55,7 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
 framework.Failf("failed to delete node %q, err: %q", nodeToDelete.Name, err)
 }
 
-newNodes, err := framework.CheckNodesReady(c, len(origNodes.Items)-1, 5*time.Minute)
+newNodes, err := e2enode.CheckReady(c, len(origNodes.Items)-1, 5*time.Minute)
 gomega.Expect(err).To(gomega.BeNil())
 gomega.Expect(len(newNodes)).To(gomega.Equal(len(origNodes.Items) - 1))
 
@@ -78,6 +78,7 @@ go_library(
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/deployment:go_default_library",
 "//test/e2e/framework/log:go_default_library",
+"//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/replicaset:go_default_library",
 "//test/e2e/framework/volume:go_default_library",
@@ -29,6 +29,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 testutils "k8s.io/kubernetes/test/utils"
 
 "github.com/onsi/ginkgo"
@@ -86,7 +87,7 @@ var _ = framework.KubeDescribe("NodeLease", func() {
 
 ginkgo.It("the kubelet should report node status infrequently", func() {
 ginkgo.By("wait until node is ready")
-framework.WaitForNodeToBeReady(f.ClientSet, nodeName, 5*time.Minute)
+e2enode.WaitForNodeToBeReady(f.ClientSet, nodeName, 5*time.Minute)
 
 ginkgo.By("wait until there is node lease")
 var err error
@@ -41,7 +41,6 @@ go_library(
 "//pkg/apis/storage/v1/util:go_default_library",
 "//pkg/client/conditions:go_default_library",
 "//pkg/controller:go_default_library",
-"//pkg/controller/nodelifecycle:go_default_library",
 "//pkg/controller/service:go_default_library",
 "//pkg/features:go_default_library",
 "//pkg/kubelet/apis/config:go_default_library",
@@ -108,6 +107,7 @@ go_library(
 "//test/e2e/framework/ginkgowrapper:go_default_library",
 "//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/metrics:go_default_library",
+"//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/ssh:go_default_library",
 "//test/e2e/framework/testfiles:go_default_library",
@@ -150,6 +150,7 @@ filegroup(
 "//test/e2e/framework/lifecycle:all-srcs",
 "//test/e2e/framework/log:all-srcs",
 "//test/e2e/framework/metrics:all-srcs",
+"//test/e2e/framework/node:all-srcs",
 "//test/e2e/framework/pod:all-srcs",
 "//test/e2e/framework/podlogs:all-srcs",
 "//test/e2e/framework/providers/aws:all-srcs",
@@ -19,6 +19,7 @@ go_library(
 "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/log:go_default_library",
+"//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/testfiles:go_default_library",
 "//test/e2e/manifest:go_default_library",
 "//test/utils:go_default_library",
@@ -50,6 +50,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 "k8s.io/kubernetes/test/e2e/framework/testfiles"
 "k8s.io/kubernetes/test/e2e/manifest"
 testutils "k8s.io/kubernetes/test/utils"
@@ -744,7 +745,7 @@ func (j *TestJig) VerifyURL(route, host string, iterations int, interval time.Du
 
 func (j *TestJig) pollServiceNodePort(ns, name string, port int) error {
 // TODO: Curl all nodes?
-u, err := framework.GetNodePortURL(j.Client, ns, name, port)
+u, err := e2enode.GetPortURL(j.Client, ns, name, port)
 if err != nil {
 return err
 }
@@ -40,6 +40,7 @@ import (
 "k8s.io/kubernetes/pkg/master/ports"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/metrics"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 
 "github.com/prometheus/common/model"
 )
@@ -499,7 +500,7 @@ type usageDataPerContainer struct {
 
 // GetKubeletHeapStats returns stats of kubelet heap.
 func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error) {
-client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap", ports.KubeletPort)
+client, err := e2enode.ProxyRequest(c, nodeName, "debug/pprof/heap", ports.KubeletPort)
 if err != nil {
 return "", err
 }
@@ -39,6 +39,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -580,7 +581,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
 ginkgo.By("Getting node addresses")
 ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
 nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
-config.ExternalAddrs = NodeAddresses(nodeList, v1.NodeExternalIP)
+config.ExternalAddrs = e2enode.FirstAddress(nodeList, v1.NodeExternalIP)
 
 SkipUnlessNodeCountIsAtLeast(2)
 config.Nodes = nodeList.Items
@@ -603,7 +604,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
 if len(config.ExternalAddrs) != 0 {
 config.NodeIP = config.ExternalAddrs[0]
 } else {
-internalAddrs := NodeAddresses(nodeList, v1.NodeInternalIP)
+internalAddrs := e2enode.FirstAddress(nodeList, v1.NodeInternalIP)
 config.NodeIP = internalAddrs[0]
 }
 }
@@ -1048,7 +1049,7 @@ func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout
 // This function executes commands on a node so it will work only for some
 // environments.
 func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1.Node, testFunc func()) {
-host, err := GetNodeExternalIP(node)
+host, err := e2enode.GetExternalIP(node)
 if err != nil {
 Failf("Error getting node external ip : %v", err)
 }
@@ -1066,7 +1067,7 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
 }()
 
 e2elog.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
-if !WaitForNodeToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
+if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
 Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
 }
 for _, masterAddress := range masterAddresses {
@@ -1074,7 +1075,7 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
 }
 
 e2elog.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
-if !WaitForNodeToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {
+if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {
 Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
 }
 
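The util.go hunks above are the partition-test pattern most callers of the new package follow: confirm the node drops out of Ready, run the test body, then require it to become Ready again. A compressed, hedged sketch of that flow (not from the PR; underNetworkFailure, the block/unblock callbacks, and the timeout values are illustrative):

package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// underNetworkFailure sketches the partition flow: block traffic, wait for the
// control plane to see the node as NotReady, run the body, then restore
// traffic and require the node to report Ready again.
func underNetworkFailure(c clientset.Interface, node *v1.Node, block, unblock, body func()) {
	block()
	defer func() {
		unblock()
		// Illustrative 10-minute recovery budget, mirroring resizeNodeReadyTimeout.
		if !e2enode.WaitForNodeToBeReady(c, node.Name, 10*time.Minute) {
			framework.Failf("Node %s did not become ready again", node.Name)
		}
	}()

	// Illustrative 4-minute budget for the partition to be observed.
	if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, false, 4*time.Minute) {
		framework.Failf("Node %s never reported NotReady", node.Name)
	}
	body()
}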
37
test/e2e/framework/node/BUILD
Normal file
37
test/e2e/framework/node/BUILD
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = [
|
||||||
|
"resource.go",
|
||||||
|
"wait.go",
|
||||||
|
],
|
||||||
|
importpath = "k8s.io/kubernetes/test/e2e/framework/node",
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
deps = [
|
||||||
|
"//pkg/controller/nodelifecycle:go_default_library",
|
||||||
|
"//pkg/util/system:go_default_library",
|
||||||
|
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||||
|
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||||
|
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||||
|
"//staging/src/k8s.io/client-go/rest:go_default_library",
|
||||||
|
"//test/e2e/framework/log:go_default_library",
|
||||||
|
"//test/utils:go_default_library",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "package-srcs",
|
||||||
|
srcs = glob(["**"]),
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:private"],
|
||||||
|
)
|
||||||
|
|
||||||
|
filegroup(
|
||||||
|
name = "all-srcs",
|
||||||
|
srcs = [":package-srcs"],
|
||||||
|
tags = ["automanaged"],
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
)
|
317
test/e2e/framework/node/resource.go
Normal file
317
test/e2e/framework/node/resource.go
Normal file
@ -0,0 +1,317 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2019 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package node
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"k8s.io/api/core/v1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/fields"
|
||||||
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
|
restclient "k8s.io/client-go/rest"
|
||||||
|
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
|
||||||
|
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||||
|
testutils "k8s.io/kubernetes/test/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// poll is how often to Poll pods, nodes and claims.
|
||||||
|
poll = 2 * time.Second
|
||||||
|
|
||||||
|
// singleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
|
||||||
|
// transient failures from failing tests.
|
||||||
|
// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
|
||||||
|
singleCallTimeout = 5 * time.Minute
|
||||||
|
|
||||||
|
// ssh port
|
||||||
|
sshPort = "22"
|
||||||
|
|
||||||
|
// timeout for proxy requests.
|
||||||
|
proxyTimeout = 2 * time.Minute
|
||||||
|
)
|
||||||
|
|
||||||
|
// FirstAddress returns the first address of the given type of each node.
|
||||||
|
// TODO: Use return type string instead of []string
|
||||||
|
func FirstAddress(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string {
|
||||||
|
hosts := []string{}
|
||||||
|
for _, n := range nodelist.Items {
|
||||||
|
for _, addr := range n.Status.Addresses {
|
||||||
|
if addr.Type == addrType && addr.Address != "" {
|
||||||
|
hosts = append(hosts, addr.Address)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return hosts
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: better to change to a easy read name
|
||||||
|
func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue, silent bool) bool {
|
||||||
|
// Check the node readiness condition (logging all).
|
||||||
|
for _, cond := range node.Status.Conditions {
|
||||||
|
// Ensure that the condition type and the status matches as desired.
|
||||||
|
if cond.Type == conditionType {
|
||||||
|
// For NodeReady condition we need to check Taints as well
|
||||||
|
if cond.Type == v1.NodeReady {
|
||||||
|
hasNodeControllerTaints := false
|
||||||
|
// For NodeReady we need to check if Taints are gone as well
|
||||||
|
taints := node.Spec.Taints
|
||||||
|
for _, taint := range taints {
|
||||||
|
if taint.MatchTaint(nodectlr.UnreachableTaintTemplate) || taint.MatchTaint(nodectlr.NotReadyTaintTemplate) {
|
||||||
|
hasNodeControllerTaints = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if wantTrue {
|
||||||
|
if (cond.Status == v1.ConditionTrue) && !hasNodeControllerTaints {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
msg := ""
|
||||||
|
if !hasNodeControllerTaints {
|
||||||
|
msg = fmt.Sprintf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
|
||||||
|
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
|
||||||
|
}
|
||||||
|
msg = fmt.Sprintf("Condition %s of node %s is %v, but Node is tainted by NodeController with %v. Failure",
|
||||||
|
conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
|
||||||
|
if !silent {
|
||||||
|
e2elog.Logf(msg)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// TODO: check if the Node is tainted once we enable NC notReady/unreachable taints by default
|
||||||
|
if cond.Status != v1.ConditionTrue {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if !silent {
|
||||||
|
e2elog.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
|
||||||
|
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if (wantTrue && (cond.Status == v1.ConditionTrue)) || (!wantTrue && (cond.Status != v1.ConditionTrue)) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if !silent {
|
||||||
|
e2elog.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
|
||||||
|
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
if !silent {
|
||||||
|
e2elog.Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsConditionSetAsExpected returns a wantTrue value if the node has a match to the conditionType, otherwise returns an opposite value of the wantTrue with detailed logging.
|
||||||
|
func IsConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
|
||||||
|
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsConditionSetAsExpectedSilent returns a wantTrue value if the node has a match to the conditionType, otherwise returns an opposite value of the wantTrue.
|
||||||
|
func IsConditionSetAsExpectedSilent(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
|
||||||
|
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsConditionUnset returns true if conditions of the given node do not have a match to the given conditionType, otherwise false.
|
||||||
|
func IsConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool {
|
||||||
|
for _, cond := range node.Status.Conditions {
|
||||||
|
if cond.Type == conditionType {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filter filters nodes in NodeList in place, removing nodes that do not
// satisfy the given condition
// TODO: consider merging with pkg/client/cache.NodeLister
func Filter(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
	var l []v1.Node

	for _, node := range nodeList.Items {
		if fn(node) {
			l = append(l, node)
		}
	}
	nodeList.Items = l
}

// TotalRegistered returns number of registered Nodes excluding Master Node.
func TotalRegistered(c clientset.Interface) (int, error) {
	nodes, err := waitListSchedulableNodes(c)
	if err != nil {
		e2elog.Logf("Failed to list nodes: %v", err)
		return 0, err
	}
	return len(nodes.Items), nil
}

// TotalReady returns number of ready Nodes excluding Master Node.
func TotalReady(c clientset.Interface) (int, error) {
	nodes, err := waitListSchedulableNodes(c)
	if err != nil {
		e2elog.Logf("Failed to list nodes: %v", err)
		return 0, err
	}

	// Filter out not-ready nodes.
	Filter(nodes, func(node v1.Node) bool {
		return IsConditionSetAsExpected(&node, v1.NodeReady, true)
	})
	return len(nodes.Items), nil
}

// getSvcNodePort returns the node port for the given service:port.
func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) {
	svc, err := client.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
	if err != nil {
		return 0, err
	}
	for _, p := range svc.Spec.Ports {
		if p.Port == int32(svcPort) {
			if p.NodePort != 0 {
				return int(p.NodePort), nil
			}
		}
	}
	return 0, fmt.Errorf(
		"No node port found for service %v, port %v", name, svcPort)
}

// GetPortURL returns the url to a nodeport Service.
func GetPortURL(client clientset.Interface, ns, name string, svcPort int) (string, error) {
	nodePort, err := getSvcNodePort(client, ns, name, svcPort)
	if err != nil {
		return "", err
	}
	// This list of nodes must not include the master, which is marked
	// unschedulable, since the master doesn't run kube-proxy. Without
	// kube-proxy NodePorts won't work.
	var nodes *v1.NodeList
	if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
		nodes, err = client.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector().String()})
		if err != nil {
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		return true, nil
	}) != nil {
		return "", err
	}
	if len(nodes.Items) == 0 {
		return "", fmt.Errorf("Unable to list nodes in cluster")
	}
	for _, node := range nodes.Items {
		for _, address := range node.Status.Addresses {
			if address.Type == v1.NodeExternalIP {
				if address.Address != "" {
					return fmt.Sprintf("http://%v:%v", address.Address, nodePort), nil
				}
			}
		}
	}
	return "", fmt.Errorf("Failed to find external address for service %v", name)
}

// ProxyRequest performs a get on a node proxy endpoint given the nodename and rest client.
func ProxyRequest(c clientset.Interface, node, endpoint string, port int) (restclient.Result, error) {
	// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
	// This will leak a goroutine if proxy hangs. #22165
	var result restclient.Result
	finished := make(chan struct{})
	go func() {
		result = c.CoreV1().RESTClient().Get().
			Resource("nodes").
			SubResource("proxy").
			Name(fmt.Sprintf("%v:%v", node, port)).
			Suffix(endpoint).
			Do()

		finished <- struct{}{}
	}()
	select {
	case <-finished:
		return result, nil
	case <-time.After(proxyTimeout):
		return restclient.Result{}, nil
	}
}
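
For orientation, here is a minimal usage sketch for the relocated ProxyRequest helper, modeled on the kubelet-port call sites updated further below in this change; f.ClientSet, nodeName, and the framework/ports/gomega helpers are assumed to come from the surrounding e2e test and are not part of the diff itself:

// Sketch (hypothetical test snippet): probe the kubelet read-only port via the
// API server node proxy and check the returned HTTP status code.
result, err := e2enode.ProxyRequest(f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort)
framework.ExpectNoError(err)
var statusCode int
result.StatusCode(&statusCode)
gomega.Expect(statusCode).NotTo(gomega.Equal(http.StatusOK))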

// GetExternalIP returns node external IP concatenated with port 22 for ssh
// e.g. 1.2.3.4:22
func GetExternalIP(node *v1.Node) (string, error) {
	e2elog.Logf("Getting external IP address for %s", node.Name)
	host := ""
	for _, a := range node.Status.Addresses {
		if a.Type == v1.NodeExternalIP && a.Address != "" {
			host = net.JoinHostPort(a.Address, sshPort)
			break
		}
	}
	if host == "" {
		return "", fmt.Errorf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
	}
	return host, nil
}

// GetInternalIP returns node internal IP
func GetInternalIP(node *v1.Node) (string, error) {
	host := ""
	for _, address := range node.Status.Addresses {
		if address.Type == v1.NodeInternalIP {
			if address.Address != "" {
				host = net.JoinHostPort(address.Address, sshPort)
				break
			}
		}
	}
	if host == "" {
		return "", fmt.Errorf("Couldn't get the internal IP of host %s with addresses %v", node.Name, node.Status.Addresses)
	}
	return host, nil
}

// GetAddresses returns a list of addresses of the given addressType for the given node
func GetAddresses(node *v1.Node, addressType v1.NodeAddressType) (ips []string) {
	for j := range node.Status.Addresses {
		nodeAddress := &node.Status.Addresses[j]
		if nodeAddress.Type == addressType && nodeAddress.Address != "" {
			ips = append(ips, nodeAddress.Address)
		}
	}
	return
}

// CollectAddresses returns a list of addresses of the given addressType for the given list of nodes
func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []string {
	ips := []string{}
	for i := range nodes.Items {
		ips = append(ips, GetAddresses(&nodes.Items[i], addressType)...)
	}
	return ips
}
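
As a reference for the call-site migrations later in this diff, a minimal sketch of how these relocated helpers combine; the wrapper function name is hypothetical and the clientset c is assumed to be supplied by the e2e framework:

// Sketch: list nodes, keep only the Ready ones, and log their external IPs
// using the new e2enode helpers.
func logReadyNodeExternalIPs(c clientset.Interface) {
	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	framework.ExpectNoError(err)
	e2enode.Filter(nodes, func(node v1.Node) bool {
		return e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true)
	})
	e2elog.Logf("ready node external IPs: %v", e2enode.CollectAddresses(nodes, v1.NodeExternalIP))
}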

test/e2e/framework/node/wait.go (new file, 199 lines)
@@ -0,0 +1,199 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package node

import (
	"fmt"
	"regexp"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/util/system"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	testutils "k8s.io/kubernetes/test/utils"
)

const sleepTime = 20 * time.Second

var requiredPerNodePods = []*regexp.Regexp{
	regexp.MustCompile(".*kube-proxy.*"),
	regexp.MustCompile(".*fluentd-elasticsearch.*"),
	regexp.MustCompile(".*node-problem-detector.*"),
}

// WaitForReadyNodes waits up to timeout for cluster to has desired size and
// there is no not-ready nodes in it. By cluster size we mean number of Nodes
// excluding Master Node.
func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) error {
	_, err := CheckReady(c, size, timeout)
	return err
}

// WaitForTotalHealthy checks whether all registered nodes are ready and all required Pods are running on them.
func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
	e2elog.Logf("Waiting up to %v for all nodes to be ready", timeout)

	var notReady []v1.Node
	var missingPodsPerNode map[string][]string
	err := wait.PollImmediate(poll, timeout, func() (bool, error) {
		notReady = nil
		// It should be OK to list unschedulable Nodes here.
		nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
		if err != nil {
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		for _, node := range nodes.Items {
			if !IsConditionSetAsExpected(&node, v1.NodeReady, true) {
				notReady = append(notReady, node)
			}
		}
		pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: "0"})
		if err != nil {
			return false, err
		}

		systemPodsPerNode := make(map[string][]string)
		for _, pod := range pods.Items {
			if pod.Namespace == metav1.NamespaceSystem && pod.Status.Phase == v1.PodRunning {
				if pod.Spec.NodeName != "" {
					systemPodsPerNode[pod.Spec.NodeName] = append(systemPodsPerNode[pod.Spec.NodeName], pod.Name)
				}
			}
		}
		missingPodsPerNode = make(map[string][]string)
		for _, node := range nodes.Items {
			if !system.IsMasterNode(node.Name) {
				for _, requiredPod := range requiredPerNodePods {
					foundRequired := false
					for _, presentPod := range systemPodsPerNode[node.Name] {
						if requiredPod.MatchString(presentPod) {
							foundRequired = true
							break
						}
					}
					if !foundRequired {
						missingPodsPerNode[node.Name] = append(missingPodsPerNode[node.Name], requiredPod.String())
					}
				}
			}
		}
		return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
	})

	if err != nil && err != wait.ErrWaitTimeout {
		return err
	}

	if len(notReady) > 0 {
		return fmt.Errorf("Not ready nodes: %v", notReady)
	}
	if len(missingPodsPerNode) > 0 {
		return fmt.Errorf("Not running system Pods: %v", missingPodsPerNode)
	}
	return nil

}

// WaitConditionToBe returns whether node "name's" condition state matches wantTrue
// within timeout. If wantTrue is true, it will ensure the node condition status
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
	e2elog.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
		node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
		if err != nil {
			e2elog.Logf("Couldn't get node %s", name)
			continue
		}

		if IsConditionSetAsExpected(node, conditionType, wantTrue) {
			return true
		}
	}
	e2elog.Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
	return false
}

// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the
// readiness condition is anything but ready, e.g false or unknown) within
// timeout.
func WaitForNodeToBeNotReady(c clientset.Interface, name string, timeout time.Duration) bool {
	return WaitConditionToBe(c, name, v1.NodeReady, false, timeout)
}

// WaitForNodeToBeReady returns whether node name is ready within timeout.
func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool {
	return WaitConditionToBe(c, name, v1.NodeReady, true, timeout)
}

// CheckReady waits up to timeout for cluster to has desired size and
// there is no not-ready nodes in it. By cluster size we mean number of Nodes
// excluding Master Node.
func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(sleepTime) {
		nodes, err := waitListSchedulableNodes(c)
		if err != nil {
			e2elog.Logf("Failed to list nodes: %v", err)
			continue
		}
		numNodes := len(nodes.Items)

		// Filter out not-ready nodes.
		Filter(nodes, func(node v1.Node) bool {
			nodeReady := IsConditionSetAsExpected(&node, v1.NodeReady, true)
			networkReady := IsConditionUnset(&node, v1.NodeNetworkUnavailable) || IsConditionSetAsExpected(&node, v1.NodeNetworkUnavailable, false)
			return nodeReady && networkReady
		})
		numReady := len(nodes.Items)

		if numNodes == size && numReady == size {
			e2elog.Logf("Cluster has reached the desired number of ready nodes %d", size)
			return nodes.Items, nil
		}
		e2elog.Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady)
	}
	return nil, fmt.Errorf("timeout waiting %v for number of ready nodes to be %d", timeout, size)
}

// waitListSchedulableNodes is a wrapper around listing nodes supporting retries.
func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
	var nodes *v1.NodeList
	var err error
	if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
		nodes, err = c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector().String()})
		if err != nil {
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		return true, nil
	}) != nil {
		return nodes, err
	}
	return nodes, nil
}
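
A minimal sketch of driving the new wait helpers from a test follows; the wrapper name is hypothetical, the timeouts are illustrative, and c plus the cluster-size configuration are assumed to come from the existing framework:

// Sketch: wait for the configured number of ready nodes, then require every
// registered node (and its required per-node system pods) to be healthy.
func waitForHealthyCluster(c clientset.Interface) {
	err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute)
	framework.ExpectNoError(err)
	if err := e2enode.WaitForTotalHealthy(c, 5*time.Minute); err != nil {
		framework.Failf("cluster not healthy: %v", err)
	}
}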

@@ -25,10 +25,11 @@ import (
	"sync"
	"time"

-	v1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)

@@ -202,12 +203,12 @@ func waitForNodesReadyAfterUpgrade(f *Framework) error {
	//
	// TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
	// GKE; the operation shouldn't return until they all are.
-	numNodes, err := NumberOfRegisteredNodes(f.ClientSet)
+	numNodes, err := e2enode.TotalRegistered(f.ClientSet)
	if err != nil {
		return fmt.Errorf("couldn't detect number of nodes")
	}
	e2elog.Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout, numNodes)
-	if _, err := CheckNodesReady(f.ClientSet, numNodes, RestartNodeReadyAgainTimeout); err != nil {
+	if _, err := e2enode.CheckReady(f.ClientSet, numNodes, RestartNodeReadyAgainTimeout); err != nil {
		return err
	}
	return nil
@@ -24,6 +24,7 @@ go_library(
        "//staging/src/k8s.io/legacy-cloud-providers/gce:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/framework/log:go_default_library",
+       "//test/e2e/framework/node:go_default_library",
        "//test/e2e/framework/pod:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",

@@ -28,6 +28,7 @@ import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	testutils "k8s.io/kubernetes/test/utils"
)

@@ -49,9 +50,9 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
	ginkgo.BeforeEach(func() {
		framework.SkipUnlessProviderIs("gce", "gke")
		var err error
-		numNodes, err := framework.NumberOfRegisteredNodes(f.ClientSet)
+		numNodes, err := e2enode.TotalRegistered(f.ClientSet)
		framework.ExpectNoError(err)
-		originalNodes, err = framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
+		originalNodes, err = e2enode.CheckReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
		framework.ExpectNoError(err)

		e2elog.Logf("Got the following nodes before recreate %v", nodeNames(originalNodes))

@@ -104,7 +105,7 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace
		framework.Failf("Test failed; failed to recreate at least one node in %v.", framework.RecreateNodeReadyAgainTimeout)
	}

-	nodesAfter, err := framework.CheckNodesReady(c, len(nodes), framework.RestartNodeReadyAgainTimeout)
+	nodesAfter, err := e2enode.CheckReady(c, len(nodes), framework.RestartNodeReadyAgainTimeout)
	framework.ExpectNoError(err)
	e2elog.Logf("Got the following nodes after recreate: %v", nodeNames(nodesAfter))

@@ -40,6 +40,7 @@ import (
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
	testutils "k8s.io/kubernetes/test/utils"

@@ -324,34 +325,14 @@ func (j *ServiceTestJig) CreateLoadBalancerService(namespace, serviceName string
	return svc
}

-// GetNodeAddresses returns a list of addresses of the given addressType for the given node
-func GetNodeAddresses(node *v1.Node, addressType v1.NodeAddressType) (ips []string) {
-	for j := range node.Status.Addresses {
-		nodeAddress := &node.Status.Addresses[j]
-		if nodeAddress.Type == addressType && nodeAddress.Address != "" {
-			ips = append(ips, nodeAddress.Address)
-		}
-	}
-	return
-}
-
-// CollectAddresses returns a list of addresses of the given addressType for the given list of nodes
-func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []string {
-	ips := []string{}
-	for i := range nodes.Items {
-		ips = append(ips, GetNodeAddresses(&nodes.Items[i], addressType)...)
-	}
-	return ips
-}
-
// GetNodePublicIps returns a public IP list of nodes.
func GetNodePublicIps(c clientset.Interface) ([]string, error) {
	nodes := GetReadySchedulableNodesOrDie(c)

-	ips := CollectAddresses(nodes, v1.NodeExternalIP)
+	ips := e2enode.CollectAddresses(nodes, v1.NodeExternalIP)
	if len(ips) == 0 {
		// If ExternalIP isn't set, assume the test programs can reach the InternalIP
-		ips = CollectAddresses(nodes, v1.NodeInternalIP)
+		ips = e2enode.CollectAddresses(nodes, v1.NodeInternalIP)
	}
	return ips, nil
}

@@ -408,7 +389,7 @@ func (j *ServiceTestJig) GetEndpointNodes(svc *v1.Service) map[string][]string {
	nodeMap := map[string][]string{}
	for _, n := range nodes.Items {
		if epNodes.Has(n.Name) {
-			nodeMap[n.Name] = GetNodeAddresses(&n, v1.NodeExternalIP)
+			nodeMap[n.Name] = e2enode.GetAddresses(&n, v1.NodeExternalIP)
		}
	}
	return nodeMap
@@ -80,7 +80,6 @@ import (
	extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/client/conditions"
	"k8s.io/kubernetes/pkg/controller"
-	nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
	"k8s.io/kubernetes/pkg/controller/service"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/master/ports"

@@ -90,6 +89,7 @@ import (
	taintutils "k8s.io/kubernetes/pkg/util/taints"
	"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
	testutils "k8s.io/kubernetes/test/utils"

@@ -133,8 +133,6 @@ const (

	// PollShortTimeout is the short timeout value in polling.
	PollShortTimeout = 1 * time.Minute
-	// PollLongTimeout is the long timeout value in polling.
-	PollLongTimeout = 5 * time.Minute

	// ServiceAccountProvisionTimeout is how long to wait for a service account to be provisioned.
	// service accounts are provisioned after namespace creation

@@ -1914,9 +1912,9 @@ func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList {
// 2) it's Ready condition is set to true
// 3) doesn't have NetworkUnavailable condition set to true
func isNodeSchedulable(node *v1.Node) bool {
-	nodeReady := IsNodeConditionSetAsExpected(node, v1.NodeReady, true)
-	networkReady := IsNodeConditionUnset(node, v1.NodeNetworkUnavailable) ||
-		IsNodeConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false)
+	nodeReady := e2enode.IsConditionSetAsExpected(node, v1.NodeReady, true)
+	networkReady := e2enode.IsConditionUnset(node, v1.NodeNetworkUnavailable) ||
+		e2enode.IsConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false)
	return !node.Spec.Unschedulable && nodeReady && networkReady
}

@@ -1958,7 +1956,7 @@ func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList) {
	nodes = waitListSchedulableNodesOrDie(c)
	// previous tests may have cause failures of some nodes. Let's skip
	// 'Not Ready' nodes, just in case (there is no need to fail the test).
-	FilterNodes(nodes, func(node v1.Node) bool {
+	e2enode.Filter(nodes, func(node v1.Node) bool {
		return isNodeSchedulable(&node) && isNodeUntainted(&node)
	})
	return nodes

@@ -1970,7 +1968,7 @@ func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList) {
// presence of nvidia.com/gpu=present:NoSchedule taint
func GetReadyNodesIncludingTaintedOrDie(c clientset.Interface) (nodes *v1.NodeList) {
	nodes = waitListSchedulableNodesOrDie(c)
-	FilterNodes(nodes, func(node v1.Node) bool {
+	e2enode.Filter(nodes, func(node v1.Node) bool {
		return isNodeSchedulable(&node)
	})
	return nodes

@@ -2024,8 +2022,8 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
		for i := range notSchedulable {
			e2elog.Logf("-> %s Ready=%t Network=%t Taints=%v",
				notSchedulable[i].Name,
-				IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true),
-				IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false),
+				e2enode.IsConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true),
+				e2enode.IsConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false),
				notSchedulable[i].Spec.Taints)
		}
		e2elog.Logf("================================")
@@ -2410,20 +2408,6 @@ func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, a
	return ds, pollErr
}

-// NodeAddresses returns the first address of the given type of each node.
-func NodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string {
-	hosts := []string{}
-	for _, n := range nodelist.Items {
-		for _, addr := range n.Status.Addresses {
-			if addr.Type == addrType && addr.Address != "" {
-				hosts = append(hosts, addr.Address)
-				break
-			}
-		}
-	}
-	return hosts
-}
-
// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {

@@ -2456,118 +2440,6 @@ func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration
	}
}

-// WaitForNodeToBeReady returns whether node name is ready within timeout.
-func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool {
-	return WaitForNodeToBe(c, name, v1.NodeReady, true, timeout)
-}
-
-// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the
-// readiness condition is anything but ready, e.g false or unknown) within
-// timeout.
-func WaitForNodeToBeNotReady(c clientset.Interface, name string, timeout time.Duration) bool {
-	return WaitForNodeToBe(c, name, v1.NodeReady, false, timeout)
-}
-
-func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue, silent bool) bool {
-	// Check the node readiness condition (logging all).
-	for _, cond := range node.Status.Conditions {
-		// Ensure that the condition type and the status matches as desired.
-		if cond.Type == conditionType {
-			// For NodeReady condition we need to check Taints as well
-			if cond.Type == v1.NodeReady {
-				hasNodeControllerTaints := false
-				// For NodeReady we need to check if Taints are gone as well
-				taints := node.Spec.Taints
-				for _, taint := range taints {
-					if taint.MatchTaint(nodectlr.UnreachableTaintTemplate) || taint.MatchTaint(nodectlr.NotReadyTaintTemplate) {
-						hasNodeControllerTaints = true
-						break
-					}
-				}
-				if wantTrue {
-					if (cond.Status == v1.ConditionTrue) && !hasNodeControllerTaints {
-						return true
-					}
-					msg := ""
-					if !hasNodeControllerTaints {
-						msg = fmt.Sprintf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
-							conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
-					}
-					msg = fmt.Sprintf("Condition %s of node %s is %v, but Node is tainted by NodeController with %v. Failure",
-						conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
-					if !silent {
-						e2elog.Logf(msg)
-					}
-					return false
-				}
-				// TODO: check if the Node is tainted once we enable NC notReady/unreachable taints by default
-				if cond.Status != v1.ConditionTrue {
-					return true
-				}
-				if !silent {
-					e2elog.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
-						conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
-				}
-				return false
-			}
-			if (wantTrue && (cond.Status == v1.ConditionTrue)) || (!wantTrue && (cond.Status != v1.ConditionTrue)) {
-				return true
-			}
-			if !silent {
-				e2elog.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
-					conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
-			}
-			return false
-		}
-
-	}
-	if !silent {
-		e2elog.Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
-	}
-	return false
-}
-
-// IsNodeConditionSetAsExpected returns a wantTrue value if the node has a match to the conditionType, otherwise returns an opposite value of the wantTrue with detailed logging.
-func IsNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
-	return isNodeConditionSetAsExpected(node, conditionType, wantTrue, false)
-}
-
-// IsNodeConditionSetAsExpectedSilent returns a wantTrue value if the node has a match to the conditionType, otherwise returns an opposite value of the wantTrue.
-func IsNodeConditionSetAsExpectedSilent(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
-	return isNodeConditionSetAsExpected(node, conditionType, wantTrue, true)
-}
-
-// IsNodeConditionUnset returns true if conditions of the given node do not have a match to the given conditionType, otherwise false.
-func IsNodeConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool {
-	for _, cond := range node.Status.Conditions {
-		if cond.Type == conditionType {
-			return false
-		}
-	}
-	return true
-}
-
-// WaitForNodeToBe returns whether node "name's" condition state matches wantTrue
-// within timeout. If wantTrue is true, it will ensure the node condition status
-// is ConditionTrue; if it's false, it ensures the node condition is in any state
-// other than ConditionTrue (e.g. not true or unknown).
-func WaitForNodeToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
-	e2elog.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
-	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
-		node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
-		if err != nil {
-			e2elog.Logf("Couldn't get node %s", name)
-			continue
-		}
-
-		if IsNodeConditionSetAsExpected(node, conditionType, wantTrue) {
-			return true
-		}
-	}
-	e2elog.Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
-	return false
-}
-
// AllNodesReady checks whether all registered nodes are ready.
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
@@ -2588,7 +2460,7 @@ func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
		}
		for i := range nodes.Items {
			node := &nodes.Items[i]
-			if !IsNodeConditionSetAsExpected(node, v1.NodeReady, true) {
+			if !e2enode.IsConditionSetAsExpected(node, v1.NodeReady, true) {
				notReady = append(notReady, node)
			}
		}

@@ -2614,88 +2486,6 @@ func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
	return nil
}

-// WaitForAllNodesHealthy checks whether all registered nodes are ready and all required Pods are running on them.
-func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error {
-	e2elog.Logf("Waiting up to %v for all nodes to be ready", timeout)
-
-	var notReady []v1.Node
-	var missingPodsPerNode map[string][]string
-	err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
-		notReady = nil
-		// It should be OK to list unschedulable Nodes here.
-		nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
-		if err != nil {
-			if testutils.IsRetryableAPIError(err) {
-				return false, nil
-			}
-			return false, err
-		}
-		for _, node := range nodes.Items {
-			if !IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
-				notReady = append(notReady, node)
-			}
-		}
-		pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: "0"})
-		if err != nil {
-			return false, err
-		}
-
-		systemPodsPerNode := make(map[string][]string)
-		for _, pod := range pods.Items {
-			if pod.Namespace == metav1.NamespaceSystem && pod.Status.Phase == v1.PodRunning {
-				if pod.Spec.NodeName != "" {
-					systemPodsPerNode[pod.Spec.NodeName] = append(systemPodsPerNode[pod.Spec.NodeName], pod.Name)
-				}
-			}
-		}
-		missingPodsPerNode = make(map[string][]string)
-		for _, node := range nodes.Items {
-			if !system.IsMasterNode(node.Name) {
-				for _, requiredPod := range requiredPerNodePods {
-					foundRequired := false
-					for _, presentPod := range systemPodsPerNode[node.Name] {
-						if requiredPod.MatchString(presentPod) {
-							foundRequired = true
-							break
-						}
-					}
-					if !foundRequired {
-						missingPodsPerNode[node.Name] = append(missingPodsPerNode[node.Name], requiredPod.String())
-					}
-				}
-			}
-		}
-		return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
-	})
-
-	if err != nil && err != wait.ErrWaitTimeout {
-		return err
-	}
-
-	if len(notReady) > 0 {
-		return fmt.Errorf("Not ready nodes: %v", notReady)
-	}
-	if len(missingPodsPerNode) > 0 {
-		return fmt.Errorf("Not running system Pods: %v", missingPodsPerNode)
-	}
-	return nil
-
-}
-
-// FilterNodes filters nodes in NodeList in place, removing nodes that do not
-// satisfy the given condition
-// TODO: consider merging with pkg/client/cache.NodeLister
-func FilterNodes(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
-	var l []v1.Node
-
-	for _, node := range nodeList.Items {
-		if fn(node) {
-			l = append(l, node)
-		}
-	}
-	nodeList.Items = l
-}
-
// ParseKVLines parses output that looks like lines containing "<key>: <val>"
// and returns <val> if <key> is found. Otherwise, it returns the empty string.
func ParseKVLines(output, key string) string {
@@ -2962,68 +2752,6 @@ func CheckForControllerManagerHealthy(duration time.Duration) error {
	return nil
}

-// NumberOfRegisteredNodes returns number of registered Nodes excluding Master Node.
-func NumberOfRegisteredNodes(c clientset.Interface) (int, error) {
-	nodes, err := waitListSchedulableNodes(c)
-	if err != nil {
-		e2elog.Logf("Failed to list nodes: %v", err)
-		return 0, err
-	}
-	return len(nodes.Items), nil
-}
-
-// NumberOfReadyNodes returns number of ready Nodes excluding Master Node.
-func NumberOfReadyNodes(c clientset.Interface) (int, error) {
-	nodes, err := waitListSchedulableNodes(c)
-	if err != nil {
-		e2elog.Logf("Failed to list nodes: %v", err)
-		return 0, err
-	}
-
-	// Filter out not-ready nodes.
-	FilterNodes(nodes, func(node v1.Node) bool {
-		return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
-	})
-	return len(nodes.Items), nil
-}
-
-// CheckNodesReady waits up to timeout for cluster to has desired size and
-// there is no not-ready nodes in it. By cluster size we mean number of Nodes
-// excluding Master Node.
-func CheckNodesReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) {
-	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
-		nodes, err := waitListSchedulableNodes(c)
-		if err != nil {
-			e2elog.Logf("Failed to list nodes: %v", err)
-			continue
-		}
-		numNodes := len(nodes.Items)
-
-		// Filter out not-ready nodes.
-		FilterNodes(nodes, func(node v1.Node) bool {
-			nodeReady := IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
-			networkReady := IsNodeConditionUnset(&node, v1.NodeNetworkUnavailable) || IsNodeConditionSetAsExpected(&node, v1.NodeNetworkUnavailable, false)
-			return nodeReady && networkReady
-		})
-		numReady := len(nodes.Items)
-
-		if numNodes == size && numReady == size {
-			e2elog.Logf("Cluster has reached the desired number of ready nodes %d", size)
-			return nodes.Items, nil
-		}
-		e2elog.Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady)
-	}
-	return nil, fmt.Errorf("timeout waiting %v for number of ready nodes to be %d", timeout, size)
-}
-
-// WaitForReadyNodes waits up to timeout for cluster to has desired size and
-// there is no not-ready nodes in it. By cluster size we mean number of Nodes
-// excluding Master Node.
-func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) error {
-	_, err := CheckNodesReady(c, size, timeout)
-	return err
-}
-
// GenerateMasterRegexp returns a regex for matching master node name.
func GenerateMasterRegexp(prefix string) string {
	return prefix + "(-...)?"

@@ -3039,7 +2767,7 @@ func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeou
		}

		// Filter out nodes that are not master replicas
-		FilterNodes(nodes, func(node v1.Node) bool {
+		e2enode.Filter(nodes, func(node v1.Node) bool {
			res, err := regexp.Match(GenerateMasterRegexp(masterPrefix), ([]byte)(node.Name))
			if err != nil {
				e2elog.Logf("Failed to match regexp to node name: %v", err)

@@ -3051,8 +2779,8 @@ func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeou
		numNodes := len(nodes.Items)

		// Filter out not-ready nodes.
-		FilterNodes(nodes, func(node v1.Node) bool {
-			return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
+		e2enode.Filter(nodes, func(node v1.Node) bool {
+			return e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true)
		})

		numReady := len(nodes.Items)
@@ -3152,62 +2880,6 @@ func LookForStringInFile(ns, podName, container, file, expectedString string, ti
	})
}

-// getSvcNodePort returns the node port for the given service:port.
-func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) {
-	svc, err := client.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
-	if err != nil {
-		return 0, err
-	}
-	for _, p := range svc.Spec.Ports {
-		if p.Port == int32(svcPort) {
-			if p.NodePort != 0 {
-				return int(p.NodePort), nil
-			}
-		}
-	}
-	return 0, fmt.Errorf(
-		"No node port found for service %v, port %v", name, svcPort)
-}
-
-// GetNodePortURL returns the url to a nodeport Service.
-func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (string, error) {
-	nodePort, err := getSvcNodePort(client, ns, name, svcPort)
-	if err != nil {
-		return "", err
-	}
-	// This list of nodes must not include the master, which is marked
-	// unschedulable, since the master doesn't run kube-proxy. Without
-	// kube-proxy NodePorts won't work.
-	var nodes *v1.NodeList
-	if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
-		nodes, err = client.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
-			"spec.unschedulable": "false",
-		}.AsSelector().String()})
-		if err != nil {
-			if testutils.IsRetryableAPIError(err) {
-				return false, nil
-			}
-			return false, err
-		}
-		return true, nil
-	}) != nil {
-		return "", err
-	}
-	if len(nodes.Items) == 0 {
-		return "", fmt.Errorf("Unable to list nodes in cluster")
-	}
-	for _, node := range nodes.Items {
-		for _, address := range node.Status.Addresses {
-			if address.Type == v1.NodeExternalIP {
-				if address.Address != "" {
-					return fmt.Sprintf("http://%v:%v", address.Address, nodePort), nil
-				}
-			}
-		}
-	}
-	return "", fmt.Errorf("Failed to find external address for service %v", name)
-}
-
// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {

@@ -3270,33 +2942,6 @@ func UnblockNetwork(from string, to string) {
	}
}

-// timeout for proxy requests.
-const proxyTimeout = 2 * time.Minute
-
-// NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client.
-func NodeProxyRequest(c clientset.Interface, node, endpoint string, port int) (restclient.Result, error) {
-	// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
-	// This will leak a goroutine if proxy hangs. #22165
-	var result restclient.Result
-	finished := make(chan struct{})
-	go func() {
-		result = c.CoreV1().RESTClient().Get().
-			Resource("nodes").
-			SubResource("proxy").
-			Name(fmt.Sprintf("%v:%v", node, port)).
-			Suffix(endpoint).
-			Do()
-
-		finished <- struct{}{}
-	}()
-	select {
-	case <-finished:
-		return result, nil
-	case <-time.After(proxyTimeout):
-		return restclient.Result{}, nil
-	}
-}
-
// GetKubeletPods retrieves the list of pods on the kubelet.
// TODO(alejandrox1): move to pod subpkg once node methods have been refactored.
func GetKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {

@@ -3315,7 +2960,7 @@ func GetKubeletRunningPods(c clientset.Interface, node string) (*v1.PodList, err
// refactored.
func getKubeletPods(c clientset.Interface, node, resource string) (*v1.PodList, error) {
	result := &v1.PodList{}
-	client, err := NodeProxyRequest(c, node, resource, ports.KubeletPort)
+	client, err := e2enode.ProxyRequest(c, node, resource, ports.KubeletPort)
	if err != nil {
		return &v1.PodList{}, err
	}
@@ -3637,40 +3282,6 @@ func GetAllMasterAddresses(c clientset.Interface) []string {
	return ips.List()
}

-// GetNodeExternalIP returns node external IP concatenated with port 22 for ssh
-// e.g. 1.2.3.4:22
-func GetNodeExternalIP(node *v1.Node) (string, error) {
-	e2elog.Logf("Getting external IP address for %s", node.Name)
-	host := ""
-	for _, a := range node.Status.Addresses {
-		if a.Type == v1.NodeExternalIP && a.Address != "" {
-			host = net.JoinHostPort(a.Address, sshPort)
-			break
-		}
-	}
-	if host == "" {
-		return "", fmt.Errorf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
-	}
-	return host, nil
-}
-
-// GetNodeInternalIP returns node internal IP
-func GetNodeInternalIP(node *v1.Node) (string, error) {
-	host := ""
-	for _, address := range node.Status.Addresses {
-		if address.Type == v1.NodeInternalIP {
-			if address.Address != "" {
-				host = net.JoinHostPort(address.Address, sshPort)
-				break
-			}
-		}
-	}
-	if host == "" {
-		return "", fmt.Errorf("Couldn't get the internal IP of host %s with addresses %v", node.Name, node.Status.Addresses)
-	}
-	return host, nil
-}
-
// SimpleGET executes a get on the given url, returns error if non-200 returned.
func SimpleGET(c *http.Client, url, host string) (string, error) {
	req, err := http.NewRequest("GET", url, nil)

@@ -36,6 +36,7 @@ go_library(
        "//test/e2e/framework/ginkgowrapper:go_default_library",
        "//test/e2e/framework/lifecycle:go_default_library",
        "//test/e2e/framework/log:go_default_library",
+       "//test/e2e/framework/node:go_default_library",
        "//test/e2e/framework/pod:go_default_library",
        "//test/e2e/framework/ssh:go_default_library",
        "//test/e2e/upgrades:go_default_library",

@@ -28,6 +28,7 @@ import (

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {

@@ -45,7 +46,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {

	// make sure kubelet readonly (10255) and cadvisor (4194) ports are disabled via API server proxy
	ginkgo.It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func() {
-		result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort)
+		result, err := e2enode.ProxyRequest(f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort)
		framework.ExpectNoError(err)

		var statusCode int

@@ -53,7 +54,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
		gomega.Expect(statusCode).NotTo(gomega.Equal(http.StatusOK))
	})
	ginkgo.It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func() {
-		result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "containers/", 4194)
+		result, err := e2enode.ProxyRequest(f.ClientSet, nodeName, "containers/", 4194)
		framework.ExpectNoError(err)

		var statusCode int

@@ -72,7 +73,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {

// checks whether the target port is closed
func portClosedTest(f *framework.Framework, pickNode *v1.Node, port int) {
-	nodeAddrs := framework.GetNodeAddresses(pickNode, v1.NodeExternalIP)
+	nodeAddrs := e2enode.GetAddresses(pickNode, v1.NodeExternalIP)
	gomega.Expect(len(nodeAddrs)).NotTo(gomega.BeZero())

	for _, addr := range nodeAddrs {

@@ -26,6 +26,7 @@ import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

	"github.com/onsi/ginkgo"

@@ -87,7 +88,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
			framework.Failf("Couldn't restore the original node instance group size: %v", err)
		}

-		if err := framework.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
+		if err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
			framework.Failf("Couldn't restore the original cluster size: %v", err)
		}
		// Many e2e tests assume that the cluster is fully healthy before they start. Wait until

@@ -99,7 +100,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {

	ginkgo.It("node lease should be deleted when corresponding node is deleted", func() {
		leaseClient := c.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease)
-		err := framework.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute)
+		err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute)
		gomega.Expect(err).To(gomega.BeNil())

		ginkgo.By("verify node lease exists for every nodes")

@@ -126,7 +127,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
		gomega.Expect(err).To(gomega.BeNil())
		err = framework.WaitForGroupSize(group, targetNumNodes)
		gomega.Expect(err).To(gomega.BeNil())
-		err = framework.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes-1, 10*time.Minute)
+		err = e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes-1, 10*time.Minute)
		gomega.Expect(err).To(gomega.BeNil())
		targetNodes := framework.GetReadySchedulableNodesOrDie(c)
		gomega.Expect(len(targetNodes.Items)).To(gomega.Equal(int(targetNumNodes)))

@@ -31,6 +31,7 @@ import (
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 testutils "k8s.io/kubernetes/test/utils"
@@ -237,7 +238,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 }
 
 // Node sanity check: ensure it is "ready".
-if !framework.WaitForNodeToBeReady(c, name, framework.NodeReadyInitialTimeout) {
+if !e2enode.WaitForNodeToBeReady(c, name, framework.NodeReadyInitialTimeout) {
 return false
 }
 
@@ -273,12 +274,12 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 }
 
 // Wait for some kind of "not ready" status.
-if !framework.WaitForNodeToBeNotReady(c, name, rebootNodeNotReadyTimeout) {
+if !e2enode.WaitForNodeToBeNotReady(c, name, rebootNodeNotReadyTimeout) {
 return false
 }
 
 // Wait for some kind of "ready" status.
-if !framework.WaitForNodeToBeReady(c, name, rebootNodeReadyAgainTimeout) {
+if !e2enode.WaitForNodeToBeReady(c, name, rebootNodeReadyAgainTimeout) {
 return false
 }
 
@@ -25,6 +25,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 
 "github.com/onsi/ginkgo"
@@ -97,7 +98,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 framework.Failf("Couldn't restore the original node instance group size: %v", err)
 }
 
-if err := framework.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil {
+if err := e2enode.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil {
 framework.Failf("Couldn't restore the original cluster size: %v", err)
 }
 // Many e2e tests assume that the cluster is fully healthy before they start. Wait until
@@ -111,7 +112,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 // Create a replication controller for a service that serves its hostname.
 // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
 name := "my-hostname-delete-node"
-numNodes, err := framework.NumberOfRegisteredNodes(c)
+numNodes, err := e2enode.TotalRegistered(c)
 framework.ExpectNoError(err)
 originalNodeCount = int32(numNodes)
 common.NewRCByName(c, ns, name, originalNodeCount, nil)
@@ -124,7 +125,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 framework.ExpectNoError(err)
 err = framework.WaitForGroupSize(group, targetNumNodes)
 framework.ExpectNoError(err)
-err = framework.WaitForReadyNodes(c, int(originalNodeCount-1), 10*time.Minute)
+err = e2enode.WaitForReadyNodes(c, int(originalNodeCount-1), 10*time.Minute)
 framework.ExpectNoError(err)
 
 ginkgo.By("waiting 1 minute for the watch in the podGC to catch up, remove any pods scheduled on " +
@@ -142,7 +143,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
 name := "my-hostname-add-node"
 common.NewSVCByName(c, ns, name)
-numNodes, err := framework.NumberOfRegisteredNodes(c)
+numNodes, err := e2enode.TotalRegistered(c)
 framework.ExpectNoError(err)
 originalNodeCount = int32(numNodes)
 common.NewRCByName(c, ns, name, originalNodeCount, nil)
@@ -155,7 +156,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 framework.ExpectNoError(err)
 err = framework.WaitForGroupSize(group, targetNumNodes)
 framework.ExpectNoError(err)
-err = framework.WaitForReadyNodes(c, int(originalNodeCount+1), 10*time.Minute)
+err = e2enode.WaitForReadyNodes(c, int(originalNodeCount+1), 10*time.Minute)
 framework.ExpectNoError(err)
 
 ginkgo.By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", originalNodeCount+1))
@@ -26,6 +26,7 @@ import (
 "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 testutils "k8s.io/kubernetes/test/utils"
 
@@ -55,12 +56,12 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 var err error
 ps, err = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything())
 framework.ExpectNoError(err)
-numNodes, err = framework.NumberOfRegisteredNodes(f.ClientSet)
+numNodes, err = e2enode.TotalRegistered(f.ClientSet)
 framework.ExpectNoError(err)
 systemNamespace = metav1.NamespaceSystem
 
 ginkgo.By("ensuring all nodes are ready")
-originalNodes, err = framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
+originalNodes, err = e2enode.CheckReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
 framework.ExpectNoError(err)
 e2elog.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))
 
@@ -90,7 +91,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 framework.ExpectNoError(err)
 
 ginkgo.By("ensuring all nodes are ready after the restart")
-nodesAfter, err := framework.CheckNodesReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
+nodesAfter, err := e2enode.CheckReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
 framework.ExpectNoError(err)
 e2elog.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))
 
@@ -62,6 +62,7 @@ go_library(
 "//test/e2e/framework/endpoints:go_default_library",
 "//test/e2e/framework/ingress:go_default_library",
 "//test/e2e/framework/log:go_default_library",
+"//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/providers/gce:go_default_library",
 "//test/e2e/framework/ssh:go_default_library",
@@ -28,6 +28,7 @@ import (
 "k8s.io/client-go/util/workqueue"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 testutils "k8s.io/kubernetes/test/utils"
 
 "github.com/onsi/ginkgo"
@@ -45,7 +46,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
 
 ginkgo.BeforeEach(func() {
 framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout))
-framework.WaitForAllNodesHealthy(f.ClientSet, time.Minute)
+e2enode.WaitForTotalHealthy(f.ClientSet, time.Minute)
 
 err := framework.CheckTestingNSDeletedExcept(f.ClientSet, f.Namespace.Name)
 framework.ExpectNoError(err)
@@ -27,6 +27,7 @@ import (
 "k8s.io/kubernetes/pkg/master/ports"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 "k8s.io/kubernetes/test/e2e/framework/providers/gce"
 gcecloud "k8s.io/legacy-cloud-providers/gce"
 
@@ -188,7 +189,7 @@ var _ = SIGDescribe("Firewall rule", func() {
 }
 
 ginkgo.By("Checking well known ports on master and nodes are not exposed externally")
-nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP)
+nodeAddrs := e2enode.FirstAddress(nodes, v1.NodeExternalIP)
 if len(nodeAddrs) == 0 {
 framework.Failf("did not find any node addresses")
 }
@@ -29,6 +29,7 @@ import (
 
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 "k8s.io/kubernetes/test/images/agnhost/net/nat"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -51,7 +52,7 @@ var _ = SIGDescribe("Network", func() {
 
 ginkgo.It("should set TCP CLOSE_WAIT timeout", func() {
 nodes := framework.GetReadySchedulableNodesOrDie(fr.ClientSet)
-ips := framework.CollectAddresses(nodes, v1.NodeInternalIP)
+ips := e2enode.CollectAddresses(nodes, v1.NodeInternalIP)
 
 if len(nodes.Items) < 2 {
 framework.Skipf(
@@ -40,6 +40,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 "k8s.io/kubernetes/test/e2e/framework/providers/gce"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
@@ -2083,7 +2084,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
 framework.Failf("Service HealthCheck NodePort was not allocated")
 }
 
-ips := framework.CollectAddresses(nodes, v1.NodeExternalIP)
+ips := e2enode.CollectAddresses(nodes, v1.NodeExternalIP)
 
 ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
 svcTCPPort := int(svc.Spec.Ports[0].Port)
@@ -2206,7 +2207,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
 if _, ok := endpointNodeMap[n.Name]; ok {
 continue
 }
-noEndpointNodeMap[n.Name] = framework.GetNodeAddresses(&n, v1.NodeExternalIP)
+noEndpointNodeMap[n.Name] = e2enode.GetAddresses(&n, v1.NodeExternalIP)
 }
 
 svcTCPPort := int(svc.Spec.Ports[0].Port)
@@ -2354,7 +2355,7 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
 var svcIP string
 if serviceType == v1.ServiceTypeNodePort {
 nodes := framework.GetReadySchedulableNodesOrDie(cs)
-addrs := framework.CollectAddresses(nodes, v1.NodeInternalIP)
+addrs := e2enode.CollectAddresses(nodes, v1.NodeInternalIP)
 gomega.Expect(len(addrs)).To(gomega.BeNumerically(">", 0), "ginkgo.Failed to get Node internal IP")
 svcIP = addrs[0]
 servicePort = int(svc.Spec.Ports[0].NodePort)
@@ -38,6 +38,7 @@ go_library(
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/job:go_default_library",
 "//test/e2e/framework/log:go_default_library",
+"//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/ssh:go_default_library",
 "//test/e2e/framework/volume:go_default_library",
@@ -29,6 +29,7 @@ import (
 "k8s.io/apimachinery/pkg/fields"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 testutils "k8s.io/kubernetes/test/utils"
 
@@ -50,7 +51,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
 framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
 framework.SkipUnlessProviderIs("gce", "gke")
 framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu")
-framework.WaitForAllNodesHealthy(f.ClientSet, time.Minute)
+e2enode.WaitForTotalHealthy(f.ClientSet, time.Minute)
 })
 
 ginkgo.It("should run without error", func() {
@@ -30,6 +30,7 @@ import (
 "k8s.io/kubernetes/pkg/master/ports"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 imageutils "k8s.io/kubernetes/test/utils/image"
 
@@ -204,7 +205,7 @@ var _ = SIGDescribe("PreStop", func() {
 ginkgo.By("verifying the pod running state after graceful termination")
 result := &v1.PodList{}
 err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) {
-client, err := framework.NodeProxyRequest(f.ClientSet, pod.Spec.NodeName, "pods", ports.KubeletPort)
+client, err := e2enode.ProxyRequest(f.ClientSet, pod.Spec.NodeName, "pods", ports.KubeletPort)
 framework.ExpectNoError(err, "failed to get the pods of the node")
 err = client.Into(result)
 framework.ExpectNoError(err, "failed to parse the pods of the node")
@@ -47,6 +47,7 @@ go_library(
 "//test/e2e/framework/gpu:go_default_library",
 "//test/e2e/framework/job:go_default_library",
 "//test/e2e/framework/log:go_default_library",
+"//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/providers/gce:go_default_library",
 "//test/e2e/framework/replicaset:go_default_library",
@@ -28,6 +28,7 @@ import (
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -54,7 +55,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
 cs = f.ClientSet
 ns = f.Namespace.Name
 
-framework.WaitForAllNodesHealthy(cs, time.Minute)
+e2enode.WaitForTotalHealthy(cs, time.Minute)
 masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
 
 framework.ExpectNoError(framework.CheckTestingNSDeletedExcept(cs, ns))
@@ -30,6 +30,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework/gpu"
 jobutil "k8s.io/kubernetes/test/e2e/framework/job"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 "k8s.io/kubernetes/test/e2e/framework/providers/gce"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -209,9 +210,9 @@ func testNvidiaGPUsJob(f *framework.Framework) {
 err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, 1)
 framework.ExpectNoError(err)
 
-numNodes, err := framework.NumberOfRegisteredNodes(f.ClientSet)
+numNodes, err := e2enode.TotalRegistered(f.ClientSet)
 framework.ExpectNoError(err)
-nodes, err := framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
+nodes, err := e2enode.CheckReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
 framework.ExpectNoError(err)
 
 ginkgo.By("Recreating nodes")
@@ -35,6 +35,7 @@ import (
 "k8s.io/kubernetes/pkg/apis/scheduling"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 "k8s.io/kubernetes/test/e2e/framework/replicaset"
 
@@ -80,7 +81,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.Equal(true))
 }
 
-framework.WaitForAllNodesHealthy(cs, time.Minute)
+e2enode.WaitForTotalHealthy(cs, time.Minute)
 masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
 
 err := framework.CheckTestingNSDeletedExcept(cs, ns)
@@ -24,6 +24,7 @@ import (
 
 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
+
 // ensure libs have a chance to initialize
 _ "github.com/stretchr/testify/assert"
 
@@ -37,6 +38,7 @@ import (
 "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -77,7 +79,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 ns = f.Namespace.Name
 nodeList = &v1.NodeList{}
 
-framework.WaitForAllNodesHealthy(cs, time.Minute)
+e2enode.WaitForTotalHealthy(cs, time.Minute)
 _, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
 
 err := framework.CheckTestingNSDeletedExcept(cs, ns)
@@ -27,6 +27,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 "k8s.io/kubernetes/test/e2e/framework"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 
 "github.com/onsi/ginkgo"
@@ -123,9 +124,9 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
 node := nodeList.Items[0]
 
 ginkgo.By(fmt.Sprintf("Blocking traffic from node %s to the master", nodeName))
-host, err := framework.GetNodeExternalIP(&node)
+host, err := e2enode.GetExternalIP(&node)
 if err != nil {
-host, err = framework.GetNodeInternalIP(&node)
+host, err = e2enode.GetInternalIP(&node)
 }
 framework.ExpectNoError(err)
 masterAddresses := framework.GetAllMasterAddresses(cs)
@@ -143,7 +144,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
 }
 
 ginkgo.By(fmt.Sprintf("Expecting to see node %q becomes Ready", nodeName))
-framework.WaitForNodeToBeReady(cs, nodeName, time.Minute*1)
+e2enode.WaitForNodeToBeReady(cs, nodeName, time.Minute*1)
 ginkgo.By("Expecting to see unreachable=:NoExecute taint is taken off")
 err := framework.WaitForNodeHasTaintOrNot(cs, nodeName, taint, false, time.Second*30)
 framework.ExpectNoError(err)
@@ -154,7 +155,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
 }
 
 ginkgo.By(fmt.Sprintf("Expecting to see node %q becomes NotReady", nodeName))
-if !framework.WaitForNodeToBeNotReady(cs, nodeName, time.Minute*3) {
+if !e2enode.WaitForNodeToBeNotReady(cs, nodeName, time.Minute*3) {
 framework.Failf("node %q doesn't turn to NotReady after 3 minutes", nodeName)
 }
 ginkgo.By("Expecting to see unreachable=:NoExecute taint is applied")
@@ -28,6 +28,7 @@ import (
 "k8s.io/client-go/tools/cache"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
 
@@ -163,7 +164,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() {
 cs = f.ClientSet
 ns = f.Namespace.Name
 
-framework.WaitForAllNodesHealthy(cs, time.Minute)
+e2enode.WaitForTotalHealthy(cs, time.Minute)
 
 err := framework.CheckTestingNSDeletedExcept(cs, ns)
 framework.ExpectNoError(err)
@@ -337,7 +338,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 cs = f.ClientSet
 ns = f.Namespace.Name
 
-framework.WaitForAllNodesHealthy(cs, time.Minute)
+e2enode.WaitForTotalHealthy(cs, time.Minute)
 
 err := framework.CheckTestingNSDeletedExcept(cs, ns)
 framework.ExpectNoError(err)
@@ -68,6 +68,7 @@ go_library(
 "//test/e2e/framework/deployment:go_default_library",
 "//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/metrics:go_default_library",
+"//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/providers/gce:go_default_library",
 "//test/e2e/framework/ssh:go_default_library",
@@ -29,6 +29,7 @@ import (
 apierrs "k8s.io/apimachinery/pkg/api/errors"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 "k8s.io/kubernetes/test/e2e/framework/testfiles"
 "k8s.io/kubernetes/test/e2e/framework/volume"
@@ -77,9 +78,9 @@ func installFlex(c clientset.Interface, node *v1.Node, vendor, driver, filePath
 host := ""
 var err error
 if node != nil {
-host, err = framework.GetNodeExternalIP(node)
+host, err = e2enode.GetExternalIP(node)
 if err != nil {
-host, err = framework.GetNodeInternalIP(node)
+host, err = e2enode.GetInternalIP(node)
 }
 } else {
 masterHostWithPort := framework.GetMasterHost()
@@ -106,9 +107,9 @@ func uninstallFlex(c clientset.Interface, node *v1.Node, vendor, driver string)
 host := ""
 var err error
 if node != nil {
-host, err = framework.GetNodeExternalIP(node)
+host, err = e2enode.GetExternalIP(node)
 if err != nil {
-host, err = framework.GetNodeInternalIP(node)
+host, err = e2enode.GetInternalIP(node)
 }
 } else {
 masterHostWithPort := framework.GetMasterHost()
@@ -30,6 +30,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 "k8s.io/kubernetes/test/e2e/framework/volume"
 "k8s.io/kubernetes/test/e2e/storage/utils"
@@ -91,7 +92,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 for _, node := range nodes.Items {
 if node.Name != nfsServerPod.Spec.NodeName {
 clientNode = &node
-clientNodeIP, err = framework.GetNodeExternalIP(clientNode)
+clientNodeIP, err = e2enode.GetExternalIP(clientNode)
 framework.ExpectNoError(err)
 break
 }
@@ -40,6 +40,7 @@ import (
 v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 "k8s.io/kubernetes/test/e2e/framework/providers/gce"
 "k8s.io/kubernetes/test/e2e/storage/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -442,7 +443,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 })
 
 func countReadyNodes(c clientset.Interface, hostName types.NodeName) int {
-framework.WaitForNodeToBeReady(c, string(hostName), nodeStatusTimeout)
+e2enode.WaitForNodeToBeReady(c, string(hostName), nodeStatusTimeout)
 framework.WaitForAllNodesSchedulable(c, nodeStatusTimeout)
 nodes := framework.GetReadySchedulableNodesOrDie(c)
 return len(nodes.Items)
@@ -27,6 +27,7 @@ go_library(
 "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/log:go_default_library",
+"//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/ssh:go_default_library",
 "//test/utils/image:go_default_library",
@@ -36,6 +36,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -148,7 +149,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
 gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)
 
 if kOp == KStop {
-if ok := framework.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
+if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
 framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
 }
 }
@@ -168,7 +169,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
 }
 if kOp == KStart || kOp == KRestart {
 // For kubelet start and restart operations, Wait until Node becomes Ready
-if ok := framework.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
+if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
 framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
 }
 }
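For reference, a minimal sketch of how a caller uses the relocated helpers after this change. The helper names and signatures (WaitForTotalHealthy, WaitForReadyNodes, GetExternalIP, GetInternalIP) are taken from the hunks above; the package name, function name, and surrounding test scaffolding are illustrative assumptions, not code from this PR.

```go
package nodesketch // hypothetical package name, for illustration only

import (
	"time"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// waitAndPickHost mirrors the post-refactor call sites seen in the diff:
// node helpers now live in the e2enode package rather than on framework.
func waitAndPickHost(c clientset.Interface, node *v1.Node) (string, error) {
	// Was framework.WaitForAllNodesHealthy.
	e2enode.WaitForTotalHealthy(c, time.Minute)

	// Was framework.WaitForReadyNodes.
	if err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
		return "", err
	}

	// Was framework.GetNodeExternalIP / framework.GetNodeInternalIP;
	// fall back to the internal IP when no external IP is available.
	host, err := e2enode.GetExternalIP(node)
	if err != nil {
		host, err = e2enode.GetInternalIP(node)
	}
	return host, err
}
```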