Merge pull request #84379 from oomichi/e2e-network

Separate e2enetwork from e2e core framework
commit dda78d59be
Kubernetes Prow Robot, 2019-10-31 02:55:37 -07:00 (committed by GitHub)
20 changed files with 199 additions and 147 deletions
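
For downstream suites, the visible effect of this refactor is an import change: helpers such as TestUnderTemporaryNetworkFailure, NewNetworkingTestConfig, PokeHTTP, and the RegexIPv4/RegexIPv6 constants move from the core framework package to test/e2e/framework/network, imported under the e2enetwork alias throughout the diff below. A minimal compilable sketch of the updated call pattern (the wrapper function is hypothetical):

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	// New home of the helper; the alias matches the call sites below.
	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
)

// runUnderPartition is a hypothetical wrapper: what used to be
// framework.TestUnderTemporaryNetworkFailure is now
// e2enetwork.TestUnderTemporaryNetworkFailure, with the same signature.
func runUnderPartition(c clientset.Interface, ns string, node *v1.Node, testFunc func()) {
	e2enetwork.TestUnderTemporaryNetworkFailure(c, ns, node, testFunc)
}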


@@ -64,6 +64,7 @@ go_library(
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/deployment:go_default_library",
 "//test/e2e/framework/job:go_default_library",
+"//test/e2e/framework/network:go_default_library",
 "//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/pv:go_default_library",


@@ -37,6 +37,7 @@ import (
 "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
 jobutil "k8s.io/kubernetes/test/e2e/framework/job"
+e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
@@ -267,7 +268,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 // Finally, it checks that the replication controller recreates the
 // pods on another node and that now the number of replicas is equal 'replicas'.
 ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
-framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
+e2enetwork.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
 framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
 err := waitForRCPodToDisappear(c, ns, name, pods.Items[0].Name)
 framework.ExpectNoError(err)
@@ -336,7 +337,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 // Finally, it checks that the replication controller recreates the
 // pods on another node and that now the number of replicas is equal 'replicas + 1'.
 ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
-framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
+e2enetwork.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
 framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
 err := waitForRCPodToDisappear(c, ns, name, pods.Items[0].Name)
 framework.ExpectEqual(err, wait.ErrWaitTimeout, "Pod was not deleted during network partition.")
@@ -412,7 +413,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 // Blocks outgoing network traffic on 'node'. Then verifies that 'podNameToDisappear',
 // that belongs to StatefulSet 'statefulSetName', **does not** disappear due to forced deletion from the apiserver.
 // The grace period on the stateful pods is set to a value > 0.
-framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
+e2enetwork.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
 framework.Logf("Checking that the NodeController does not force delete stateful pods %v", pod.Name)
 err := e2epod.WaitTimeoutForPodNoLongerRunningInNamespace(c, pod.Name, ns, 10*time.Minute)
 framework.ExpectEqual(err, wait.ErrWaitTimeout, "Pod was not deleted during network partition.")
@@ -458,7 +459,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 // This creates a temporary network partition, verifies that the job has 'parallelism' number of
 // running pods after the node-controller detects node unreachable.
 ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
-framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
+e2enetwork.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
 framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
 err := e2epod.WaitForPodToDisappear(c, ns, pods.Items[0].Name, label, 20*time.Second, 10*time.Minute)
 framework.ExpectEqual(err, wait.ErrWaitTimeout, "Pod was not deleted during network partition.")


@@ -40,6 +40,7 @@ go_library(
 "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 "//test/e2e/common:go_default_library",
 "//test/e2e/framework:go_default_library",
+"//test/e2e/framework/network:go_default_library",
 "//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/pv:go_default_library",


@@ -44,6 +44,7 @@ import (
 "k8s.io/klog"
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/test/e2e/framework"
+e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 "k8s.io/kubernetes/test/e2e/scheduling"
@@ -326,7 +327,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]",
 func() {
-framework.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleUpTest(1) })
+e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleUpTest(1) })
 })
 ginkgo.It("shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -689,7 +690,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 ginkgo.It("should correctly scale down after a node is not needed and one node is broken [Feature:ClusterSizeAutoscalingScaleDown]",
 func() {
 framework.SkipUnlessSSHKeyPresent()
-framework.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleDownTest(1) })
+e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleDownTest(1) })
 })
 ginkgo.It("should correctly scale down after a node is not needed when there is non autoscaled pool[Feature:ClusterSizeAutoscalingScaleDown]", func() {
@@ -918,7 +919,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 if len(nodesToBreak) > 0 {
 ntb := &nodesToBreak[0]
 nodesToBreak = nodesToBreak[1:]
-framework.TestUnderTemporaryNetworkFailure(c, "default", ntb, testFunction)
+e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", ntb, testFunction)
 } else {
 ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout)
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")


@@ -79,6 +79,7 @@ go_library(
 "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/kubelet:go_default_library",
+"//test/e2e/framework/network:go_default_library",
 "//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/replicaset:go_default_library",


@@ -24,6 +24,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/kubernetes/test/e2e/framework"
+e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 imageutils "k8s.io/kubernetes/test/utils/image"
 "github.com/onsi/ginkgo"
@@ -72,7 +73,7 @@ var _ = ginkgo.Describe("[sig-node] Downward API", func() {
 expectations := []string{
 fmt.Sprintf("POD_NAME=%v", podName),
 fmt.Sprintf("POD_NAMESPACE=%v", f.Namespace.Name),
-fmt.Sprintf("POD_IP=%v|%v", framework.RegexIPv4, framework.RegexIPv6),
+fmt.Sprintf("POD_IP=%v|%v", e2enetwork.RegexIPv4, e2enetwork.RegexIPv6),
 }
 testDownwardAPI(f, podName, env, expectations)
@@ -98,7 +99,7 @@
 }
 expectations := []string{
-fmt.Sprintf("HOST_IP=%v|%v", framework.RegexIPv4, framework.RegexIPv6),
+fmt.Sprintf("HOST_IP=%v|%v", e2enetwork.RegexIPv4, e2enetwork.RegexIPv6),
 }
 testDownwardAPI(f, podName, env, expectations)


@@ -20,6 +20,7 @@ import (
 "github.com/onsi/ginkgo"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/kubernetes/test/e2e/framework"
+e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 )
 var _ = ginkgo.Describe("[sig-network] Networking", func() {
@@ -38,9 +39,9 @@ var _ = ginkgo.Describe("[sig-network] Networking", func() {
 This test is marked LinuxOnly since HostNetwork is not supported on other platforms like Windows.
 */
 framework.ConformanceIt("should function for intra-pod communication: http [LinuxOnly] [NodeConformance]", func() {
-config := framework.NewCoreNetworkingTestConfig(f, true)
+config := e2enetwork.NewCoreNetworkingTestConfig(f, true)
 for _, endpointPod := range config.EndpointPods {
-config.DialFromTestContainer("http", endpointPod.Status.PodIP, framework.EndpointHTTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
+config.DialFromTestContainer("http", endpointPod.Status.PodIP, e2enetwork.EndpointHTTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
 }
 })
@@ -52,9 +53,9 @@ var _ = ginkgo.Describe("[sig-network] Networking", func() {
 This test is marked LinuxOnly since HostNetwork is not supported on other platforms like Windows.
 */
 framework.ConformanceIt("should function for intra-pod communication: udp [LinuxOnly] [NodeConformance]", func() {
-config := framework.NewCoreNetworkingTestConfig(f, true)
+config := e2enetwork.NewCoreNetworkingTestConfig(f, true)
 for _, endpointPod := range config.EndpointPods {
-config.DialFromTestContainer("udp", endpointPod.Status.PodIP, framework.EndpointUDPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
+config.DialFromTestContainer("udp", endpointPod.Status.PodIP, e2enetwork.EndpointUDPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
 }
 })
@@ -66,9 +67,9 @@ var _ = ginkgo.Describe("[sig-network] Networking", func() {
 This test is marked LinuxOnly since HostNetwork is not supported on other platforms like Windows.
 */
 framework.ConformanceIt("should function for node-pod communication: http [LinuxOnly] [NodeConformance]", func() {
-config := framework.NewCoreNetworkingTestConfig(f, true)
+config := e2enetwork.NewCoreNetworkingTestConfig(f, true)
 for _, endpointPod := range config.EndpointPods {
-config.DialFromNode("http", endpointPod.Status.PodIP, framework.EndpointHTTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
+config.DialFromNode("http", endpointPod.Status.PodIP, e2enetwork.EndpointHTTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
 }
 })
@@ -80,9 +81,9 @@ var _ = ginkgo.Describe("[sig-network] Networking", func() {
 This test is marked LinuxOnly since HostNetwork is not supported on other platforms like Windows.
 */
 framework.ConformanceIt("should function for node-pod communication: udp [LinuxOnly] [NodeConformance]", func() {
-config := framework.NewCoreNetworkingTestConfig(f, true)
+config := e2enetwork.NewCoreNetworkingTestConfig(f, true)
 for _, endpointPod := range config.EndpointPods {
-config.DialFromNode("udp", endpointPod.Status.PodIP, framework.EndpointUDPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
+config.DialFromNode("udp", endpointPod.Status.PodIP, e2enetwork.EndpointUDPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
 }
 })
 })
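
All four conformance tests above share one pattern under the new package; a compilable sketch of it (the helper name is hypothetical, the calls mirror the intra-pod HTTP test above):

package example

import (
	"k8s.io/apimachinery/pkg/util/sets"

	"k8s.io/kubernetes/test/e2e/framework"
	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
)

// dialAllEndpoints builds the networking test config through e2enetwork
// and dials every endpoint pod on the EndpointHTTPPort constant, which
// moved into the new package along with the config helper.
func dialAllEndpoints(f *framework.Framework) {
	config := e2enetwork.NewCoreNetworkingTestConfig(f, true)
	for _, endpointPod := range config.EndpointPods {
		config.DialFromTestContainer("http", endpointPod.Status.PodIP,
			e2enetwork.EndpointHTTPPort, config.MaxTries, 0,
			sets.NewString(endpointPod.Name))
	}
}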


@@ -13,7 +13,6 @@ go_library(
 "google_compute.go",
 "log.go",
 "log_size_monitoring.go",
-"networking_utils.go",
 "nodes_util.go",
 "pods.go",
 "profile_gatherer.go",
@@ -51,7 +50,6 @@ go_library(
 "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
-"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
@@ -125,6 +123,7 @@ filegroup(
 "//test/e2e/framework/lifecycle:all-srcs",
 "//test/e2e/framework/log:all-srcs",
 "//test/e2e/framework/metrics:all-srcs",
+"//test/e2e/framework/network:all-srcs",
 "//test/e2e/framework/node:all-srcs",
 "//test/e2e/framework/perf:all-srcs",
 "//test/e2e/framework/pod:all-srcs",


@@ -0,0 +1,39 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+go_library(
+name = "go_default_library",
+srcs = ["utils.go"],
+importpath = "k8s.io/kubernetes/test/e2e/framework/network",
+visibility = ["//visibility:public"],
+deps = [
+"//staging/src/k8s.io/api/core/v1:go_default_library",
+"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
+"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
+"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
+"//test/e2e/framework:go_default_library",
+"//test/e2e/framework/node:go_default_library",
+"//test/e2e/framework/pod:go_default_library",
+"//test/utils/image:go_default_library",
+"//vendor/github.com/onsi/ginkgo:go_default_library",
+],
+)
+filegroup(
+name = "package-srcs",
+srcs = glob(["**"]),
+tags = ["automanaged"],
+visibility = ["//visibility:private"],
+)
+filegroup(
+name = "all-srcs",
+srcs = [":package-srcs"],
+tags = ["automanaged"],
+visibility = ["//visibility:public"],
+)


@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
-package framework
+package network
 import (
 "encoding/json"
@@ -37,6 +37,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
+"k8s.io/kubernetes/test/e2e/framework"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -69,14 +70,16 @@ const (
 // RegexIPv4 is a regex to match IPv4 addresses
 RegexIPv4 = "(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)"
 // RegexIPv6 is a regex to match IPv6 addresses
 RegexIPv6 = "(?:(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){6})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:::(?:(?:(?:[0-9a-fA-F]{1,4})):){5})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){4})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,1}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){3})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,2}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){2})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,3}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:[0-9a-fA-F]{1,4})):)(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,4}(?:(?:[0-9a-fA-F]{1,4})))?::)(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,5}(?:(?:[0-9a-fA-F]{1,4})))?::)(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,6}(?:(?:[0-9a-fA-F]{1,4})))?::))))"
+resizeNodeReadyTimeout = 2 * time.Minute
+resizeNodeNotReadyTimeout = 2 * time.Minute
 )
 // NetexecImageName is the image name for agnhost.
 var NetexecImageName = imageutils.GetE2EImage(imageutils.Agnhost)
 // NewNetworkingTestConfig creates and sets up a new test config helper.
-func NewNetworkingTestConfig(f *Framework) *NetworkingTestConfig {
+func NewNetworkingTestConfig(f *framework.Framework) *NetworkingTestConfig {
 config := &NetworkingTestConfig{f: f, Namespace: f.Namespace.Name, HostNetwork: true}
 ginkgo.By(fmt.Sprintf("Performing setup for networking test in namespace %v", config.Namespace))
 config.setup(getServiceSelector())
@@ -84,7 +87,7 @@ func NewNetworkingTestConfig(f *Framework) *NetworkingTestConfig {
 }
 // NewCoreNetworkingTestConfig creates and sets up a new test config helper for Node E2E.
-func NewCoreNetworkingTestConfig(f *Framework, hostNetwork bool) *NetworkingTestConfig {
+func NewCoreNetworkingTestConfig(f *framework.Framework, hostNetwork bool) *NetworkingTestConfig {
 config := &NetworkingTestConfig{f: f, Namespace: f.Namespace.Name, HostNetwork: hostNetwork}
 ginkgo.By(fmt.Sprintf("Performing setup for networking test in namespace %v", config.Namespace))
 config.setupCore(getServiceSelector())
@@ -114,8 +117,8 @@ type NetworkingTestConfig struct {
 // test config. Each invocation of `setup` creates a service with
 // 1 pod per node running the netexecImage.
 EndpointPods []*v1.Pod
-f *Framework
+f *framework.Framework
-podClient *PodClient
+podClient *framework.PodClient
 // NodePortService is a Service with Type=NodePort spanning over all
 // endpointPods.
 NodePortService *v1.Service
@@ -159,10 +162,10 @@ func (config *NetworkingTestConfig) diagnoseMissingEndpoints(foundEndpoints sets
 if foundEndpoints.Has(e.Name) {
 continue
 }
-Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
+framework.Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
-desc, _ := RunKubectl(
+desc, _ := framework.RunKubectl(
 "describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace))
-Logf(desc)
+framework.Logf(desc)
 }
 }
@@ -207,11 +210,11 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
 // A failure to kubectl exec counts as a try, not a hard fail.
 // Also note that we will keep failing for maxTries in tests where
 // we confirm unreachability.
-Logf("Failed to execute %q: %v, stdout: %q, stderr %q", cmd, err, stdout, stderr)
+framework.Logf("Failed to execute %q: %v, stdout: %q, stderr %q", cmd, err, stdout, stderr)
 } else {
 var output map[string][]string
 if err := json.Unmarshal([]byte(stdout), &output); err != nil {
-Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
+framework.Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
 cmd, config.HostTestContainerPod.Name, stdout, err)
 continue
 }
@@ -223,7 +226,7 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
 }
 }
 }
-Logf("Waiting for endpoints: %v", expectedEps.Difference(eps))
+framework.Logf("Waiting for endpoints: %v", expectedEps.Difference(eps))
 // Check against i+1 so we exit if minTries == maxTries.
 if (eps.Equal(expectedEps) || eps.Len() == 0 && expectedEps.Len() == 0) && i+1 >= minTries {
@@ -234,7 +237,7 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
 }
 config.diagnoseMissingEndpoints(eps)
-Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
+framework.Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
 }
 // GetEndpointsFromTestContainer executes a curl via kubectl exec in a test container.
@@ -266,12 +269,12 @@ func (config *NetworkingTestConfig) GetEndpointsFromContainer(protocol, containe
 // A failure to kubectl exec counts as a try, not a hard fail.
 // Also note that we will keep failing for maxTries in tests where
 // we confirm unreachability.
-Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", cmd, err, stdout, stderr)
+framework.Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", cmd, err, stdout, stderr)
 } else {
-Logf("Tries: %d, in try: %d, stdout: %v, stderr: %v, command run in: %#v", tries, i, stdout, stderr, config.HostTestContainerPod)
+framework.Logf("Tries: %d, in try: %d, stdout: %v, stderr: %v, command run in: %#v", tries, i, stdout, stderr, config.HostTestContainerPod)
 var output map[string][]string
 if err := json.Unmarshal([]byte(stdout), &output); err != nil {
-Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
+framework.Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
 cmd, config.HostTestContainerPod.Name, stdout, err)
 continue
 }
@@ -325,7 +328,7 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
 // A failure to exec command counts as a try, not a hard fail.
 // Also note that we will keep failing for maxTries in tests where
 // we confirm unreachability.
-Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", filterCmd, err, stdout, stderr)
+framework.Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", filterCmd, err, stdout, stderr)
 } else {
 trimmed := strings.TrimSpace(stdout)
 if trimmed != "" {
@@ -335,18 +338,18 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
 // Check against i+1 so we exit if minTries == maxTries.
 if eps.Equal(expectedEps) && i+1 >= minTries {
-Logf("Found all expected endpoints: %+v", eps.List())
+framework.Logf("Found all expected endpoints: %+v", eps.List())
 return
 }
-Logf("Waiting for %+v endpoints (expected=%+v, actual=%+v)", expectedEps.Difference(eps).List(), expectedEps.List(), eps.List())
+framework.Logf("Waiting for %+v endpoints (expected=%+v, actual=%+v)", expectedEps.Difference(eps).List(), expectedEps.List(), eps.List())
 // TODO: get rid of this delay #36281
 time.Sleep(hitEndpointRetryDelay)
 }
 config.diagnoseMissingEndpoints(eps)
-Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
+framework.Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
 }
 // GetSelfURL executes a curl against the given path via kubectl exec into a
@@ -376,24 +379,24 @@ func (config *NetworkingTestConfig) executeCurlCmd(cmd string, expected string)
 podName := config.HostTestContainerPod.Name
 var msg string
 if pollErr := wait.PollImmediate(retryInterval, retryTimeout, func() (bool, error) {
-stdout, err := RunHostCmd(config.Namespace, podName, cmd)
+stdout, err := framework.RunHostCmd(config.Namespace, podName, cmd)
 if err != nil {
 msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err)
-Logf(msg)
+framework.Logf(msg)
 return false, nil
 }
 if !strings.Contains(stdout, expected) {
 msg = fmt.Sprintf("successfully executed %v in %v/%v, but output '%v' doesn't contain expected string '%v'", cmd, config.Namespace, podName, stdout, expected)
-Logf(msg)
+framework.Logf(msg)
 return false, nil
 }
 return true, nil
 }); pollErr != nil {
-Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName)
+framework.Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName)
-desc, _ := RunKubectl(
+desc, _ := framework.RunKubectl(
 "describe", "pod", podName, fmt.Sprintf("--namespace=%v", config.Namespace))
-Logf("%s", desc)
+framework.Logf("%s", desc)
-Failf("Timed out in %v: %v", retryTimeout, msg)
+framework.Failf("Timed out in %v: %v", retryTimeout, msg)
 }
 }
@@ -520,7 +523,7 @@ func (config *NetworkingTestConfig) createSessionAffinityService(selector map[st
 // DeleteNodePortService deletes NodePort service.
 func (config *NetworkingTestConfig) DeleteNodePortService() {
 err := config.getServiceClient().Delete(config.NodePortService.Name, nil)
-ExpectNoError(err, "error while deleting NodePortService. err:%v)", err)
+framework.ExpectNoError(err, "error while deleting NodePortService. err:%v)", err)
 time.Sleep(15 * time.Second) // wait for kube-proxy to catch up with the service being deleted.
 }
@@ -531,30 +534,30 @@ func (config *NetworkingTestConfig) createTestPods() {
 config.createPod(testContainerPod)
 config.createPod(hostTestContainerPod)
-ExpectNoError(config.f.WaitForPodRunning(testContainerPod.Name))
+framework.ExpectNoError(config.f.WaitForPodRunning(testContainerPod.Name))
-ExpectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name))
+framework.ExpectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name))
 var err error
 config.TestContainerPod, err = config.getPodClient().Get(testContainerPod.Name, metav1.GetOptions{})
 if err != nil {
-Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err)
+framework.Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err)
 }
 config.HostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name, metav1.GetOptions{})
 if err != nil {
-Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)
+framework.Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)
 }
 }
 func (config *NetworkingTestConfig) createService(serviceSpec *v1.Service) *v1.Service {
 _, err := config.getServiceClient().Create(serviceSpec)
-ExpectNoError(err, fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
+framework.ExpectNoError(err, fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
-err = WaitForService(config.f.ClientSet, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second)
+err = framework.WaitForService(config.f.ClientSet, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second)
-ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err))
+framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err))
 createdService, err := config.getServiceClient().Get(serviceSpec.Name, metav1.GetOptions{})
-ExpectNoError(err, fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
+framework.ExpectNoError(err, fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
 return createdService
 }
@@ -578,12 +581,12 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
 config.setupCore(selector)
 ginkgo.By("Getting node addresses")
-ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
+framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
 nodeList, err := e2enode.GetReadySchedulableNodes(config.f.ClientSet)
-ExpectNoError(err)
+framework.ExpectNoError(err)
 config.ExternalAddrs = e2enode.FirstAddress(nodeList, v1.NodeExternalIP)
-SkipUnlessNodeCountIsAtLeast(2)
+framework.SkipUnlessNodeCountIsAtLeast(2)
 config.Nodes = nodeList.Items
 ginkgo.By("Creating the service on top of the pods in kubernetes")
@@ -610,9 +613,9 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
 }
 func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*v1.Pod {
-ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
+framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
 nodeList, err := e2enode.GetBoundedReadySchedulableNodes(config.f.ClientSet, maxNetProxyPodsCount)
-ExpectNoError(err)
+framework.ExpectNoError(err)
 nodes := nodeList.Items
 // create pods, one for each node
@@ -629,9 +632,9 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector
 // wait that all of them are up
 runningPods := make([]*v1.Pod, 0, len(nodes))
 for _, p := range createdPods {
-ExpectNoError(config.f.WaitForPodReady(p.Name))
+framework.ExpectNoError(config.f.WaitForPodReady(p.Name))
 rp, err := config.getPodClient().Get(p.Name, metav1.GetOptions{})
-ExpectNoError(err)
+framework.ExpectNoError(err)
 runningPods = append(runningPods, rp)
 }
@@ -646,12 +649,12 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod() {
 // wait for pod being deleted.
 err := e2epod.WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
 if err != nil {
-Failf("Failed to delete %s pod: %v", pod.Name, err)
+framework.Failf("Failed to delete %s pod: %v", pod.Name, err)
 }
 // wait for endpoint being removed.
-err = WaitForServiceEndpointsNum(config.f.ClientSet, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout)
+err = framework.WaitForServiceEndpointsNum(config.f.ClientSet, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout)
 if err != nil {
-Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
+framework.Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
 }
 // wait for kube-proxy to catch up with the pod being deleted.
 time.Sleep(5 * time.Second)
@@ -661,7 +664,7 @@ func (config *NetworkingTestConfig) createPod(pod *v1.Pod) *v1.Pod {
 return config.getPodClient().Create(pod)
 }
-func (config *NetworkingTestConfig) getPodClient() *PodClient {
+func (config *NetworkingTestConfig) getPodClient() *framework.PodClient {
 if config.podClient == nil {
 config.podClient = config.f.PodClient()
 }
@@ -732,11 +735,11 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
 // Sanity check inputs, because it has happened. These are the only things
 // that should hard fail the test - they are basically ASSERT()s.
 if host == "" {
-Failf("Got empty host for HTTP poke (%s)", url)
+framework.Failf("Got empty host for HTTP poke (%s)", url)
 return ret
 }
 if port == 0 {
-Failf("Got port==0 for HTTP poke (%s)", url)
+framework.Failf("Got port==0 for HTTP poke (%s)", url)
 return ret
 }
@@ -748,7 +751,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
 params.ExpectCode = http.StatusOK
 }
-Logf("Poking %q", url)
+framework.Logf("Poking %q", url)
 resp, err := httpGetNoConnectionPoolTimeout(url, params.Timeout)
 if err != nil {
@@ -761,7 +764,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
 } else {
 ret.Status = HTTPError
 }
-Logf("Poke(%q): %v", url, err)
+framework.Logf("Poke(%q): %v", url, err)
 return ret
 }
@@ -772,7 +775,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
 if err != nil {
 ret.Status = HTTPError
 ret.Error = fmt.Errorf("error reading HTTP body: %v", err)
-Logf("Poke(%q): %v", url, ret.Error)
+framework.Logf("Poke(%q): %v", url, ret.Error)
 return ret
 }
 ret.Body = make([]byte, len(body))
@@ -783,25 +786,25 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
 if resp.StatusCode == code {
 ret.Error = fmt.Errorf("retriable status code: %d", resp.StatusCode)
 ret.Status = HTTPRetryCode
-Logf("Poke(%q): %v", url, ret.Error)
+framework.Logf("Poke(%q): %v", url, ret.Error)
 return ret
 }
 }
 ret.Status = HTTPWrongCode
 ret.Error = fmt.Errorf("bad status code: %d", resp.StatusCode)
-Logf("Poke(%q): %v", url, ret.Error)
+framework.Logf("Poke(%q): %v", url, ret.Error)
 return ret
 }
 if params.BodyContains != "" && !strings.Contains(string(body), params.BodyContains) {
 ret.Status = HTTPBadResponse
 ret.Error = fmt.Errorf("response does not contain expected substring: %q", string(body))
-Logf("Poke(%q): %v", url, ret.Error)
+framework.Logf("Poke(%q): %v", url, ret.Error)
 return ret
 }
 ret.Status = HTTPSuccess
-Logf("Poke(%q): success", url)
+framework.Logf("Poke(%q): success", url)
 return ret
 }
@@ -826,9 +829,9 @@ func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Re
 func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1.Node, testFunc func()) {
 host, err := e2enode.GetExternalIP(node)
 if err != nil {
-Failf("Error getting node external ip : %v", err)
+framework.Failf("Error getting node external ip : %v", err)
 }
-masterAddresses := GetAllMasterAddresses(c)
+masterAddresses := framework.GetAllMasterAddresses(c)
 ginkgo.By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
 defer func() {
 // This code will execute even if setting the iptables rule failed.
@@ -837,21 +840,21 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
 // separately, but I prefer to stay on the safe side).
 ginkgo.By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name))
 for _, masterAddress := range masterAddresses {
-UnblockNetwork(host, masterAddress)
+framework.UnblockNetwork(host, masterAddress)
 }
 }()
-Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
+framework.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
 if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
-Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
+framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
 }
 for _, masterAddress := range masterAddresses {
-BlockNetwork(host, masterAddress)
+framework.BlockNetwork(host, masterAddress)
 }
-Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
+framework.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
 if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {
-Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
+framework.Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
 }
 testFunc()
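
PokeHTTP and its params/result types are also exported from the new package; a minimal sketch of a caller (function name and values are hypothetical, field names as in the diff above):

package example

import (
	"time"

	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
)

// checkEcho pokes an HTTP echo endpoint once and reports success,
// mirroring the updated callers later in this PR.
func checkEcho(host string, port int) bool {
	result := e2enetwork.PokeHTTP(host, port, "/echo?msg=hello",
		&e2enetwork.HTTPPokeParams{
			BodyContains: "hello",
			Timeout:      2 * time.Second,
		})
	return result.Status == e2enetwork.HTTPSuccess
}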


@@ -35,6 +35,7 @@ go_library(
 "//staging/src/k8s.io/client-go/util/retry:go_default_library",
 "//staging/src/k8s.io/cloud-provider/service/helpers:go_default_library",
 "//test/e2e/framework:go_default_library",
+"//test/e2e/framework/network:go_default_library",
 "//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/ssh:go_default_library",


@@ -42,6 +42,7 @@ import (
 "k8s.io/client-go/tools/cache"
 "k8s.io/kubernetes/pkg/registry/core/service/portallocator"
 "k8s.io/kubernetes/test/e2e/framework"
+e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 testutils "k8s.io/kubernetes/test/utils"
@@ -750,7 +751,7 @@ func testReachabilityOverServiceName(serviceName string, sp v1.ServicePort, exec
 func testReachabilityOverClusterIP(clusterIP string, sp v1.ServicePort, execPod *v1.Pod) error {
 // If .spec.clusterIP is set to "" or "None" for service, ClusterIP is not created, so reachability can not be tested over clusterIP:servicePort
-isClusterIPV46, err := regexp.MatchString(framework.RegexIPv4+"||"+framework.RegexIPv6, clusterIP)
+isClusterIPV46, err := regexp.MatchString(e2enetwork.RegexIPv4+"||"+e2enetwork.RegexIPv6, clusterIP)
 if err != nil {
 return fmt.Errorf("unable to parse ClusterIP: %s", clusterIP)
 }
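
The IP-address regexes travel with the package too; a minimal sketch of the kind of check performed above (the helper is hypothetical, and it joins the patterns with a single "|" alternation, whereas the code above concatenates them with "||"):

package example

import (
	"regexp"

	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
)

// looksLikeIP reports whether s matches the exported IPv4 or IPv6
// pattern, now referenced through e2enetwork instead of framework.
func looksLikeIP(s string) (bool, error) {
	return regexp.MatchString(e2enetwork.RegexIPv4+"|"+e2enetwork.RegexIPv6, s)
}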


@@ -28,6 +28,7 @@ import (
 utilnet "k8s.io/apimachinery/pkg/util/net"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
+e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 )
 // TestReachableHTTP tests that the given host serves HTTP on the given port.
@@ -38,12 +39,12 @@ func TestReachableHTTP(host string, port int, timeout time.Duration) {
 // TestReachableHTTPWithRetriableErrorCodes tests that the given host serves HTTP on the given port with the given retriableErrCodes.
 func TestReachableHTTPWithRetriableErrorCodes(host string, port int, retriableErrCodes []int, timeout time.Duration) {
 pollfn := func() (bool, error) {
-result := framework.PokeHTTP(host, port, "/echo?msg=hello",
+result := e2enetwork.PokeHTTP(host, port, "/echo?msg=hello",
-&framework.HTTPPokeParams{
+&e2enetwork.HTTPPokeParams{
 BodyContains: "hello",
 RetriableCodes: retriableErrCodes,
 })
-if result.Status == framework.HTTPSuccess {
+if result.Status == e2enetwork.HTTPSuccess {
 return true, nil
 }
 return false, nil // caller can retry
@@ -61,7 +62,7 @@ func TestReachableHTTPWithRetriableErrorCodes(host string, port int, retriableEr
 // TestNotReachableHTTP tests that a HTTP request doesn't connect to the given host and port.
 func TestNotReachableHTTP(host string, port int, timeout time.Duration) {
 pollfn := func() (bool, error) {
-result := framework.PokeHTTP(host, port, "/", nil)
+result := e2enetwork.PokeHTTP(host, port, "/", nil)
 if result.Code == 0 {
 return true, nil
 }
@@ -76,8 +77,8 @@ func TestNotReachableHTTP(host string, port int, timeout time.Duration) {
 // TestRejectedHTTP tests that the given host rejects a HTTP request on the given port.
 func TestRejectedHTTP(host string, port int, timeout time.Duration) {
 pollfn := func() (bool, error) {
-result := framework.PokeHTTP(host, port, "/", nil)
+result := e2enetwork.PokeHTTP(host, port, "/", nil)
-if result.Status == framework.HTTPRefused {
+if result.Status == e2enetwork.HTTPRefused {
 return true, nil
 }
 return false, nil // caller can retry
@@ -201,8 +202,8 @@ func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Re
 func GetHTTPContent(host string, port int, timeout time.Duration, url string) bytes.Buffer {
 var body bytes.Buffer
 if pollErr := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
-result := framework.PokeHTTP(host, port, url, nil)
+result := e2enetwork.PokeHTTP(host, port, url, nil)
-if result.Status == framework.HTTPSuccess {
+if result.Status == e2enetwork.HTTPSuccess {
 body.Write(result.Body)
 return true, nil
 }
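
The helpers above all wrap PokeHTTP in the same poll loop; a compilable sketch of that pattern (the function name is hypothetical; framework.Poll is the framework's default poll interval, as used by GetHTTPContent above):

package example

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"

	"k8s.io/kubernetes/test/e2e/framework"
	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
)

// waitForHTTPSuccess polls until the target answers a GET on "/" with
// a success status, or the timeout expires.
func waitForHTTPSuccess(host string, port int, timeout time.Duration) error {
	return wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
		result := e2enetwork.PokeHTTP(host, port, "/", nil)
		return result.Status == e2enetwork.HTTPSuccess, nil
	})
}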


@@ -21,11 +21,6 @@ import (
 "time"
 )
-const (
-resizeNodeReadyTimeout = 2 * time.Minute
-resizeNodeNotReadyTimeout = 2 * time.Minute
-)
 // ResizeGroup resizes an instance group
 func ResizeGroup(group string, size int32) error {
 if TestContext.ReportDir != "" {


@@ -65,6 +65,7 @@ go_library(
 "//test/e2e/framework/deployment:go_default_library",
 "//test/e2e/framework/endpoints:go_default_library",
 "//test/e2e/framework/ingress:go_default_library",
+"//test/e2e/framework/network:go_default_library",
 "//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/providers/gce:go_default_library",


@@ -28,6 +28,7 @@ import (
 	cloudprovider "k8s.io/cloud-provider"
 	"k8s.io/kubernetes/pkg/master/ports"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
@@ -222,8 +223,8 @@ var _ = SIGDescribe("Firewall rule", func() {
 })
 func assertNotReachableHTTPTimeout(ip string, port int, timeout time.Duration) {
-	result := framework.PokeHTTP(ip, port, "/", &framework.HTTPPokeParams{Timeout: timeout})
-	if result.Status == framework.HTTPError {
+	result := e2enetwork.PokeHTTP(ip, port, "/", &e2enetwork.HTTPPokeParams{Timeout: timeout})
+	if result.Status == e2enetwork.HTTPError {
 		framework.Failf("Unexpected error checking for reachability of %s:%d: %v", ip, port, result.Error)
 	}
 	if result.Code != 0 {
@@ -243,8 +244,8 @@ func testHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout
 	hittedHosts := sets.NewString()
 	count := 0
 	condition := func() (bool, error) {
-		result := framework.PokeHTTP(externalIP, int(httpPort), "/hostname", &framework.HTTPPokeParams{Timeout: 1 * time.Second})
-		if result.Status != framework.HTTPSuccess {
+		result := e2enetwork.PokeHTTP(externalIP, int(httpPort), "/hostname", &e2enetwork.HTTPPokeParams{Timeout: 1 * time.Second})
+		if result.Status != e2enetwork.HTTPSuccess {
 			return false, nil
 		}
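The hunk cuts off before the condition's completion check. A condensed, hedged reconstruction of the whole loop; the completion condition and the driver call are assumptions about the elided tail of testHitNodesFromOutsideWithCount, not a verbatim copy:

```go
package example

import (
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
)

// hitNodesFromOutside pokes /hostname until every expected node has
// answered and at least countToSucceed successful probes were seen.
func hitNodesFromOutside(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String, countToSucceed int) error {
	hittedHosts := sets.NewString()
	count := 0
	condition := func() (bool, error) {
		result := e2enetwork.PokeHTTP(externalIP, int(httpPort), "/hostname", &e2enetwork.HTTPPokeParams{Timeout: 1 * time.Second})
		if result.Status != e2enetwork.HTTPSuccess {
			return false, nil
		}
		// The response body is the serving pod's hostname.
		hittedHosts.Insert(strings.TrimSpace(string(result.Body)))
		count++
		return hittedHosts.Equal(expectedHosts) && count >= countToSucceed, nil
	}
	return wait.Poll(time.Second, timeout, condition)
}
```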


@@ -25,6 +25,7 @@ import (
 	utilwait "k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/master/ports"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
@@ -93,7 +94,7 @@ var _ = SIGDescribe("Networking", func() {
 	ginkgo.It("should check kube-proxy urls", func() {
 		// TODO: this is overkill we just need the host networking pod
 		// to hit kube-proxy urls.
-		config := framework.NewNetworkingTestConfig(f)
+		config := e2enetwork.NewNetworkingTestConfig(f)
 		ginkgo.By("checking kube-proxy URLs")
 		config.GetSelfURL(ports.ProxyHealthzPort, "/healthz", "200 OK")
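GetSelfURL has the host-network test pod curl one of kube-proxy's local ports and assert that the response contains the expected string. A sketch of the call shape, assuming the (port, path, expected-substring) signature shown above; the /proxyMode check is illustrative of a second common target, not part of this hunk:

```go
package example

import (
	"k8s.io/kubernetes/pkg/master/ports"
	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
)

// checkKubeProxyURLs mirrors the test body above: probe kube-proxy's
// health and status endpoints from inside the cluster.
func checkKubeProxyURLs(config *e2enetwork.NetworkingTestConfig) {
	config.GetSelfURL(ports.ProxyHealthzPort, "/healthz", "200 OK")
	config.GetSelfURL(ports.ProxyStatusPort, "/proxyMode", "iptables") // illustrative extra check
}
```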
@@ -106,84 +107,84 @@ var _ = SIGDescribe("Networking", func() {
 	ginkgo.Describe("Granular Checks: Services", func() {
 		ginkgo.It("should function for pod-Service: http", func() {
-			config := framework.NewNetworkingTestConfig(f)
-			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort))
-			config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
+			config := e2enetwork.NewNetworkingTestConfig(f)
+			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
+			config.DialFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
 			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeHTTPPort))
 			config.DialFromTestContainer("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
 		})
 		ginkgo.It("should function for pod-Service: udp", func() {
-			config := framework.NewNetworkingTestConfig(f)
-			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort))
-			config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
+			config := e2enetwork.NewNetworkingTestConfig(f)
+			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
+			config.DialFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
 			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeUDPPort))
 			config.DialFromTestContainer("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
 		})
 		ginkgo.It("should function for node-Service: http", func() {
-			config := framework.NewNetworkingTestConfig(f)
-			ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterHTTPPort))
-			config.DialFromNode("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
+			config := e2enetwork.NewNetworkingTestConfig(f)
+			ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterHTTPPort))
+			config.DialFromNode("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
 			ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort))
 			config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
 		})
 		ginkgo.It("should function for node-Service: udp", func() {
-			config := framework.NewNetworkingTestConfig(f)
-			ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterUDPPort))
-			config.DialFromNode("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
+			config := e2enetwork.NewNetworkingTestConfig(f)
+			ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterUDPPort))
+			config.DialFromNode("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
 			ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort))
 			config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
 		})
 		ginkgo.It("should function for endpoint-Service: http", func() {
-			config := framework.NewNetworkingTestConfig(f)
-			ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterHTTPPort))
-			config.DialFromEndpointContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
+			config := e2enetwork.NewNetworkingTestConfig(f)
+			ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
+			config.DialFromEndpointContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
 			ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeHTTPPort))
 			config.DialFromEndpointContainer("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
 		})
 		ginkgo.It("should function for endpoint-Service: udp", func() {
-			config := framework.NewNetworkingTestConfig(f)
-			ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterUDPPort))
-			config.DialFromEndpointContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
+			config := e2enetwork.NewNetworkingTestConfig(f)
+			ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
+			config.DialFromEndpointContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
 			ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeUDPPort))
 			config.DialFromEndpointContainer("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
 		})
 		ginkgo.It("should update endpoints: http", func() {
-			config := framework.NewNetworkingTestConfig(f)
-			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort))
-			config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
+			config := e2enetwork.NewNetworkingTestConfig(f)
+			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
+			config.DialFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
 			config.DeleteNetProxyPod()
-			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort))
-			config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
+			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterHTTPPort))
+			config.DialFromTestContainer("http", config.ClusterIP, e2enetwork.ClusterHTTPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
 		})
 		ginkgo.It("should update endpoints: udp", func() {
-			config := framework.NewNetworkingTestConfig(f)
-			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort))
-			config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
+			config := e2enetwork.NewNetworkingTestConfig(f)
+			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
+			config.DialFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
 			config.DeleteNetProxyPod()
-			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort))
-			config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
+			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterUDPPort))
+			config.DialFromTestContainer("udp", config.ClusterIP, e2enetwork.ClusterUDPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
 		})
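Every Granular Checks case above reduces to the same dial call; only the source (test container, node, or endpoint pod) and the target vary. The two numeric arguments are easy to misread, so here is an annotated sketch. The parameter meanings are inferred from how the update-endpoints cases pass MaxTries twice; treat them as an assumption, not documented API:

```go
config.DialFromTestContainer(
	"http",                     // protocol: "http" or "udp"
	config.ClusterIP,           // target IP: cluster IP or node IP
	e2enetwork.ClusterHTTPPort, // target port
	config.MaxTries,            // maxTries: give up after this many attempts
	0,                          // minTries: 0 => stop as soon as all expected endpoints answered
	config.EndpointHostnames(), // the set of pod hostnames that must all answer
)
```

Passing minTries equal to MaxTries, as the update-endpoints cases do after DeleteNetProxyPod, forces the full round of attempts, which is what proves the deleted endpoint never answers again.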
 		// Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling.
 		ginkgo.It("should update nodePort: http [Slow]", func() {
-			config := framework.NewNetworkingTestConfig(f)
+			config := e2enetwork.NewNetworkingTestConfig(f)
 			ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort))
 			config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
@@ -195,7 +196,7 @@ var _ = SIGDescribe("Networking", func() {
 		// Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling.
 		ginkgo.It("should update nodePort: udp [Slow]", func() {
-			config := framework.NewNetworkingTestConfig(f)
+			config := e2enetwork.NewNetworkingTestConfig(f)
 			ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort))
 			config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
@@ -207,11 +208,11 @@ var _ = SIGDescribe("Networking", func() {
 		// [LinuxOnly]: Windows does not support session affinity.
 		ginkgo.It("should function for client IP based session affinity: http [LinuxOnly]", func() {
-			config := framework.NewNetworkingTestConfig(f)
-			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHTTPPort))
+			config := e2enetwork.NewNetworkingTestConfig(f)
+			ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterHTTPPort))
 			// Check if number of endpoints returned are exactly one.
-			eps, err := config.GetEndpointsFromTestContainer("http", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHTTPPort, framework.SessionAffinityChecks)
+			eps, err := config.GetEndpointsFromTestContainer("http", config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterHTTPPort, e2enetwork.SessionAffinityChecks)
 			if err != nil {
 				framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err)
 			}
@@ -225,11 +226,11 @@ var _ = SIGDescribe("Networking", func() {
 		// [LinuxOnly]: Windows does not support session affinity.
 		ginkgo.It("should function for client IP based session affinity: udp [LinuxOnly]", func() {
-			config := framework.NewNetworkingTestConfig(f)
-			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUDPPort))
+			config := e2enetwork.NewNetworkingTestConfig(f)
+			ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterUDPPort))
 			// Check if number of endpoints returned are exactly one.
-			eps, err := config.GetEndpointsFromTestContainer("udp", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUDPPort, framework.SessionAffinityChecks)
+			eps, err := config.GetEndpointsFromTestContainer("udp", config.SessionAffinityService.Spec.ClusterIP, e2enetwork.ClusterUDPPort, e2enetwork.SessionAffinityChecks)
 			if err != nil {
 				framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err)
 			}
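Both affinity cases stop at fetching eps; the hunk elides the assertion that follows. A hedged sketch of what the check amounts to, given the comment above ("Check if number of endpoints returned are exactly one"): eps collects the distinct endpoints seen over SessionAffinityChecks probes, and with ClientIP affinity every probe must land on the same backend. The concrete assertion text is illustrative, continuing the http case with the same variables in scope:

```go
if err != nil {
	framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err)
}
// With session affinity, all probes must hit one backend.
if len(eps) != 1 {
	framework.Failf("expected exactly 1 endpoint with session affinity, got %d: %v", len(eps), eps)
}
```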


@@ -40,6 +40,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
 	e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
+	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
@@ -2326,7 +2327,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
 		ginkgo.By(fmt.Sprintf("checking kube-proxy health check fails on node with endpoint (%s), public IP %s", nodeName, nodeIPs[0]))
 		var body bytes.Buffer
 		pollfn := func() (bool, error) {
-			result := framework.PokeHTTP(nodeIPs[0], healthCheckNodePort, "/healthz", nil)
+			result := e2enetwork.PokeHTTP(nodeIPs[0], healthCheckNodePort, "/healthz", nil)
 			if result.Code == 0 {
 				return true, nil
 			}
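Note the inverted success condition here: PokeHTTP reports Code == 0 when no HTTP response came back at all, so this pollfn returns true precisely when the health check has stopped serving. A self-contained sketch of the same idea; the helper name, interval, and timeout are hypothetical:

```go
package example

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
)

// waitForHealthCheckDown succeeds once the node's healthCheckNodePort
// stops answering, i.e. once the kube-proxy health check goes dark.
func waitForHealthCheckDown(nodeIP string, healthCheckNodePort int, timeout time.Duration) error {
	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		result := e2enetwork.PokeHTTP(nodeIP, healthCheckNodePort, "/healthz", nil)
		return result.Code == 0, nil // 0 => no HTTP response received, which is what we want here
	})
}
```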


@@ -38,6 +38,7 @@ go_library(
 	"//test/e2e/framework:go_default_library",
 	"//test/e2e/framework/kubelet:go_default_library",
 	"//test/e2e/framework/metrics:go_default_library",
+	"//test/e2e/framework/network:go_default_library",
 	"//test/e2e/framework/node:go_default_library",
 	"//test/e2e/framework/pod:go_default_library",
 	"//test/e2e/framework/service:go_default_library",


@@ -19,6 +19,7 @@ package windows
 import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 	"github.com/onsi/ginkgo"
 )
@@ -46,9 +47,9 @@ var _ = ginkgo.Describe("[sig-network] [sig-windows] Networking", func() {
 		The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
 	*/
 	ginkgo.It("should function for intra-pod communication: http", func() {
-		config := framework.NewCoreNetworkingTestConfig(f, false)
+		config := e2enetwork.NewCoreNetworkingTestConfig(f, false)
 		for _, endpointPod := range config.EndpointPods {
-			config.DialFromTestContainer("http", endpointPod.Status.PodIP, framework.EndpointHTTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
+			config.DialFromTestContainer("http", endpointPod.Status.PodIP, e2enetwork.EndpointHTTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
 		}
 	})
@@ -59,9 +60,9 @@ var _ = ginkgo.Describe("[sig-network] [sig-windows] Networking", func() {
 		The kubectl exec on the webserver container MUST reach a udp port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
 	*/
 	ginkgo.It("should function for intra-pod communication: udp", func() {
-		config := framework.NewCoreNetworkingTestConfig(f, false)
+		config := e2enetwork.NewCoreNetworkingTestConfig(f, false)
 		for _, endpointPod := range config.EndpointPods {
-			config.DialFromTestContainer("udp", endpointPod.Status.PodIP, framework.EndpointUDPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
+			config.DialFromTestContainer("udp", endpointPod.Status.PodIP, e2enetwork.EndpointUDPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
 		}
 	})
@@ -72,9 +73,9 @@ var _ = ginkgo.Describe("[sig-network] [sig-windows] Networking", func() {
 		The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster using a http post(protocol=tcp) and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
 	*/
 	ginkgo.It("should function for node-pod communication: http", func() {
-		config := framework.NewCoreNetworkingTestConfig(f, false)
+		config := e2enetwork.NewCoreNetworkingTestConfig(f, false)
 		for _, endpointPod := range config.EndpointPods {
-			config.DialFromNode("http", endpointPod.Status.PodIP, framework.EndpointHTTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
+			config.DialFromNode("http", endpointPod.Status.PodIP, e2enetwork.EndpointHTTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
 		}
 	})
@@ -85,9 +86,9 @@ var _ = ginkgo.Describe("[sig-network] [sig-windows] Networking", func() {
 		The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster using a http post(protocol=udp) and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
 	*/
 	ginkgo.It("should function for node-pod communication: udp", func() {
-		config := framework.NewCoreNetworkingTestConfig(f, false)
+		config := e2enetwork.NewCoreNetworkingTestConfig(f, false)
 		for _, endpointPod := range config.EndpointPods {
-			config.DialFromNode("udp", endpointPod.Status.PodIP, framework.EndpointUDPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
+			config.DialFromNode("udp", endpointPod.Status.PodIP, e2enetwork.EndpointUDPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
 		}
 	})
 })