From c38ae01f8e7509dec80fa0cc72846d3ffb6912f5 Mon Sep 17 00:00:00 2001 From: draveness Date: Tue, 2 Jul 2019 00:55:04 +0800 Subject: [PATCH] feat: move service_util to separated package --- hack/.import-aliases | 5 +- test/e2e/apps/BUILD | 1 + test/e2e/apps/daemon_restart.go | 1 - test/e2e/apps/network_partition.go | 5 +- test/e2e/apps/statefulset.go | 5 +- test/e2e/common/BUILD | 1 + test/e2e/common/autoscaling_utils.go | 7 +- test/e2e/framework/BUILD | 4 +- test/e2e/framework/ingress/BUILD | 1 + test/e2e/framework/ingress/ingress_utils.go | 19 +- test/e2e/framework/networking_utils.go | 9 +- test/e2e/framework/providers/gce/BUILD | 1 + test/e2e/framework/providers/gce/gce.go | 9 +- test/e2e/framework/providers/gce/ingress.go | 5 +- test/e2e/framework/service/BUILD | 55 ++ .../e2e/framework/service/affinity_checker.go | 56 ++ test/e2e/framework/service/const.go | 78 ++ test/e2e/framework/service/fixture.go | 159 ++++ test/e2e/framework/service/hostname.go | 202 ++++ .../{service_util.go => service/jig.go} | 870 ++++-------------- test/e2e/framework/service/resource.go | 174 ++++ test/e2e/framework/service/wait.go | 65 ++ test/e2e/framework/statefulset/fixtures.go | 72 +- test/e2e/framework/statefulset/rest.go | 10 +- test/e2e/framework/statefulset/wait.go | 34 +- test/e2e/framework/util.go | 45 - .../logging/elasticsearch/BUILD | 1 + .../logging/elasticsearch/kibana.go | 3 +- .../logging/elasticsearch/utils.go | 7 +- test/e2e/kubectl/BUILD | 1 + test/e2e/kubectl/kubectl.go | 9 +- test/e2e/network/BUILD | 1 + test/e2e/network/dns.go | 19 +- test/e2e/network/dns_configmap.go | 7 +- test/e2e/network/example_cluster_dns.go | 6 +- test/e2e/network/firewall.go | 23 +- test/e2e/network/ingress.go | 21 +- test/e2e/network/network_tiers.go | 13 +- test/e2e/network/service.go | 359 ++++---- test/e2e/scheduling/BUILD | 1 + test/e2e/scheduling/limit_range.go | 9 +- test/e2e/ui/BUILD | 1 + test/e2e/ui/dashboard.go | 3 +- test/e2e/upgrades/BUILD | 1 + test/e2e/upgrades/services.go | 17 +- test/e2e/windows/BUILD | 1 + test/e2e/windows/service.go | 9 +- test/e2e_node/device_plugin.go | 4 +- test/soak/serve_hostnames/BUILD | 1 + test/soak/serve_hostnames/serve_hostnames.go | 5 +- 50 files changed, 1339 insertions(+), 1076 deletions(-) create mode 100644 test/e2e/framework/service/BUILD create mode 100644 test/e2e/framework/service/affinity_checker.go create mode 100644 test/e2e/framework/service/const.go create mode 100644 test/e2e/framework/service/fixture.go create mode 100644 test/e2e/framework/service/hostname.go rename test/e2e/framework/{service_util.go => service/jig.go} (51%) create mode 100644 test/e2e/framework/service/resource.go create mode 100644 test/e2e/framework/service/wait.go diff --git a/hack/.import-aliases b/hack/.import-aliases index ef9fd163569..46c0b54e376 100644 --- a/hack/.import-aliases +++ b/hack/.import-aliases @@ -46,5 +46,6 @@ "k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1": "kubeletresourcemetricsv1alpha1", "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1": "kubeletstatsv1alpha1", "k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1": "proxyconfigv1alpha1", - "k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1": "schedulerconfigv1alpha1" -} \ No newline at end of file + "k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1": "schedulerconfigv1alpha1", + "k8s.io/kubernetes/test/e2e/framework/service": "e2eservice" +} diff --git a/test/e2e/apps/BUILD b/test/e2e/apps/BUILD index e51a2825b60..00637d1e67c 100644 --- a/test/e2e/apps/BUILD +++ b/test/e2e/apps/BUILD 
@@ -68,6 +68,7 @@ go_library( "//test/e2e/framework/node:go_default_library", "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/replicaset:go_default_library", + "//test/e2e/framework/service:go_default_library", "//test/e2e/framework/ssh:go_default_library", "//test/e2e/framework/statefulset:go_default_library", "//test/utils:go_default_library", diff --git a/test/e2e/apps/daemon_restart.go b/test/e2e/apps/daemon_restart.go index a1bfcb4bba1..29b74eea4eb 100644 --- a/test/e2e/apps/daemon_restart.go +++ b/test/e2e/apps/daemon_restart.go @@ -298,7 +298,6 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { }) ginkgo.It("Kubelet should not restart containers across restart", func() { - nodeIPs, err := e2enode.GetPublicIps(f.ClientSet) if err != nil { e2elog.Logf("Unexpected error occurred: %v", err) diff --git a/test/e2e/apps/network_partition.go b/test/e2e/apps/network_partition.go index c977413d25d..c0ac8f26666 100644 --- a/test/e2e/apps/network_partition.go +++ b/test/e2e/apps/network_partition.go @@ -30,7 +30,7 @@ import ( "k8s.io/client-go/tools/cache" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" api "k8s.io/kubernetes/pkg/apis/core" nodepkg "k8s.io/kubernetes/pkg/controller/nodelifecycle" @@ -40,6 +40,7 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" testutils "k8s.io/kubernetes/test/utils" @@ -360,7 +361,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { // TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed. 
framework.SkipUnlessProviderIs("gke") ginkgo.By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name) - headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels) + headlessService := e2eservice.CreateServiceSpec(headlessSvcName, "", true, labels) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) framework.ExpectNoError(err) c = f.ClientSet diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index c1054db542b..1a86b0a07a0 100644 --- a/test/e2e/apps/statefulset.go +++ b/test/e2e/apps/statefulset.go @@ -25,7 +25,7 @@ import ( "github.com/onsi/ginkgo" "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" klabels "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" @@ -36,6 +36,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -80,7 +81,7 @@ var _ = SIGDescribe("StatefulSet", func() { ss = e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels) ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns) - headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels) + headlessService := e2eservice.CreateServiceSpec(headlessSvcName, "", true, labels) _, err := c.CoreV1().Services(ns).Create(headlessService) framework.ExpectNoError(err) }) diff --git a/test/e2e/common/BUILD b/test/e2e/common/BUILD index c731f36f83f..f9c7a055cf7 100644 --- a/test/e2e/common/BUILD +++ b/test/e2e/common/BUILD @@ -81,6 +81,7 @@ go_library( "//test/e2e/framework/node:go_default_library", "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/replicaset:go_default_library", + "//test/e2e/framework/service:go_default_library", "//test/e2e/framework/volume:go_default_library", "//test/utils:go_default_library", "//test/utils/image:go_default_library", diff --git a/test/e2e/common/autoscaling_utils.go b/test/e2e/common/autoscaling_utils.go index 647810086f4..6847c1dc4bb 100644 --- a/test/e2e/common/autoscaling_utils.go +++ b/test/e2e/common/autoscaling_utils.go @@ -35,6 +35,7 @@ import ( e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/replicaset" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" testutils "k8s.io/kubernetes/test/utils" "github.com/onsi/ginkgo" @@ -255,7 +256,7 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) { defer cancel() err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) { - proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) + proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.nsName). Context(ctx). 
@@ -282,7 +283,7 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) { defer cancel() err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) { - proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) + proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.nsName). Context(ctx). @@ -309,7 +310,7 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) { defer cancel() err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) { - proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) + proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) framework.ExpectNoError(err) req := proxyRequest.Namespace(rc.nsName). Context(ctx). diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index cfbfb88ece5..cae2c3326e4 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -24,7 +24,6 @@ go_library( "pv_util.go", "rc_util.go", "resource_usage_gatherer.go", - "service_util.go", "size.go", "test_context.go", "util.go", @@ -45,7 +44,6 @@ go_library( "//pkg/kubelet/metrics:go_default_library", "//pkg/kubelet/sysctl:go_default_library", "//pkg/master/ports:go_default_library", - "//pkg/registry/core/service/portallocator:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/security/podsecuritypolicy/seccomp:go_default_library", @@ -92,7 +90,6 @@ go_library( "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library", "//staging/src/k8s.io/client-go/tools/watch:go_default_library", - "//staging/src/k8s.io/client-go/util/retry:go_default_library", "//staging/src/k8s.io/component-base/cli/flag:go_default_library", "//test/e2e/framework/auth:go_default_library", "//test/e2e/framework/config:go_default_library", @@ -153,6 +150,7 @@ filegroup( "//test/e2e/framework/providers/vsphere:all-srcs", "//test/e2e/framework/replicaset:all-srcs", "//test/e2e/framework/resource:all-srcs", + "//test/e2e/framework/service:all-srcs", "//test/e2e/framework/ssh:all-srcs", "//test/e2e/framework/statefulset:all-srcs", "//test/e2e/framework/testfiles:all-srcs", diff --git a/test/e2e/framework/ingress/BUILD b/test/e2e/framework/ingress/BUILD index ffabed3eea4..b85ad23665a 100644 --- a/test/e2e/framework/ingress/BUILD +++ b/test/e2e/framework/ingress/BUILD @@ -20,6 +20,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/node:go_default_library", + "//test/e2e/framework/service:go_default_library", "//test/e2e/framework/testfiles:go_default_library", "//test/e2e/manifest:go_default_library", "//test/utils:go_default_library", diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go index c708e18ac00..c265a2e885e 100644 --- a/test/e2e/framework/ingress/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -51,6 +51,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2enode "k8s.io/kubernetes/test/e2e/framework/node" + e2eservice 
"k8s.io/kubernetes/test/e2e/framework/service" "k8s.io/kubernetes/test/e2e/framework/testfiles" "k8s.io/kubernetes/test/e2e/manifest" testutils "k8s.io/kubernetes/test/utils" @@ -215,7 +216,7 @@ func CreateIngressComformanceTests(jig *TestJig, ns string, annotations map[stri }) ginkgo.By("Checking that " + pathToFail + " is not exposed by polling for failure") route := fmt.Sprintf("http://%v%v", jig.Address, pathToFail) - framework.ExpectNoError(framework.PollURL(route, updateURLMapHost, framework.LoadBalancerCleanupTimeout, jig.PollInterval, &http.Client{Timeout: IngressReqTimeout}, true)) + framework.ExpectNoError(framework.PollURL(route, updateURLMapHost, e2eservice.LoadBalancerCleanupTimeout, jig.PollInterval, &http.Client{Timeout: IngressReqTimeout}, true)) }, fmt.Sprintf("Waiting for path updates to reflect in L7"), }, @@ -385,7 +386,7 @@ func NewIngressTestJig(c clientset.Interface) *TestJig { return &TestJig{ Client: c, RootCAs: map[string][]byte{}, - PollInterval: framework.LoadBalancerPollInterval, + PollInterval: e2eservice.LoadBalancerPollInterval, Logger: &E2ELogger{}, } } @@ -674,14 +675,14 @@ func (j *TestJig) pollIngressWithCert(ing *networkingv1beta1.Ingress, address st // WaitForIngress waits for the Ingress to get an address. // WaitForIngress returns when it gets the first 200 response func (j *TestJig) WaitForIngress(waitForNodePort bool) { - if err := j.WaitForGivenIngressWithTimeout(j.Ingress, waitForNodePort, framework.LoadBalancerPollTimeout); err != nil { + if err := j.WaitForGivenIngressWithTimeout(j.Ingress, waitForNodePort, e2eservice.LoadBalancerPollTimeout); err != nil { e2elog.Failf("error in waiting for ingress to get an address: %s", err) } } // WaitForIngressToStable waits for the LB return 100 consecutive 200 responses. func (j *TestJig) WaitForIngressToStable() { - if err := wait.Poll(10*time.Second, framework.LoadBalancerCreateTimeoutDefault, func() (bool, error) { + if err := wait.Poll(10*time.Second, e2eservice.LoadBalancerCreateTimeoutDefault, func() (bool, error) { _, err := j.GetDistinctResponseFromIngress() if err != nil { return false, nil @@ -720,12 +721,12 @@ func (j *TestJig) WaitForGivenIngressWithTimeout(ing *networkingv1beta1.Ingress, // Ingress. Hostnames and certificate need to be explicitly passed in. func (j *TestJig) WaitForIngressWithCert(waitForNodePort bool, knownHosts []string, cert []byte) error { // Wait for the loadbalancer IP. - address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, framework.LoadBalancerPollTimeout) + address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, e2eservice.LoadBalancerPollTimeout) if err != nil { - return fmt.Errorf("Ingress failed to acquire an IP address within %v", framework.LoadBalancerPollTimeout) + return fmt.Errorf("Ingress failed to acquire an IP address within %v", e2eservice.LoadBalancerPollTimeout) } - return j.pollIngressWithCert(j.Ingress, address, knownHosts, cert, waitForNodePort, framework.LoadBalancerPollTimeout) + return j.pollIngressWithCert(j.Ingress, address, knownHosts, cert, waitForNodePort, e2eservice.LoadBalancerPollTimeout) } // VerifyURL polls for the given iterations, in intervals, and fails if the @@ -812,9 +813,9 @@ func (j *TestJig) ConstructFirewallForIngress(firewallRuleName string, nodeTags // GetDistinctResponseFromIngress tries GET call to the ingress VIP and return all distinct responses. func (j *TestJig) GetDistinctResponseFromIngress() (sets.String, error) { // Wait for the loadbalancer IP. 
- address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, framework.LoadBalancerPollTimeout) + address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, e2eservice.LoadBalancerPollTimeout) if err != nil { - e2elog.Failf("Ingress failed to acquire an IP address within %v", framework.LoadBalancerPollTimeout) + e2elog.Failf("Ingress failed to acquire an IP address within %v", e2eservice.LoadBalancerPollTimeout) } responses := sets.NewString() timeoutClient := &http.Client{Timeout: IngressReqTimeout} diff --git a/test/e2e/framework/networking_utils.go b/test/e2e/framework/networking_utils.go index b661efbae37..4c1d344a06e 100644 --- a/test/e2e/framework/networking_utils.go +++ b/test/e2e/framework/networking_utils.go @@ -27,7 +27,7 @@ import ( "time" "github.com/onsi/ginkgo" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" @@ -74,7 +74,8 @@ const ( RegexIPv6 = "(?:(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){6})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:::(?:(?:(?:[0-9a-fA-F]{1,4})):){5})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){4})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,1}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){3})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,2}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){2})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,3}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:[0-9a-fA-F]{1,4})):)(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,4}(?:(?:[0-9a-fA-F]{1,4})))?::)(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,5}(?:(?:[0-9a-fA-F]{1,4})))?::)(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,6}(?:(?:[0-9a-fA-F]{1,4})))?::))))" ) -var netexecImageName = imageutils.GetE2EImage(imageutils.Agnhost) +// NetexecImageName is the image name for agnhost. +var NetexecImageName = imageutils.GetE2EImage(imageutils.Agnhost) // NewNetworkingTestConfig creates and sets up a new test config helper. 
func NewNetworkingTestConfig(f *Framework) *NetworkingTestConfig { @@ -425,7 +426,7 @@ func (config *NetworkingTestConfig) createNetShellPodSpec(podName, hostname stri Containers: []v1.Container{ { Name: "webserver", - Image: netexecImageName, + Image: NetexecImageName, ImagePullPolicy: v1.PullIfNotPresent, Args: []string{ "netexec", @@ -469,7 +470,7 @@ func (config *NetworkingTestConfig) createTestPodSpec() *v1.Pod { Containers: []v1.Container{ { Name: "webserver", - Image: netexecImageName, + Image: NetexecImageName, ImagePullPolicy: v1.PullIfNotPresent, Args: []string{ "netexec", diff --git a/test/e2e/framework/providers/gce/BUILD b/test/e2e/framework/providers/gce/BUILD index 725d6a304da..f40974d7b0c 100644 --- a/test/e2e/framework/providers/gce/BUILD +++ b/test/e2e/framework/providers/gce/BUILD @@ -26,6 +26,7 @@ go_library( "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/node:go_default_library", "//test/e2e/framework/pod:go_default_library", + "//test/e2e/framework/service:go_default_library", "//test/utils:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/google.golang.org/api/compute/v1:go_default_library", diff --git a/test/e2e/framework/providers/gce/gce.go b/test/e2e/framework/providers/gce/gce.go index e0167d1eda4..89207b1daf6 100644 --- a/test/e2e/framework/providers/gce/gce.go +++ b/test/e2e/framework/providers/gce/gce.go @@ -26,13 +26,14 @@ import ( compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" gcecloud "k8s.io/legacy-cloud-providers/gce" ) @@ -169,8 +170,8 @@ func (p *Provider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) erro } return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) { - service := p.gceCloud.ComputeServices().GA - list, err := service.ForwardingRules.List(project, region).Do() + e2eservice := p.gceCloud.ComputeServices().GA + list, err := e2eservice.ForwardingRules.List(project, region).Do() if err != nil { return false, err } @@ -255,7 +256,7 @@ func (p *Provider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error { // the given name. The name is usually the UUID of the Service prefixed with an // alpha-numeric character ('a') to work around cloudprovider rules. 
func (p *Provider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) { - if pollErr := wait.Poll(5*time.Second, framework.LoadBalancerCleanupTimeout, func() (bool, error) { + if pollErr := wait.Poll(5*time.Second, e2eservice.LoadBalancerCleanupTimeout, func() (bool, error) { if err := p.cleanupGCEResources(c, loadBalancerName, region, zone); err != nil { e2elog.Logf("Still waiting for glbc to cleanup: %v", err) return false, nil diff --git a/test/e2e/framework/providers/gce/ingress.go b/test/e2e/framework/providers/gce/ingress.go index 06e839b4ae5..2da229ef7b9 100644 --- a/test/e2e/framework/providers/gce/ingress.go +++ b/test/e2e/framework/providers/gce/ingress.go @@ -29,12 +29,13 @@ import ( "github.com/onsi/ginkgo" compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" utilexec "k8s.io/utils/exec" ) @@ -80,7 +81,7 @@ type IngressController struct { // CleanupIngressController calls cont.CleanupIngressControllerWithTimeout with hard-coded timeout func (cont *IngressController) CleanupIngressController() error { - return cont.CleanupIngressControllerWithTimeout(framework.LoadBalancerCleanupTimeout) + return cont.CleanupIngressControllerWithTimeout(e2eservice.LoadBalancerCleanupTimeout) } // CleanupIngressControllerWithTimeout calls the IngressController.Cleanup(false) diff --git a/test/e2e/framework/service/BUILD b/test/e2e/framework/service/BUILD new file mode 100644 index 00000000000..abbfb5b46e9 --- /dev/null +++ b/test/e2e/framework/service/BUILD @@ -0,0 +1,55 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "affinity_checker.go", + "const.go", + "fixture.go", + "hostname.go", + "jig.go", + "resource.go", + "wait.go", + ], + importpath = "k8s.io/kubernetes/test/e2e/framework/service", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/registry/core/service/portallocator:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/rest:go_default_library", + "//staging/src/k8s.io/client-go/util/retry:go_default_library", + "//test/e2e/framework:go_default_library", + "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/node:go_default_library", + "//test/e2e/framework/pod:go_default_library", + "//test/e2e/framework/ssh:go_default_library", + "//test/utils:go_default_library", + "//test/utils/image:go_default_library", + 
"//vendor/github.com/onsi/ginkgo:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/test/e2e/framework/service/affinity_checker.go b/test/e2e/framework/service/affinity_checker.go new file mode 100644 index 00000000000..9becea3be93 --- /dev/null +++ b/test/e2e/framework/service/affinity_checker.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + e2elog "k8s.io/kubernetes/test/e2e/framework/log" +) + +// affinityTracker tracks the destination of a request for the affinity tests. +type affinityTracker struct { + hostTrace []string +} + +// Record the response going to a given host. +func (at *affinityTracker) recordHost(host string) { + at.hostTrace = append(at.hostTrace, host) + e2elog.Logf("Received response from host: %s", host) +} + +// Check that we got a constant count requests going to the same host. +func (at *affinityTracker) checkHostTrace(count int) (fulfilled, affinityHolds bool) { + fulfilled = (len(at.hostTrace) >= count) + if len(at.hostTrace) == 0 { + return fulfilled, true + } + last := at.hostTrace[0:] + if len(at.hostTrace)-count >= 0 { + last = at.hostTrace[len(at.hostTrace)-count:] + } + host := at.hostTrace[len(at.hostTrace)-1] + for _, h := range last { + if h != host { + return fulfilled, false + } + } + return fulfilled, true +} + +func checkAffinityFailed(tracker affinityTracker, err string) { + e2elog.Logf("%v", tracker.hostTrace) + e2elog.Failf(err) +} diff --git a/test/e2e/framework/service/const.go b/test/e2e/framework/service/const.go new file mode 100644 index 00000000000..0810f675b32 --- /dev/null +++ b/test/e2e/framework/service/const.go @@ -0,0 +1,78 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "time" +) + +const ( + // RespondingTimeout is how long to wait for a service to be responding. + RespondingTimeout = 2 * time.Minute + + // MaxNodesForEndpointsTests is the max number for testing endpoints. + // Don't test with more than 3 nodes. + // Many tests create an endpoint per node, in large clusters, this is + // resource and time intensive. 
+ MaxNodesForEndpointsTests = 3 +) + +const ( + // KubeProxyLagTimeout is the maximum time a kube-proxy daemon on a node is allowed + // to not notice a Service update, such as type=NodePort. + // TODO: This timeout should be O(10s), observed values are O(1m), 5m is very + // liberal. Fix tracked in #20567. + KubeProxyLagTimeout = 5 * time.Minute + + // KubeProxyEndpointLagTimeout is the maximum time a kube-proxy daemon on a node is allowed + // to not notice an Endpoint update. + KubeProxyEndpointLagTimeout = 30 * time.Second + + // LoadBalancerLagTimeoutDefault is the maximum time a load balancer is allowed to + // not respond after creation. + LoadBalancerLagTimeoutDefault = 2 * time.Minute + + // LoadBalancerLagTimeoutAWS is the delay between ELB creation and serving traffic + // on AWS. A few minutes is typical, so use 10m. + LoadBalancerLagTimeoutAWS = 10 * time.Minute + + // LoadBalancerCreateTimeoutDefault is the default time to wait for a load balancer to be created/modified. + // TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable + LoadBalancerCreateTimeoutDefault = 20 * time.Minute + // LoadBalancerCreateTimeoutLarge is the maximum time to wait for a load balancer to be created/modified. + LoadBalancerCreateTimeoutLarge = 2 * time.Hour + + // LoadBalancerCleanupTimeout is the time required by the loadbalancer to cleanup, proportional to numApps/Ing. + // Bring the cleanup timeout back down to 5m once b/33588344 is resolved. + LoadBalancerCleanupTimeout = 15 * time.Minute + + // LoadBalancerPollTimeout is the time required by the loadbalancer to poll. + // On average it takes ~6 minutes for a single backend to come online in GCE. + LoadBalancerPollTimeout = 15 * time.Minute + // LoadBalancerPollInterval is the interval value in which the loadbalancer polls. + LoadBalancerPollInterval = 30 * time.Second + + // LargeClusterMinNodesNumber is the number of nodes which a large cluster consists of. + LargeClusterMinNodesNumber = 100 + + // TestTimeout is used for most polling/waiting activities + TestTimeout = 60 * time.Second + + // AffinityConfirmCount is the number of needed continuous requests to confirm that + // affinity is enabled. + AffinityConfirmCount = 15 +) diff --git a/test/e2e/framework/service/fixture.go b/test/e2e/framework/service/fixture.go new file mode 100644 index 00000000000..fbf7e9fe6d5 --- /dev/null +++ b/test/e2e/framework/service/fixture.go @@ -0,0 +1,159 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package service + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/uuid" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/util/retry" + imageutils "k8s.io/kubernetes/test/utils/image" + + "github.com/onsi/ginkgo" +) + +// TestFixture is a simple helper class to avoid too much boilerplate in tests +type TestFixture struct { + ServiceName string + Namespace string + Client clientset.Interface + + TestID string + Labels map[string]string + + rcs map[string]bool + services map[string]bool + Name string + Image string +} + +// NewServerTest creates a new TestFixture for the tests. +func NewServerTest(client clientset.Interface, namespace string, serviceName string) *TestFixture { + t := &TestFixture{} + t.Client = client + t.Namespace = namespace + t.ServiceName = serviceName + t.TestID = t.ServiceName + "-" + string(uuid.NewUUID()) + t.Labels = map[string]string{ + "testid": t.TestID, + } + + t.rcs = make(map[string]bool) + t.services = make(map[string]bool) + + t.Name = "webserver" + t.Image = imageutils.GetE2EImage(imageutils.TestWebserver) + + return t +} + +// BuildServiceSpec builds default config for a service (which can then be changed) +func (t *TestFixture) BuildServiceSpec() *v1.Service { + service := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: t.ServiceName, + Namespace: t.Namespace, + }, + Spec: v1.ServiceSpec{ + Selector: t.Labels, + Ports: []v1.ServicePort{{ + Port: 80, + TargetPort: intstr.FromInt(80), + }}, + }, + } + return service +} + +// CreateRC creates a replication controller and records it for cleanup. +func (t *TestFixture) CreateRC(rc *v1.ReplicationController) (*v1.ReplicationController, error) { + rc, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Create(rc) + if err == nil { + t.rcs[rc.Name] = true + } + return rc, err +} + +// CreateService creates a service, and record it for cleanup +func (t *TestFixture) CreateService(service *v1.Service) (*v1.Service, error) { + result, err := t.Client.CoreV1().Services(t.Namespace).Create(service) + if err == nil { + t.services[service.Name] = true + } + return result, err +} + +// DeleteService deletes a service, and remove it from the cleanup list +func (t *TestFixture) DeleteService(serviceName string) error { + err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil) + if err == nil { + delete(t.services, serviceName) + } + return err +} + +// Cleanup cleans all ReplicationControllers and Services which this object holds. +func (t *TestFixture) Cleanup() []error { + var errs []error + for rcName := range t.rcs { + ginkgo.By("stopping RC " + rcName + " in namespace " + t.Namespace) + err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + // First, resize the RC to 0. + old, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Get(rcName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return nil + } + return err + } + x := int32(0) + old.Spec.Replicas = &x + if _, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Update(old); err != nil { + if errors.IsNotFound(err) { + return nil + } + return err + } + return nil + }) + if err != nil { + errs = append(errs, err) + } + // TODO(mikedanese): Wait. + // Then, delete the RC altogether. 
+ if err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Delete(rcName, nil); err != nil { + if !errors.IsNotFound(err) { + errs = append(errs, err) + } + } + } + + for serviceName := range t.services { + ginkgo.By("deleting service " + serviceName + " in namespace " + t.Namespace) + err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil) + if err != nil { + if !errors.IsNotFound(err) { + errs = append(errs, err) + } + } + } + + return errs +} diff --git a/test/e2e/framework/service/hostname.go b/test/e2e/framework/service/hostname.go new file mode 100644 index 00000000000..f6019d0b694 --- /dev/null +++ b/test/e2e/framework/service/hostname.go @@ -0,0 +1,202 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "fmt" + "net" + "sort" + "strconv" + "strings" + "time" + + "github.com/onsi/ginkgo" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" + testutils "k8s.io/kubernetes/test/utils" +) + +// StartServeHostnameService creates a replication controller that serves its +// hostname and a service on top of it. +func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string, replicas int) ([]string, string, error) { + podNames := make([]string, replicas) + name := svc.ObjectMeta.Name + ginkgo.By("creating service " + name + " in namespace " + ns) + _, err := c.CoreV1().Services(ns).Create(svc) + if err != nil { + return podNames, "", err + } + + var createdPods []*v1.Pod + maxContainerFailures := 0 + config := testutils.RCConfig{ + Client: c, + Image: framework.ServeHostnameImage, + Command: []string{"/agnhost", "serve-hostname"}, + Name: name, + Namespace: ns, + PollInterval: 3 * time.Second, + Timeout: framework.PodReadyBeforeTimeout, + Replicas: replicas, + CreatedPods: &createdPods, + MaxContainerFailures: &maxContainerFailures, + } + err = framework.RunRC(config) + if err != nil { + return podNames, "", err + } + + if len(createdPods) != replicas { + return podNames, "", fmt.Errorf("incorrect number of running pods: %v", len(createdPods)) + } + + for i := range createdPods { + podNames[i] = createdPods[i].ObjectMeta.Name + } + sort.StringSlice(podNames).Sort() + + service, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) + if err != nil { + return podNames, "", err + } + if service.Spec.ClusterIP == "" { + return podNames, "", fmt.Errorf("service IP is blank for %v", name) + } + serviceIP := service.Spec.ClusterIP + return podNames, serviceIP, nil +} + +// StopServeHostnameService stops the given service. 
+func StopServeHostnameService(clientset clientset.Interface, ns, name string) error { + if err := framework.DeleteRCAndWaitForGC(clientset, ns, name); err != nil { + return err + } + if err := clientset.CoreV1().Services(ns).Delete(name, nil); err != nil { + return err + } + return nil +} + +// VerifyServeHostnameServiceUp wgets the given serviceIP:servicePort from the +// given host and from within a pod. The host is expected to be an SSH-able node +// in the cluster. Each pod in the service is expected to echo its name. These +// names are compared with the given expectedPods list after a sort | uniq. +func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expectedPods []string, serviceIP string, servicePort int) error { + execPodName := e2epod.CreateExecPodOrFail(c, ns, "execpod-", nil) + defer func() { + e2epod.DeletePodOrFail(c, ns, execPodName) + }() + + // Loop a bunch of times - the proxy is randomized, so we want a good + // chance of hitting each backend at least once. + buildCommand := func(wget string) string { + serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort)) + return fmt.Sprintf("for i in $(seq 1 %d); do %s http://%s 2>&1 || true; echo; done", + 50*len(expectedPods), wget, serviceIPPort) + } + commands := []func() string{ + // verify service from node + func() string { + cmd := "set -e; " + buildCommand("wget -q --timeout=0.2 --tries=1 -O -") + e2elog.Logf("Executing cmd %q on host %v", cmd, host) + result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider) + if err != nil || result.Code != 0 { + e2essh.LogResult(result) + e2elog.Logf("error while SSH-ing to node: %v", err) + } + return result.Stdout + }, + // verify service from pod + func() string { + cmd := buildCommand("wget -q -T 1 -O -") + e2elog.Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPodName) + // TODO: Use exec-over-http via the netexec pod instead of kubectl exec. + output, err := framework.RunHostCmd(ns, execPodName, cmd) + if err != nil { + e2elog.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPodName, err, output) + } + return output + }, + } + + expectedEndpoints := sets.NewString(expectedPods...) + ginkgo.By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods))) + for _, cmdFunc := range commands { + passed := false + gotEndpoints := sets.NewString() + + // Retry cmdFunc for a while + for start := time.Now(); time.Since(start) < KubeProxyLagTimeout; time.Sleep(5 * time.Second) { + for _, endpoint := range strings.Split(cmdFunc(), "\n") { + trimmedEp := strings.TrimSpace(endpoint) + if trimmedEp != "" { + gotEndpoints.Insert(trimmedEp) + } + } + // TODO: simply checking that the retrieved endpoints is a superset + // of the expected allows us to ignore intermitten network flakes that + // result in output like "wget timed out", but these should be rare + // and we need a better way to track how often it occurs. + if gotEndpoints.IsSuperset(expectedEndpoints) { + if !gotEndpoints.Equal(expectedEndpoints) { + e2elog.Logf("Ignoring unexpected output wgetting endpoints of service %s: %v", serviceIP, gotEndpoints.Difference(expectedEndpoints)) + } + passed = true + break + } + e2elog.Logf("Unable to reach the following endpoints of service %s: %v", serviceIP, expectedEndpoints.Difference(gotEndpoints)) + } + if !passed { + // Sort the lists so they're easier to visually diff. 
+ exp := expectedEndpoints.List() + got := gotEndpoints.List() + sort.StringSlice(exp).Sort() + sort.StringSlice(got).Sort() + return fmt.Errorf("service verification failed for: %s\nexpected %v\nreceived %v", serviceIP, exp, got) + } + } + return nil +} + +// VerifyServeHostnameServiceDown verifies that the given service isn't served. +func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceIP string, servicePort int) error { + ipPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort)) + // The current versions of curl included in CentOS and RHEL distros + // misinterpret square brackets around IPv6 as globbing, so use the -g + // argument to disable globbing to handle the IPv6 case. + command := fmt.Sprintf( + "curl -g -s --connect-timeout 2 http://%s && exit 99", ipPort) + + for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { + result, err := e2essh.SSH(command, host, framework.TestContext.Provider) + if err != nil { + e2essh.LogResult(result) + e2elog.Logf("error while SSH-ing to node: %v", err) + } + if result.Code != 99 { + return nil + } + e2elog.Logf("service still alive - still waiting") + } + return fmt.Errorf("waiting for service to be down timed out") +} diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service/jig.go similarity index 51% rename from test/e2e/framework/service_util.go rename to test/e2e/framework/service/jig.go index fb312389cd2..b6e9541066e 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service/jig.go @@ -14,17 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -package framework +package service import ( "bytes" "fmt" "net" - "sort" + "net/http" "strconv" "strings" "time" + "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/api/errors" @@ -36,89 +37,29 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/util/retry" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/registry/core/service/portallocator" + "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" - e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" - testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - - "github.com/onsi/ginkgo" ) -const ( - // KubeProxyLagTimeout is the maximum time a kube-proxy daemon on a node is allowed - // to not notice a Service update, such as type=NodePort. - // TODO: This timeout should be O(10s), observed values are O(1m), 5m is very - // liberal. Fix tracked in #20567. - KubeProxyLagTimeout = 5 * time.Minute +// NodePortRange should match whatever the default/configured range is +var NodePortRange = utilnet.PortRange{Base: 30000, Size: 2768} - // KubeProxyEndpointLagTimeout is the maximum time a kube-proxy daemon on a node is allowed - // to not notice an Endpoint update. - KubeProxyEndpointLagTimeout = 30 * time.Second - - // LoadBalancerLagTimeoutDefault is the maximum time a load balancer is allowed to - // not respond after creation. - LoadBalancerLagTimeoutDefault = 2 * time.Minute - - // LoadBalancerLagTimeoutAWS is the delay between ELB creation and serving traffic - // on AWS. A few minutes is typical, so use 10m. 
- LoadBalancerLagTimeoutAWS = 10 * time.Minute - - // LoadBalancerCreateTimeoutDefault is the default time to wait for a load balancer to be created/modified. - // TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable - LoadBalancerCreateTimeoutDefault = 20 * time.Minute - // LoadBalancerCreateTimeoutLarge is the maximum time to wait for a load balancer to be created/modified. - LoadBalancerCreateTimeoutLarge = 2 * time.Hour - - // LoadBalancerCleanupTimeout is the time required by the loadbalancer to cleanup, proportional to numApps/Ing. - // Bring the cleanup timeout back down to 5m once b/33588344 is resolved. - LoadBalancerCleanupTimeout = 15 * time.Minute - - // LoadBalancerPollTimeout is the time required by the loadbalancer to poll. - // On average it takes ~6 minutes for a single backend to come online in GCE. - LoadBalancerPollTimeout = 15 * time.Minute - // LoadBalancerPollInterval is the interval value in which the loadbalancer polls. - LoadBalancerPollInterval = 30 * time.Second - - // LargeClusterMinNodesNumber is the number of nodes which a large cluster consists of. - LargeClusterMinNodesNumber = 100 - - // MaxNodesForEndpointsTests is the max number for testing endpoints. - // Don't test with more than 3 nodes. - // Many tests create an endpoint per node, in large clusters, this is - // resource and time intensive. - MaxNodesForEndpointsTests = 3 - - // ServiceTestTimeout is used for most polling/waiting activities - ServiceTestTimeout = 60 * time.Second - - // GCPMaxInstancesInInstanceGroup is the maximum number of instances supported in - // one instance group on GCP. - GCPMaxInstancesInInstanceGroup = 2000 - - // AffinityConfirmCount is the number of needed continuous requests to confirm that - // affinity is enabled. - AffinityConfirmCount = 15 -) - -// ServiceNodePortRange should match whatever the default/configured range is -var ServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768} - -// ServiceTestJig is a test jig to help service testing. -type ServiceTestJig struct { +// TestJig is a test j to help service testing. +type TestJig struct { ID string Name string Client clientset.Interface Labels map[string]string } -// NewServiceTestJig allocates and inits a new ServiceTestJig. -func NewServiceTestJig(client clientset.Interface, name string) *ServiceTestJig { - j := &ServiceTestJig{} +// NewTestJig allocates and inits a new TestJig. +func NewTestJig(client clientset.Interface, name string) *TestJig { + j := &TestJig{} j.Client = client j.Name = name j.ID = j.Name + "-" + string(uuid.NewUUID()) @@ -127,10 +68,10 @@ func NewServiceTestJig(client clientset.Interface, name string) *ServiceTestJig return j } -// newServiceTemplate returns the default v1.Service template for this jig, but +// newServiceTemplate returns the default v1.Service template for this j, but // does not actually create the Service. The default Service has the same name -// as the jig and exposes the given port. -func (j *ServiceTestJig) newServiceTemplate(namespace string, proto v1.Protocol, port int32) *v1.Service { +// as the j and exposes the given port. +func (j *TestJig) newServiceTemplate(namespace string, proto v1.Protocol, port int32) *v1.Service { service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, @@ -151,9 +92,9 @@ func (j *ServiceTestJig) newServiceTemplate(namespace string, proto v1.Protocol, } // CreateTCPServiceWithPort creates a new TCP Service with given port based on the -// jig's defaults. 
Callers can provide a function to tweak the Service object before +// j's defaults. Callers can provide a function to tweak the Service object before // it is created. -func (j *ServiceTestJig) CreateTCPServiceWithPort(namespace string, tweak func(svc *v1.Service), port int32) *v1.Service { +func (j *TestJig) CreateTCPServiceWithPort(namespace string, tweak func(svc *v1.Service), port int32) *v1.Service { svc := j.newServiceTemplate(namespace, v1.ProtocolTCP, port) if tweak != nil { tweak(svc) @@ -165,10 +106,10 @@ func (j *ServiceTestJig) CreateTCPServiceWithPort(namespace string, tweak func(s return result } -// CreateTCPServiceOrFail creates a new TCP Service based on the jig's +// CreateTCPServiceOrFail creates a new TCP Service based on the j's // defaults. Callers can provide a function to tweak the Service object before // it is created. -func (j *ServiceTestJig) CreateTCPServiceOrFail(namespace string, tweak func(svc *v1.Service)) *v1.Service { +func (j *TestJig) CreateTCPServiceOrFail(namespace string, tweak func(svc *v1.Service)) *v1.Service { svc := j.newServiceTemplate(namespace, v1.ProtocolTCP, 80) if tweak != nil { tweak(svc) @@ -180,10 +121,10 @@ func (j *ServiceTestJig) CreateTCPServiceOrFail(namespace string, tweak func(svc return result } -// CreateUDPServiceOrFail creates a new UDP Service based on the jig's +// CreateUDPServiceOrFail creates a new UDP Service based on the j's // defaults. Callers can provide a function to tweak the Service object before // it is created. -func (j *ServiceTestJig) CreateUDPServiceOrFail(namespace string, tweak func(svc *v1.Service)) *v1.Service { +func (j *TestJig) CreateUDPServiceOrFail(namespace string, tweak func(svc *v1.Service)) *v1.Service { svc := j.newServiceTemplate(namespace, v1.ProtocolUDP, 80) if tweak != nil { tweak(svc) @@ -195,9 +136,9 @@ func (j *ServiceTestJig) CreateUDPServiceOrFail(namespace string, tweak func(svc return result } -// CreateExternalNameServiceOrFail creates a new ExternalName type Service based on the jig's defaults. +// CreateExternalNameServiceOrFail creates a new ExternalName type Service based on the j's defaults. // Callers can provide a function to tweak the Service object before it is created. -func (j *ServiceTestJig) CreateExternalNameServiceOrFail(namespace string, tweak func(svc *v1.Service)) *v1.Service { +func (j *TestJig) CreateExternalNameServiceOrFail(namespace string, tweak func(svc *v1.Service)) *v1.Service { svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, @@ -221,7 +162,7 @@ func (j *ServiceTestJig) CreateExternalNameServiceOrFail(namespace string, tweak } // CreateServiceWithServicePort creates a new Service with ServicePort. -func (j *ServiceTestJig) CreateServiceWithServicePort(labels map[string]string, namespace string, ports []v1.ServicePort) (*v1.Service, error) { +func (j *TestJig) CreateServiceWithServicePort(labels map[string]string, namespace string, ports []v1.ServicePort) (*v1.Service, error) { service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: j.Name, @@ -235,7 +176,7 @@ func (j *ServiceTestJig) CreateServiceWithServicePort(labels map[string]string, } // ChangeServiceType updates the given service's ServiceType to the given newType. 
-func (j *ServiceTestJig) ChangeServiceType(namespace, name string, newType v1.ServiceType, timeout time.Duration) { +func (j *TestJig) ChangeServiceType(namespace, name string, newType v1.ServiceType, timeout time.Duration) { ingressIP := "" svc := j.UpdateServiceOrFail(namespace, name, func(s *v1.Service) { for _, ing := range s.Status.LoadBalancer.Ingress { @@ -255,7 +196,7 @@ func (j *ServiceTestJig) ChangeServiceType(namespace, name string, newType v1.Se // ExternalTrafficPolicy set to Local and sanity checks its nodePort. // If createPod is true, it also creates an RC with 1 replica of // the standard netexec container used everywhere in this test. -func (j *ServiceTestJig) CreateOnlyLocalNodePortService(namespace, serviceName string, createPod bool) *v1.Service { +func (j *TestJig) CreateOnlyLocalNodePortService(namespace, serviceName string, createPod bool) *v1.Service { ginkgo.By("creating a service " + namespace + "/" + serviceName + " with type=NodePort and ExternalTrafficPolicy=Local") svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeNodePort @@ -275,7 +216,7 @@ func (j *ServiceTestJig) CreateOnlyLocalNodePortService(namespace, serviceName s // ExternalTrafficPolicy set to Local and waits for it to acquire an ingress IP. // If createPod is true, it also creates an RC with 1 replica of // the standard netexec container used everywhere in this test. -func (j *ServiceTestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceName string, timeout time.Duration, createPod bool, +func (j *TestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceName string, timeout time.Duration, createPod bool, tweak func(svc *v1.Service)) *v1.Service { ginkgo.By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer and ExternalTrafficPolicy=Local") svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) { @@ -300,7 +241,7 @@ func (j *ServiceTestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceNa // CreateLoadBalancerService creates a loadbalancer service and waits // for it to acquire an ingress IP. -func (j *ServiceTestJig) CreateLoadBalancerService(namespace, serviceName string, timeout time.Duration, tweak func(svc *v1.Service)) *v1.Service { +func (j *TestJig) CreateLoadBalancerService(namespace, serviceName string, timeout time.Duration, tweak func(svc *v1.Service)) *v1.Service { ginkgo.By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer") svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeLoadBalancer @@ -319,7 +260,7 @@ func (j *ServiceTestJig) CreateLoadBalancerService(namespace, serviceName string // GetEndpointNodes returns a map of nodenames:external-ip on which the // endpoints of the given Service are running. -func (j *ServiceTestJig) GetEndpointNodes(svc *v1.Service) map[string][]string { +func (j *TestJig) GetEndpointNodes(svc *v1.Service) map[string][]string { nodes := j.GetNodes(MaxNodesForEndpointsTests) endpoints, err := j.Client.CoreV1().Endpoints(svc.Namespace).Get(svc.Name, metav1.GetOptions{}) if err != nil { @@ -347,8 +288,8 @@ func (j *ServiceTestJig) GetEndpointNodes(svc *v1.Service) map[string][]string { // GetNodes returns the first maxNodesForTest nodes. Useful in large clusters // where we don't eg: want to create an endpoint per node. 
-func (j *ServiceTestJig) GetNodes(maxNodesForTest int) (nodes *v1.NodeList) { - nodes = GetReadySchedulableNodesOrDie(j.Client) +func (j *TestJig) GetNodes(maxNodesForTest int) (nodes *v1.NodeList) { + nodes = framework.GetReadySchedulableNodesOrDie(j.Client) if len(nodes.Items) <= maxNodesForTest { maxNodesForTest = len(nodes.Items) } @@ -357,7 +298,7 @@ func (j *ServiceTestJig) GetNodes(maxNodesForTest int) (nodes *v1.NodeList) { } // GetNodesNames returns a list of names of the first maxNodesForTest nodes -func (j *ServiceTestJig) GetNodesNames(maxNodesForTest int) []string { +func (j *TestJig) GetNodesNames(maxNodesForTest int) []string { nodes := j.GetNodes(maxNodesForTest) nodesNames := []string{} for _, node := range nodes.Items { @@ -367,8 +308,8 @@ func (j *ServiceTestJig) GetNodesNames(maxNodesForTest int) []string { } // WaitForEndpointOnNode waits for a service endpoint on the given node. -func (j *ServiceTestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName string) { - err := wait.PollImmediate(Poll, LoadBalancerCreateTimeoutDefault, func() (bool, error) { +func (j *TestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName string) { + err := wait.PollImmediate(framework.Poll, LoadBalancerCreateTimeoutDefault, func() (bool, error) { endpoints, err := j.Client.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{}) if err != nil { e2elog.Logf("Get endpoints for service %s/%s failed (%s)", namespace, serviceName, err) @@ -391,11 +332,11 @@ func (j *ServiceTestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName } return true, nil }) - ExpectNoError(err) + framework.ExpectNoError(err) } // SanityCheckService performs sanity checks on the given service -func (j *ServiceTestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceType) { +func (j *TestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceType) { if svc.Spec.Type != svcType { e2elog.Failf("unexpected Spec.Type (%s) for service, expected %s", svc.Spec.Type, svcType) } @@ -423,7 +364,7 @@ func (j *ServiceTestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceT e2elog.Failf("unexpected Spec.Ports[%d].NodePort (%d) for service", i, port.NodePort) } if hasNodePort { - if !ServiceNodePortRange.Contains(int(port.NodePort)) { + if !NodePortRange.Contains(int(port.NodePort)) { e2elog.Failf("out-of-range nodePort (%d) for service", port.NodePort) } } @@ -448,7 +389,7 @@ func (j *ServiceTestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceT // UpdateService fetches a service, calls the update function on it, and // then attempts to send the updated service. It tries up to 3 times in the // face of timeouts and conflicts. -func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*v1.Service)) (*v1.Service, error) { +func (j *TestJig) UpdateService(namespace, name string, update func(*v1.Service)) (*v1.Service, error) { for i := 0; i < 3; i++ { service, err := j.Client.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) if err != nil { @@ -469,7 +410,7 @@ func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*v1.S // UpdateServiceOrFail fetches a service, calls the update function on it, and // then attempts to send the updated service. It tries up to 3 times in the // face of timeouts and conflicts. 
-func (j *ServiceTestJig) UpdateServiceOrFail(namespace, name string, update func(*v1.Service)) *v1.Service { +func (j *TestJig) UpdateServiceOrFail(namespace, name string, update func(*v1.Service)) *v1.Service { svc, err := j.UpdateService(namespace, name, update) if err != nil { e2elog.Failf(err.Error()) @@ -478,7 +419,7 @@ func (j *ServiceTestJig) UpdateServiceOrFail(namespace, name string, update func } // WaitForNewIngressIPOrFail waits for the given service to get a new ingress IP, or fails after the given timeout -func (j *ServiceTestJig) WaitForNewIngressIPOrFail(namespace, name, existingIP string, timeout time.Duration) *v1.Service { +func (j *TestJig) WaitForNewIngressIPOrFail(namespace, name, existingIP string, timeout time.Duration) *v1.Service { e2elog.Logf("Waiting up to %v for service %q to get a new ingress IP", timeout, name) service := j.waitForConditionOrFail(namespace, name, timeout, "have a new ingress IP", func(svc *v1.Service) bool { if len(svc.Status.LoadBalancer.Ingress) == 0 { @@ -494,13 +435,13 @@ func (j *ServiceTestJig) WaitForNewIngressIPOrFail(namespace, name, existingIP s } // ChangeServiceNodePortOrFail changes node ports of the given service. -func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, initial int) *v1.Service { +func (j *TestJig) ChangeServiceNodePortOrFail(namespace, name string, initial int) *v1.Service { var err error var service *v1.Service - for i := 1; i < ServiceNodePortRange.Size; i++ { - offs1 := initial - ServiceNodePortRange.Base - offs2 := (offs1 + i) % ServiceNodePortRange.Size - newPort := ServiceNodePortRange.Base + offs2 + for i := 1; i < NodePortRange.Size; i++ { + offs1 := initial - NodePortRange.Base + offs2 := (offs1 + i) % NodePortRange.Size + newPort := NodePortRange.Base + offs2 service, err = j.UpdateService(namespace, name, func(s *v1.Service) { s.Spec.Ports[0].NodePort = int32(newPort) }) @@ -518,7 +459,7 @@ func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, ini } // WaitForLoadBalancerOrFail waits the given service to have a LoadBalancer, or fails after the given timeout -func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeout time.Duration) *v1.Service { +func (j *TestJig) WaitForLoadBalancerOrFail(namespace, name string, timeout time.Duration) *v1.Service { e2elog.Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name) service := j.waitForConditionOrFail(namespace, name, timeout, "have a load balancer", func(svc *v1.Service) bool { return len(svc.Status.LoadBalancer.Ingress) > 0 @@ -527,10 +468,10 @@ func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeo } // WaitForLoadBalancerDestroyOrFail waits the given service to destroy a LoadBalancer, or fails after the given timeout -func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string, ip string, port int, timeout time.Duration) *v1.Service { +func (j *TestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string, ip string, port int, timeout time.Duration) *v1.Service { // TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable defer func() { - if err := EnsureLoadBalancerResourcesDeleted(ip, strconv.Itoa(port)); err != nil { + if err := framework.EnsureLoadBalancerResourcesDeleted(ip, strconv.Itoa(port)); err != nil { e2elog.Logf("Failed to delete cloud resources for service: %s %d (%v)", ip, port, err) } }() @@ -542,7 +483,7 @@ func (j *ServiceTestJig) 
WaitForLoadBalancerDestroyOrFail(namespace, name string return service } -func (j *ServiceTestJig) waitForConditionOrFail(namespace, name string, timeout time.Duration, message string, conditionFn func(*v1.Service) bool) *v1.Service { +func (j *TestJig) waitForConditionOrFail(namespace, name string, timeout time.Duration, message string, conditionFn func(*v1.Service) bool) *v1.Service { var service *v1.Service pollFunc := func() (bool, error) { svc, err := j.Client.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) @@ -555,16 +496,16 @@ func (j *ServiceTestJig) waitForConditionOrFail(namespace, name string, timeout } return false, nil } - if err := wait.PollImmediate(Poll, timeout, pollFunc); err != nil { + if err := wait.PollImmediate(framework.Poll, timeout, pollFunc); err != nil { e2elog.Failf("Timed out waiting for service %q to %s", name, message) } return service } // newRCTemplate returns the default v1.ReplicationController object for -// this jig, but does not actually create the RC. The default RC has the same -// name as the jig and runs the "netexec" container. -func (j *ServiceTestJig) newRCTemplate(namespace string) *v1.ReplicationController { +// this j, but does not actually create the RC. The default RC has the same +// name as the j and runs the "netexec" container. +func (j *TestJig) newRCTemplate(namespace string) *v1.ReplicationController { var replicas int32 = 1 var grace int64 = 3 // so we don't race with kube-proxy when scaling up/down @@ -607,7 +548,7 @@ func (j *ServiceTestJig) newRCTemplate(namespace string) *v1.ReplicationControll } // AddRCAntiAffinity adds AntiAffinity to the given ReplicationController. -func (j *ServiceTestJig) AddRCAntiAffinity(rc *v1.ReplicationController) { +func (j *TestJig) AddRCAntiAffinity(rc *v1.ReplicationController) { var replicas int32 = 2 rc.Spec.Replicas = &replicas @@ -627,7 +568,7 @@ func (j *ServiceTestJig) AddRCAntiAffinity(rc *v1.ReplicationController) { } // CreatePDBOrFail returns a PodDisruptionBudget for the given ReplicationController, or fails if a PodDisruptionBudget isn't ready -func (j *ServiceTestJig) CreatePDBOrFail(namespace string, rc *v1.ReplicationController) *policyv1beta1.PodDisruptionBudget { +func (j *TestJig) CreatePDBOrFail(namespace string, rc *v1.ReplicationController) *policyv1beta1.PodDisruptionBudget { pdb := j.newPDBTemplate(namespace, rc) newPdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(pdb) if err != nil { @@ -641,9 +582,9 @@ func (j *ServiceTestJig) CreatePDBOrFail(namespace string, rc *v1.ReplicationCon } // newPDBTemplate returns the default policyv1beta1.PodDisruptionBudget object for -// this jig, but does not actually create the PDB. The default PDB specifies a +// this j, but does not actually create the PDB. The default PDB specifies a // MinAvailable of N-1 and matches the pods created by the RC. -func (j *ServiceTestJig) newPDBTemplate(namespace string, rc *v1.ReplicationController) *policyv1beta1.PodDisruptionBudget { +func (j *TestJig) newPDBTemplate(namespace string, rc *v1.ReplicationController) *policyv1beta1.PodDisruptionBudget { minAvailable := intstr.FromInt(int(*rc.Spec.Replicas) - 1) pdb := &policyv1beta1.PodDisruptionBudget{ @@ -664,7 +605,7 @@ func (j *ServiceTestJig) newPDBTemplate(namespace string, rc *v1.ReplicationCont // RunOrFail creates a ReplicationController and Pod(s) and waits for the // Pod(s) to be running. Callers can provide a function to tweak the RC object // before it is created. 
-func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *v1.ReplicationController)) *v1.ReplicationController { +func (j *TestJig) RunOrFail(namespace string, tweak func(rc *v1.ReplicationController)) *v1.ReplicationController { rc := j.newRCTemplate(namespace) if tweak != nil { tweak(rc) @@ -684,7 +625,7 @@ func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *v1.Replicati } // Scale scales pods to the given replicas -func (j *ServiceTestJig) Scale(namespace string, replicas int) { +func (j *TestJig) Scale(namespace string, replicas int) { rc := j.Name scale, err := j.Client.CoreV1().ReplicationControllers(namespace).GetScale(rc, metav1.GetOptions{}) if err != nil { @@ -705,7 +646,7 @@ func (j *ServiceTestJig) Scale(namespace string, replicas int) { } } -func (j *ServiceTestJig) waitForPdbReady(namespace string) error { +func (j *TestJig) waitForPdbReady(namespace string) error { timeout := 2 * time.Minute for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { pdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(namespace).Get(j.Name, metav1.GetOptions{}) @@ -720,7 +661,7 @@ func (j *ServiceTestJig) waitForPdbReady(namespace string) error { return fmt.Errorf("timeout waiting for PDB %q to be ready", j.Name) } -func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]string, error) { +func (j *TestJig) waitForPodsCreated(namespace string, replicas int) ([]string, error) { timeout := 2 * time.Minute // List the pods, making sure we observe all the replicas. label := labels.SelectorFromSet(labels.Set(j.Labels)) @@ -748,7 +689,7 @@ func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]s return nil, fmt.Errorf("timeout waiting for %d pods to be created", replicas) } -func (j *ServiceTestJig) waitForPodsReady(namespace string, pods []string) error { +func (j *TestJig) waitForPodsReady(namespace string, pods []string) error { timeout := 2 * time.Minute if !e2epod.CheckPodsRunningReady(j.Client, namespace, pods, timeout) { return fmt.Errorf("timeout waiting for %d pods to be ready", len(pods)) @@ -756,109 +697,54 @@ func (j *ServiceTestJig) waitForPodsReady(namespace string, pods []string) error return nil } -// newNetexecPodSpec returns the pod spec of netexec pod -func newNetexecPodSpec(podName string, httpPort, udpPort int32, hostNetwork bool) *v1.Pod { - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "netexec", - Image: netexecImageName, - Args: []string{ - "netexec", - fmt.Sprintf("--http-port=%d", httpPort), - fmt.Sprintf("--udp-port=%d", udpPort), - }, - Ports: []v1.ContainerPort{ - { - Name: "http", - ContainerPort: httpPort, - }, - { - Name: "udp", - ContainerPort: udpPort, - }, - }, - }, - }, - HostNetwork: hostNetwork, - }, - } - return pod -} - // LaunchNetexecPodOnNode launches a netexec pod on the given node. 
-func (j *ServiceTestJig) LaunchNetexecPodOnNode(f *Framework, nodeName, podName string, httpPort, udpPort int32, hostNetwork bool) { +func (j *TestJig) LaunchNetexecPodOnNode(f *framework.Framework, nodeName, podName string, httpPort, udpPort int32, hostNetwork bool) { e2elog.Logf("Creating netexec pod %q on node %v in namespace %q", podName, nodeName, f.Namespace.Name) pod := newNetexecPodSpec(podName, httpPort, udpPort, hostNetwork) pod.Spec.NodeName = nodeName pod.ObjectMeta.Labels = j.Labels podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) _, err := podClient.Create(pod) - ExpectNoError(err) - ExpectNoError(f.WaitForPodRunning(podName)) + framework.ExpectNoError(err) + framework.ExpectNoError(f.WaitForPodRunning(podName)) e2elog.Logf("Netexec pod %q in namespace %q running", pod.Name, f.Namespace.Name) } -// newEchoServerPodSpec returns the pod spec of echo server pod -func newEchoServerPodSpec(podName string) *v1.Pod { - port := 8080 - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "echoserver", - Image: imageutils.GetE2EImage(imageutils.EchoServer), - Ports: []v1.ContainerPort{{ContainerPort: int32(port)}}, - }, - }, - RestartPolicy: v1.RestartPolicyNever, - }, - } - return pod -} - // LaunchEchoserverPodOnNode launches a pod serving http on port 8080 to act // as the target for source IP preservation test. The client's source ip would // be echoed back by the web server. -func (j *ServiceTestJig) LaunchEchoserverPodOnNode(f *Framework, nodeName, podName string) { +func (j *TestJig) LaunchEchoserverPodOnNode(f *framework.Framework, nodeName, podName string) { e2elog.Logf("Creating echo server pod %q in namespace %q", podName, f.Namespace.Name) pod := newEchoServerPodSpec(podName) pod.Spec.NodeName = nodeName pod.ObjectMeta.Labels = j.Labels podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) _, err := podClient.Create(pod) - ExpectNoError(err) - ExpectNoError(f.WaitForPodRunning(podName)) + framework.ExpectNoError(err) + framework.ExpectNoError(f.WaitForPodRunning(podName)) e2elog.Logf("Echo server pod %q in namespace %q running", pod.Name, f.Namespace.Name) } // TestReachableHTTP tests that the given host serves HTTP on the given port. -func (j *ServiceTestJig) TestReachableHTTP(host string, port int, timeout time.Duration) { +func (j *TestJig) TestReachableHTTP(host string, port int, timeout time.Duration) { j.TestReachableHTTPWithRetriableErrorCodes(host, port, []int{}, timeout) } // TestReachableHTTPWithRetriableErrorCodes tests that the given host serves HTTP on the given port with the given retriableErrCodes. 
-func (j *ServiceTestJig) TestReachableHTTPWithRetriableErrorCodes(host string, port int, retriableErrCodes []int, timeout time.Duration) { +func (j *TestJig) TestReachableHTTPWithRetriableErrorCodes(host string, port int, retriableErrCodes []int, timeout time.Duration) { pollfn := func() (bool, error) { - result := PokeHTTP(host, port, "/echo?msg=hello", - &HTTPPokeParams{ + result := framework.PokeHTTP(host, port, "/echo?msg=hello", + &framework.HTTPPokeParams{ BodyContains: "hello", RetriableCodes: retriableErrCodes, }) - if result.Status == HTTPSuccess { + if result.Status == framework.HTTPSuccess { return true, nil } return false, nil // caller can retry } - if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { + if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil { if err == wait.ErrWaitTimeout { e2elog.Failf("Could not reach HTTP service through %v:%v after %v", host, port, timeout) } else { @@ -868,87 +754,87 @@ func (j *ServiceTestJig) TestReachableHTTPWithRetriableErrorCodes(host string, p } // TestNotReachableHTTP tests that a HTTP request doesn't connect to the given host and port. -func (j *ServiceTestJig) TestNotReachableHTTP(host string, port int, timeout time.Duration) { +func (j *TestJig) TestNotReachableHTTP(host string, port int, timeout time.Duration) { pollfn := func() (bool, error) { - result := PokeHTTP(host, port, "/", nil) + result := framework.PokeHTTP(host, port, "/", nil) if result.Code == 0 { return true, nil } return false, nil // caller can retry } - if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { + if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil { e2elog.Failf("HTTP service %v:%v reachable after %v: %v", host, port, timeout, err) } } // TestRejectedHTTP tests that the given host rejects a HTTP request on the given port. -func (j *ServiceTestJig) TestRejectedHTTP(host string, port int, timeout time.Duration) { +func (j *TestJig) TestRejectedHTTP(host string, port int, timeout time.Duration) { pollfn := func() (bool, error) { - result := PokeHTTP(host, port, "/", nil) - if result.Status == HTTPRefused { + result := framework.PokeHTTP(host, port, "/", nil) + if result.Status == framework.HTTPRefused { return true, nil } return false, nil // caller can retry } - if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { + if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil { e2elog.Failf("HTTP service %v:%v not rejected: %v", host, port, err) } } // TestReachableUDP tests that the given host serves UDP on the given port. -func (j *ServiceTestJig) TestReachableUDP(host string, port int, timeout time.Duration) { +func (j *TestJig) TestReachableUDP(host string, port int, timeout time.Duration) { pollfn := func() (bool, error) { - result := PokeUDP(host, port, "echo hello", &UDPPokeParams{ + result := framework.PokeUDP(host, port, "echo hello", &framework.UDPPokeParams{ Timeout: 3 * time.Second, Response: "hello", }) - if result.Status == UDPSuccess { + if result.Status == framework.UDPSuccess { return true, nil } return false, nil // caller can retry } - if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { + if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil { e2elog.Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err) } } // TestNotReachableUDP tests that the given host doesn't serve UDP on the given port. 
-func (j *ServiceTestJig) TestNotReachableUDP(host string, port int, timeout time.Duration) { +func (j *TestJig) TestNotReachableUDP(host string, port int, timeout time.Duration) { pollfn := func() (bool, error) { - result := PokeUDP(host, port, "echo hello", &UDPPokeParams{Timeout: 3 * time.Second}) - if result.Status != UDPSuccess && result.Status != UDPError { + result := framework.PokeUDP(host, port, "echo hello", &framework.UDPPokeParams{Timeout: 3 * time.Second}) + if result.Status != framework.UDPSuccess && result.Status != framework.UDPError { return true, nil } return false, nil // caller can retry } - if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { + if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil { e2elog.Failf("UDP service %v:%v reachable after %v: %v", host, port, timeout, err) } } // TestRejectedUDP tests that the given host rejects a UDP request on the given port. -func (j *ServiceTestJig) TestRejectedUDP(host string, port int, timeout time.Duration) { +func (j *TestJig) TestRejectedUDP(host string, port int, timeout time.Duration) { pollfn := func() (bool, error) { - result := PokeUDP(host, port, "echo hello", &UDPPokeParams{Timeout: 3 * time.Second}) - if result.Status == UDPRefused { + result := framework.PokeUDP(host, port, "echo hello", &framework.UDPPokeParams{Timeout: 3 * time.Second}) + if result.Status == framework.UDPRefused { return true, nil } return false, nil // caller can retry } - if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { + if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil { e2elog.Failf("UDP service %v:%v not rejected: %v", host, port, err) } } // GetHTTPContent returns the content of the given url by HTTP. -func (j *ServiceTestJig) GetHTTPContent(host string, port int, timeout time.Duration, url string) bytes.Buffer { +func (j *TestJig) GetHTTPContent(host string, port int, timeout time.Duration, url string) bytes.Buffer { var body bytes.Buffer - if pollErr := wait.PollImmediate(Poll, timeout, func() (bool, error) { - result := PokeHTTP(host, port, url, nil) - if result.Status == HTTPSuccess { + if pollErr := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { + result := framework.PokeHTTP(host, port, url, nil) + if result.Status == framework.HTTPSuccess { body.Write(result.Body) return true, nil } @@ -959,6 +845,79 @@ func (j *ServiceTestJig) GetHTTPContent(host string, port int, timeout time.Dura return body } +// TestHTTPHealthCheckNodePort tests a HTTP connection by the given request to the given host and port. +func (j *TestJig) TestHTTPHealthCheckNodePort(host string, port int, request string, timeout time.Duration, expectSucceed bool, threshold int) error { + count := 0 + condition := func() (bool, error) { + success, _ := testHTTPHealthCheckNodePort(host, port, request) + if success && expectSucceed || + !success && !expectSucceed { + count++ + } + if count >= threshold { + return true, nil + } + return false, nil + } + + if err := wait.PollImmediate(time.Second, timeout, condition); err != nil { + return fmt.Errorf("error waiting for healthCheckNodePort: expected at least %d succeed=%v on %v%v, got %d", threshold, expectSucceed, host, port, count) + } + return nil +} + +// CheckAffinity function tests whether the service affinity works as expected. +// If affinity is expected, the test will return true once affinityConfirmCount +// number of same response observed in a row. 
If affinity is not expected, the +// test will keep observe until different responses observed. The function will +// return false only in case of unexpected errors. +func (j *TestJig) CheckAffinity(execPod *v1.Pod, targetIP string, targetPort int, shouldHold bool) bool { + targetIPPort := net.JoinHostPort(targetIP, strconv.Itoa(targetPort)) + cmd := fmt.Sprintf(`wget -qO- http://%s/ -T 2`, targetIPPort) + timeout := TestTimeout + if execPod == nil { + timeout = LoadBalancerPollTimeout + } + var tracker affinityTracker + if pollErr := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { + if execPod != nil { + stdout, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd) + if err != nil { + e2elog.Logf("Failed to get response from %s. Retry until timeout", targetIPPort) + return false, nil + } + tracker.recordHost(stdout) + } else { + rawResponse := j.GetHTTPContent(targetIP, targetPort, timeout, "") + tracker.recordHost(rawResponse.String()) + } + trackerFulfilled, affinityHolds := tracker.checkHostTrace(AffinityConfirmCount) + if !shouldHold && !affinityHolds { + return true, nil + } + if shouldHold && trackerFulfilled && affinityHolds { + return true, nil + } + return false, nil + }); pollErr != nil { + trackerFulfilled, _ := tracker.checkHostTrace(AffinityConfirmCount) + if pollErr != wait.ErrWaitTimeout { + checkAffinityFailed(tracker, pollErr.Error()) + return false + } + if !trackerFulfilled { + checkAffinityFailed(tracker, fmt.Sprintf("Connection to %s timed out or not enough responses.", targetIPPort)) + } + if shouldHold { + checkAffinityFailed(tracker, "Affinity should hold but didn't.") + } else { + checkAffinityFailed(tracker, "Affinity shouldn't hold but did.") + } + return true + } + return true +} + func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, error) { ipPort := net.JoinHostPort(ip, strconv.Itoa(port)) url := fmt.Sprintf("http://%s%s", ipPort, request) @@ -988,487 +947,16 @@ func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, err return false, fmt.Errorf("unexpected HTTP response code %s from health check responder at %s", resp.Status, url) } -// TestHTTPHealthCheckNodePort tests a HTTP connection by the given request to the given host and port. 
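A minimal usage sketch, illustrative only and not part of the patch: how a test could drive the relocated jig through the new e2eservice alias after this move. The NewTestJig constructor and the caller package name are assumptions (the constructor is not shown in these hunks); the method signatures used (CreateOnlyLocalNodePortService, SanityCheckService, TestReachableHTTP, CheckAffinity) are the ones visible above.

package network_test // hypothetical caller; any consumer of the new package works

import (
	"time"

	clientset "k8s.io/client-go/kubernetes"
	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
)

// checkLocalNodePortService sketches a caller after the move: the jig type is
// now e2eservice.TestJig, and its methods keep the signatures shown above.
func checkLocalNodePortService(cs clientset.Interface, ns, name, nodeIP string) {
	// NewTestJig is assumed to mirror the old framework.NewServiceTestJig constructor.
	jig := e2eservice.NewTestJig(cs, name)

	// NodePort service with ExternalTrafficPolicy=Local plus a backing RC (createPod=true).
	svc := jig.CreateOnlyLocalNodePortService(ns, name, true)
	jig.SanityCheckService(svc, svc.Spec.Type)

	// Probe the node port from outside the cluster.
	nodePort := int(svc.Spec.Ports[0].NodePort)
	jig.TestReachableHTTP(nodeIP, nodePort, 2*time.Minute)

	// CheckAffinity polls until AffinityConfirmCount identical responses are seen
	// (shouldHold=true); passing a nil execPod makes it poll via GetHTTPContent.
	if !jig.CheckAffinity(nil, nodeIP, nodePort, true) {
		// Only unexpected errors return false, per the doc comment above.
	}
}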
-func (j *ServiceTestJig) TestHTTPHealthCheckNodePort(host string, port int, request string, timeout time.Duration, expectSucceed bool, threshold int) error { - count := 0 - condition := func() (bool, error) { - success, _ := testHTTPHealthCheckNodePort(host, port, request) - if success && expectSucceed || - !success && !expectSucceed { - count++ - } - if count >= threshold { - return true, nil - } - return false, nil +// Does an HTTP GET, but does not reuse TCP connections +// This masks problems where the iptables rule has changed, but we don't see it +func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Response, error) { + tr := utilnet.SetTransportDefaults(&http.Transport{ + DisableKeepAlives: true, + }) + client := &http.Client{ + Transport: tr, + Timeout: timeout, } - if err := wait.PollImmediate(time.Second, timeout, condition); err != nil { - return fmt.Errorf("error waiting for healthCheckNodePort: expected at least %d succeed=%v on %v%v, got %d", threshold, expectSucceed, host, port, count) - } - return nil -} - -// ServiceTestFixture is a simple helper class to avoid too much boilerplate in tests -type ServiceTestFixture struct { - ServiceName string - Namespace string - Client clientset.Interface - - TestID string - Labels map[string]string - - rcs map[string]bool - services map[string]bool - Name string - Image string -} - -// NewServerTest creates a new ServiceTestFixture for the tests. -func NewServerTest(client clientset.Interface, namespace string, serviceName string) *ServiceTestFixture { - t := &ServiceTestFixture{} - t.Client = client - t.Namespace = namespace - t.ServiceName = serviceName - t.TestID = t.ServiceName + "-" + string(uuid.NewUUID()) - t.Labels = map[string]string{ - "testid": t.TestID, - } - - t.rcs = make(map[string]bool) - t.services = make(map[string]bool) - - t.Name = "webserver" - t.Image = imageutils.GetE2EImage(imageutils.TestWebserver) - - return t -} - -// BuildServiceSpec builds default config for a service (which can then be changed) -func (t *ServiceTestFixture) BuildServiceSpec() *v1.Service { - service := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: t.ServiceName, - Namespace: t.Namespace, - }, - Spec: v1.ServiceSpec{ - Selector: t.Labels, - Ports: []v1.ServicePort{{ - Port: 80, - TargetPort: intstr.FromInt(80), - }}, - }, - } - return service -} - -// CreateRC creates a replication controller and records it for cleanup. -func (t *ServiceTestFixture) CreateRC(rc *v1.ReplicationController) (*v1.ReplicationController, error) { - rc, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Create(rc) - if err == nil { - t.rcs[rc.Name] = true - } - return rc, err -} - -// CreateService creates a service, and record it for cleanup -func (t *ServiceTestFixture) CreateService(service *v1.Service) (*v1.Service, error) { - result, err := t.Client.CoreV1().Services(t.Namespace).Create(service) - if err == nil { - t.services[service.Name] = true - } - return result, err -} - -// DeleteService deletes a service, and remove it from the cleanup list -func (t *ServiceTestFixture) DeleteService(serviceName string) error { - err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil) - if err == nil { - delete(t.services, serviceName) - } - return err -} - -// Cleanup cleans all ReplicationControllers and Services which this object holds. 
-func (t *ServiceTestFixture) Cleanup() []error { - var errs []error - for rcName := range t.rcs { - ginkgo.By("stopping RC " + rcName + " in namespace " + t.Namespace) - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - // First, resize the RC to 0. - old, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Get(rcName, metav1.GetOptions{}) - if err != nil { - if errors.IsNotFound(err) { - return nil - } - return err - } - x := int32(0) - old.Spec.Replicas = &x - if _, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Update(old); err != nil { - if errors.IsNotFound(err) { - return nil - } - return err - } - return nil - }) - if err != nil { - errs = append(errs, err) - } - // TODO(mikedanese): Wait. - // Then, delete the RC altogether. - if err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Delete(rcName, nil); err != nil { - if !errors.IsNotFound(err) { - errs = append(errs, err) - } - } - } - - for serviceName := range t.services { - ginkgo.By("deleting service " + serviceName + " in namespace " + t.Namespace) - err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil) - if err != nil { - if !errors.IsNotFound(err) { - errs = append(errs, err) - } - } - } - - return errs -} - -// GetIngressPoint returns a host on which ingress serves. -func GetIngressPoint(ing *v1.LoadBalancerIngress) string { - host := ing.IP - if host == "" { - host = ing.Hostname - } - return host -} - -// UpdateService fetches a service, calls the update function on it, -// and then attempts to send the updated service. It retries up to 2 -// times in the face of timeouts and conflicts. -func UpdateService(c clientset.Interface, namespace, serviceName string, update func(*v1.Service)) (*v1.Service, error) { - var service *v1.Service - var err error - for i := 0; i < 3; i++ { - service, err = c.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{}) - if err != nil { - return service, err - } - - update(service) - - service, err = c.CoreV1().Services(namespace).Update(service) - - if !errors.IsConflict(err) && !errors.IsServerTimeout(err) { - return service, err - } - } - return service, err -} - -// StartServeHostnameService creates a replication controller that serves its -// hostname and a service on top of it. 
-func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string, replicas int) ([]string, string, error) { - podNames := make([]string, replicas) - name := svc.ObjectMeta.Name - ginkgo.By("creating service " + name + " in namespace " + ns) - _, err := c.CoreV1().Services(ns).Create(svc) - if err != nil { - return podNames, "", err - } - - var createdPods []*v1.Pod - maxContainerFailures := 0 - config := testutils.RCConfig{ - Client: c, - Image: ServeHostnameImage, - Command: []string{"/agnhost", "serve-hostname"}, - Name: name, - Namespace: ns, - PollInterval: 3 * time.Second, - Timeout: PodReadyBeforeTimeout, - Replicas: replicas, - CreatedPods: &createdPods, - MaxContainerFailures: &maxContainerFailures, - } - err = RunRC(config) - if err != nil { - return podNames, "", err - } - - if len(createdPods) != replicas { - return podNames, "", fmt.Errorf("incorrect number of running pods: %v", len(createdPods)) - } - - for i := range createdPods { - podNames[i] = createdPods[i].ObjectMeta.Name - } - sort.StringSlice(podNames).Sort() - - service, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) - if err != nil { - return podNames, "", err - } - if service.Spec.ClusterIP == "" { - return podNames, "", fmt.Errorf("service IP is blank for %v", name) - } - serviceIP := service.Spec.ClusterIP - return podNames, serviceIP, nil -} - -// StopServeHostnameService stops the given service. -func StopServeHostnameService(clientset clientset.Interface, ns, name string) error { - if err := DeleteRCAndWaitForGC(clientset, ns, name); err != nil { - return err - } - if err := clientset.CoreV1().Services(ns).Delete(name, nil); err != nil { - return err - } - return nil -} - -// VerifyServeHostnameServiceUp wgets the given serviceIP:servicePort from the -// given host and from within a pod. The host is expected to be an SSH-able node -// in the cluster. Each pod in the service is expected to echo its name. These -// names are compared with the given expectedPods list after a sort | uniq. -func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expectedPods []string, serviceIP string, servicePort int) error { - execPodName := e2epod.CreateExecPodOrFail(c, ns, "execpod-", nil) - defer func() { - e2epod.DeletePodOrFail(c, ns, execPodName) - }() - - // Loop a bunch of times - the proxy is randomized, so we want a good - // chance of hitting each backend at least once. - buildCommand := func(wget string) string { - serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort)) - return fmt.Sprintf("for i in $(seq 1 %d); do %s http://%s 2>&1 || true; echo; done", - 50*len(expectedPods), wget, serviceIPPort) - } - commands := []func() string{ - // verify service from node - func() string { - cmd := "set -e; " + buildCommand("wget -q --timeout=0.2 --tries=1 -O -") - e2elog.Logf("Executing cmd %q on host %v", cmd, host) - result, err := e2essh.SSH(cmd, host, TestContext.Provider) - if err != nil || result.Code != 0 { - e2essh.LogResult(result) - e2elog.Logf("error while SSH-ing to node: %v", err) - } - return result.Stdout - }, - // verify service from pod - func() string { - cmd := buildCommand("wget -q -T 1 -O -") - e2elog.Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPodName) - // TODO: Use exec-over-http via the netexec pod instead of kubectl exec. 
- output, err := RunHostCmd(ns, execPodName, cmd) - if err != nil { - e2elog.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPodName, err, output) - } - return output - }, - } - - expectedEndpoints := sets.NewString(expectedPods...) - ginkgo.By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods))) - for _, cmdFunc := range commands { - passed := false - gotEndpoints := sets.NewString() - - // Retry cmdFunc for a while - for start := time.Now(); time.Since(start) < KubeProxyLagTimeout; time.Sleep(5 * time.Second) { - for _, endpoint := range strings.Split(cmdFunc(), "\n") { - trimmedEp := strings.TrimSpace(endpoint) - if trimmedEp != "" { - gotEndpoints.Insert(trimmedEp) - } - } - // TODO: simply checking that the retrieved endpoints is a superset - // of the expected allows us to ignore intermitten network flakes that - // result in output like "wget timed out", but these should be rare - // and we need a better way to track how often it occurs. - if gotEndpoints.IsSuperset(expectedEndpoints) { - if !gotEndpoints.Equal(expectedEndpoints) { - e2elog.Logf("Ignoring unexpected output wgetting endpoints of service %s: %v", serviceIP, gotEndpoints.Difference(expectedEndpoints)) - } - passed = true - break - } - e2elog.Logf("Unable to reach the following endpoints of service %s: %v", serviceIP, expectedEndpoints.Difference(gotEndpoints)) - } - if !passed { - // Sort the lists so they're easier to visually diff. - exp := expectedEndpoints.List() - got := gotEndpoints.List() - sort.StringSlice(exp).Sort() - sort.StringSlice(got).Sort() - return fmt.Errorf("service verification failed for: %s\nexpected %v\nreceived %v", serviceIP, exp, got) - } - } - return nil -} - -// VerifyServeHostnameServiceDown verifies that the given service isn't served. -func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceIP string, servicePort int) error { - ipPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort)) - // The current versions of curl included in CentOS and RHEL distros - // misinterpret square brackets around IPv6 as globbing, so use the -g - // argument to disable globbing to handle the IPv6 case. - command := fmt.Sprintf( - "curl -g -s --connect-timeout 2 http://%s && exit 99", ipPort) - - for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { - result, err := e2essh.SSH(command, host, TestContext.Provider) - if err != nil { - e2essh.LogResult(result) - e2elog.Logf("error while SSH-ing to node: %v", err) - } - if result.Code != 99 { - return nil - } - e2elog.Logf("service still alive - still waiting") - } - return fmt.Errorf("waiting for service to be down timed out") -} - -// CleanupServiceResources cleans up service Type=LoadBalancer resources. -func CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) { - TestContext.CloudConfig.Provider.CleanupServiceResources(c, loadBalancerName, region, zone) -} - -// DescribeSvc logs the output of kubectl describe svc for the given namespace -func DescribeSvc(ns string) { - e2elog.Logf("\nOutput of kubectl describe svc:\n") - desc, _ := RunKubectl( - "describe", "svc", fmt.Sprintf("--namespace=%v", ns)) - e2elog.Logf(desc) -} - -// CreateServiceSpec returns a Service object for testing. 
-func CreateServiceSpec(serviceName, externalName string, isHeadless bool, selector map[string]string) *v1.Service { - headlessService := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - }, - Spec: v1.ServiceSpec{ - Selector: selector, - }, - } - if externalName != "" { - headlessService.Spec.Type = v1.ServiceTypeExternalName - headlessService.Spec.ExternalName = externalName - } else { - headlessService.Spec.Ports = []v1.ServicePort{ - {Port: 80, Name: "http", Protocol: v1.ProtocolTCP}, - } - } - if isHeadless { - headlessService.Spec.ClusterIP = "None" - } - return headlessService -} - -// EnableAndDisableInternalLB returns two functions for enabling and disabling the internal load balancer -// setting for the supported cloud providers (currently GCE/GKE and Azure) and empty functions for others. -func EnableAndDisableInternalLB() (enable func(svc *v1.Service), disable func(svc *v1.Service)) { - return TestContext.CloudConfig.Provider.EnableAndDisableInternalLB() -} - -// GetServiceLoadBalancerCreationTimeout returns a timeout value for creating a load balancer of a service. -func GetServiceLoadBalancerCreationTimeout(cs clientset.Interface) time.Duration { - if nodes := GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > LargeClusterMinNodesNumber { - return LoadBalancerCreateTimeoutLarge - } - return LoadBalancerCreateTimeoutDefault -} - -// affinityTracker tracks the destination of a request for the affinity tests. -type affinityTracker struct { - hostTrace []string -} - -// Record the response going to a given host. -func (at *affinityTracker) recordHost(host string) { - at.hostTrace = append(at.hostTrace, host) - e2elog.Logf("Received response from host: %s", host) -} - -// Check that we got a constant count requests going to the same host. -func (at *affinityTracker) checkHostTrace(count int) (fulfilled, affinityHolds bool) { - fulfilled = (len(at.hostTrace) >= count) - if len(at.hostTrace) == 0 { - return fulfilled, true - } - last := at.hostTrace[0:] - if len(at.hostTrace)-count >= 0 { - last = at.hostTrace[len(at.hostTrace)-count:] - } - host := at.hostTrace[len(at.hostTrace)-1] - for _, h := range last { - if h != host { - return fulfilled, false - } - } - return fulfilled, true -} - -func checkAffinityFailed(tracker affinityTracker, err string) { - e2elog.Logf("%v", tracker.hostTrace) - e2elog.Failf(err) -} - -// CheckAffinity function tests whether the service affinity works as expected. -// If affinity is expected, the test will return true once affinityConfirmCount -// number of same response observed in a row. If affinity is not expected, the -// test will keep observe until different responses observed. The function will -// return false only in case of unexpected errors. -func CheckAffinity(jig *ServiceTestJig, execPod *v1.Pod, targetIP string, targetPort int, shouldHold bool) bool { - targetIPPort := net.JoinHostPort(targetIP, strconv.Itoa(targetPort)) - cmd := fmt.Sprintf(`wget -qO- http://%s/ -T 2`, targetIPPort) - timeout := ServiceTestTimeout - if execPod == nil { - timeout = LoadBalancerPollTimeout - } - var tracker affinityTracker - if pollErr := wait.PollImmediate(Poll, timeout, func() (bool, error) { - if execPod != nil { - stdout, err := RunHostCmd(execPod.Namespace, execPod.Name, cmd) - if err != nil { - e2elog.Logf("Failed to get response from %s. 
Retry until timeout", targetIPPort) - return false, nil - } - tracker.recordHost(stdout) - } else { - rawResponse := jig.GetHTTPContent(targetIP, targetPort, timeout, "") - tracker.recordHost(rawResponse.String()) - } - trackerFulfilled, affinityHolds := tracker.checkHostTrace(AffinityConfirmCount) - if !shouldHold && !affinityHolds { - return true, nil - } - if shouldHold && trackerFulfilled && affinityHolds { - return true, nil - } - return false, nil - }); pollErr != nil { - trackerFulfilled, _ := tracker.checkHostTrace(AffinityConfirmCount) - if pollErr != wait.ErrWaitTimeout { - checkAffinityFailed(tracker, pollErr.Error()) - return false - } - if !trackerFulfilled { - checkAffinityFailed(tracker, fmt.Sprintf("Connection to %s timed out or not enough responses.", targetIPPort)) - } - if shouldHold { - checkAffinityFailed(tracker, "Affinity should hold but didn't.") - } else { - checkAffinityFailed(tracker, "Affinity shouldn't hold but did.") - } - return true - } - return true + return client.Get(url) } diff --git a/test/e2e/framework/service/resource.go b/test/e2e/framework/service/resource.go new file mode 100644 index 00000000000..685e11b1cac --- /dev/null +++ b/test/e2e/framework/service/resource.go @@ -0,0 +1,174 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "fmt" + "time" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" + "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" + imageutils "k8s.io/kubernetes/test/utils/image" +) + +// GetServicesProxyRequest returns a request for a service proxy. +func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) { + return request.Resource("services").SubResource("proxy"), nil +} + +// CreateServiceSpec returns a Service object for testing. +func CreateServiceSpec(serviceName, externalName string, isHeadless bool, selector map[string]string) *v1.Service { + headlessService := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + }, + Spec: v1.ServiceSpec{ + Selector: selector, + }, + } + if externalName != "" { + headlessService.Spec.Type = v1.ServiceTypeExternalName + headlessService.Spec.ExternalName = externalName + } else { + headlessService.Spec.Ports = []v1.ServicePort{ + {Port: 80, Name: "http", Protocol: v1.ProtocolTCP}, + } + } + if isHeadless { + headlessService.Spec.ClusterIP = "None" + } + return headlessService +} + +// UpdateService fetches a service, calls the update function on it, +// and then attempts to send the updated service. It retries up to 2 +// times in the face of timeouts and conflicts. 
+func UpdateService(c clientset.Interface, namespace, serviceName string, update func(*v1.Service)) (*v1.Service, error) { + var service *v1.Service + var err error + for i := 0; i < 3; i++ { + service, err = c.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{}) + if err != nil { + return service, err + } + + update(service) + + service, err = c.CoreV1().Services(namespace).Update(service) + + if !errors.IsConflict(err) && !errors.IsServerTimeout(err) { + return service, err + } + } + return service, err +} + +// CleanupServiceResources cleans up service Type=LoadBalancer resources. +func CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) { + framework.TestContext.CloudConfig.Provider.CleanupServiceResources(c, loadBalancerName, region, zone) +} + +// GetIngressPoint returns a host on which ingress serves. +func GetIngressPoint(ing *v1.LoadBalancerIngress) string { + host := ing.IP + if host == "" { + host = ing.Hostname + } + return host +} + +// EnableAndDisableInternalLB returns two functions for enabling and disabling the internal load balancer +// setting for the supported cloud providers (currently GCE/GKE and Azure) and empty functions for others. +func EnableAndDisableInternalLB() (enable func(svc *v1.Service), disable func(svc *v1.Service)) { + return framework.TestContext.CloudConfig.Provider.EnableAndDisableInternalLB() +} + +// DescribeSvc logs the output of kubectl describe svc for the given namespace +func DescribeSvc(ns string) { + e2elog.Logf("\nOutput of kubectl describe svc:\n") + desc, _ := framework.RunKubectl( + "describe", "svc", fmt.Sprintf("--namespace=%v", ns)) + e2elog.Logf(desc) +} + +// newNetexecPodSpec returns the pod spec of netexec pod +func newNetexecPodSpec(podName string, httpPort, udpPort int32, hostNetwork bool) *v1.Pod { + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "netexec", + Image: framework.NetexecImageName, + Args: []string{ + "netexec", + fmt.Sprintf("--http-port=%d", httpPort), + fmt.Sprintf("--udp-port=%d", udpPort), + }, + Ports: []v1.ContainerPort{ + { + Name: "http", + ContainerPort: httpPort, + }, + { + Name: "udp", + ContainerPort: udpPort, + }, + }, + }, + }, + HostNetwork: hostNetwork, + }, + } + return pod +} + +// newEchoServerPodSpec returns the pod spec of echo server pod +func newEchoServerPodSpec(podName string) *v1.Pod { + port := 8080 + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "echoserver", + Image: imageutils.GetE2EImage(imageutils.EchoServer), + Ports: []v1.ContainerPort{{ContainerPort: int32(port)}}, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + }, + } + return pod +} + +// GetServiceLoadBalancerCreationTimeout returns a timeout value for creating a load balancer of a service. +func GetServiceLoadBalancerCreationTimeout(cs clientset.Interface) time.Duration { + if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > LargeClusterMinNodesNumber { + return LoadBalancerCreateTimeoutLarge + } + return LoadBalancerCreateTimeoutDefault +} diff --git a/test/e2e/framework/service/wait.go b/test/e2e/framework/service/wait.go new file mode 100644 index 00000000000..760e0f4b09c --- /dev/null +++ b/test/e2e/framework/service/wait.go @@ -0,0 +1,65 @@ +/* +Copyright 2019 The Kubernetes Authors. 
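A second illustrative sketch, also not part of the patch: calling the package-level helpers defined in the new resource.go above through the e2eservice alias. Only signatures visible in this patch (UpdateService, GetServiceLoadBalancerCreationTimeout) are used; the caller package, function name, and port value are hypothetical.

package network_test // hypothetical caller package

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
)

// bumpServicePort shows the retrying update helper and the cluster-size-aware
// load-balancer timeout helper from resource.go called via the new alias.
func bumpServicePort(cs clientset.Interface, ns, name string) error {
	// UpdateService retries internally (up to 3 attempts) on conflicts and server timeouts.
	_, err := e2eservice.UpdateService(cs, ns, name, func(s *v1.Service) {
		s.Spec.Ports[0].Port = 8080
	})
	if err != nil {
		return err
	}

	// Timeout scales with cluster size (LoadBalancerCreateTimeoutLarge above
	// LargeClusterMinNodesNumber nodes, otherwise the default).
	timeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
	_ = timeout // e.g. passed on to jig.WaitForLoadBalancerOrFail(ns, name, timeout)
	return nil
}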
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "context" + "fmt" + + "github.com/onsi/ginkgo" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" +) + +// WaitForServiceResponding waits for the service to be responding. +func WaitForServiceResponding(c clientset.Interface, ns, name string) error { + ginkgo.By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name)) + + return wait.PollImmediate(framework.Poll, RespondingTimeout, func() (done bool, err error) { + proxyRequest, errProxy := GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get()) + if errProxy != nil { + e2elog.Logf("Failed to get services proxy request: %v:", errProxy) + return false, nil + } + + ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout) + defer cancel() + + body, err := proxyRequest.Namespace(ns). + Context(ctx). + Name(name). + Do(). + Raw() + if err != nil { + if ctx.Err() != nil { + e2elog.Failf("Failed to GET from service %s: %v", name, err) + return true, err + } + e2elog.Logf("Failed to GET from service %s: %v:", name, err) + return false, nil + } + got := string(body) + if len(got) == 0 { + e2elog.Logf("Service %s: expected non-empty response", name) + return false, err // stop polling + } + e2elog.Logf("Service %s: found nonempty answer: %s", name, got) + return true, nil + }) +} diff --git a/test/e2e/framework/statefulset/fixtures.go b/test/e2e/framework/statefulset/fixtures.go index cec5cb300df..c0ea294e6c2 100644 --- a/test/e2e/framework/statefulset/fixtures.go +++ b/test/e2e/framework/statefulset/fixtures.go @@ -24,7 +24,7 @@ import ( "strconv" appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -38,19 +38,19 @@ import ( // NewStatefulSet creates a new Webserver StatefulSet for testing. The StatefulSet is named name, is in namespace ns, // statefulPodsMounts are the mounts that will be backed by PVs. podsMounts are the mounts that are mounted directly // to the Pod. labels are the labels that will be usd for the StatefulSet selector. -func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulPodMounts []corev1.VolumeMount, podMounts []corev1.VolumeMount, labels map[string]string) *appsv1.StatefulSet { +func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulPodMounts []v1.VolumeMount, podMounts []v1.VolumeMount, labels map[string]string) *appsv1.StatefulSet { mounts := append(statefulPodMounts, podMounts...) 
- claims := []corev1.PersistentVolumeClaim{} + claims := []v1.PersistentVolumeClaim{} for _, m := range statefulPodMounts { claims = append(claims, NewStatefulSetPVC(m.Name)) } - vols := []corev1.Volume{} + vols := []v1.Volume{} for _, m := range podMounts { - vols = append(vols, corev1.Volume{ + vols = append(vols, v1.Volume{ Name: m.Name, - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{ Path: fmt.Sprintf("/tmp/%v", m.Name), }, }, @@ -71,13 +71,13 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP MatchLabels: labels, }, Replicas: func(i int32) *int32 { return &i }(replicas), - Template: corev1.PodTemplateSpec{ + Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: labels, Annotations: map[string]string{}, }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Name: "webserver", Image: imageutils.GetE2EImage(imageutils.Httpd), @@ -95,18 +95,18 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP } // NewStatefulSetPVC returns a PersistentVolumeClaim named name, for testing StatefulSets. -func NewStatefulSetPVC(name string) corev1.PersistentVolumeClaim { - return corev1.PersistentVolumeClaim{ +func NewStatefulSetPVC(name string) v1.PersistentVolumeClaim { + return v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, - Spec: corev1.PersistentVolumeClaimSpec{ - AccessModes: []corev1.PersistentVolumeAccessMode{ - corev1.ReadWriteOnce, + Spec: v1.PersistentVolumeClaimSpec{ + AccessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadWriteOnce, }, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI), + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI), }, }, }, @@ -114,17 +114,17 @@ func NewStatefulSetPVC(name string) corev1.PersistentVolumeClaim { } // CreateStatefulSetService creates a Headless Service with Name name and Selector set to match labels. -func CreateStatefulSetService(name string, labels map[string]string) *corev1.Service { - headlessService := &corev1.Service{ +func CreateStatefulSetService(name string, labels map[string]string) *v1.Service { + headlessService := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, - Spec: corev1.ServiceSpec{ + Spec: v1.ServiceSpec{ Selector: labels, }, } - headlessService.Spec.Ports = []corev1.ServicePort{ - {Port: 80, Name: "http", Protocol: corev1.ProtocolTCP}, + headlessService.Spec.Ports = []v1.ServicePort{ + {Port: 80, Name: "http", Protocol: v1.ProtocolTCP}, } headlessService.Spec.ClusterIP = "None" return headlessService @@ -149,7 +149,7 @@ func BreakHTTPProbe(c clientset.Interface, ss *appsv1.StatefulSet) error { } // BreakPodHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in one pod. -func BreakPodHTTPProbe(ss *appsv1.StatefulSet, pod *corev1.Pod) error { +func BreakPodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error { path := httpProbe.HTTPGet.Path if path == "" { return fmt.Errorf("path expected to be not empty: %v", path) @@ -173,7 +173,7 @@ func RestoreHTTPProbe(c clientset.Interface, ss *appsv1.StatefulSet) error { } // RestorePodHTTPProbe restores the readiness probe for Nginx StatefulSet containers in pod. 
-func RestorePodHTTPProbe(ss *appsv1.StatefulSet, pod *corev1.Pod) error { +func RestorePodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error { path := httpProbe.HTTPGet.Path if path == "" { return fmt.Errorf("path expected to be not empty: %v", path) @@ -185,14 +185,14 @@ func RestorePodHTTPProbe(ss *appsv1.StatefulSet, pod *corev1.Pod) error { return err } -func hasPauseProbe(pod *corev1.Pod) bool { +func hasPauseProbe(pod *v1.Pod) bool { probe := pod.Spec.Containers[0].ReadinessProbe return probe != nil && reflect.DeepEqual(probe.Exec.Command, pauseProbe.Exec.Command) } -var httpProbe = &corev1.Probe{ - Handler: corev1.Handler{ - HTTPGet: &corev1.HTTPGetAction{ +var httpProbe = &v1.Probe{ + Handler: v1.Handler{ + HTTPGet: &v1.HTTPGetAction{ Path: "/index.html", Port: intstr.IntOrString{IntVal: 80}, }, @@ -202,16 +202,16 @@ var httpProbe = &corev1.Probe{ FailureThreshold: 1, } -var pauseProbe = &corev1.Probe{ - Handler: corev1.Handler{ - Exec: &corev1.ExecAction{Command: []string{"test", "-f", "/data/statefulset-continue"}}, +var pauseProbe = &v1.Probe{ + Handler: v1.Handler{ + Exec: &v1.ExecAction{Command: []string{"test", "-f", "/data/statefulset-continue"}}, }, PeriodSeconds: 1, SuccessThreshold: 1, FailureThreshold: 1, } -type statefulPodsByOrdinal []corev1.Pod +type statefulPodsByOrdinal []v1.Pod func (sp statefulPodsByOrdinal) Len() int { return len(sp) @@ -242,7 +242,7 @@ func ResumeNextPod(c clientset.Interface, ss *appsv1.StatefulSet) { podList := GetPodList(c, ss) resumedPod := "" for _, pod := range podList.Items { - if pod.Status.Phase != corev1.PodRunning { + if pod.Status.Phase != v1.PodRunning { e2elog.Failf("Found pod in phase %q, cannot resume", pod.Status.Phase) } if podutil.IsPodReady(&pod) || !hasPauseProbe(&pod) { @@ -259,13 +259,13 @@ func ResumeNextPod(c clientset.Interface, ss *appsv1.StatefulSet) { } // SortStatefulPods sorts pods by their ordinals -func SortStatefulPods(pods *corev1.PodList) { +func SortStatefulPods(pods *v1.PodList) { sort.Sort(statefulPodsByOrdinal(pods.Items)) } var statefulPodRegex = regexp.MustCompile("(.*)-([0-9]+)$") -func getStatefulPodOrdinal(pod *corev1.Pod) int { +func getStatefulPodOrdinal(pod *v1.Pod) int { ordinal := -1 subMatches := statefulPodRegex.FindStringSubmatch(pod.Name) if len(subMatches) < 3 { diff --git a/test/e2e/framework/statefulset/rest.go b/test/e2e/framework/statefulset/rest.go index e4dadc011a3..c76ab380145 100644 --- a/test/e2e/framework/statefulset/rest.go +++ b/test/e2e/framework/statefulset/rest.go @@ -23,7 +23,7 @@ import ( "time" appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -62,7 +62,7 @@ func CreateStatefulSet(c clientset.Interface, manifestPath, ns string) *appsv1.S } // GetPodList gets the current Pods in ss. -func GetPodList(c clientset.Interface, ss *appsv1.StatefulSet) *corev1.PodList { +func GetPodList(c clientset.Interface, ss *appsv1.StatefulSet) *v1.PodList { selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector) e2efwk.ExpectNoError(err) podList, err := c.CoreV1().Pods(ss.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) @@ -182,7 +182,7 @@ func Scale(c clientset.Interface, ss *appsv1.StatefulSet, count int32) (*appsv1. 
e2elog.Logf("Scaling statefulset %s to %d", name, count) ss = update(c, ns, name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = count }) - var statefulPodList *corev1.PodList + var statefulPodList *v1.PodList pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { statefulPodList = GetPodList(c, ss) if int32(len(statefulPodList.Items)) == count { @@ -194,7 +194,7 @@ func Scale(c clientset.Interface, ss *appsv1.StatefulSet, count int32) (*appsv1. unhealthy := []string{} for _, statefulPod := range statefulPodList.Items { delTs, phase, readiness := statefulPod.DeletionTimestamp, statefulPod.Status.Phase, podutil.IsPodReady(&statefulPod) - if delTs != nil || phase != corev1.PodRunning || !readiness { + if delTs != nil || phase != v1.PodRunning || !readiness { unhealthy = append(unhealthy, fmt.Sprintf("%v: deletion %v, phase %v, readiness %v", statefulPod.Name, delTs, phase, readiness)) } } @@ -313,7 +313,7 @@ func ExecInStatefulPods(c clientset.Interface, ss *appsv1.StatefulSet, cmd strin type updateStatefulSetFunc func(*appsv1.StatefulSet) // VerifyStatefulPodFunc is a func that examines a StatefulSetPod. -type VerifyStatefulPodFunc func(*corev1.Pod) +type VerifyStatefulPodFunc func(*v1.Pod) // VerifyPodAtIndex applies a visitor pattern to the Pod at index in ss. verify is applied to the Pod to "visit" it. func VerifyPodAtIndex(c clientset.Interface, index int, ss *appsv1.StatefulSet, verify VerifyStatefulPodFunc) { diff --git a/test/e2e/framework/statefulset/wait.go b/test/e2e/framework/statefulset/wait.go index 3df73971a37..1efc3f83636 100644 --- a/test/e2e/framework/statefulset/wait.go +++ b/test/e2e/framework/statefulset/wait.go @@ -20,7 +20,7 @@ import ( "fmt" appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" @@ -32,8 +32,8 @@ import ( // a RollingUpdateStatefulSetStrategyType with a non-nil RollingUpdate and Partition. All Pods with ordinals less // than or equal to the Partition are expected to be at set's current revision. All other Pods are expected to be // at its update revision. 
-func WaitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *corev1.PodList) { - var pods *corev1.PodList +func WaitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) { + var pods *v1.PodList if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType { e2elog.Failf("StatefulSet %s/%s attempt to wait for partitioned update with updateStrategy %s", set.Namespace, @@ -45,7 +45,7 @@ func WaitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful set.Namespace, set.Name) } - WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *corev1.PodList) (bool, error) { + WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { set = set2 pods = pods2 partition := int(*set.Spec.UpdateStrategy.RollingUpdate.Partition) @@ -102,8 +102,8 @@ func WaitForRunning(c clientset.Interface, numPodsRunning, numPodsReady int32, s shouldBeReady := getStatefulPodOrdinal(&p) < int(numPodsReady) isReady := podutil.IsPodReady(&p) desiredReadiness := shouldBeReady == isReady - e2elog.Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, corev1.PodRunning, shouldBeReady, p.Status.Phase, isReady) - if p.Status.Phase != corev1.PodRunning || !desiredReadiness { + e2elog.Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady) + if p.Status.Phase != v1.PodRunning || !desiredReadiness { return false, nil } } @@ -115,7 +115,7 @@ func WaitForRunning(c clientset.Interface, numPodsRunning, numPodsReady int32, s } // WaitForState periodically polls for the ss and its pods until the until function returns either true or an error -func WaitForState(c clientset.Interface, ss *appsv1.StatefulSet, until func(*appsv1.StatefulSet, *corev1.PodList) (bool, error)) { +func WaitForState(c clientset.Interface, ss *appsv1.StatefulSet, until func(*appsv1.StatefulSet, *v1.PodList) (bool, error)) { pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { ssGet, err := c.AppsV1().StatefulSets(ss.Namespace).Get(ss.Name, metav1.GetOptions{}) @@ -133,7 +133,7 @@ func WaitForState(c clientset.Interface, ss *appsv1.StatefulSet, until func(*app // WaitForStatus waits for the StatefulSetStatus's ObservedGeneration to be greater than or equal to set's Generation. // The returned StatefulSet contains such a StatefulSetStatus func WaitForStatus(c clientset.Interface, set *appsv1.StatefulSet) *appsv1.StatefulSet { - WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods *corev1.PodList) (bool, error) { + WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) { if set2.Status.ObservedGeneration >= set.Generation { set = set2 return true, nil @@ -149,9 +149,9 @@ func WaitForRunningAndReady(c clientset.Interface, numStatefulPods int32, ss *ap } // WaitForPodReady waits for the Pod named podName in set to exist and have a Ready condition. 
-func WaitForPodReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *corev1.PodList) { - var pods *corev1.PodList - WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *corev1.PodList) (bool, error) { +func WaitForPodReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) { + var pods *v1.PodList + WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { set = set2 pods = pods2 for i := range pods.Items { @@ -165,9 +165,9 @@ func WaitForPodReady(c clientset.Interface, set *appsv1.StatefulSet, podName str } // WaitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition. -func WaitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *corev1.PodList) { - var pods *corev1.PodList - WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *corev1.PodList) (bool, error) { +func WaitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) { + var pods *v1.PodList + WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { set = set2 pods = pods2 for i := range pods.Items { @@ -182,15 +182,15 @@ func WaitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName // WaitForRollingUpdate waits for all Pods in set to exist and have the correct revision and for the RollingUpdate to // complete. set must have a RollingUpdateStatefulSetStrategyType. -func WaitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *corev1.PodList) { - var pods *corev1.PodList +func WaitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) { + var pods *v1.PodList if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType { e2elog.Failf("StatefulSet %s/%s attempt to wait for rolling update with updateStrategy %s", set.Namespace, set.Name, set.Spec.UpdateStrategy.Type) } - WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *corev1.PodList) (bool, error) { + WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) { set = set2 pods = pods2 if len(pods.Items) < int(*set.Spec.Replicas) { diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index d73f7052327..680916d7b73 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -149,9 +149,6 @@ const ( podScheduledBeforeTimeout = PodListTimeout + (20 * time.Second) podRespondingTimeout = 15 * time.Minute - // ServiceRespondingTimeout is how long to wait for a service to be responding. - ServiceRespondingTimeout = 2 * time.Minute - // ClaimProvisionTimeout is how long claims have to become dynamically provisioned. ClaimProvisionTimeout = 5 * time.Minute @@ -221,11 +218,6 @@ var ( ServeHostnameImage = imageutils.GetE2EImage(imageutils.Agnhost) ) -// GetServicesProxyRequest returns a request for a service proxy. -func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) { - return request.Resource("services").SubResource("proxy"), nil -} - // RunID is a unique identifier of the e2e run. // Beware that this ID is not the same for all tests in the e2e run, because each Ginkgo node creates it separately. 
var RunID = uuid.NewUUID() @@ -1254,43 +1246,6 @@ func KubectlVersion() (*utilversion.Version, error) { return utilversion.ParseSemantic(matches[1]) } -// ServiceResponding waits for the service to be responding. -func ServiceResponding(c clientset.Interface, ns, name string) error { - ginkgo.By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name)) - - return wait.PollImmediate(Poll, ServiceRespondingTimeout, func() (done bool, err error) { - proxyRequest, errProxy := GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get()) - if errProxy != nil { - e2elog.Logf("Failed to get services proxy request: %v:", errProxy) - return false, nil - } - - ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout) - defer cancel() - - body, err := proxyRequest.Namespace(ns). - Context(ctx). - Name(name). - Do(). - Raw() - if err != nil { - if ctx.Err() != nil { - e2elog.Failf("Failed to GET from service %s: %v", name, err) - return true, err - } - e2elog.Logf("Failed to GET from service %s: %v:", name, err) - return false, nil - } - got := string(body) - if len(got) == 0 { - e2elog.Logf("Service %s: expected non-empty response", name) - return false, err // stop polling - } - e2elog.Logf("Service %s: found nonempty answer: %s", name, got) - return true, nil - }) -} - // RestclientConfig returns a config holds the information needed to build connection to kubernetes clusters. func RestclientConfig(kubeContext string) (*clientcmdapi.Config, error) { e2elog.Logf(">>> kubeConfig: %s", TestContext.KubeConfig) diff --git a/test/e2e/instrumentation/logging/elasticsearch/BUILD b/test/e2e/instrumentation/logging/elasticsearch/BUILD index 65e7c7cd73a..888671ca0a9 100644 --- a/test/e2e/instrumentation/logging/elasticsearch/BUILD +++ b/test/e2e/instrumentation/logging/elasticsearch/BUILD @@ -22,6 +22,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/pod:go_default_library", + "//test/e2e/framework/service:go_default_library", "//test/e2e/instrumentation/common:go_default_library", "//test/e2e/instrumentation/logging/utils:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", diff --git a/test/e2e/instrumentation/logging/elasticsearch/kibana.go b/test/e2e/instrumentation/logging/elasticsearch/kibana.go index c050e40d55d..0788795d05b 100644 --- a/test/e2e/instrumentation/logging/elasticsearch/kibana.go +++ b/test/e2e/instrumentation/logging/elasticsearch/kibana.go @@ -25,6 +25,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" "github.com/onsi/ginkgo" @@ -83,7 +84,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) { ginkgo.By("Checking to make sure we get a response from the Kibana UI.") err = wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) { - req, err := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) + req, err := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) if err != nil { e2elog.Logf("Failed to get services proxy request: %v", err) return false, nil diff --git a/test/e2e/instrumentation/logging/elasticsearch/utils.go b/test/e2e/instrumentation/logging/elasticsearch/utils.go index 274e62e8a0c..670c27fbcbb 100644 --- 
a/test/e2e/instrumentation/logging/elasticsearch/utils.go +++ b/test/e2e/instrumentation/logging/elasticsearch/utils.go @@ -28,6 +28,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" "k8s.io/kubernetes/test/e2e/instrumentation/logging/utils" ) @@ -92,7 +93,7 @@ func (p *esLogProvider) Init() error { err = nil var body []byte for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) { - proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) + proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) if errProxy != nil { e2elog.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) continue @@ -126,7 +127,7 @@ func (p *esLogProvider) Init() error { e2elog.Logf("Checking health of Elasticsearch service.") healthy := false for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) { - proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) + proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) if errProxy != nil { e2elog.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy) continue @@ -174,7 +175,7 @@ func (p *esLogProvider) Cleanup() { func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry { f := p.Framework - proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) + proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) if errProxy != nil { e2elog.Logf("Failed to get services proxy request: %v", errProxy) return nil diff --git a/test/e2e/kubectl/BUILD b/test/e2e/kubectl/BUILD index ca396bf7e7b..0072fe58722 100644 --- a/test/e2e/kubectl/BUILD +++ b/test/e2e/kubectl/BUILD @@ -37,6 +37,7 @@ go_library( "//test/e2e/framework/job:go_default_library", "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/pod:go_default_library", + "//test/e2e/framework/service:go_default_library", "//test/e2e/framework/testfiles:go_default_library", "//test/e2e/scheduling:go_default_library", "//test/utils:go_default_library", diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index 31a216f0980..90bc83f2bb3 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -64,6 +64,7 @@ import ( jobutil "k8s.io/kubernetes/test/e2e/framework/job" e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" "k8s.io/kubernetes/test/e2e/framework/testfiles" "k8s.io/kubernetes/test/e2e/scheduling" testutils "k8s.io/kubernetes/test/utils" @@ -1179,13 +1180,13 @@ metadata: }) framework.ExpectNoError(err) - service, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) + svc, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) framework.ExpectNoError(err) - if len(service.Spec.Ports) != 1 { + if len(svc.Spec.Ports) != 1 { e2elog.Failf("1 port is expected") } - port := service.Spec.Ports[0] + port := svc.Spec.Ports[0] if port.Port != int32(servicePort) { e2elog.Failf("Wrong service port: %d", port.Port) }
@@ -2180,7 +2181,7 @@ func waitForGuestbookResponse(c clientset.Interface, cmd, arg, expectedResponse } func makeRequestToGuestbook(c clientset.Interface, cmd, value string, ns string) (string, error) { - proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get()) + proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get()) if errProxy != nil { return "", errProxy } diff --git a/test/e2e/network/BUILD b/test/e2e/network/BUILD index bd7f95c10d0..3560b4e78fd 100644 --- a/test/e2e/network/BUILD +++ b/test/e2e/network/BUILD @@ -69,6 +69,7 @@ go_library( "//test/e2e/framework/node:go_default_library", "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/providers/gce:go_default_library", + "//test/e2e/framework/service:go_default_library", "//test/e2e/framework/ssh:go_default_library", "//test/e2e/network/scale:go_default_library", "//test/images/agnhost/net/nat:go_default_library", diff --git a/test/e2e/network/dns.go b/test/e2e/network/dns.go index 4c6fe7d0f4f..33e5ab75762 100644 --- a/test/e2e/network/dns.go +++ b/test/e2e/network/dns.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" "github.com/onsi/ginkgo" ) @@ -129,7 +130,7 @@ var _ = SIGDescribe("DNS", func() { testServiceSelector := map[string]string{ "dns-test": "true", } - headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) + headlessService := e2eservice.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName) defer func() { @@ -139,7 +140,7 @@ var _ = SIGDescribe("DNS", func() { }() regularServiceName := "test-service-2" - regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) + regularService := e2eservice.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService) framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName) @@ -178,7 +179,7 @@ var _ = SIGDescribe("DNS", func() { testServiceSelector := map[string]string{ "dns-test": "true", } - headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) + headlessService := e2eservice.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName) defer func() { @@ -188,7 +189,7 @@ var _ = SIGDescribe("DNS", func() { }() regularServiceName := "test-service-2" - regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) + regularService := e2eservice.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService) framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName) defer func() { @@ -235,7 +236,7 @@ var _ = SIGDescribe("DNS", func() { } serviceName := "dns-test-service-2" podHostname := "dns-querier-2" - headlessService := 
framework.CreateServiceSpec(serviceName, "", true, testServiceSelector) + headlessService := e2eservice.CreateServiceSpec(serviceName, "", true, testServiceSelector) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) framework.ExpectNoError(err, "failed to create headless service: %s", serviceName) @@ -276,7 +277,7 @@ var _ = SIGDescribe("DNS", func() { } serviceName := "dns-test-service-2" podHostname := "dns-querier-2" - headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector) + headlessService := e2eservice.CreateServiceSpec(serviceName, "", true, testServiceSelector) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) framework.ExpectNoError(err, "failed to create headless service: %s", serviceName) @@ -314,7 +315,7 @@ var _ = SIGDescribe("DNS", func() { // Create a test ExternalName service. ginkgo.By("Creating a test externalName service") serviceName := "dns-test-service-3" - externalNameService := framework.CreateServiceSpec(serviceName, "foo.example.com", false, nil) + externalNameService := e2eservice.CreateServiceSpec(serviceName, "foo.example.com", false, nil) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService) framework.ExpectNoError(err, "failed to create ExternalName service: %s", serviceName) @@ -337,7 +338,7 @@ var _ = SIGDescribe("DNS", func() { // Test changing the externalName field ginkgo.By("changing the externalName to bar.example.com") - _, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { + _, err = e2eservice.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { s.Spec.ExternalName = "bar.example.com" }) framework.ExpectNoError(err, "failed to change externalName of service: %s", serviceName) @@ -354,7 +355,7 @@ var _ = SIGDescribe("DNS", func() { // Test changing type from ExternalName to ClusterIP ginkgo.By("changing the service to type=ClusterIP") - _, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { + _, err = e2eservice.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeClusterIP s.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "http", Protocol: v1.ProtocolTCP}, diff --git a/test/e2e/network/dns_configmap.go b/test/e2e/network/dns_configmap.go index c621d8860d8..c648c03c620 100644 --- a/test/e2e/network/dns_configmap.go +++ b/test/e2e/network/dns_configmap.go @@ -20,9 +20,10 @@ import ( "fmt" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" "github.com/onsi/ginkgo" ) @@ -405,12 +406,12 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) { f := t.f serviceName := "dns-externalname-upstream-test" - externalNameService := framework.CreateServiceSpec(serviceName, googleDNSHostname, false, nil) + externalNameService := e2eservice.CreateServiceSpec(serviceName, googleDNSHostname, false, nil) if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService); err != nil { ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err)) } serviceNameLocal := "dns-externalname-upstream-local" - externalNameServiceLocal := framework.CreateServiceSpec(serviceNameLocal, fooHostname, false, nil) + externalNameServiceLocal := e2eservice.CreateServiceSpec(serviceNameLocal, fooHostname, false, nil) if _, err 
:= f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameServiceLocal); err != nil { ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err)) } diff --git a/test/e2e/network/example_cluster_dns.go b/test/e2e/network/example_cluster_dns.go index b8ba8de3483..ebb456fcf30 100644 --- a/test/e2e/network/example_cluster_dns.go +++ b/test/e2e/network/example_cluster_dns.go @@ -24,6 +24,7 @@ import ( "strings" "time" + "github.com/onsi/ginkgo" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -32,8 +33,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" - - "github.com/onsi/ginkgo" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" ) const ( @@ -111,7 +111,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { framework.ExpectNoError(err, "waiting for all pods to respond") e2elog.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name) - err = framework.ServiceResponding(c, ns.Name, backendSvcName) + err = e2eservice.WaitForServiceResponding(c, ns.Name, backendSvcName) framework.ExpectNoError(err, "waiting for the service to respond") } diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index 1106c0b237e..f4c0934dc87 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -29,6 +29,7 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2enode "k8s.io/kubernetes/test/e2e/framework/node" "k8s.io/kubernetes/test/e2e/framework/providers/gce" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" gcecloud "k8s.io/legacy-cloud-providers/gce" "github.com/onsi/ginkgo" @@ -72,17 +73,17 @@ var _ = SIGDescribe("Firewall rule", func() { framework.ExpectNoError(err) e2elog.Logf("Got cluster ID: %v", clusterID) - jig := framework.NewServiceTestJig(cs, serviceName) - nodeList := jig.GetNodes(framework.MaxNodesForEndpointsTests) + jig := e2eservice.NewTestJig(cs, serviceName) + nodeList := jig.GetNodes(e2eservice.MaxNodesForEndpointsTests) gomega.Expect(nodeList).NotTo(gomega.BeNil()) - nodesNames := jig.GetNodesNames(framework.MaxNodesForEndpointsTests) + nodesNames := jig.GetNodesNames(e2eservice.MaxNodesForEndpointsTests) if len(nodesNames) <= 0 { e2elog.Failf("Expect at least 1 node, got: %v", nodesNames) } nodesSet := sets.NewString(nodesNames...) 
ginkgo.By("Creating a LoadBalancer type service with ExternalTrafficPolicy=Global") - svc := jig.CreateLoadBalancerService(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) { + svc := jig.CreateLoadBalancerService(ns, serviceName, e2eservice.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) { svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: firewallTestHTTPPort}} svc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges }) @@ -95,7 +96,7 @@ var _ = SIGDescribe("Firewall rule", func() { framework.ExpectNoError(err) ginkgo.By("Waiting for the local traffic health check firewall rule to be deleted") localHCFwName := gce.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false) - _, err := gce.WaitForFirewallRule(gceCloud, localHCFwName, false, framework.LoadBalancerCleanupTimeout) + _, err := gce.WaitForFirewallRule(gceCloud, localHCFwName, false, e2eservice.LoadBalancerCleanupTimeout) framework.ExpectNoError(err) }() svcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP @@ -121,17 +122,17 @@ var _ = SIGDescribe("Firewall rule", func() { }) ginkgo.By("Waiting for the nodes health check firewall rule to be deleted") - _, err = gce.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, framework.LoadBalancerCleanupTimeout) + _, err = gce.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, e2eservice.LoadBalancerCleanupTimeout) framework.ExpectNoError(err) ginkgo.By("Waiting for the correct local traffic health check firewall rule to be created") localHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false) - fw, err = gce.WaitForFirewallRule(gceCloud, localHCFw.Name, true, framework.LoadBalancerCreateTimeoutDefault) + fw, err = gce.WaitForFirewallRule(gceCloud, localHCFw.Name, true, e2eservice.LoadBalancerCreateTimeoutDefault) framework.ExpectNoError(err) err = gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false) framework.ExpectNoError(err) - ginkgo.By(fmt.Sprintf("Creating netexec pods on at most %v nodes", framework.MaxNodesForEndpointsTests)) + ginkgo.By(fmt.Sprintf("Creating netexec pods on at most %v nodes", e2eservice.MaxNodesForEndpointsTests)) for i, nodeName := range nodesNames { podName := fmt.Sprintf("netexec%v", i) jig.LaunchNetexecPodOnNode(f, nodeName, podName, firewallTestHTTPPort, firewallTestUDPPort, true) @@ -144,7 +145,7 @@ var _ = SIGDescribe("Firewall rule", func() { // Send requests from outside of the cluster because internal traffic is whitelisted ginkgo.By("Accessing the external service ip from outside, all non-master nodes should be reached") - err = framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet) + err = framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, e2eservice.LoadBalancerCreateTimeoutDefault, nodesSet) framework.ExpectNoError(err) // Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster @@ -165,12 +166,12 @@ var _ = SIGDescribe("Firewall rule", func() { nodesSet.Insert(nodesNames[0]) gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags) // Make sure traffic is recovered before exit - err = framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet) + err = framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, e2eservice.LoadBalancerCreateTimeoutDefault, nodesSet) 
framework.ExpectNoError(err) }() ginkgo.By("Accessing serivce through the external ip and examine got no response from the node without tags") - err = framework.TestHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet, 15) + err = framework.TestHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, e2eservice.LoadBalancerCreateTimeoutDefault, nodesSet, 15) framework.ExpectNoError(err) }) diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index 9602dd18796..96c80d55393 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -39,6 +39,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework/ingress" e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/providers/gce" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" "github.com/onsi/ginkgo" ) @@ -170,7 +171,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { }, map[string]string{}) ginkgo.By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name)) - pollErr := wait.Poll(2*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { + pollErr := wait.Poll(2*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) { ing, err := f.ClientSet.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) framework.ExpectNoError(err) annotations := ing.Annotations @@ -301,7 +302,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) framework.ExpectNoError(err) } - err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { + err = wait.Poll(5*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) { if err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false)); err != nil { e2elog.Logf("ginkgo.Failed to verify IG backend service: %v", err) return false, nil @@ -319,7 +320,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) framework.ExpectNoError(err) } - err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { + err = wait.Poll(5*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) { if err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)); err != nil { e2elog.Logf("ginkgo.Failed to verify NEG backend service: %v", err) return false, nil @@ -404,7 +405,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) framework.ExpectNoError(err) - err = wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { + err = wait.Poll(10*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) { res, err := jig.GetDistinctResponseFromIngress() if err != nil { return false, nil @@ -421,7 +422,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod _, err = f.ClientSet.AppsV1().Deployments(ns).Update(deploy) framework.ExpectNoError(err) - err = wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { + err = wait.Poll(10*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) { res, err := jig.GetDistinctResponseFromIngress() framework.ExpectNoError(err) deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{}) @@ -787,7 +788,7 @@ func executePresharedCertTest(f *framework.Framework, jig 
*ingress.TestJig, stat jig.TryDeleteIngress() } ginkgo.By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName)) - err := wait.Poll(framework.LoadBalancerPollInterval, framework.LoadBalancerCleanupTimeout, func() (bool, error) { + err := wait.Poll(e2eservice.LoadBalancerPollInterval, e2eservice.LoadBalancerCleanupTimeout, func() (bool, error) { if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !errors.IsNotFound(err) { e2elog.Logf("ginkgo.Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err) return false, nil @@ -830,10 +831,10 @@ func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.TestJig, ginkgo.By("waiting for Ingress to come up with ip: " + ip) httpClient := ingress.BuildInsecureClient(ingress.IngressReqTimeout) - framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false)) + framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false)) ginkgo.By("should reject HTTP traffic") - framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true)) + framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true)) } func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) { @@ -848,12 +849,12 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ framework.ExpectNoError(err, "ginkgo.Failed to create re-encryption ingress") ginkgo.By(fmt.Sprintf("Waiting for ingress %s to come up", ingCreated.Name)) - ingIP, err := jig.WaitForIngressAddress(f.ClientSet, f.Namespace.Name, ingCreated.Name, framework.LoadBalancerPollTimeout) + ingIP, err := jig.WaitForIngressAddress(f.ClientSet, f.Namespace.Name, ingCreated.Name, e2eservice.LoadBalancerPollTimeout) framework.ExpectNoError(err, "ginkgo.Failed to wait for ingress IP") ginkgo.By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP)) timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout} - err = wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) { + err = wait.PollImmediate(e2eservice.LoadBalancerPollInterval, e2eservice.LoadBalancerPollTimeout, func() (bool, error) { resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "") if err != nil { e2elog.Logf("SimpleGET failed: %v", err) diff --git a/test/e2e/network/network_tiers.go b/test/e2e/network/network_tiers.go index 0d6cba44b18..f005a398a77 100644 --- a/test/e2e/network/network_tiers.go +++ b/test/e2e/network/network_tiers.go @@ -31,6 +31,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/providers/gce" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" gcecloud "k8s.io/legacy-cloud-providers/gce" "github.com/onsi/ginkgo" @@ -50,7 +51,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { ginkgo.AfterEach(func() { if ginkgo.CurrentGinkgoTestDescription().Failed { - framework.DescribeSvc(f.Namespace.Name) + e2eservice.DescribeSvc(f.Namespace.Name) } for _, lb := range serviceLBNames { e2elog.Logf("cleaning gce resource for %s", lb) @@ -60,12 +61,12 @@ var 
_ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { serviceLBNames = []string{} }) ginkgo.It("should be able to create and tear down a standard-tier load balancer [Slow]", func() { - lagTimeout := framework.LoadBalancerLagTimeoutDefault - createTimeout := framework.GetServiceLoadBalancerCreationTimeout(cs) + lagTimeout := e2eservice.LoadBalancerLagTimeoutDefault + createTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs) svcName := "net-tiers-svc" ns := f.Namespace.Name - jig := framework.NewServiceTestJig(cs, svcName) + jig := e2eservice.NewTestJig(cs, svcName) ginkgo.By("creating a pod to be part of the service " + svcName) jig.RunOrFail(ns, nil) @@ -134,7 +135,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { }) }) -func waitAndVerifyLBWithTier(jig *framework.ServiceTestJig, ns, svcName, existingIP string, waitTimeout, checkTimeout time.Duration) string { +func waitAndVerifyLBWithTier(jig *e2eservice.TestJig, ns, svcName, existingIP string, waitTimeout, checkTimeout time.Duration) string { var svc *v1.Service if existingIP == "" { // Creating the LB for the first time; wait for any ingress IP to show @@ -147,7 +148,7 @@ func waitAndVerifyLBWithTier(jig *framework.ServiceTestJig, ns, svcName, existin svcPort := int(svc.Spec.Ports[0].Port) lbIngress := &svc.Status.LoadBalancer.Ingress[0] - ingressIP := framework.GetIngressPoint(lbIngress) + ingressIP := e2eservice.GetIngressPoint(lbIngress) ginkgo.By("running sanity and reachability checks") if svc.Spec.LoadBalancerIP != "" { diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index e217077649f..1290097fe88 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -43,6 +43,7 @@ import ( e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/providers/gce" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" imageutils "k8s.io/kubernetes/test/utils/image" gcecloud "k8s.io/legacy-cloud-providers/gce" @@ -93,11 +94,11 @@ var _ = SIGDescribe("Services", func() { ginkgo.AfterEach(func() { if ginkgo.CurrentGinkgoTestDescription().Failed { - framework.DescribeSvc(f.Namespace.Name) + e2eservice.DescribeSvc(f.Namespace.Name) } for _, lb := range serviceLBNames { e2elog.Logf("cleaning load balancer resource for %s", lb) - framework.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone) + e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone) } //reset serviceLBNames serviceLBNames = []string{} @@ -123,7 +124,7 @@ var _ = SIGDescribe("Services", func() { framework.ConformanceIt("should serve a basic endpoint from pods ", func() { serviceName := "endpoint-test2" ns := f.Namespace.Name - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) labels := map[string]string{ "foo": "bar", "baz": "blah", @@ -186,7 +187,7 @@ var _ = SIGDescribe("Services", func() { // repacking functionality is intentionally not tested here - it's better to test it in an integration test. 
serviceName := "multi-endpoint-test" ns := f.Namespace.Name - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) defer func() { err := cs.CoreV1().Services(ns).Delete(serviceName, nil) @@ -279,7 +280,7 @@ var _ = SIGDescribe("Services", func() { ns := f.Namespace.Name ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns) - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) servicePort := 8080 tcpService := jig.CreateTCPServiceWithPort(ns, nil, int32(servicePort)) jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP) @@ -340,10 +341,10 @@ var _ = SIGDescribe("Services", func() { svc3 := "up-down-3" ginkgo.By("creating " + svc1 + " in namespace " + ns) - podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods) + podNames1, svc1IP, err := e2eservice.StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns) ginkgo.By("creating " + svc2 + " in namespace " + ns) - podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods) + podNames2, svc2IP, err := e2eservice.StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns) hosts, err := e2essh.NodeSSHHosts(cs) @@ -354,23 +355,23 @@ var _ = SIGDescribe("Services", func() { host := hosts[0] ginkgo.By("verifying service " + svc1 + " is up") - framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) ginkgo.By("verifying service " + svc2 + " is up") - framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) // Stop service 1 and make sure it is gone. ginkgo.By("stopping service " + svc1) - framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc1)) + framework.ExpectNoError(e2eservice.StopServeHostnameService(f.ClientSet, ns, svc1)) ginkgo.By("verifying service " + svc1 + " is not up") - framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svc1IP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceDown(cs, host, svc1IP, servicePort)) ginkgo.By("verifying service " + svc2 + " is still up") - framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) // Start another service and verify both are up. 
ginkgo.By("creating service " + svc3 + " in namespace " + ns) - podNames3, svc3IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc3), ns, numPods) + podNames3, svc3IP, err := e2eservice.StartServeHostnameService(cs, getServeHostnameService(svc3), ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc3, ns) if svc2IP == svc3IP { @@ -378,10 +379,10 @@ var _ = SIGDescribe("Services", func() { } ginkgo.By("verifying service " + svc2 + " is still up") - framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) ginkgo.By("verifying service " + svc3 + " is up") - framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames3, svc3IP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames3, svc3IP, servicePort)) }) ginkgo.It("should work after restarting kube-proxy [Disruptive]", func() { @@ -395,15 +396,15 @@ var _ = SIGDescribe("Services", func() { svc2 := "restart-proxy-2" defer func() { - framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc1)) + framework.ExpectNoError(e2eservice.StopServeHostnameService(f.ClientSet, ns, svc1)) }() - podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods) + podNames1, svc1IP, err := e2eservice.StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns) defer func() { - framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc2)) + framework.ExpectNoError(e2eservice.StopServeHostnameService(f.ClientSet, ns, svc2)) }() - podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods) + podNames2, svc2IP, err := e2eservice.StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns) if svc1IP == svc2IP { @@ -417,15 +418,15 @@ var _ = SIGDescribe("Services", func() { } host := hosts[0] - framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) - framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) ginkgo.By(fmt.Sprintf("Restarting kube-proxy on %v", host)) if err := framework.RestartKubeProxy(host); err != nil { e2elog.Failf("error restarting kube-proxy: %v", err) } - framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) - framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) ginkgo.By("Removing iptable rules") result, err := e2essh.SSH(` @@ -436,8 +437,8 @@ var _ = 
SIGDescribe("Services", func() { e2essh.LogResult(result) e2elog.Failf("couldn't remove iptable rules: %v", err) } - framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) - framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) }) ginkgo.It("should work after restarting apiserver [Disruptive]", func() { @@ -451,9 +452,9 @@ var _ = SIGDescribe("Services", func() { svc2 := "restart-apiserver-2" defer func() { - framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc1)) + framework.ExpectNoError(e2eservice.StopServeHostnameService(f.ClientSet, ns, svc1)) }() - podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods) + podNames1, svc1IP, err := e2eservice.StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc1, ns) hosts, err := e2essh.NodeSSHHosts(cs) @@ -463,7 +464,7 @@ var _ = SIGDescribe("Services", func() { } host := hosts[0] - framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) // Restart apiserver ginkgo.By("Restarting apiserver") @@ -474,20 +475,20 @@ var _ = SIGDescribe("Services", func() { if err := framework.WaitForApiserverUp(cs); err != nil { e2elog.Failf("error while waiting for apiserver up: %v", err) } - framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) // Create a new service and check if it's not reusing IP. 
defer func() { - framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc2)) + framework.ExpectNoError(e2eservice.StopServeHostnameService(f.ClientSet, ns, svc2)) }() - podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods) + podNames2, svc2IP, err := e2eservice.StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns) if svc1IP == svc2IP { e2elog.Failf("VIPs conflict: %v", svc1IP) } - framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) - framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) }) // TODO: Run this test against the userspace proxy and nodes @@ -497,7 +498,7 @@ var _ = SIGDescribe("Services", func() { serviceName := "nodeport-test" ns := f.Namespace.Name - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) nodeIP, err := e2enode.PickIP(jig.Client) // for later if err != nil { e2elog.Logf("Unexpected error occurred: %v", err) @@ -516,7 +517,7 @@ var _ = SIGDescribe("Services", func() { jig.RunOrFail(ns, nil) ginkgo.By("hitting the pod through the service's NodePort") - jig.TestReachableHTTP(nodeIP, nodePort, framework.KubeProxyLagTimeout) + jig.TestReachableHTTP(nodeIP, nodePort, e2eservice.KubeProxyLagTimeout) ginkgo.By("verifying the node port is locked") hostExec := e2epod.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec") @@ -536,13 +537,13 @@ var _ = SIGDescribe("Services", func() { loadBalancerSupportsUDP := !framework.ProviderIs("aws") - loadBalancerLagTimeout := framework.LoadBalancerLagTimeoutDefault + loadBalancerLagTimeout := e2eservice.LoadBalancerLagTimeoutDefault if framework.ProviderIs("aws") { - loadBalancerLagTimeout = framework.LoadBalancerLagTimeoutAWS + loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS } - loadBalancerCreateTimeout := framework.LoadBalancerCreateTimeoutDefault - if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > framework.LargeClusterMinNodesNumber { - loadBalancerCreateTimeout = framework.LoadBalancerCreateTimeoutLarge + loadBalancerCreateTimeout := e2eservice.LoadBalancerCreateTimeoutDefault + if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber { + loadBalancerCreateTimeout = e2eservice.LoadBalancerCreateTimeoutLarge } // This test is more monolithic than we'd like because LB turnup can be @@ -558,7 +559,7 @@ var _ = SIGDescribe("Services", func() { ns2 := namespacePtr.Name // LB2 in ns2 on UDP e2elog.Logf("namespace for UDP test: %s", ns2) - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) nodeIP, err := e2enode.PickIP(jig.Client) // for later if err != nil { e2elog.Logf("Unexpected error occurred: %v", err) @@ -609,10 +610,10 @@ var _ = SIGDescribe("Services", func() { e2elog.Logf("UDP node port: %d", udpNodePort) ginkgo.By("hitting the TCP service's NodePort") - jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) + jig.TestReachableHTTP(nodeIP, tcpNodePort, 
e2eservice.KubeProxyLagTimeout) ginkgo.By("hitting the UDP service's NodePort") - jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) + jig.TestReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout) // Change the services to LoadBalancer. @@ -667,10 +668,10 @@ var _ = SIGDescribe("Services", func() { if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort { e2elog.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort) } - if requestedIP != "" && framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP { - e2elog.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + if requestedIP != "" && e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP { + e2elog.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } - tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) + tcpIngressIP := e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) e2elog.Logf("TCP load balancer: %s", tcpIngressIP) if framework.ProviderIs("gce", "gke") { @@ -700,20 +701,20 @@ var _ = SIGDescribe("Services", func() { if int(udpService.Spec.Ports[0].NodePort) != udpNodePort { e2elog.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort) } - udpIngressIP = framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) + udpIngressIP = e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) e2elog.Logf("UDP load balancer: %s", udpIngressIP) ginkgo.By("verifying that TCP and UDP use different load balancers") if tcpIngressIP == udpIngressIP { - e2elog.Failf("Load balancers are not different: %s", framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + e2elog.Failf("Load balancers are not different: %s", e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } } ginkgo.By("hitting the TCP service's NodePort") - jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) + jig.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) ginkgo.By("hitting the UDP service's NodePort") - jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) + jig.TestReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout) ginkgo.By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) @@ -733,8 +734,8 @@ var _ = SIGDescribe("Services", func() { if tcpNodePort == tcpNodePortOld { e2elog.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort) } - if framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { - e2elog.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { + e2elog.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } e2elog.Logf("TCP node port: %d", tcpNodePort) @@ -750,22 +751,22 @@ var _ = SIGDescribe("Services", func() { if udpNodePort == 
udpNodePortOld { e2elog.Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort) } - if loadBalancerSupportsUDP && framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { - e2elog.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) + if loadBalancerSupportsUDP && e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { + e2elog.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) } e2elog.Logf("UDP node port: %d", udpNodePort) ginkgo.By("hitting the TCP service's new NodePort") - jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) + jig.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) ginkgo.By("hitting the UDP service's new NodePort") - jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) + jig.TestReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout) ginkgo.By("checking the old TCP NodePort is closed") - jig.TestNotReachableHTTP(nodeIP, tcpNodePortOld, framework.KubeProxyLagTimeout) + jig.TestNotReachableHTTP(nodeIP, tcpNodePortOld, e2eservice.KubeProxyLagTimeout) ginkgo.By("checking the old UDP NodePort is closed") - jig.TestNotReachableUDP(nodeIP, udpNodePortOld, framework.KubeProxyLagTimeout) + jig.TestNotReachableUDP(nodeIP, udpNodePortOld, e2eservice.KubeProxyLagTimeout) ginkgo.By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) @@ -790,8 +791,8 @@ var _ = SIGDescribe("Services", func() { if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort { e2elog.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort) } - if framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { - e2elog.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { + e2elog.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } ginkgo.By("changing the UDP service's port") @@ -809,17 +810,17 @@ var _ = SIGDescribe("Services", func() { if int(udpService.Spec.Ports[0].NodePort) != udpNodePort { e2elog.Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort) } - if loadBalancerSupportsUDP && framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { - e2elog.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) + if loadBalancerSupportsUDP && e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { + e2elog.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) } e2elog.Logf("service port (TCP and UDP): %d", svcPort) ginkgo.By("hitting the TCP service's NodePort") - jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) + jig.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) 
ginkgo.By("hitting the UDP service's NodePort") - jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) + jig.TestReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout) ginkgo.By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) @@ -834,10 +835,10 @@ var _ = SIGDescribe("Services", func() { jig.Scale(ns2, 0) ginkgo.By("looking for ICMP REJECT on the TCP service's NodePort") - jig.TestRejectedHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) + jig.TestRejectedHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) ginkgo.By("looking for ICMP REJECT on the UDP service's NodePort") - jig.TestRejectedUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) + jig.TestRejectedUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout) ginkgo.By("looking for ICMP REJECT on the TCP service's LoadBalancer") jig.TestRejectedHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) @@ -852,10 +853,10 @@ var _ = SIGDescribe("Services", func() { jig.Scale(ns2, 1) ginkgo.By("hitting the TCP service's NodePort") - jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) + jig.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) ginkgo.By("hitting the UDP service's NodePort") - jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) + jig.TestReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout) ginkgo.By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) @@ -888,10 +889,10 @@ var _ = SIGDescribe("Services", func() { } ginkgo.By("checking the TCP NodePort is closed") - jig.TestNotReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) + jig.TestNotReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout) ginkgo.By("checking the UDP NodePort is closed") - jig.TestNotReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) + jig.TestNotReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout) ginkgo.By("checking the TCP LoadBalancer is closed") jig.TestNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) @@ -905,7 +906,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.It("should be able to update NodePorts with two same port numbers but different protocols", func() { serviceName := "nodeport-update-service" ns := f.Namespace.Name - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns) tcpService := jig.CreateTCPServiceOrFail(ns, nil) @@ -952,7 +953,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.It("should be able to change the type from ExternalName to ClusterIP", func() { serviceName := "externalname-service" ns := f.Namespace.Name - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns) externalNameService := jig.CreateExternalNameServiceOrFail(ns, nil) @@ -976,7 +977,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.It("should be able to change the type from ExternalName to NodePort", func() { serviceName := "externalname-service" ns := f.Namespace.Name - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns) 
externalNameService := jig.CreateExternalNameServiceOrFail(ns, nil) @@ -1000,7 +1001,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.It("should be able to change the type from ClusterIP to ExternalName", func() { serviceName := "clusterip-service" ns := f.Namespace.Name - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) ginkgo.By("creating a service " + serviceName + " with the type=ClusterIP in namespace " + ns) clusterIPService := jig.CreateTCPServiceOrFail(ns, nil) @@ -1022,7 +1023,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.It("should be able to change the type from NodePort to ExternalName", func() { serviceName := "nodeport-service" ns := f.Namespace.Name - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) ginkgo.By("creating a service " + serviceName + " with the type=NodePort in namespace " + ns) nodePortService := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) { @@ -1048,7 +1049,7 @@ var _ = SIGDescribe("Services", func() { serviceName := "nodeports" ns := f.Namespace.Name - t := framework.NewServerTest(cs, ns, serviceName) + t := e2eservice.NewServerTest(cs, ns, serviceName) defer func() { defer ginkgo.GinkgoRecover() errs := t.Cleanup() @@ -1098,7 +1099,7 @@ var _ = SIGDescribe("Services", func() { serviceName2 := baseName + "2" ns := f.Namespace.Name - t := framework.NewServerTest(cs, ns, serviceName1) + t := e2eservice.NewServerTest(cs, ns, serviceName1) defer func() { defer ginkgo.GinkgoRecover() errs := t.Cleanup() @@ -1150,7 +1151,7 @@ var _ = SIGDescribe("Services", func() { serviceName := "nodeport-range-test" ns := f.Namespace.Name - t := framework.NewServerTest(cs, ns, serviceName) + t := e2eservice.NewServerTest(cs, ns, serviceName) defer func() { defer ginkgo.GinkgoRecover() errs := t.Cleanup() @@ -1176,19 +1177,19 @@ var _ = SIGDescribe("Services", func() { if port.NodePort == 0 { e2elog.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service) } - if !framework.ServiceNodePortRange.Contains(int(port.NodePort)) { + if !e2eservice.NodePortRange.Contains(int(port.NodePort)) { e2elog.Failf("got unexpected (out-of-range) port for new service: %v", service) } outOfRangeNodePort := 0 for { outOfRangeNodePort = 1 + rand.Intn(65535) - if !framework.ServiceNodePortRange.Contains(outOfRangeNodePort) { + if !e2eservice.NodePortRange.Contains(outOfRangeNodePort) { break } } ginkgo.By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort)) - result, err := framework.UpdateService(cs, ns, serviceName, func(s *v1.Service) { + result, err := e2eservice.UpdateService(cs, ns, serviceName, func(s *v1.Service) { s.Spec.Ports[0].NodePort = int32(outOfRangeNodePort) }) if err == nil { @@ -1217,7 +1218,7 @@ var _ = SIGDescribe("Services", func() { serviceName := "nodeport-reuse" ns := f.Namespace.Name - t := framework.NewServerTest(cs, ns, serviceName) + t := e2eservice.NewServerTest(cs, ns, serviceName) defer func() { defer ginkgo.GinkgoRecover() errs := t.Cleanup() @@ -1243,7 +1244,7 @@ var _ = SIGDescribe("Services", func() { if port.NodePort == 0 { e2elog.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service) } - if !framework.ServiceNodePortRange.Contains(int(port.NodePort)) { + if !e2eservice.NodePortRange.Contains(int(port.NodePort)) { e2elog.Failf("got unexpected (out-of-range) port for new service: %v", service) } nodePort := port.NodePort @@ -1255,7 +1256,7 @@ var _ = 
SIGDescribe("Services", func() { hostExec := e2epod.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec") cmd := fmt.Sprintf(`! ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort) var stdout string - if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { + if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) { var err error stdout, err = framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) if err != nil { @@ -1264,7 +1265,7 @@ var _ = SIGDescribe("Services", func() { } return true, nil }); pollErr != nil { - e2elog.Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, framework.KubeProxyLagTimeout, stdout) + e2elog.Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, e2eservice.KubeProxyLagTimeout, stdout) } ginkgo.By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort)) @@ -1279,7 +1280,7 @@ var _ = SIGDescribe("Services", func() { serviceName := "tolerate-unready" ns := f.Namespace.Name - t := framework.NewServerTest(cs, ns, serviceName) + t := e2eservice.NewServerTest(cs, ns, serviceName) defer func() { defer ginkgo.GinkgoRecover() errs := t.Cleanup() @@ -1347,7 +1348,7 @@ var _ = SIGDescribe("Services", func() { execPodName := e2epod.CreateExecPodOrFail(f.ClientSet, f.Namespace.Name, "execpod-", nil) cmd := fmt.Sprintf("wget -qO- http://%s:%d/", svcName, port) var stdout string - if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { + if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) { var err error stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd) if err != nil { @@ -1356,21 +1357,21 @@ var _ = SIGDescribe("Services", func() { } return true, nil }); pollErr != nil { - e2elog.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) + e2elog.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, e2eservice.KubeProxyLagTimeout, stdout) } ginkgo.By("Scaling down replication controller to zero") framework.ScaleRC(f.ClientSet, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false) ginkgo.By("Update service to not tolerate unready services") - _, err = framework.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { + _, err = e2eservice.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { s.ObjectMeta.Annotations[endpoint.TolerateUnreadyEndpointsAnnotation] = "false" }) framework.ExpectNoError(err) ginkgo.By("Check if pod is unreachable") cmd = fmt.Sprintf("wget -qO- -T 2 http://%s:%d/; test \"$?\" -eq \"1\"", svcName, port) - if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { + if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) { var err error stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd) if err != nil { @@ -1379,18 +1380,18 @@ var _ = SIGDescribe("Services", func() { } return true, nil }); pollErr != nil { - e2elog.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) + e2elog.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, e2eservice.KubeProxyLagTimeout, stdout) } ginkgo.By("Update service to tolerate unready services 
again") - _, err = framework.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { + _, err = e2eservice.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { s.ObjectMeta.Annotations[endpoint.TolerateUnreadyEndpointsAnnotation] = "true" }) framework.ExpectNoError(err) ginkgo.By("Check if terminating pod is available through service") cmd = fmt.Sprintf("wget -qO- http://%s:%d/", svcName, port) - if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { + if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) { var err error stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd) if err != nil { @@ -1399,7 +1400,7 @@ var _ = SIGDescribe("Services", func() { } return true, nil }); pollErr != nil { - e2elog.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) + e2elog.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, e2eservice.KubeProxyLagTimeout, stdout) } ginkgo.By("Remove pods immediately") @@ -1424,18 +1425,18 @@ var _ = SIGDescribe("Services", func() { // this feature currently supported only on GCE/GKE/AWS framework.SkipUnlessProviderIs("gce", "gke", "aws") - loadBalancerLagTimeout := framework.LoadBalancerLagTimeoutDefault + loadBalancerLagTimeout := e2eservice.LoadBalancerLagTimeoutDefault if framework.ProviderIs("aws") { - loadBalancerLagTimeout = framework.LoadBalancerLagTimeoutAWS + loadBalancerLagTimeout = e2eservice.LoadBalancerLagTimeoutAWS } - loadBalancerCreateTimeout := framework.LoadBalancerCreateTimeoutDefault - if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > framework.LargeClusterMinNodesNumber { - loadBalancerCreateTimeout = framework.LoadBalancerCreateTimeoutLarge + loadBalancerCreateTimeout := e2eservice.LoadBalancerCreateTimeoutDefault + if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber { + loadBalancerCreateTimeout = e2eservice.LoadBalancerCreateTimeoutLarge } namespace := f.Namespace.Name serviceName := "lb-sourcerange" - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) ginkgo.By("Prepare allow source ips") // prepare the exec pods @@ -1475,7 +1476,7 @@ var _ = SIGDescribe("Services", func() { normalReachabilityTimeout := 2 * time.Minute ginkgo.By("check reachability from different sources") - svcIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) + svcIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) // Wait longer as this is our first request after creation. 
We can't check using a separate method, // because the LB should only be reachable from the "accept" pod framework.CheckReachabilityFromPod(true, loadBalancerLagTimeout, namespace, acceptPodName, svcIP) @@ -1501,24 +1502,24 @@ var _ = SIGDescribe("Services", func() { ginkgo.It("should be able to create an internal type load balancer [Slow] [DisabledForLargeClusters]", func() { framework.SkipUnlessProviderIs("azure", "gke", "gce") - createTimeout := framework.LoadBalancerCreateTimeoutDefault - if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > framework.LargeClusterMinNodesNumber { - createTimeout = framework.LoadBalancerCreateTimeoutLarge + createTimeout := e2eservice.LoadBalancerCreateTimeoutDefault + if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber { + createTimeout = e2eservice.LoadBalancerCreateTimeoutLarge } pollInterval := framework.Poll * 10 namespace := f.Namespace.Name serviceName := "lb-internal" - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) ginkgo.By("creating pod to be part of service " + serviceName) jig.RunOrFail(namespace, nil) - enableILB, disableILB := framework.EnableAndDisableInternalLB() + enableILB, disableILB := e2eservice.EnableAndDisableInternalLB() isInternalEndpoint := func(lbIngress *v1.LoadBalancerIngress) bool { - ingressEndpoint := framework.GetIngressPoint(lbIngress) + ingressEndpoint := e2eservice.GetIngressPoint(lbIngress) // Needs update for providers using hostname as endpoint. return strings.HasPrefix(ingressEndpoint, "10.") } @@ -1542,7 +1543,7 @@ var _ = SIGDescribe("Services", func() { hostExec := e2epod.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec") e2elog.Logf("Waiting up to %v for service %q's internal LB to respond to requests", createTimeout, serviceName) - tcpIngressIP := framework.GetIngressPoint(lbIngress) + tcpIngressIP := e2eservice.GetIngressPoint(lbIngress) if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) { cmd := fmt.Sprintf(`curl -m 5 'http://%v:%v/echo?msg=hello'`, tcpIngressIP, svcPort) stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) @@ -1583,8 +1584,8 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("hitting the external load balancer") e2elog.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName) - tcpIngressIP = framework.GetIngressPoint(lbIngress) - jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault) + tcpIngressIP = e2eservice.GetIngressPoint(lbIngress) + jig.TestReachableHTTP(tcpIngressIP, svcPort, e2eservice.LoadBalancerLagTimeoutDefault) // GCE cannot test a specific IP because the test may not own it. This cloud specific condition // will be removed when GCP supports similar functionality. @@ -1608,7 +1609,7 @@ var _ = SIGDescribe("Services", func() { } // should have the given static internal IP. 
jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) - framework.ExpectEqual(framework.GetIngressPoint(lbIngress), internalStaticIP) + framework.ExpectEqual(e2eservice.GetIngressPoint(lbIngress), internalStaticIP) } ginkgo.By("switching to ClusterIP type to destroy loadbalancer") @@ -1634,7 +1635,7 @@ var _ = SIGDescribe("Services", func() { namespace := f.Namespace.Name serviceName := "lb-hc-int" - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) ginkgo.By("create load balancer service") // Create loadbalancer service with source range from node[0] and podAccept @@ -1651,7 +1652,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err) }() - svc = jig.WaitForLoadBalancerOrFail(namespace, serviceName, framework.LoadBalancerCreateTimeoutDefault) + svc = jig.WaitForLoadBalancerOrFail(namespace, serviceName, e2eservice.LoadBalancerCreateTimeoutDefault) hcName := gcecloud.MakeNodesHealthCheckName(clusterID) hc, err := gceCloud.GetHTTPHealthCheck(hcName) @@ -1676,7 +1677,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("health check should be reconciled") pollInterval := framework.Poll * 10 - if pollErr := wait.PollImmediate(pollInterval, framework.LoadBalancerCreateTimeoutDefault, func() (bool, error) { + if pollErr := wait.PollImmediate(pollInterval, e2eservice.LoadBalancerCreateTimeoutDefault, func() (bool, error) { hc, err := gceCloud.GetHTTPHealthCheck(hcName) if err != nil { e2elog.Logf("ginkgo.Failed to get HttpHealthCheck(%q): %v", hcName, err) @@ -1775,15 +1776,15 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("creating service-disabled in namespace " + ns) svcDisabled := getServeHostnameService("service-proxy-disabled") svcDisabled.ObjectMeta.Labels = serviceProxyNameLabels - _, svcDisabledIP, err := framework.StartServeHostnameService(cs, svcDisabled, ns, numPods) + _, svcDisabledIP, err := e2eservice.StartServeHostnameService(cs, svcDisabled, ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svcDisabledIP, ns) ginkgo.By("creating service in namespace " + ns) svcToggled := getServeHostnameService("service-proxy-toggled") - podToggledNames, svcToggledIP, err := framework.StartServeHostnameService(cs, svcToggled, ns, numPods) + podToggledNames, svcToggledIP, err := e2eservice.StartServeHostnameService(cs, svcToggled, ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svcToggledIP, ns) - jig := framework.NewServiceTestJig(cs, svcToggled.ObjectMeta.Name) + jig := e2eservice.NewTestJig(cs, svcToggled.ObjectMeta.Name) hosts, err := e2essh.NodeSSHHosts(cs) framework.ExpectNoError(err, "failed to find external/internal IPs for every node") @@ -1793,10 +1794,10 @@ var _ = SIGDescribe("Services", func() { host := hosts[0] ginkgo.By("verifying service is up") - framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podToggledNames, svcToggledIP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podToggledNames, svcToggledIP, servicePort)) ginkgo.By("verifying service-disabled is not up") - framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svcDisabledIP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceDown(cs, host, svcDisabledIP, servicePort)) ginkgo.By("adding service-proxy-name label") jig.UpdateServiceOrFail(ns, svcToggled.ObjectMeta.Name, func(svc 
*v1.Service) { @@ -1804,7 +1805,7 @@ var _ = SIGDescribe("Services", func() { }) ginkgo.By("verifying service is not up") - framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svcToggledIP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceDown(cs, host, svcToggledIP, servicePort)) ginkgo.By("removing service-proxy-name annotation") jig.UpdateServiceOrFail(ns, svcToggled.ObjectMeta.Name, func(svc *v1.Service) { @@ -1812,17 +1813,17 @@ var _ = SIGDescribe("Services", func() { }) ginkgo.By("verifying service is up") - framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podToggledNames, svcToggledIP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podToggledNames, svcToggledIP, servicePort)) ginkgo.By("verifying service-disabled is still not up") - framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svcDisabledIP, servicePort)) + framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceDown(cs, host, svcDisabledIP, servicePort)) }) ginkgo.It("should be rejected when no endpoints exist", func() { namespace := f.Namespace.Name serviceName := "no-pods" - jig := framework.NewServiceTestJig(cs, serviceName) - nodes := jig.GetNodes(framework.MaxNodesForEndpointsTests) + jig := e2eservice.NewTestJig(cs, serviceName) + nodes := jig.GetNodes(e2eservice.MaxNodesForEndpointsTests) labels := map[string]string{ "nopods": "nopods", } @@ -1849,12 +1850,12 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err) serviceAddress := net.JoinHostPort(serviceName, strconv.Itoa(port)) - e2elog.Logf("waiting up to %v to connect to %v", framework.KubeProxyEndpointLagTimeout, serviceAddress) + e2elog.Logf("waiting up to %v to connect to %v", e2eservice.KubeProxyEndpointLagTimeout, serviceAddress) cmd := fmt.Sprintf("/agnhost connect --timeout=3s %s", serviceAddress) ginkgo.By(fmt.Sprintf("hitting service %v from pod %v on node %v", serviceAddress, podName, nodeName)) expectedErr := "REFUSED" - if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyEndpointLagTimeout, func() (bool, error) { + if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyEndpointLagTimeout, func() (bool, error) { _, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd) if err != nil { @@ -1876,7 +1877,7 @@ var _ = SIGDescribe("Services", func() { // This ensures downgrading from higher version cluster will not break LoadBalancer // type service. ginkgo.It("should remove load balancer cleanup finalizer when service is deleted [Slow]", func() { - jig := framework.NewServiceTestJig(cs, "lb-remove-finalizer") + jig := e2eservice.NewTestJig(cs, "lb-remove-finalizer") ginkgo.By("Create load balancer service") svc := jig.CreateTCPServiceOrFail(f.Namespace.Name, func(svc *v1.Service) { @@ -1888,7 +1889,7 @@ var _ = SIGDescribe("Services", func() { }() ginkgo.By("Wait for load balancer to serve traffic") - svc = jig.WaitForLoadBalancerOrFail(svc.Namespace, svc.Name, framework.GetServiceLoadBalancerCreationTimeout(cs)) + svc = jig.WaitForLoadBalancerOrFail(svc.Namespace, svc.Name, e2eservice.GetServiceLoadBalancerCreationTimeout(cs)) ginkgo.By("Manually add load balancer cleanup finalizer to service") svc.Finalizers = append(svc.Finalizers, "service.kubernetes.io/load-balancer-cleanup") @@ -1904,7 +1905,7 @@ var _ = SIGDescribe("Services", func() { // 3. Update service to type=LoadBalancer. Finalizer should be added. // 4. 
Delete service with type=LoadBalancer. Finalizer should be removed. ginkgo.It("should handle load balancer cleanup finalizer for service [Slow] [Feature:ServiceFinalizer]", func() { - jig := framework.NewServiceTestJig(cs, "lb-finalizer") + jig := e2eservice.NewTestJig(cs, "lb-finalizer") ginkgo.By("Create load balancer service") svc := jig.CreateTCPServiceOrFail(f.Namespace.Name, func(svc *v1.Service) { @@ -1916,17 +1917,17 @@ var _ = SIGDescribe("Services", func() { }() ginkgo.By("Wait for load balancer to serve traffic") - svc = jig.WaitForLoadBalancerOrFail(svc.Namespace, svc.Name, framework.GetServiceLoadBalancerCreationTimeout(cs)) + svc = jig.WaitForLoadBalancerOrFail(svc.Namespace, svc.Name, e2eservice.GetServiceLoadBalancerCreationTimeout(cs)) ginkgo.By("Check if finalizer presents on service with type=LoadBalancer") waitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, true) ginkgo.By("Check if finalizer is removed on service after changed to type=ClusterIP") - jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, framework.GetServiceLoadBalancerCreationTimeout(cs)) + jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, e2eservice.GetServiceLoadBalancerCreationTimeout(cs)) waitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, false) ginkgo.By("Check if finalizer is added back to service after changed to type=LoadBalancer") - jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeLoadBalancer, framework.GetServiceLoadBalancerCreationTimeout(cs)) + jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeLoadBalancer, e2eservice.GetServiceLoadBalancerCreationTimeout(cs)) waitForServiceUpdatedWithFinalizer(cs, svc.Namespace, svc.Name, true) }) }) @@ -1938,7 +1939,7 @@ func waitForServiceDeletedWithFinalizer(cs clientset.Interface, namespace, name } ginkgo.By("Wait for service to disappear") - if pollErr := wait.PollImmediate(framework.LoadBalancerPollInterval, framework.GetServiceLoadBalancerCreationTimeout(cs), func() (bool, error) { + if pollErr := wait.PollImmediate(e2eservice.LoadBalancerPollInterval, e2eservice.GetServiceLoadBalancerCreationTimeout(cs), func() (bool, error) { svc, err := cs.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { @@ -1956,7 +1957,7 @@ func waitForServiceDeletedWithFinalizer(cs clientset.Interface, namespace, name func waitForServiceUpdatedWithFinalizer(cs clientset.Interface, namespace, name string, hasFinalizer bool) { ginkgo.By(fmt.Sprintf("Wait for service to hasFinalizer=%t", hasFinalizer)) - if pollErr := wait.PollImmediate(framework.LoadBalancerPollInterval, framework.GetServiceLoadBalancerCreationTimeout(cs), func() (bool, error) { + if pollErr := wait.PollImmediate(e2eservice.LoadBalancerPollInterval, e2eservice.GetServiceLoadBalancerCreationTimeout(cs), func() (bool, error) { svc, err := cs.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) if err != nil { return false, err @@ -1980,7 +1981,7 @@ func waitForServiceUpdatedWithFinalizer(cs clientset.Interface, namespace, name // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed. 
var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { f := framework.NewDefaultFramework("esipp") - loadBalancerCreateTimeout := framework.LoadBalancerCreateTimeoutDefault + loadBalancerCreateTimeout := e2eservice.LoadBalancerCreateTimeoutDefault var cs clientset.Interface serviceLBNames := []string{} @@ -1990,18 +1991,18 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { framework.SkipUnlessProviderIs("gce", "gke") cs = f.ClientSet - if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > framework.LargeClusterMinNodesNumber { - loadBalancerCreateTimeout = framework.LoadBalancerCreateTimeoutLarge + if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > e2eservice.LargeClusterMinNodesNumber { + loadBalancerCreateTimeout = e2eservice.LoadBalancerCreateTimeoutLarge } }) ginkgo.AfterEach(func() { if ginkgo.CurrentGinkgoTestDescription().Failed { - framework.DescribeSvc(f.Namespace.Name) + e2eservice.DescribeSvc(f.Namespace.Name) } for _, lb := range serviceLBNames { e2elog.Logf("cleaning load balancer resource for %s", lb) - framework.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone) + e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone) } //reset serviceLBNames serviceLBNames = []string{} @@ -2010,7 +2011,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { ginkgo.It("should work for type=LoadBalancer", func() { namespace := f.Namespace.Name serviceName := "external-local-lb" - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil) serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) @@ -2024,7 +2025,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { // Make sure we didn't leak the health check node port. 
threshold := 2 for _, ips := range jig.GetEndpointNodes(svc) { - err := jig.TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", framework.KubeProxyEndpointLagTimeout, false, threshold) + err := jig.TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", e2eservice.KubeProxyEndpointLagTimeout, false, threshold) framework.ExpectNoError(err) } err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil) @@ -2032,10 +2033,10 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { }() svcTCPPort := int(svc.Spec.Ports[0].Port) - ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) + ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) ginkgo.By("reading clientIP using the TCP service's service port via its external VIP") - content := jig.GetHTTPContent(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout, "/clientip") + content := jig.GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip") clientIP := content.String() e2elog.Logf("ClientIP detected by target pod using VIP:SvcPort is %s", clientIP) @@ -2048,7 +2049,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { ginkgo.It("should work for type=NodePort", func() { namespace := f.Namespace.Name serviceName := "external-local-nodeport" - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) svc := jig.CreateOnlyLocalNodePortService(namespace, serviceName, true) defer func() { @@ -2063,7 +2064,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { for nodeName, nodeIPs := range endpointsNodeMap { nodeIP := nodeIPs[0] ginkgo.By(fmt.Sprintf("reading clientIP using the TCP service's NodePort, on node %v: %v%v%v", nodeName, nodeIP, tcpNodePort, path)) - content := jig.GetHTTPContent(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout, path) + content := jig.GetHTTPContent(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout, path) clientIP := content.String() e2elog.Logf("ClientIP detected by target pod using NodePort is %s", clientIP) if strings.HasPrefix(clientIP, "10.") { @@ -2075,8 +2076,8 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { ginkgo.It("should only target nodes with endpoints", func() { namespace := f.Namespace.Name serviceName := "external-local-nodes" - jig := framework.NewServiceTestJig(cs, serviceName) - nodes := jig.GetNodes(framework.MaxNodesForEndpointsTests) + jig := e2eservice.NewTestJig(cs, serviceName) + nodes := jig.GetNodes(e2eservice.MaxNodesForEndpointsTests) svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, false, func(svc *v1.Service) { @@ -2102,7 +2103,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { ips := e2enode.CollectAddresses(nodes, v1.NodeExternalIP) - ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) + ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) svcTCPPort := int(svc.Spec.Ports[0].Port) threshold := 2 @@ -2126,12 +2127,12 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { for n, publicIP := range ips { // Make sure the loadbalancer picked up the health check change. // Confirm traffic can reach backend through LB before checking healthcheck nodeport. 
- jig.TestReachableHTTP(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout) + jig.TestReachableHTTP(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout) expectedSuccess := nodes.Items[n].Name == endpointNodeName port := strconv.Itoa(healthCheckNodePort) ipPort := net.JoinHostPort(publicIP, port) e2elog.Logf("Health checking %s, http://%s%s, expectedSuccess %v", nodes.Items[n].Name, ipPort, path, expectedSuccess) - err := jig.TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, framework.KubeProxyEndpointLagTimeout, expectedSuccess, threshold) + err := jig.TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, e2eservice.KubeProxyEndpointLagTimeout, expectedSuccess, threshold) framework.ExpectNoError(err) } framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName)) @@ -2141,8 +2142,8 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { ginkgo.It("should work from pods", func() { namespace := f.Namespace.Name serviceName := "external-local-pods" - jig := framework.NewServiceTestJig(cs, serviceName) - nodes := jig.GetNodes(framework.MaxNodesForEndpointsTests) + jig := e2eservice.NewTestJig(cs, serviceName) + nodes := jig.GetNodes(e2eservice.MaxNodesForEndpointsTests) svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil) serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) @@ -2152,7 +2153,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { framework.ExpectNoError(err) }() - ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) + ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) port := strconv.Itoa(int(svc.Spec.Ports[0].Port)) ipPort := net.JoinHostPort(ingressIP, port) path := fmt.Sprintf("%s/clientip", ipPort) @@ -2170,12 +2171,12 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { execPod, err := f.ClientSet.CoreV1().Pods(namespace).Get(execPodName, metav1.GetOptions{}) framework.ExpectNoError(err) - e2elog.Logf("Waiting up to %v wget %v", framework.KubeProxyLagTimeout, path) + e2elog.Logf("Waiting up to %v wget %v", e2eservice.KubeProxyLagTimeout, path) cmd := fmt.Sprintf(`wget -T 30 -qO- %v`, path) var srcIP string ginkgo.By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, podName, nodeName)) - if pollErr := wait.PollImmediate(framework.Poll, framework.LoadBalancerCreateTimeoutDefault, func() (bool, error) { + if pollErr := wait.PollImmediate(framework.Poll, e2eservice.LoadBalancerCreateTimeoutDefault, func() (bool, error) { stdout, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd) if err != nil { e2elog.Logf("got err: %v, retry until timeout", err) @@ -2191,9 +2192,9 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { ginkgo.It("should handle updates to ExternalTrafficPolicy field", func() { namespace := f.Namespace.Name serviceName := "external-local-update" - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) - nodes := jig.GetNodes(framework.MaxNodesForEndpointsTests) + nodes := jig.GetNodes(e2eservice.MaxNodesForEndpointsTests) if len(nodes.Items) < 2 { e2elog.Failf("Need at least 2 nodes to verify source ip from a node without endpoint") } @@ -2228,13 +2229,13 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { svcTCPPort := int(svc.Spec.Ports[0].Port) svcNodePort := 
int(svc.Spec.Ports[0].NodePort) - ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) + ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) path := "/clientip" ginkgo.By(fmt.Sprintf("endpoints present on nodes %v, absent on nodes %v", endpointNodeMap, noEndpointNodeMap)) for nodeName, nodeIPs := range noEndpointNodeMap { ginkgo.By(fmt.Sprintf("Checking %v (%v:%v%v) proxies to endpoints on another node", nodeName, nodeIPs[0], svcNodePort, path)) - jig.GetHTTPContent(nodeIPs[0], svcNodePort, framework.KubeProxyLagTimeout, path) + jig.GetHTTPContent(nodeIPs[0], svcNodePort, e2eservice.KubeProxyLagTimeout, path) } for nodeName, nodeIPs := range endpointNodeMap { @@ -2249,7 +2250,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { body.Write(result.Body) return false, nil } - if pollErr := wait.PollImmediate(framework.Poll, framework.ServiceTestTimeout, pollfn); pollErr != nil { + if pollErr := wait.PollImmediate(framework.Poll, e2eservice.TestTimeout, pollfn); pollErr != nil { e2elog.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. body %s", nodeName, healthCheckNodePort, body.String()) } @@ -2258,8 +2259,8 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { // Poll till kube-proxy re-adds the MASQUERADE rule on the node. ginkgo.By(fmt.Sprintf("checking source ip is NOT preserved through loadbalancer %v", ingressIP)) var clientIP string - pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { - content := jig.GetHTTPContent(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout, "/clientip") + pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) { + content := jig.GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip") clientIP = content.String() if strings.HasPrefix(clientIP, "10.") { return true, nil @@ -2282,8 +2283,8 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { // Request the same healthCheckNodePort as before, to test the user-requested allocation path svc.Spec.HealthCheckNodePort = int32(healthCheckNodePort) }) - pollErr = wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { - content := jig.GetHTTPContent(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout, path) + pollErr = wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) { + content := jig.GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, path) clientIP = content.String() ginkgo.By(fmt.Sprintf("Endpoint %v:%v%v returned client ip %v", ingressIP, svcTCPPort, path, clientIP)) if !strings.HasPrefix(clientIP, "10.") { @@ -2360,12 +2361,12 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor ginkgo.By("creating service in namespace " + ns) serviceType := svc.Spec.Type svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP - _, _, err := framework.StartServeHostnameService(cs, svc, ns, numPods) + _, _, err := e2eservice.StartServeHostnameService(cs, svc, ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns) defer func() { - framework.StopServeHostnameService(cs, ns, serviceName) + e2eservice.StopServeHostnameService(cs, ns, serviceName) }() - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) svc, err = 
jig.Client.CoreV1().Services(ns).Get(serviceName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to fetch service: %s in namespace: %s", serviceName, ns) var svcIP string @@ -2389,17 +2390,17 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor framework.ExpectNoError(err, "failed to fetch pod: %s in namespace: %s", execPodName, ns) if !isTransitionTest { - gomega.Expect(framework.CheckAffinity(jig, execPod, svcIP, servicePort, true)).To(gomega.BeTrue()) + gomega.Expect(jig.CheckAffinity(execPod, svcIP, servicePort, true)).To(gomega.BeTrue()) } if isTransitionTest { svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityNone }) - gomega.Expect(framework.CheckAffinity(jig, execPod, svcIP, servicePort, false)).To(gomega.BeTrue()) + gomega.Expect(jig.CheckAffinity(execPod, svcIP, servicePort, false)).To(gomega.BeTrue()) svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP }) - gomega.Expect(framework.CheckAffinity(jig, execPod, svcIP, servicePort, true)).To(gomega.BeTrue()) + gomega.Expect(jig.CheckAffinity(execPod, svcIP, servicePort, true)).To(gomega.BeTrue()) } } @@ -2419,34 +2420,34 @@ func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework, ginkgo.By("creating service in namespace " + ns) svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP - _, _, err := framework.StartServeHostnameService(cs, svc, ns, numPods) + _, _, err := e2eservice.StartServeHostnameService(cs, svc, ns, numPods) framework.ExpectNoError(err, "failed to create replication controller with service in the namespace: %s", ns) - jig := framework.NewServiceTestJig(cs, serviceName) + jig := e2eservice.NewTestJig(cs, serviceName) ginkgo.By("waiting for loadbalancer for service " + ns + "/" + serviceName) - svc = jig.WaitForLoadBalancerOrFail(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault) + svc = jig.WaitForLoadBalancerOrFail(ns, serviceName, e2eservice.LoadBalancerCreateTimeoutDefault) jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) defer func() { podNodePairs, err := e2enode.PodNodePairs(cs, ns) e2elog.Logf("[pod,node] pairs: %+v; err: %v", podNodePairs, err) - framework.StopServeHostnameService(cs, ns, serviceName) + e2eservice.StopServeHostnameService(cs, ns, serviceName) lb := cloudprovider.DefaultLoadBalancerName(svc) e2elog.Logf("cleaning load balancer resource for %s", lb) - framework.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone) + e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone) }() - ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) + ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) port := int(svc.Spec.Ports[0].Port) if !isTransitionTest { - gomega.Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true)).To(gomega.BeTrue()) + gomega.Expect(jig.CheckAffinity(nil, ingressIP, port, true)).To(gomega.BeTrue()) } if isTransitionTest { svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityNone }) - gomega.Expect(framework.CheckAffinity(jig, nil, ingressIP, port, false)).To(gomega.BeTrue()) + gomega.Expect(jig.CheckAffinity(nil, ingressIP, port, false)).To(gomega.BeTrue()) svc = jig.UpdateServiceOrFail(svc.Namespace, 
svc.Name, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP }) - gomega.Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true)).To(gomega.BeTrue()) + gomega.Expect(jig.CheckAffinity(nil, ingressIP, port, true)).To(gomega.BeTrue()) } } diff --git a/test/e2e/scheduling/BUILD b/test/e2e/scheduling/BUILD index 38cac879d4c..7e211943992 100644 --- a/test/e2e/scheduling/BUILD +++ b/test/e2e/scheduling/BUILD @@ -52,6 +52,7 @@ go_library( "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/providers/gce:go_default_library", "//test/e2e/framework/replicaset:go_default_library", + "//test/e2e/framework/service:go_default_library", "//test/utils:go_default_library", "//test/utils/image:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go index ba0370366f4..5b8a5b84af1 100644 --- a/test/e2e/scheduling/limit_range.go +++ b/test/e2e/scheduling/limit_range.go @@ -22,7 +22,7 @@ import ( "strconv" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -34,6 +34,7 @@ import ( watchtools "k8s.io/client-go/tools/watch" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -113,10 +114,10 @@ var _ = SIGDescribe("LimitRange", func() { if event.Type != watch.Added { e2elog.Failf("Failed to observe limitRange creation : %v", event) } - case <-time.After(framework.ServiceRespondingTimeout): + case <-time.After(e2eservice.RespondingTimeout): e2elog.Failf("Timeout while waiting for LimitRange creation") } - case <-time.After(framework.ServiceRespondingTimeout): + case <-time.After(e2eservice.RespondingTimeout): e2elog.Failf("Timeout while waiting for LimitRange list complete") } @@ -205,7 +206,7 @@ var _ = SIGDescribe("LimitRange", func() { framework.ExpectNoError(err) ginkgo.By("Verifying the LimitRange was deleted") - gomega.Expect(wait.Poll(time.Second*5, framework.ServiceRespondingTimeout, func() (bool, error) { + gomega.Expect(wait.Poll(time.Second*5, e2eservice.RespondingTimeout, func() (bool, error) { selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name})) options := metav1.ListOptions{LabelSelector: selector.String()} limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options) diff --git a/test/e2e/ui/BUILD b/test/e2e/ui/BUILD index a953385001b..7dc395b8017 100644 --- a/test/e2e/ui/BUILD +++ b/test/e2e/ui/BUILD @@ -15,6 +15,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/service:go_default_library", "//test/utils:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", ], diff --git a/test/e2e/ui/dashboard.go b/test/e2e/ui/dashboard.go index 4de47f76040..dd18b7d0dbe 100644 --- a/test/e2e/ui/dashboard.go +++ b/test/e2e/ui/dashboard.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" testutils "k8s.io/kubernetes/test/utils" "github.com/onsi/ginkgo" @@ -62,7 +63,7 @@ var _ = SIGDescribe("Kubernetes 
Dashboard [Feature:Dashboard]", func() { ginkgo.By("Checking to make sure we get a response from the kubernetes-dashboard.") err = wait.Poll(framework.Poll, serverStartTimeout, func() (bool, error) { var status int - proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) + proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) if errProxy != nil { e2elog.Logf("Get services proxy request failed: %v", errProxy) } diff --git a/test/e2e/upgrades/BUILD b/test/e2e/upgrades/BUILD index 9aafc33a606..6a4e11d7165 100644 --- a/test/e2e/upgrades/BUILD +++ b/test/e2e/upgrades/BUILD @@ -38,6 +38,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/job:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/service:go_default_library", "//test/e2e/framework/statefulset:go_default_library", "//test/e2e/framework/testfiles:go_default_library", "//test/e2e/scheduling:go_default_library", diff --git a/test/e2e/upgrades/services.go b/test/e2e/upgrades/services.go index d878f950083..6f45fbd4662 100644 --- a/test/e2e/upgrades/services.go +++ b/test/e2e/upgrades/services.go @@ -20,6 +20,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" "github.com/onsi/ginkgo" ) @@ -28,7 +29,7 @@ import ( // after a cluster upgrade. During a master-only upgrade, it will test // that a service remains available during the upgrade. type ServiceUpgradeTest struct { - jig *framework.ServiceTestJig + jig *e2eservice.TestJig tcpService *v1.Service tcpIngressIP string svcPort int @@ -42,7 +43,7 @@ func shouldTestPDBs() bool { return framework.ProviderIs("gce", "gke") } // Setup creates a service with a load balancer and makes sure it's reachable. 
func (t *ServiceUpgradeTest) Setup(f *framework.Framework) { serviceName := "service-test" - jig := framework.NewServiceTestJig(f.ClientSet, serviceName) + jig := e2eservice.NewTestJig(f.ClientSet, serviceName) ns := f.Namespace @@ -50,11 +51,11 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) { tcpService := jig.CreateTCPServiceOrFail(ns.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeLoadBalancer }) - tcpService = jig.WaitForLoadBalancerOrFail(ns.Name, tcpService.Name, framework.LoadBalancerCreateTimeoutDefault) + tcpService = jig.WaitForLoadBalancerOrFail(ns.Name, tcpService.Name, e2eservice.LoadBalancerCreateTimeoutDefault) jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer) // Get info to hit it with - tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) + tcpIngressIP := e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) svcPort := int(tcpService.Spec.Ports[0].Port) ginkgo.By("creating pod to be part of service " + serviceName) @@ -67,9 +68,9 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) { // Hit it once before considering ourselves ready ginkgo.By("hitting the pod through the service's LoadBalancer") - timeout := framework.LoadBalancerLagTimeoutDefault + timeout := e2eservice.LoadBalancerLagTimeoutDefault if framework.ProviderIs("aws") { - timeout = framework.LoadBalancerLagTimeoutAWS + timeout = e2eservice.LoadBalancerLagTimeoutAWS } jig.TestReachableHTTP(tcpIngressIP, svcPort, timeout) @@ -102,7 +103,7 @@ func (t *ServiceUpgradeTest) test(f *framework.Framework, done <-chan struct{}, // Continuous validation ginkgo.By("continuously hitting the pod through the service's LoadBalancer") wait.Until(func() { - t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault) + t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, e2eservice.LoadBalancerLagTimeoutDefault) }, framework.Poll, done) } else { // Block until upgrade is done @@ -112,6 +113,6 @@ func (t *ServiceUpgradeTest) test(f *framework.Framework, done <-chan struct{}, // Sanity check and hit it once more ginkgo.By("hitting the pod through the service's LoadBalancer") - t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault) + t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, e2eservice.LoadBalancerLagTimeoutDefault) t.jig.SanityCheckService(t.tcpService, v1.ServiceTypeLoadBalancer) } diff --git a/test/e2e/windows/BUILD b/test/e2e/windows/BUILD index 6ca4c612784..003cc92b3ea 100644 --- a/test/e2e/windows/BUILD +++ b/test/e2e/windows/BUILD @@ -35,6 +35,7 @@ go_library( "//test/e2e/framework/metrics:go_default_library", "//test/e2e/framework/node:go_default_library", "//test/e2e/framework/pod:go_default_library", + "//test/e2e/framework/service:go_default_library", "//test/utils/image:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", diff --git a/test/e2e/windows/service.go b/test/e2e/windows/service.go index 8e67dbaeee6..517ef06aaf1 100644 --- a/test/e2e/windows/service.go +++ b/test/e2e/windows/service.go @@ -24,6 +24,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2enode "k8s.io/kubernetes/test/e2e/framework/node" + e2eservice "k8s.io/kubernetes/test/e2e/framework/service" "github.com/onsi/ginkgo" ) @@ -42,7 +43,7 @@ var _ = SIGDescribe("Services", func() { serviceName := "nodeport-test" ns := f.Namespace.Name - jig := 
framework.NewServiceTestJig(cs, serviceName)
+		jig := e2eservice.NewTestJig(cs, serviceName)
 		nodeIP, err := e2enode.PickIP(jig.Client)
 		if err != nil {
 			e2elog.Logf("Unexpected error occurred: %v", err)
@@ -51,11 +52,11 @@ var _ = SIGDescribe("Services", func() {
 		framework.ExpectNoErrorWithOffset(0, err)
 
 		ginkgo.By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
-		service := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) {
+		nodePortService := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) {
 			svc.Spec.Type = v1.ServiceTypeNodePort
 		})
-		jig.SanityCheckService(service, v1.ServiceTypeNodePort)
-		nodePort := int(service.Spec.Ports[0].NodePort)
+		jig.SanityCheckService(nodePortService, v1.ServiceTypeNodePort)
+		nodePort := int(nodePortService.Spec.Ports[0].NodePort)
 
 		ginkgo.By("creating Pod to be part of service " + serviceName)
 		jig.RunOrFail(ns, nil)
diff --git a/test/e2e_node/device_plugin.go b/test/e2e_node/device_plugin.go
index 2fd6cba712a..795142d5b21 100644
--- a/test/e2e_node/device_plugin.go
+++ b/test/e2e_node/device_plugin.go
@@ -34,7 +34,7 @@ import (
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 
-	resapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
+	kubeletpodresourcesv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -102,7 +102,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 		Expect(devId1).To(Not(Equal("")))
 
 		podResources, err := getNodeDevices()
-		var resourcesForOurPod *resapi.PodResources
+		var resourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
 		e2elog.Logf("pod resources %v", podResources)
 		Expect(err).To(BeNil())
 		Expect(len(podResources.PodResources)).To(Equal(2))
diff --git a/test/soak/serve_hostnames/BUILD b/test/soak/serve_hostnames/BUILD
index 8fb14520d90..41652e1f2f5 100644
--- a/test/soak/serve_hostnames/BUILD
+++ b/test/soak/serve_hostnames/BUILD
@@ -26,6 +26,7 @@ go_library(
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
         "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/service:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],
 )
diff --git a/test/soak/serve_hostnames/serve_hostnames.go b/test/soak/serve_hostnames/serve_hostnames.go
index c3ee9dbc360..0f9c8d16b7d 100644
--- a/test/soak/serve_hostnames/serve_hostnames.go
+++ b/test/soak/serve_hostnames/serve_hostnames.go
@@ -29,7 +29,7 @@ import (
 	"path/filepath"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -39,6 +39,7 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	e2e "k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework/service"
 	"k8s.io/klog"
 )
 
@@ -261,7 +262,7 @@ func main() {
 		klog.Warningf("Failed to build restclient: %v", err)
 		return
 	}
-	proxyRequest, errProxy := e2e.GetServicesProxyRequest(client, rclient.Get())
+	proxyRequest, errProxy := service.GetServicesProxyRequest(client, rclient.Get())
 	if errProxy != nil {
 		klog.Warningf("Get services proxy request failed: %v", errProxy)
 		return