From e5944f56dc932da4329f6f0bd91deb70e26ce4a5 Mon Sep 17 00:00:00 2001 From: Zihong Zheng Date: Thu, 29 Dec 2016 15:35:47 -0800 Subject: [PATCH] Moves e2e service util functions into service_util.go and cleans up test codes --- test/e2e/BUILD | 1 + test/e2e/cluster_upgrade.go | 12 +- test/e2e/daemon_restart.go | 2 +- test/e2e/disruption.go | 7 +- test/e2e/dns.go | 4 +- test/e2e/federated-ingress.go | 6 +- test/e2e/firewall.go | 39 +- test/e2e/framework/BUILD | 3 + test/e2e/framework/firewall_util.go | 9 + test/e2e/framework/networking_utils.go | 240 +++ test/e2e/framework/service_util.go | 1202 +++++++++++++++ test/e2e/framework/util.go | 122 ++ test/e2e/ingress.go | 12 +- test/e2e/ingress_utils.go | 14 +- test/e2e/kube_proxy.go | 2 +- test/e2e/kubectl.go | 2 +- test/e2e/resize_nodes.go | 49 +- test/e2e/service.go | 1866 +++--------------------- 18 files changed, 1812 insertions(+), 1780 deletions(-) create mode 100644 test/e2e/framework/service_util.go diff --git a/test/e2e/BUILD b/test/e2e/BUILD index 8c09375077a..92a8a2f4691 100644 --- a/test/e2e/BUILD +++ b/test/e2e/BUILD @@ -142,6 +142,7 @@ go_library( "//pkg/client/transport:go_default_library", "//pkg/client/unversioned/clientcmd:go_default_library", "//pkg/client/unversioned/clientcmd/api:go_default_library", + "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/aws:go_default_library", "//pkg/cloudprovider/providers/gce:go_default_library", "//pkg/controller:go_default_library", diff --git a/test/e2e/cluster_upgrade.go b/test/e2e/cluster_upgrade.go index ce74d3ac63c..ab2f66b96bf 100644 --- a/test/e2e/cluster_upgrade.go +++ b/test/e2e/cluster_upgrade.go @@ -146,8 +146,8 @@ func testService(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringD // Setup serviceName := "service-test" - jig := NewServiceTestJig(f.ClientSet, serviceName) - // nodeIP := pickNodeIP(jig.Client) // for later + jig := framework.NewServiceTestJig(f.ClientSet, serviceName) + // nodeIP := framework.PickNodeIP(jig.Client) // for later By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + f.Namespace.Name) // TODO it's weird that we have to do this and then wait WaitForLoadBalancer which changes @@ -155,11 +155,11 @@ func testService(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringD tcpService := jig.CreateTCPServiceOrFail(f.Namespace.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeLoadBalancer }) - tcpService = jig.WaitForLoadBalancerOrFail(f.Namespace.Name, tcpService.Name, loadBalancerCreateTimeoutDefault) + tcpService = jig.WaitForLoadBalancerOrFail(f.Namespace.Name, tcpService.Name, framework.LoadBalancerCreateTimeoutDefault) jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer) // Get info to hit it with - tcpIngressIP := getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) + tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) svcPort := int(tcpService.Spec.Ports[0].Port) By("creating pod to be part of service " + serviceName) @@ -169,7 +169,7 @@ func testService(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringD // Hit it once before considering ourselves ready By("hitting the pod through the service's LoadBalancer") - jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeoutDefault) + jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault) sem.Ready() @@ -187,7 +187,7 @@ func testService(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringD // Sanity check and 
hit it once more By("hitting the pod through the service's LoadBalancer") - jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeoutDefault) + jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault) jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer) } diff --git a/test/e2e/daemon_restart.go b/test/e2e/daemon_restart.go index 7dad612de56..54183621745 100644 --- a/test/e2e/daemon_restart.go +++ b/test/e2e/daemon_restart.go @@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() { It("Kubelet should not restart containers across restart", func() { - nodeIPs, err := getNodePublicIps(f.ClientSet) + nodeIPs, err := framework.GetNodePublicIps(f.ClientSet) framework.ExpectNoError(err) preRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector) if preRestarts != 0 { diff --git a/test/e2e/disruption.go b/test/e2e/disruption.go index a0c2e619288..3a5ed3a55dd 100644 --- a/test/e2e/disruption.go +++ b/test/e2e/disruption.go @@ -35,8 +35,11 @@ import ( // schedulingTimeout is longer specifically because sometimes we need to wait // awhile to guarantee that we've been patient waiting for something ordinary // to happen: a pod to get scheduled and move into Ready -const schedulingTimeout = 10 * time.Minute -const bigClusterSize = 7 +const ( + bigClusterSize = 7 + schedulingTimeout = 10 * time.Minute + timeout = 60 * time.Second +) var _ = framework.KubeDescribe("DisruptionController", func() { f := framework.NewDefaultFramework("disruption") diff --git a/test/e2e/dns.go b/test/e2e/dns.go index cab2c3d7b15..4c7a97606ef 100644 --- a/test/e2e/dns.go +++ b/test/e2e/dns.go @@ -464,7 +464,7 @@ var _ = framework.KubeDescribe("DNS", func() { // Test changing the externalName field By("changing the externalName to bar.example.com") - _, err = updateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { + _, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { s.Spec.ExternalName = "bar.example.com" }) Expect(err).NotTo(HaveOccurred()) @@ -481,7 +481,7 @@ var _ = framework.KubeDescribe("DNS", func() { // Test changing type from ExternalName to ClusterIP By("changing the service to type=ClusterIP") - _, err = updateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { + _, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeClusterIP s.Spec.ClusterIP = "127.1.2.3" s.Spec.Ports = []v1.ServicePort{ diff --git a/test/e2e/federated-ingress.go b/test/e2e/federated-ingress.go index fb5caca70bf..4d0a432d062 100644 --- a/test/e2e/federated-ingress.go +++ b/test/e2e/federated-ingress.go @@ -408,9 +408,9 @@ func updateIngressOrFail(clientset *fedclientset.Clientset, namespace string) (n func (j *federationTestJig) waitForFederatedIngress() { // Wait for the loadbalancer IP. 
- address, err := waitForFederatedIngressAddress(j.client, j.ing.Namespace, j.ing.Name, lbPollTimeout) + address, err := waitForFederatedIngressAddress(j.client, j.ing.Namespace, j.ing.Name, framework.LoadBalancerPollTimeout) if err != nil { - framework.Failf("Ingress failed to acquire an IP address within %v", lbPollTimeout) + framework.Failf("Ingress failed to acquire an IP address within %v", framework.LoadBalancerPollTimeout) } j.address = address framework.Logf("Found address %v for ingress %v", j.address, j.ing.Name) @@ -422,7 +422,7 @@ func (j *federationTestJig) waitForFederatedIngress() { for _, p := range rules.IngressRuleValue.HTTP.Paths { route := fmt.Sprintf("%v://%v%v", proto, address, p.Path) framework.Logf("Testing route %v host %v with simple GET", route, rules.Host) - framework.ExpectNoError(pollURL(route, rules.Host, lbPollTimeout, lbPollInterval, timeoutClient, false)) + framework.ExpectNoError(pollURL(route, rules.Host, framework.LoadBalancerPollTimeout, framework.LoadBalancerPollInterval, timeoutClient, false)) } } } diff --git a/test/e2e/firewall.go b/test/e2e/firewall.go index c2850b0883a..73089d52690 100644 --- a/test/e2e/firewall.go +++ b/test/e2e/firewall.go @@ -18,7 +18,6 @@ package e2e import ( "fmt" - "time" "k8s.io/kubernetes/pkg/api/v1" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -31,14 +30,6 @@ import ( . "github.com/onsi/gomega" ) -const ( - firewallTimeoutDefault = 3 * time.Minute - firewallTestTcpTimeout = time.Duration(1 * time.Second) - // Set ports outside of 30000-32767, 80 and 8080 to avoid being whitelisted by the e2e cluster - firewallTestHttpPort = int32(29999) - firewallTestUdpPort = int32(29998) -) - var _ = framework.KubeDescribe("Firewall rule", func() { var firewall_test_name = "firewall-test" f := framework.NewDefaultFramework(firewall_test_name) @@ -61,8 +52,8 @@ var _ = framework.KubeDescribe("Firewall rule", func() { firewallTestSourceRanges := []string{"0.0.0.0/1", "128.0.0.0/1"} serviceName := "firewall-test-loadbalancer" - jig := NewServiceTestJig(cs, serviceName) - nodesNames := jig.GetNodesNames(maxNodesForEndpointsTests) + jig := framework.NewServiceTestJig(cs, serviceName) + nodesNames := jig.GetNodesNames(framework.MaxNodesForEndpointsTests) if len(nodesNames) <= 0 { framework.Failf("Expect at least 1 node, got: %v", nodesNames) } @@ -70,9 +61,9 @@ var _ = framework.KubeDescribe("Firewall rule", func() { // OnlyLocal service is needed to examine which exact nodes the requests are being forwarded to by the Load Balancer on GCE By("Creating a LoadBalancer type service with onlyLocal annotation") - svc := jig.createOnlyLocalLoadBalancerService(ns, serviceName, - loadBalancerCreateTimeoutDefault, false, func(svc *v1.Service) { - svc.Spec.Ports = []v1.ServicePort{{Protocol: "TCP", Port: firewallTestHttpPort}} + svc := jig.CreateOnlyLocalLoadBalancerService(ns, serviceName, + framework.LoadBalancerCreateTimeoutDefault, false, func(svc *v1.Service) { + svc.Spec.Ports = []v1.ServicePort{{Protocol: "TCP", Port: framework.FirewallTestHttpPort}} svc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges }) defer func() { @@ -91,10 +82,10 @@ var _ = framework.KubeDescribe("Firewall rule", func() { Expect(err).NotTo(HaveOccurred()) Expect(framework.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) - By(fmt.Sprintf("Creating netexec pods on at most %v nodes", maxNodesForEndpointsTests)) + By(fmt.Sprintf("Creating netexec pods on at most %v nodes", 
framework.MaxNodesForEndpointsTests)) for i, nodeName := range nodesNames { podName := fmt.Sprintf("netexec%v", i) - jig.LaunchNetexecPodOnNode(f, nodeName, podName, firewallTestHttpPort, firewallTestUdpPort, true) + jig.LaunchNetexecPodOnNode(f, nodeName, podName, framework.FirewallTestHttpPort, framework.FirewallTestUdpPort, true) defer func() { framework.Logf("Cleaning up the netexec pod: %v", podName) Expect(cs.Core().Pods(ns).Delete(podName, nil)).NotTo(HaveOccurred()) @@ -103,7 +94,7 @@ var _ = framework.KubeDescribe("Firewall rule", func() { // Send requests from outside of the cluster because internal traffic is whitelisted By("Accessing the external service ip from outside, all non-master nodes should be reached") - Expect(testHitNodesFromOutside(svcExternalIP, firewallTestHttpPort, firewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred()) + Expect(framework.TestHitNodesFromOutside(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred()) // Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster // by removing the tag on one vm and make sure it doesn't get any traffic. This is an imperfect @@ -117,11 +108,11 @@ var _ = framework.KubeDescribe("Firewall rule", func() { nodesSet.Insert(nodesNames[0]) framework.SetInstanceTags(cloudConfig, nodesNames[0], removedTags) // Make sure traffic is recovered before exit - Expect(testHitNodesFromOutside(svcExternalIP, firewallTestHttpPort, firewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred()) + Expect(framework.TestHitNodesFromOutside(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred()) }() By("Accessing serivce through the external ip and examine got no response from the node without tags") - Expect(testHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHttpPort, firewallTimeoutDefault, nodesSet, 15)).NotTo(HaveOccurred()) + Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet, 15)).NotTo(HaveOccurred()) }) It("should have correct firewall rules for e2e cluster", func() { @@ -147,15 +138,15 @@ var _ = framework.KubeDescribe("Firewall rule", func() { nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP) Expect(len(nodeAddrs)).NotTo(BeZero()) masterAddr := framework.GetMasterAddress(cs) - flag, _ := testNotReachableHTTPTimeout(masterAddr, ports.ControllerManagerPort, firewallTestTcpTimeout) + flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.ControllerManagerPort, framework.FirewallTestTcpTimeout) Expect(flag).To(BeTrue()) - flag, _ = testNotReachableHTTPTimeout(masterAddr, ports.SchedulerPort, firewallTestTcpTimeout) + flag, _ = framework.TestNotReachableHTTPTimeout(masterAddr, ports.SchedulerPort, framework.FirewallTestTcpTimeout) Expect(flag).To(BeTrue()) - flag, _ = testNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletPort, firewallTestTcpTimeout) + flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletPort, framework.FirewallTestTcpTimeout) Expect(flag).To(BeTrue()) - flag, _ = testNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletReadOnlyPort, firewallTestTcpTimeout) + flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletReadOnlyPort, framework.FirewallTestTcpTimeout) Expect(flag).To(BeTrue()) - flag, _ = testNotReachableHTTPTimeout(nodeAddrs[0], ports.ProxyStatusPort, firewallTestTcpTimeout) + flag, _ = 
framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.ProxyStatusPort, framework.FirewallTestTcpTimeout) Expect(flag).To(BeTrue()) }) }) diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index cf56e10511e..f30e2ab9f05 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -26,6 +26,7 @@ go_library( "perf_util.go", "pods.go", "resource_usage_gatherer.go", + "service_util.go", "test_context.go", "util.go", ], @@ -36,6 +37,7 @@ go_library( "//pkg/api:go_default_library", "//pkg/api/errors:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/service:go_default_library", "//pkg/api/validation:go_default_library", "//pkg/apimachinery/registered:go_default_library", "//pkg/apis/apps/v1beta1:go_default_library", @@ -82,6 +84,7 @@ go_library( "//pkg/util/exec:go_default_library", "//pkg/util/intstr:go_default_library", "//pkg/util/labels:go_default_library", + "//pkg/util/net:go_default_library", "//pkg/util/rand:go_default_library", "//pkg/util/runtime:go_default_library", "//pkg/util/sets:go_default_library", diff --git a/test/e2e/framework/firewall_util.go b/test/e2e/framework/firewall_util.go index 059e0fa077e..8241b7d4b18 100644 --- a/test/e2e/framework/firewall_util.go +++ b/test/e2e/framework/firewall_util.go @@ -20,6 +20,7 @@ import ( "fmt" "strconv" "strings" + "time" "k8s.io/kubernetes/pkg/api/v1" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" @@ -31,6 +32,14 @@ import ( compute "google.golang.org/api/compute/v1" ) +const ( + FirewallTimeoutDefault = 3 * time.Minute + FirewallTestTcpTimeout = time.Duration(1 * time.Second) + // Set ports outside of 30000-32767, 80 and 8080 to avoid being whitelisted by the e2e cluster + FirewallTestHttpPort = int32(29999) + FirewallTestUdpPort = int32(29998) +) + // MakeFirewallNameForLBService return the expected firewall name for a LB service. // This should match the formatting of makeFirewallName() in pkg/cloudprovider/providers/gce/gce.go func MakeFirewallNameForLBService(name string) string { diff --git a/test/e2e/framework/networking_utils.go b/test/e2e/framework/networking_utils.go index 0b05192d478..c737a5885cd 100644 --- a/test/e2e/framework/networking_utils.go +++ b/test/e2e/framework/networking_utils.go @@ -17,8 +17,13 @@ limitations under the License. package framework import ( + "bytes" "encoding/json" "fmt" + "io/ioutil" + "net" + "net/http" + "strconv" "strings" "time" @@ -30,6 +35,7 @@ import ( coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/intstr" + utilnet "k8s.io/kubernetes/pkg/util/net" "k8s.io/kubernetes/pkg/util/rand" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/uuid" @@ -586,3 +592,237 @@ func (config *NetworkingTestConfig) getServiceClient() coreclientset.ServiceInte func (config *NetworkingTestConfig) getNamespacesClient() coreclientset.NamespaceInterface { return config.f.ClientSet.Core().Namespaces() } + +func CheckReachabilityFromPod(expectToBeReachable bool, namespace, pod, target string) { + cmd := fmt.Sprintf("wget -T 5 -qO- %q", target) + err := wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) { + _, err := RunHostCmd(namespace, pod, cmd) + if expectToBeReachable && err != nil { + Logf("Expect target to be reachable. But got err: %v. Retry until timeout", err) + return false, nil + } + + if !expectToBeReachable && err == nil { + Logf("Expect target NOT to be reachable. But it is reachable. 
Retry until timeout") + return false, nil + } + return true, nil + }) + Expect(err).NotTo(HaveOccurred()) +} + +// Does an HTTP GET, but does not reuse TCP connections +// This masks problems where the iptables rule has changed, but we don't see it +// This is intended for relatively quick requests (status checks), so we set a short (5 seconds) timeout +func httpGetNoConnectionPool(url string) (*http.Response, error) { + return httpGetNoConnectionPoolTimeout(url, 5*time.Second) +} + +func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Response, error) { + tr := utilnet.SetTransportDefaults(&http.Transport{ + DisableKeepAlives: true, + }) + client := &http.Client{ + Transport: tr, + Timeout: timeout, + } + + return client.Get(url) +} + +func TestReachableHTTP(ip string, port int, request string, expect string) (bool, error) { + return TestReachableHTTPWithContent(ip, port, request, expect, nil) +} + +func TestReachableHTTPWithContent(ip string, port int, request string, expect string, content *bytes.Buffer) (bool, error) { + return TestReachableHTTPWithContentTimeout(ip, port, request, expect, content, 5*time.Second) +} + +func TestReachableHTTPWithContentTimeout(ip string, port int, request string, expect string, content *bytes.Buffer, timeout time.Duration) (bool, error) { + url := fmt.Sprintf("http://%s:%d%s", ip, port, request) + if ip == "" { + Failf("Got empty IP for reachability check (%s)", url) + return false, nil + } + if port == 0 { + Failf("Got port==0 for reachability check (%s)", url) + return false, nil + } + + Logf("Testing HTTP reachability of %v", url) + + resp, err := httpGetNoConnectionPoolTimeout(url, timeout) + if err != nil { + Logf("Got error testing for reachability of %s: %v", url, err) + return false, nil + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + Logf("Got error reading response from %s: %v", url, err) + return false, nil + } + if resp.StatusCode != 200 { + return false, fmt.Errorf("received non-success return status %q trying to access %s; got body: %s", + resp.Status, url, string(body)) + } + if !strings.Contains(string(body), expect) { + return false, fmt.Errorf("received response body without expected substring %q: %s", expect, string(body)) + } + if content != nil { + content.Write(body) + } + return true, nil +} + +func TestNotReachableHTTP(ip string, port int) (bool, error) { + return TestNotReachableHTTPTimeout(ip, port, 5*time.Second) +} + +func TestNotReachableHTTPTimeout(ip string, port int, timeout time.Duration) (bool, error) { + url := fmt.Sprintf("http://%s:%d", ip, port) + if ip == "" { + Failf("Got empty IP for non-reachability check (%s)", url) + return false, nil + } + if port == 0 { + Failf("Got port==0 for non-reachability check (%s)", url) + return false, nil + } + + Logf("Testing HTTP non-reachability of %v", url) + + resp, err := httpGetNoConnectionPoolTimeout(url, timeout) + if err != nil { + Logf("Confirmed that %s is not reachable", url) + return true, nil + } + resp.Body.Close() + return false, nil +} + +func TestReachableUDP(ip string, port int, request string, expect string) (bool, error) { + uri := fmt.Sprintf("udp://%s:%d", ip, port) + if ip == "" { + Failf("Got empty IP for reachability check (%s)", uri) + return false, nil + } + if port == 0 { + Failf("Got port==0 for reachability check (%s)", uri) + return false, nil + } + + Logf("Testing UDP reachability of %v", uri) + + con, err := net.Dial("udp", ip+":"+strconv.Itoa(port)) + if err != nil { + return 
false, fmt.Errorf("Failed to dial %s:%d: %v", ip, port, err) + } + + _, err = con.Write([]byte(fmt.Sprintf("%s\n", request))) + if err != nil { + return false, fmt.Errorf("Failed to send request: %v", err) + } + + var buf []byte = make([]byte, len(expect)+1) + + err = con.SetDeadline(time.Now().Add(3 * time.Second)) + if err != nil { + return false, fmt.Errorf("Failed to set deadline: %v", err) + } + + _, err = con.Read(buf) + if err != nil { + return false, nil + } + + if !strings.Contains(string(buf), expect) { + return false, fmt.Errorf("Failed to retrieve %q, got %q", expect, string(buf)) + } + + Logf("Successfully reached %v", uri) + return true, nil +} + +func TestNotReachableUDP(ip string, port int, request string) (bool, error) { + uri := fmt.Sprintf("udp://%s:%d", ip, port) + if ip == "" { + Failf("Got empty IP for reachability check (%s)", uri) + return false, nil + } + if port == 0 { + Failf("Got port==0 for reachability check (%s)", uri) + return false, nil + } + + Logf("Testing UDP non-reachability of %v", uri) + + con, err := net.Dial("udp", ip+":"+strconv.Itoa(port)) + if err != nil { + Logf("Confirmed that %s is not reachable", uri) + return true, nil + } + + _, err = con.Write([]byte(fmt.Sprintf("%s\n", request))) + if err != nil { + Logf("Confirmed that %s is not reachable", uri) + return true, nil + } + + var buf []byte = make([]byte, 1) + + err = con.SetDeadline(time.Now().Add(3 * time.Second)) + if err != nil { + return false, fmt.Errorf("Failed to set deadline: %v", err) + } + + _, err = con.Read(buf) + if err != nil { + Logf("Confirmed that %s is not reachable", uri) + return true, nil + } + + return false, nil +} + +func TestHitNodesFromOutside(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String) error { + return TestHitNodesFromOutsideWithCount(externalIP, httpPort, timeout, expectedHosts, 1) +} + +func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String, + countToSucceed int) error { + Logf("Waiting up to %v for satisfying expectedHosts for %v times", timeout, countToSucceed) + hittedHosts := sets.NewString() + count := 0 + condition := func() (bool, error) { + var respBody bytes.Buffer + reached, err := TestReachableHTTPWithContentTimeout(externalIP, int(httpPort), "/hostname", "", &respBody, + 1*time.Second) + if err != nil || !reached { + return false, nil + } + hittedHost := strings.TrimSpace(respBody.String()) + if !expectedHosts.Has(hittedHost) { + Logf("Error hitting unexpected host: %v, reset counter: %v", hittedHost, count) + count = 0 + return false, nil + } + if !hittedHosts.Has(hittedHost) { + hittedHosts.Insert(hittedHost) + Logf("Missing %+v, got %+v", expectedHosts.Difference(hittedHosts), hittedHosts) + } + if hittedHosts.Equal(expectedHosts) { + count++ + if count >= countToSucceed { + return true, nil + } + } + return false, nil + } + + if err := wait.Poll(time.Second, timeout, condition); err != nil { + return fmt.Errorf("error waiting for expectedHosts: %v, hittedHosts: %v, count: %v, expected count: %v", + expectedHosts, hittedHosts, count, countToSucceed) + } + return nil +} diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go new file mode 100644 index 00000000000..5d0a423fcf8 --- /dev/null +++ b/test/e2e/framework/service_util.go @@ -0,0 +1,1202 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "bytes" + "fmt" + "sort" + "strconv" + "strings" + "time" + + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/api/v1/service" + metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/intstr" + utilnet "k8s.io/kubernetes/pkg/util/net" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/uuid" + "k8s.io/kubernetes/pkg/util/wait" + testutils "k8s.io/kubernetes/test/utils" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +const ( + // KubeProxyLagTimeout is the maximum time a kube-proxy daemon on a node is allowed + // to not notice a Service update, such as type=NodePort. + // TODO: This timeout should be O(10s), observed values are O(1m), 5m is very + // liberal. Fix tracked in #20567. + KubeProxyLagTimeout = 5 * time.Minute + + // LoadBalancerLagTimeoutDefault is the maximum time a load balancer is allowed to + // not respond after creation. + LoadBalancerLagTimeoutDefault = 2 * time.Minute + + // LoadBalancerLagTimeoutAWS is the delay between ELB creation and serving traffic + // on AWS. A few minutes is typical, so use 10m. + LoadBalancerLagTimeoutAWS = 10 * time.Minute + + // How long to wait for a load balancer to be created/modified. + //TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable + LoadBalancerCreateTimeoutDefault = 20 * time.Minute + LoadBalancerCreateTimeoutLarge = 2 * time.Hour + + // Time required by the loadbalancer to cleanup, proportional to numApps/Ing. + // Bring the cleanup timeout back down to 5m once b/33588344 is resolved. + LoadBalancerCleanupTimeout = 15 * time.Minute + + // On average it takes ~6 minutes for a single backend to come online in GCE. + LoadBalancerPollTimeout = 15 * time.Minute + LoadBalancerPollInterval = 30 * time.Second + + LargeClusterMinNodesNumber = 100 + + // Don't test with more than 3 nodes. + // Many tests create an endpoint per node, in large clusters, this is + // resource and time intensive. + MaxNodesForEndpointsTests = 3 + + // ServiceTestTimeout is used for most polling/waiting activities + ServiceTestTimeout = 60 * time.Second +) + +// This should match whatever the default/configured range is +var ServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768} + +// A test jig to help service testing. +type ServiceTestJig struct { + ID string + Name string + Client clientset.Interface + Labels map[string]string +} + +// NewServiceTestJig allocates and inits a new ServiceTestJig. 
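+//
+// A minimal usage sketch, mirroring how the jig is driven from the e2e tests
+// (f is assumed to be the calling test's *framework.Framework, and the
+// service name is illustrative):
+//
+//	jig := framework.NewServiceTestJig(f.ClientSet, "svc-example")
+//	svc := jig.CreateTCPServiceOrFail(f.Namespace.Name, func(s *v1.Service) {
+//		s.Spec.Type = v1.ServiceTypeNodePort
+//	})
+//	jig.SanityCheckService(svc, v1.ServiceTypeNodePort)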
+func NewServiceTestJig(client clientset.Interface, name string) *ServiceTestJig { + j := &ServiceTestJig{} + j.Client = client + j.Name = name + j.ID = j.Name + "-" + string(uuid.NewUUID()) + j.Labels = map[string]string{"testid": j.ID} + + return j +} + +// newServiceTemplate returns the default v1.Service template for this jig, but +// does not actually create the Service. The default Service has the same name +// as the jig and exposes the given port. +func (j *ServiceTestJig) newServiceTemplate(namespace string, proto v1.Protocol, port int32) *v1.Service { + service := &v1.Service{ + ObjectMeta: v1.ObjectMeta{ + Namespace: namespace, + Name: j.Name, + Labels: j.Labels, + }, + Spec: v1.ServiceSpec{ + Selector: j.Labels, + Ports: []v1.ServicePort{ + { + Protocol: proto, + Port: port, + }, + }, + }, + } + return service +} + +// CreateTCPServiceWithPort creates a new TCP Service with given port based on the +// jig's defaults. Callers can provide a function to tweak the Service object before +// it is created. +func (j *ServiceTestJig) CreateTCPServiceWithPort(namespace string, tweak func(svc *v1.Service), port int32) *v1.Service { + svc := j.newServiceTemplate(namespace, v1.ProtocolTCP, port) + if tweak != nil { + tweak(svc) + } + result, err := j.Client.Core().Services(namespace).Create(svc) + if err != nil { + Failf("Failed to create TCP Service %q: %v", svc.Name, err) + } + return result +} + +// CreateTCPServiceOrFail creates a new TCP Service based on the jig's +// defaults. Callers can provide a function to tweak the Service object before +// it is created. +func (j *ServiceTestJig) CreateTCPServiceOrFail(namespace string, tweak func(svc *v1.Service)) *v1.Service { + svc := j.newServiceTemplate(namespace, v1.ProtocolTCP, 80) + if tweak != nil { + tweak(svc) + } + result, err := j.Client.Core().Services(namespace).Create(svc) + if err != nil { + Failf("Failed to create TCP Service %q: %v", svc.Name, err) + } + return result +} + +// CreateUDPServiceOrFail creates a new UDP Service based on the jig's +// defaults. Callers can provide a function to tweak the Service object before +// it is created. +func (j *ServiceTestJig) CreateUDPServiceOrFail(namespace string, tweak func(svc *v1.Service)) *v1.Service { + svc := j.newServiceTemplate(namespace, v1.ProtocolUDP, 80) + if tweak != nil { + tweak(svc) + } + result, err := j.Client.Core().Services(namespace).Create(svc) + if err != nil { + Failf("Failed to create UDP Service %q: %v", svc.Name, err) + } + return result +} + +func (j *ServiceTestJig) ChangeServiceType(namespace, name string, newType v1.ServiceType, timeout time.Duration) { + ingressIP := "" + svc := j.UpdateServiceOrFail(namespace, name, func(s *v1.Service) { + for _, ing := range s.Status.LoadBalancer.Ingress { + if ing.IP != "" { + ingressIP = ing.IP + } + } + s.Spec.Type = newType + s.Spec.Ports[0].NodePort = 0 + }) + if ingressIP != "" { + j.WaitForLoadBalancerDestroyOrFail(namespace, svc.Name, ingressIP, int(svc.Spec.Ports[0].Port), timeout) + } +} + +// CreateOnlyLocalNodePortService creates a loadbalancer service and sanity checks its +// nodePort. If createPod is true, it also creates an RC with 1 replica of +// the standard netexec container used everywhere in this test. 
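+// The Service created below is of type NodePort and carries the
+// local-traffic-only annotation; a rough call sketch (ns and serviceName are
+// assumed to come from the calling test):
+//
+//	svc := jig.CreateOnlyLocalNodePortService(ns, serviceName, true)
+//	nodeMap := jig.GetEndpointNodes(svc)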
+func (j *ServiceTestJig) CreateOnlyLocalNodePortService(namespace, serviceName string, createPod bool) *v1.Service { + By("creating a service " + namespace + "/" + serviceName + " with type=NodePort and annotation for local-traffic-only") + svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) { + svc.Spec.Type = v1.ServiceTypeNodePort + svc.ObjectMeta.Annotations = map[string]string{ + service.BetaAnnotationExternalTraffic: service.AnnotationValueExternalTrafficLocal} + svc.Spec.Ports = []v1.ServicePort{{Protocol: "TCP", Port: 80}} + }) + + if createPod { + By("creating a pod to be part of the service " + serviceName) + j.RunOrFail(namespace, nil) + } + j.SanityCheckService(svc, v1.ServiceTypeNodePort) + return svc +} + +// CreateOnlyLocalLoadBalancerService creates a loadbalancer service and waits for it to +// acquire an ingress IP. If createPod is true, it also creates an RC with 1 +// replica of the standard netexec container used everywhere in this test. +func (j *ServiceTestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceName string, timeout time.Duration, createPod bool, + tweak func(svc *v1.Service)) *v1.Service { + By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer and annotation for local-traffic-only") + svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) { + svc.Spec.Type = v1.ServiceTypeLoadBalancer + // We need to turn affinity off for our LB distribution tests + svc.Spec.SessionAffinity = v1.ServiceAffinityNone + svc.ObjectMeta.Annotations = map[string]string{ + service.BetaAnnotationExternalTraffic: service.AnnotationValueExternalTrafficLocal} + if tweak != nil { + tweak(svc) + } + }) + + if createPod { + By("creating a pod to be part of the service " + serviceName) + j.RunOrFail(namespace, nil) + } + By("waiting for loadbalancer for service " + namespace + "/" + serviceName) + svc = j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout) + j.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) + return svc +} + +func GetNodeAddresses(node *v1.Node, addressType v1.NodeAddressType) (ips []string) { + for j := range node.Status.Addresses { + nodeAddress := &node.Status.Addresses[j] + if nodeAddress.Type == addressType { + ips = append(ips, nodeAddress.Address) + } + } + return +} + +func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []string { + ips := []string{} + for i := range nodes.Items { + ips = append(ips, GetNodeAddresses(&nodes.Items[i], addressType)...) + } + return ips +} + +func GetNodePublicIps(c clientset.Interface) ([]string, error) { + nodes := GetReadySchedulableNodesOrDie(c) + + ips := CollectAddresses(nodes, v1.NodeExternalIP) + if len(ips) == 0 { + ips = CollectAddresses(nodes, v1.NodeLegacyHostIP) + } + return ips, nil +} + +func PickNodeIP(c clientset.Interface) string { + publicIps, err := GetNodePublicIps(c) + Expect(err).NotTo(HaveOccurred()) + if len(publicIps) == 0 { + Failf("got unexpected number (%d) of public IPs", len(publicIps)) + } + ip := publicIps[0] + return ip +} + +// GetEndpointNodes returns a map of nodenames:external-ip on which the +// endpoints of the given Service are running. 
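+//
+// A short sketch of how the result can be consumed (svc is assumed to be a
+// Service whose backing pods were created through this jig):
+//
+//	for nodeName, ips := range jig.GetEndpointNodes(svc) {
+//		Logf("endpoint of %s/%s runs on node %s (external IPs %v)", svc.Namespace, svc.Name, nodeName, ips)
+//	}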
+func (j *ServiceTestJig) GetEndpointNodes(svc *v1.Service) map[string][]string { + nodes := j.GetNodes(MaxNodesForEndpointsTests) + endpoints, err := j.Client.Core().Endpoints(svc.Namespace).Get(svc.Name, metav1.GetOptions{}) + if err != nil { + Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err) + } + if len(endpoints.Subsets) == 0 { + Failf("Endpoint has no subsets, cannot determine node addresses.") + } + epNodes := sets.NewString() + for _, ss := range endpoints.Subsets { + for _, e := range ss.Addresses { + if e.NodeName != nil { + epNodes.Insert(*e.NodeName) + } + } + } + nodeMap := map[string][]string{} + for _, n := range nodes.Items { + if epNodes.Has(n.Name) { + nodeMap[n.Name] = GetNodeAddresses(&n, v1.NodeExternalIP) + } + } + return nodeMap +} + +// getNodes returns the first maxNodesForTest nodes. Useful in large clusters +// where we don't eg: want to create an endpoint per node. +func (j *ServiceTestJig) GetNodes(maxNodesForTest int) (nodes *v1.NodeList) { + nodes = GetReadySchedulableNodesOrDie(j.Client) + if len(nodes.Items) <= maxNodesForTest { + maxNodesForTest = len(nodes.Items) + } + nodes.Items = nodes.Items[:maxNodesForTest] + return nodes +} + +func (j *ServiceTestJig) GetNodesNames(maxNodesForTest int) []string { + nodes := j.GetNodes(maxNodesForTest) + nodesNames := []string{} + for _, node := range nodes.Items { + nodesNames = append(nodesNames, node.Name) + } + return nodesNames +} + +func (j *ServiceTestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName string) { + err := wait.PollImmediate(Poll, LoadBalancerCreateTimeoutDefault, func() (bool, error) { + endpoints, err := j.Client.Core().Endpoints(namespace).Get(serviceName, metav1.GetOptions{}) + if err != nil { + Logf("Get endpoints for service %s/%s failed (%s)", namespace, serviceName, err) + return false, nil + } + // TODO: Handle multiple endpoints + if len(endpoints.Subsets[0].Addresses) == 0 { + Logf("Expected Ready endpoints - found none") + return false, nil + } + epHostName := *endpoints.Subsets[0].Addresses[0].NodeName + Logf("Pod for service %s/%s is on node %s", namespace, serviceName, epHostName) + if epHostName != nodeName { + Logf("Found endpoint on wrong node, expected %v, got %v", nodeName, epHostName) + return false, nil + } + return true, nil + }) + ExpectNoError(err) +} + +func (j *ServiceTestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceType) { + if svc.Spec.Type != svcType { + Failf("unexpected Spec.Type (%s) for service, expected %s", svc.Spec.Type, svcType) + } + expectNodePorts := false + if svcType != v1.ServiceTypeClusterIP { + expectNodePorts = true + } + for i, port := range svc.Spec.Ports { + hasNodePort := (port.NodePort != 0) + if hasNodePort != expectNodePorts { + Failf("unexpected Spec.Ports[%d].NodePort (%d) for service", i, port.NodePort) + } + if hasNodePort { + if !ServiceNodePortRange.Contains(int(port.NodePort)) { + Failf("out-of-range nodePort (%d) for service", port.NodePort) + } + } + } + expectIngress := false + if svcType == v1.ServiceTypeLoadBalancer { + expectIngress = true + } + hasIngress := len(svc.Status.LoadBalancer.Ingress) != 0 + if hasIngress != expectIngress { + Failf("unexpected number of Status.LoadBalancer.Ingress (%d) for service", len(svc.Status.LoadBalancer.Ingress)) + } + if hasIngress { + for i, ing := range svc.Status.LoadBalancer.Ingress { + if ing.IP == "" && ing.Hostname == "" { + Failf("unexpected Status.LoadBalancer.Ingress[%d] for service: %#v", i, ing) + } + } + } +} + +// 
UpdateService fetches a service, calls the update function on it, and +// then attempts to send the updated service. It tries up to 3 times in the +// face of timeouts and conflicts. +func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*v1.Service)) (*v1.Service, error) { + for i := 0; i < 3; i++ { + service, err := j.Client.Core().Services(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("Failed to get Service %q: %v", name, err) + } + update(service) + service, err = j.Client.Core().Services(namespace).Update(service) + if err == nil { + return service, nil + } + if !errors.IsConflict(err) && !errors.IsServerTimeout(err) { + return nil, fmt.Errorf("Failed to update Service %q: %v", name, err) + } + } + return nil, fmt.Errorf("Too many retries updating Service %q", name) +} + +// UpdateServiceOrFail fetches a service, calls the update function on it, and +// then attempts to send the updated service. It tries up to 3 times in the +// face of timeouts and conflicts. +func (j *ServiceTestJig) UpdateServiceOrFail(namespace, name string, update func(*v1.Service)) *v1.Service { + svc, err := j.UpdateService(namespace, name, update) + if err != nil { + Failf(err.Error()) + } + return svc +} + +func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, initial int) *v1.Service { + var err error + var service *v1.Service + for i := 1; i < ServiceNodePortRange.Size; i++ { + offs1 := initial - ServiceNodePortRange.Base + offs2 := (offs1 + i) % ServiceNodePortRange.Size + newPort := ServiceNodePortRange.Base + offs2 + service, err = j.UpdateService(namespace, name, func(s *v1.Service) { + s.Spec.Ports[0].NodePort = int32(newPort) + }) + if err != nil && strings.Contains(err.Error(), "provided port is already allocated") { + Logf("tried nodePort %d, but it is in use, will try another", newPort) + continue + } + // Otherwise err was nil or err was a real error + break + } + if err != nil { + Failf("Could not change the nodePort: %v", err) + } + return service +} + +func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeout time.Duration) *v1.Service { + var service *v1.Service + Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name) + pollFunc := func() (bool, error) { + svc, err := j.Client.Core().Services(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if len(svc.Status.LoadBalancer.Ingress) > 0 { + service = svc + return true, nil + } + return false, nil + } + if err := wait.PollImmediate(Poll, timeout, pollFunc); err != nil { + Failf("Timeout waiting for service %q to have a load balancer", name) + } + return service +} + +func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string, ip string, port int, timeout time.Duration) *v1.Service { + // TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable + defer func() { + if err := EnsureLoadBalancerResourcesDeleted(ip, strconv.Itoa(port)); err != nil { + Logf("Failed to delete cloud resources for service: %s %d (%v)", ip, port, err) + } + }() + + var service *v1.Service + Logf("Waiting up to %v for service %q to have no LoadBalancer", timeout, name) + pollFunc := func() (bool, error) { + svc, err := j.Client.Core().Services(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if len(svc.Status.LoadBalancer.Ingress) == 0 { + service = svc + return true, nil + } + return false, nil + } + if err := 
wait.PollImmediate(Poll, timeout, pollFunc); err != nil { + Failf("Timeout waiting for service %q to have no load balancer", name) + } + return service +} + +// newRCTemplate returns the default v1.ReplicationController object for +// this jig, but does not actually create the RC. The default RC has the same +// name as the jig and runs the "netexec" container. +func (j *ServiceTestJig) newRCTemplate(namespace string) *v1.ReplicationController { + rc := &v1.ReplicationController{ + ObjectMeta: v1.ObjectMeta{ + Namespace: namespace, + Name: j.Name, + Labels: j.Labels, + }, + Spec: v1.ReplicationControllerSpec{ + Replicas: func(i int) *int32 { x := int32(i); return &x }(1), + Selector: j.Labels, + Template: &v1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Labels: j.Labels, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "netexec", + Image: "gcr.io/google_containers/netexec:1.7", + Args: []string{"--http-port=80", "--udp-port=80"}, + ReadinessProbe: &v1.Probe{ + PeriodSeconds: 3, + Handler: v1.Handler{ + HTTPGet: &v1.HTTPGetAction{ + Port: intstr.FromInt(80), + Path: "/hostName", + }, + }, + }, + }, + }, + TerminationGracePeriodSeconds: new(int64), + }, + }, + }, + } + return rc +} + +// RunOrFail creates a ReplicationController and Pod(s) and waits for the +// Pod(s) to be running. Callers can provide a function to tweak the RC object +// before it is created. +func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *v1.ReplicationController)) *v1.ReplicationController { + rc := j.newRCTemplate(namespace) + if tweak != nil { + tweak(rc) + } + result, err := j.Client.Core().ReplicationControllers(namespace).Create(rc) + if err != nil { + Failf("Failed to created RC %q: %v", rc.Name, err) + } + pods, err := j.waitForPodsCreated(namespace, int(*(rc.Spec.Replicas))) + if err != nil { + Failf("Failed to create pods: %v", err) + } + if err := j.waitForPodsReady(namespace, pods); err != nil { + Failf("Failed waiting for pods to be running: %v", err) + } + return result +} + +func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]string, error) { + timeout := 2 * time.Minute + // List the pods, making sure we observe all the replicas. 
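+	// Poll roughly every 2 seconds until `replicas` pods carrying the jig's
+	// labels (and not already marked for deletion) are observed, or give up
+	// once the timeout above expires.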
+ label := labels.SelectorFromSet(labels.Set(j.Labels)) + Logf("Waiting up to %v for %d pods to be created", timeout, replicas) + for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { + options := v1.ListOptions{LabelSelector: label.String()} + pods, err := j.Client.Core().Pods(namespace).List(options) + if err != nil { + return nil, err + } + + found := []string{} + for _, pod := range pods.Items { + if pod.DeletionTimestamp != nil { + continue + } + found = append(found, pod.Name) + } + if len(found) == replicas { + Logf("Found all %d pods", replicas) + return found, nil + } + Logf("Found %d/%d pods - will retry", len(found), replicas) + } + return nil, fmt.Errorf("Timeout waiting for %d pods to be created", replicas) +} + +func (j *ServiceTestJig) waitForPodsReady(namespace string, pods []string) error { + timeout := 2 * time.Minute + if !CheckPodsRunningReady(j.Client, namespace, pods, timeout) { + return fmt.Errorf("Timeout waiting for %d pods to be ready", len(pods)) + } + return nil +} + +// newNetexecPodSpec returns the pod spec of netexec pod +func newNetexecPodSpec(podName string, httpPort, udpPort int32, hostNetwork bool) *v1.Pod { + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ + Name: podName, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "netexec", + Image: NetexecImageName, + Command: []string{ + "/netexec", + fmt.Sprintf("--http-port=%d", httpPort), + fmt.Sprintf("--udp-port=%d", udpPort), + }, + Ports: []v1.ContainerPort{ + { + Name: "http", + ContainerPort: httpPort, + }, + { + Name: "udp", + ContainerPort: udpPort, + }, + }, + }, + }, + HostNetwork: hostNetwork, + }, + } + return pod +} + +func (j *ServiceTestJig) LaunchNetexecPodOnNode(f *Framework, nodeName, podName string, httpPort, udpPort int32, hostNetwork bool) { + Logf("Creating netexec pod %q on node %v in namespace %q", podName, nodeName, f.Namespace.Name) + pod := newNetexecPodSpec(podName, httpPort, udpPort, hostNetwork) + pod.Spec.NodeName = nodeName + pod.ObjectMeta.Labels = j.Labels + podClient := f.ClientSet.Core().Pods(f.Namespace.Name) + _, err := podClient.Create(pod) + ExpectNoError(err) + ExpectNoError(f.WaitForPodRunning(podName)) + Logf("Netexec pod %q in namespace %q running", pod.Name, f.Namespace.Name) +} + +// newEchoServerPodSpec returns the pod spec of echo server pod +func newEchoServerPodSpec(podName string) *v1.Pod { + port := 8080 + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ + Name: podName, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "echoserver", + Image: "gcr.io/google_containers/echoserver:1.4", + Ports: []v1.ContainerPort{{ContainerPort: int32(port)}}, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + }, + } + return pod +} + +// LaunchEchoserverPodOnNode launches a pod serving http on port 8080 to act +// as the target for source IP preservation test. The client's source ip would +// be echoed back by the web server. 
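+//
+// Illustrative usage (nodeName is assumed to be taken from jig.GetNodesNames
+// and f from the calling test; the pod name is made up for the sketch):
+//
+//	jig.LaunchEchoserverPodOnNode(f, nodeName, "echoserver-sourceip")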
+func (j *ServiceTestJig) LaunchEchoserverPodOnNode(f *Framework, nodeName, podName string) { + Logf("Creating echo server pod %q in namespace %q", podName, f.Namespace.Name) + pod := newEchoServerPodSpec(podName) + pod.Spec.NodeName = nodeName + pod.ObjectMeta.Labels = j.Labels + podClient := f.ClientSet.Core().Pods(f.Namespace.Name) + _, err := podClient.Create(pod) + ExpectNoError(err) + ExpectNoError(f.WaitForPodRunning(podName)) + Logf("Echo server pod %q in namespace %q running", pod.Name, f.Namespace.Name) +} + +func (j *ServiceTestJig) TestReachableHTTP(host string, port int, timeout time.Duration) { + if err := wait.PollImmediate(Poll, timeout, func() (bool, error) { return TestReachableHTTP(host, port, "/echo?msg=hello", "hello") }); err != nil { + Failf("Could not reach HTTP service through %v:%v after %v: %v", host, port, timeout, err) + } +} + +func (j *ServiceTestJig) TestNotReachableHTTP(host string, port int, timeout time.Duration) { + if err := wait.PollImmediate(Poll, timeout, func() (bool, error) { return TestNotReachableHTTP(host, port) }); err != nil { + Failf("Could still reach HTTP service through %v:%v after %v: %v", host, port, timeout, err) + } +} + +func (j *ServiceTestJig) TestReachableUDP(host string, port int, timeout time.Duration) { + if err := wait.PollImmediate(Poll, timeout, func() (bool, error) { return TestReachableUDP(host, port, "echo hello", "hello") }); err != nil { + Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err) + } +} + +func (j *ServiceTestJig) TestNotReachableUDP(host string, port int, timeout time.Duration) { + if err := wait.PollImmediate(Poll, timeout, func() (bool, error) { return TestNotReachableUDP(host, port, "echo hello") }); err != nil { + Failf("Could still reach UDP service through %v:%v after %v: %v", host, port, timeout, err) + } +} + +func (j *ServiceTestJig) GetHTTPContent(host string, port int, timeout time.Duration, url string) bytes.Buffer { + var body bytes.Buffer + var err error + if pollErr := wait.PollImmediate(Poll, timeout, func() (bool, error) { + result, err := TestReachableHTTPWithContent(host, port, url, "", &body) + if err != nil { + Logf("Error hitting %v:%v%v, retrying: %v", host, port, url, err) + return false, nil + } + return result, nil + }); pollErr != nil { + Failf("Could not reach HTTP service through %v:%v%v after %v: %v", host, port, url, timeout, err) + } + return body +} + +func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, error) { + url := fmt.Sprintf("http://%s:%d%s", ip, port, request) + if ip == "" || port == 0 { + Failf("Got empty IP for reachability check (%s)", url) + return false, fmt.Errorf("Invalid input ip or port") + } + Logf("Testing HTTP health check on %v", url) + resp, err := httpGetNoConnectionPool(url) + if err != nil { + Logf("Got error testing for reachability of %s: %v", url, err) + return false, err + } + defer resp.Body.Close() + if err != nil { + Logf("Got error reading response from %s: %v", url, err) + return false, err + } + // HealthCheck responder returns 503 for no local endpoints + if resp.StatusCode == 503 { + return false, nil + } + // HealthCheck responder returns 200 for non-zero local endpoints + if resp.StatusCode == 200 { + return true, nil + } + return false, fmt.Errorf("Unexpected HTTP response code %s from health check responder at %s", resp.Status, url) +} + +func (j *ServiceTestJig) TestHTTPHealthCheckNodePort(host string, port int, request string, tries int) (pass, fail int, statusMsg 
string) { + for i := 0; i < tries; i++ { + success, err := testHTTPHealthCheckNodePort(host, port, request) + if success { + pass++ + } else { + fail++ + } + statusMsg += fmt.Sprintf("\nAttempt %d Error %v", i, err) + time.Sleep(1 * time.Second) + } + return pass, fail, statusMsg +} + +// Simple helper class to avoid too much boilerplate in tests +type ServiceTestFixture struct { + ServiceName string + Namespace string + Client clientset.Interface + + TestId string + Labels map[string]string + + rcs map[string]bool + services map[string]bool + Name string + Image string +} + +func NewServerTest(client clientset.Interface, namespace string, serviceName string) *ServiceTestFixture { + t := &ServiceTestFixture{} + t.Client = client + t.Namespace = namespace + t.ServiceName = serviceName + t.TestId = t.ServiceName + "-" + string(uuid.NewUUID()) + t.Labels = map[string]string{ + "testid": t.TestId, + } + + t.rcs = make(map[string]bool) + t.services = make(map[string]bool) + + t.Name = "webserver" + t.Image = "gcr.io/google_containers/test-webserver:e2e" + + return t +} + +// Build default config for a service (which can then be changed) +func (t *ServiceTestFixture) BuildServiceSpec() *v1.Service { + service := &v1.Service{ + ObjectMeta: v1.ObjectMeta{ + Name: t.ServiceName, + Namespace: t.Namespace, + }, + Spec: v1.ServiceSpec{ + Selector: t.Labels, + Ports: []v1.ServicePort{{ + Port: 80, + TargetPort: intstr.FromInt(80), + }}, + }, + } + return service +} + +// CreateWebserverRC creates rc-backed pods with the well-known webserver +// configuration and records it for cleanup. +func (t *ServiceTestFixture) CreateWebserverRC(replicas int32) *v1.ReplicationController { + rcSpec := RcByNamePort(t.Name, replicas, t.Image, 80, v1.ProtocolTCP, t.Labels, nil) + rcAct, err := t.CreateRC(rcSpec) + if err != nil { + Failf("Failed to create rc %s: %v", rcSpec.Name, err) + } + if err := VerifyPods(t.Client, t.Namespace, t.Name, false, replicas); err != nil { + Failf("Failed to create %d pods with name %s: %v", replicas, t.Name, err) + } + return rcAct +} + +// CreateRC creates a replication controller and records it for cleanup. +func (t *ServiceTestFixture) CreateRC(rc *v1.ReplicationController) (*v1.ReplicationController, error) { + rc, err := t.Client.Core().ReplicationControllers(t.Namespace).Create(rc) + if err == nil { + t.rcs[rc.Name] = true + } + return rc, err +} + +// Create a service, and record it for cleanup +func (t *ServiceTestFixture) CreateService(service *v1.Service) (*v1.Service, error) { + result, err := t.Client.Core().Services(t.Namespace).Create(service) + if err == nil { + t.services[service.Name] = true + } + return result, err +} + +// Delete a service, and remove it from the cleanup list +func (t *ServiceTestFixture) DeleteService(serviceName string) error { + err := t.Client.Core().Services(t.Namespace).Delete(serviceName, nil) + if err == nil { + delete(t.services, serviceName) + } + return err +} + +func (t *ServiceTestFixture) Cleanup() []error { + var errs []error + for rcName := range t.rcs { + By("stopping RC " + rcName + " in namespace " + t.Namespace) + // First, resize the RC to 0. + old, err := t.Client.Core().ReplicationControllers(t.Namespace).Get(rcName, metav1.GetOptions{}) + if err != nil { + errs = append(errs, err) + } + x := int32(0) + old.Spec.Replicas = &x + if _, err := t.Client.Core().ReplicationControllers(t.Namespace).Update(old); err != nil { + errs = append(errs, err) + } + // TODO(mikedanese): Wait. + // Then, delete the RC altogether. 
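+		// Resizing to zero first keeps the teardown predictable even if this
+		// delete does not cascade to the RC's pods.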
+ if err := t.Client.Core().ReplicationControllers(t.Namespace).Delete(rcName, nil); err != nil { + errs = append(errs, err) + } + } + + for serviceName := range t.services { + By("deleting service " + serviceName + " in namespace " + t.Namespace) + err := t.Client.Core().Services(t.Namespace).Delete(serviceName, nil) + if err != nil { + errs = append(errs, err) + } + } + + return errs +} + +func GetIngressPoint(ing *v1.LoadBalancerIngress) string { + host := ing.IP + if host == "" { + host = ing.Hostname + } + return host +} + +// UpdateService fetches a service, calls the update function on it, +// and then attempts to send the updated service. It retries up to 2 +// times in the face of timeouts and conflicts. +func UpdateService(c clientset.Interface, namespace, serviceName string, update func(*v1.Service)) (*v1.Service, error) { + var service *v1.Service + var err error + for i := 0; i < 3; i++ { + service, err = c.Core().Services(namespace).Get(serviceName, metav1.GetOptions{}) + if err != nil { + return service, err + } + + update(service) + + service, err = c.Core().Services(namespace).Update(service) + + if !errors.IsConflict(err) && !errors.IsServerTimeout(err) { + return service, err + } + } + return service, err +} + +func GetContainerPortsByPodUID(endpoints *v1.Endpoints) PortsByPodUID { + m := PortsByPodUID{} + for _, ss := range endpoints.Subsets { + for _, port := range ss.Ports { + for _, addr := range ss.Addresses { + containerPort := port.Port + hostPort := port.Port + + // use endpoint annotations to recover the container port in a Mesos setup + // compare contrib/mesos/pkg/service/endpoints_controller.syncService + key := fmt.Sprintf("k8s.mesosphere.io/containerPort_%s_%s_%d", port.Protocol, addr.IP, hostPort) + mesosContainerPortString := endpoints.Annotations[key] + if mesosContainerPortString != "" { + mesosContainerPort, err := strconv.Atoi(mesosContainerPortString) + if err != nil { + continue + } + containerPort = int32(mesosContainerPort) + Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, mesosContainerPortString) + } + + // Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort) + if _, ok := m[addr.TargetRef.UID]; !ok { + m[addr.TargetRef.UID] = make([]int, 0) + } + m[addr.TargetRef.UID] = append(m[addr.TargetRef.UID], int(containerPort)) + } + } + } + return m +} + +type PortsByPodName map[string][]int +type PortsByPodUID map[types.UID][]int + +func translatePodNameToUIDOrFail(c clientset.Interface, ns string, expectedEndpoints PortsByPodName) PortsByPodUID { + portsByUID := make(PortsByPodUID) + + for name, portList := range expectedEndpoints { + pod, err := c.Core().Pods(ns).Get(name, metav1.GetOptions{}) + if err != nil { + Failf("failed to get pod %s, that's pretty weird. 
validation failed: %s", name, err) + } + portsByUID[pod.ObjectMeta.UID] = portList + } + // Logf("successfully translated pod names to UIDs: %v -> %v on namespace %s", expectedEndpoints, portsByUID, ns) + return portsByUID +} + +func validatePortsOrFail(endpoints PortsByPodUID, expectedEndpoints PortsByPodUID) { + if len(endpoints) != len(expectedEndpoints) { + // should not happen because we check this condition before + Failf("invalid number of endpoints got %v, expected %v", endpoints, expectedEndpoints) + } + for podUID := range expectedEndpoints { + if _, ok := endpoints[podUID]; !ok { + Failf("endpoint %v not found", podUID) + } + if len(endpoints[podUID]) != len(expectedEndpoints[podUID]) { + Failf("invalid list of ports for uid %v. Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID]) + } + sort.Ints(endpoints[podUID]) + sort.Ints(expectedEndpoints[podUID]) + for index := range endpoints[podUID] { + if endpoints[podUID][index] != expectedEndpoints[podUID][index] { + Failf("invalid list of ports for uid %v. Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID]) + } + } + } +} + +func ValidateEndpointsOrFail(c clientset.Interface, namespace, serviceName string, expectedEndpoints PortsByPodName) { + By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", ServiceStartTimeout, serviceName, namespace, expectedEndpoints)) + i := 1 + for start := time.Now(); time.Since(start) < ServiceStartTimeout; time.Sleep(1 * time.Second) { + endpoints, err := c.Core().Endpoints(namespace).Get(serviceName, metav1.GetOptions{}) + if err != nil { + Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err) + continue + } + // Logf("Found endpoints %v", endpoints) + + portsByPodUID := GetContainerPortsByPodUID(endpoints) + // Logf("Found port by pod UID %v", portsByPodUID) + + expectedPortsByPodUID := translatePodNameToUIDOrFail(c, namespace, expectedEndpoints) + if len(portsByPodUID) == len(expectedEndpoints) { + validatePortsOrFail(portsByPodUID, expectedPortsByPodUID) + Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)", + serviceName, namespace, expectedEndpoints, time.Since(start)) + return + } + + if i%5 == 0 { + Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start)) + } + i++ + } + + if pods, err := c.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{}); err == nil { + for _, pod := range pods.Items { + Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp) + } + } else { + Logf("Can't list pod debug info: %v", err) + } + Failf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, ServiceStartTimeout) +} + +// StartServeHostnameService creates a replication controller that serves its hostname and a service on top of it. 
+func StartServeHostnameService(c clientset.Interface, internalClient internalclientset.Interface, ns, name string, port, replicas int) ([]string, string, error) { + podNames := make([]string, replicas) + + By("creating service " + name + " in namespace " + ns) + _, err := c.Core().Services(ns).Create(&v1.Service{ + ObjectMeta: v1.ObjectMeta{ + Name: name, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Port: int32(port), + TargetPort: intstr.FromInt(9376), + Protocol: "TCP", + }}, + Selector: map[string]string{ + "name": name, + }, + }, + }) + if err != nil { + return podNames, "", err + } + + var createdPods []*v1.Pod + maxContainerFailures := 0 + config := testutils.RCConfig{ + Client: c, + InternalClient: internalClient, + Image: "gcr.io/google_containers/serve_hostname:v1.4", + Name: name, + Namespace: ns, + PollInterval: 3 * time.Second, + Timeout: PodReadyBeforeTimeout, + Replicas: replicas, + CreatedPods: &createdPods, + MaxContainerFailures: &maxContainerFailures, + } + err = RunRC(config) + if err != nil { + return podNames, "", err + } + + if len(createdPods) != replicas { + return podNames, "", fmt.Errorf("Incorrect number of running pods: %v", len(createdPods)) + } + + for i := range createdPods { + podNames[i] = createdPods[i].ObjectMeta.Name + } + sort.StringSlice(podNames).Sort() + + service, err := c.Core().Services(ns).Get(name, metav1.GetOptions{}) + if err != nil { + return podNames, "", err + } + if service.Spec.ClusterIP == "" { + return podNames, "", fmt.Errorf("Service IP is blank for %v", name) + } + serviceIP := service.Spec.ClusterIP + return podNames, serviceIP, nil +} + +func StopServeHostnameService(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error { + if err := DeleteRCAndPods(clientset, internalClientset, ns, name); err != nil { + return err + } + if err := clientset.Core().Services(ns).Delete(name, nil); err != nil { + return err + } + return nil +} + +// VerifyServeHostnameServiceUp wgets the given serviceIP:servicePort from the +// given host and from within a pod. The host is expected to be an SSH-able node +// in the cluster. Each pod in the service is expected to echo its name. These +// names are compared with the given expectedPods list after a sort | uniq. +func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expectedPods []string, serviceIP string, servicePort int) error { + execPodName := CreateExecPodOrFail(c, ns, "execpod-", nil) + defer func() { + DeletePodOrFail(c, ns, execPodName) + }() + + // Loop a bunch of times - the proxy is randomized, so we want a good + // chance of hitting each backend at least once. + buildCommand := func(wget string) string { + return fmt.Sprintf("for i in $(seq 1 %d); do %s http://%s:%d 2>&1 || true; echo; done", + 50*len(expectedPods), wget, serviceIP, servicePort) + } + commands := []func() string{ + // verify service from node + func() string { + cmd := "set -e; " + buildCommand("wget -q --timeout=0.2 --tries=1 -O -") + Logf("Executing cmd %q on host %v", cmd, host) + result, err := SSH(cmd, host, TestContext.Provider) + if err != nil || result.Code != 0 { + LogSSHResult(result) + Logf("error while SSH-ing to node: %v", err) + } + return result.Stdout + }, + // verify service from pod + func() string { + cmd := buildCommand("wget -q -T 1 -O -") + Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPodName) + // TODO: Use exec-over-http via the netexec pod instead of kubectl exec. 
+ output, err := RunHostCmd(ns, execPodName, cmd) + if err != nil { + Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPodName, err, output) + } + return output + }, + } + + expectedEndpoints := sets.NewString(expectedPods...) + By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods))) + for _, cmdFunc := range commands { + passed := false + gotEndpoints := sets.NewString() + + // Retry cmdFunc for a while + for start := time.Now(); time.Since(start) < KubeProxyLagTimeout; time.Sleep(5 * time.Second) { + for _, endpoint := range strings.Split(cmdFunc(), "\n") { + trimmedEp := strings.TrimSpace(endpoint) + if trimmedEp != "" { + gotEndpoints.Insert(trimmedEp) + } + } + // TODO: simply checking that the retrieved endpoints are a superset + // of the expected allows us to ignore intermittent network flakes that + // result in output like "wget timed out", but these should be rare + // and we need a better way to track how often it occurs. + if gotEndpoints.IsSuperset(expectedEndpoints) { + if !gotEndpoints.Equal(expectedEndpoints) { + Logf("Ignoring unexpected output wgetting endpoints of service %s: %v", serviceIP, gotEndpoints.Difference(expectedEndpoints)) + } + passed = true + break + } + Logf("Unable to reach the following endpoints of service %s: %v", serviceIP, expectedEndpoints.Difference(gotEndpoints)) + } + if !passed { + // Sort the lists so they're easier to visually diff. + exp := expectedEndpoints.List() + got := gotEndpoints.List() + sort.StringSlice(exp).Sort() + sort.StringSlice(got).Sort() + return fmt.Errorf("service verification failed for: %s\nexpected %v\nreceived %v", serviceIP, exp, got) + } + } + return nil +} + +func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceIP string, servicePort int) error { + command := fmt.Sprintf( + "curl -s --connect-timeout 2 http://%s:%d && exit 99", serviceIP, servicePort) + + for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { + result, err := SSH(command, host, TestContext.Provider) + if err != nil { + LogSSHResult(result) + Logf("error while SSH-ing to node: %v", err) + } + if result.Code != 99 { + return nil + } + Logf("service still alive - still waiting") + } + return fmt.Errorf("waiting for service to be down timed out") +} + +func CleanupServiceGCEResources(loadBalancerName string) { + if pollErr := wait.Poll(5*time.Second, LoadBalancerCleanupTimeout, func() (bool, error) { + if err := CleanupGCEResources(loadBalancerName); err != nil { + Logf("Still waiting for glbc to cleanup: %v", err) + return false, nil + } + return true, nil + }); pollErr != nil { + Failf("Failed to cleanup service GCE resources.") + } +} + +func DescribeSvc(ns string) { + Logf("\nOutput of kubectl describe svc:\n") + desc, _ := RunKubectl( + "describe", "svc", fmt.Sprintf("--namespace=%v", ns)) + Logf(desc) +} diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 6bd63ce9a4c..0ceb90fe23f 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -3829,6 +3829,80 @@ func LaunchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod { return pod } +// newExecPodSpec returns the pod spec of an exec pod +func newExecPodSpec(ns, generateName string) *v1.Pod { + immediate := int64(0) + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ + GenerateName: generateName, + Namespace: ns, + }, + Spec: v1.PodSpec{ + TerminationGracePeriodSeconds: &immediate, + Containers: []v1.Container{ + { + Name: "exec", + Image: 
"gcr.io/google_containers/busybox:1.24", + Command: []string{"sh", "-c", "while true; do sleep 5; done"}, + }, + }, + }, + } + return pod +} + +// CreateExecPodOrFail creates a simple busybox pod in a sleep loop used as a +// vessel for kubectl exec commands. +// Returns the name of the created pod. +func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) string { + Logf("Creating new exec pod") + execPod := newExecPodSpec(ns, generateName) + if tweak != nil { + tweak(execPod) + } + created, err := client.Core().Pods(ns).Create(execPod) + Expect(err).NotTo(HaveOccurred()) + err = wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) { + retrievedPod, err := client.Core().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{}) + if err != nil { + return false, nil + } + return retrievedPod.Status.Phase == v1.PodRunning, nil + }) + Expect(err).NotTo(HaveOccurred()) + return created.Name +} + +func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort) { + By(fmt.Sprintf("Creating pod %s in namespace %s", name, ns)) + pod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ + Name: name, + Labels: labels, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "pause", + Image: GetPauseImageName(c), + Ports: containerPorts, + // Add a dummy environment variable to work around a docker issue. + // https://github.com/docker/docker/issues/14203 + Env: []v1.EnvVar{{Name: "FOO", Value: " "}}, + }, + }, + }, + } + _, err := c.Core().Pods(ns).Create(pod) + Expect(err).NotTo(HaveOccurred()) +} + +func DeletePodOrFail(c clientset.Interface, ns, name string) { + By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns)) + err := c.Core().Pods(ns).Delete(name, nil) + Expect(err).NotTo(HaveOccurred()) +} + // GetSigner returns an ssh.Signer for the provider ("gce", etc.) that can be // used to SSH to their nodes. func GetSigner(provider string) (ssh.Signer, error) { @@ -5159,3 +5233,51 @@ func GetNodeExternalIP(node *v1.Node) string { } return host } + +// RcByNamePort returns a ReplicationController with specified name and port +func RcByNamePort(name string, replicas int32, image string, port int, protocol v1.Protocol, + labels map[string]string, gracePeriod *int64) *v1.ReplicationController { + + return RcByNameContainer(name, replicas, image, labels, v1.Container{ + Name: name, + Image: image, + Ports: []v1.ContainerPort{{ContainerPort: int32(port), Protocol: protocol}}, + }, gracePeriod) +} + +// RcByNameContainer returns a ReplicationControoler with specified name and container +func RcByNameContainer(name string, replicas int32, image string, labels map[string]string, c v1.Container, + gracePeriod *int64) *v1.ReplicationController { + + zeroGracePeriod := int64(0) + + // Add "name": name to the labels, overwriting if it exists. 
+ labels["name"] = name + if gracePeriod == nil { + gracePeriod = &zeroGracePeriod + } + return &v1.ReplicationController{ + TypeMeta: metav1.TypeMeta{ + Kind: "ReplicationController", + APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(), + }, + ObjectMeta: v1.ObjectMeta{ + Name: name, + }, + Spec: v1.ReplicationControllerSpec{ + Replicas: func(i int32) *int32 { return &i }(replicas), + Selector: map[string]string{ + "name": name, + }, + Template: &v1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Labels: labels, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{c}, + TerminationGracePeriodSeconds: gracePeriod, + }, + }, + }, + } +} diff --git a/test/e2e/ingress.go b/test/e2e/ingress.go index 41b35c7aabd..1ab0feb868e 100644 --- a/test/e2e/ingress.go +++ b/test/e2e/ingress.go @@ -40,17 +40,9 @@ const ( // healthz port used to verify glbc restarted correctly on the master. glbcHealthzPort = 8086 - // On average it takes ~6 minutes for a single backend to come online in GCE. - lbPollTimeout = 15 * time.Minute - // General cloud resource poll timeout (eg: create static ip, firewall etc) cloudResourcePollTimeout = 5 * time.Minute - // Time required by the loadbalancer to cleanup, proportional to numApps/Ing. - // Bring the cleanup timeout back down to 5m once b/33588344 is resolved. - lbCleanupTimeout = 15 * time.Minute - lbPollInterval = 30 * time.Second - // Name of the config-map and key the ingress controller stores its uid in. uidConfigMap = "ingress-uid" uidKey = "uid" @@ -145,10 +137,10 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() { By("waiting for Ingress to come up with ip: " + ip) httpClient := buildInsecureClient(reqTimeout) - framework.ExpectNoError(pollURL(fmt.Sprintf("https://%v/", ip), "", lbPollTimeout, jig.pollInterval, httpClient, false)) + framework.ExpectNoError(pollURL(fmt.Sprintf("https://%v/", ip), "", framework.LoadBalancerPollTimeout, jig.pollInterval, httpClient, false)) By("should reject HTTP traffic") - framework.ExpectNoError(pollURL(fmt.Sprintf("http://%v/", ip), "", lbPollTimeout, jig.pollInterval, httpClient, true)) + framework.ExpectNoError(pollURL(fmt.Sprintf("http://%v/", ip), "", framework.LoadBalancerPollTimeout, jig.pollInterval, httpClient, true)) By("should have correct firewall rule for ingress") fw := gceController.getFirewallRule() diff --git a/test/e2e/ingress_utils.go b/test/e2e/ingress_utils.go index 5c20f5c49ae..8c11919633e 100644 --- a/test/e2e/ingress_utils.go +++ b/test/e2e/ingress_utils.go @@ -181,7 +181,7 @@ func createComformanceTests(jig *testJig, ns string) []conformanceTests { }) By("Checking that " + pathToFail + " is not exposed by polling for failure") route := fmt.Sprintf("http://%v%v", jig.address, pathToFail) - framework.ExpectNoError(pollURL(route, updateURLMapHost, lbCleanupTimeout, jig.pollInterval, &http.Client{Timeout: reqTimeout}, true)) + framework.ExpectNoError(pollURL(route, updateURLMapHost, framework.LoadBalancerCleanupTimeout, jig.pollInterval, &http.Client{Timeout: reqTimeout}, true)) }, fmt.Sprintf("Waiting for path updates to reflect in L7"), }, @@ -335,7 +335,7 @@ func describeIng(ns string) { } func cleanupGCE(gceController *GCEIngressController) { - pollErr := wait.Poll(5*time.Second, lbCleanupTimeout, func() (bool, error) { + pollErr := wait.Poll(5*time.Second, framework.LoadBalancerCleanupTimeout, func() (bool, error) { if err := gceController.Cleanup(false); err != nil { framework.Logf("Still waiting for glbc to cleanup:\n%v", err) return false, nil @@ -347,7 +347,7 @@ 
func cleanupGCE(gceController *GCEIngressController) { // controller. Delete this IP only after the controller has had a chance // to cleanup or it might interfere with the controller, causing it to // throw out confusing events. - if ipErr := wait.Poll(5*time.Second, lbCleanupTimeout, func() (bool, error) { + if ipErr := wait.Poll(5*time.Second, framework.LoadBalancerCleanupTimeout, func() (bool, error) { if err := gceController.deleteStaticIPs(); err != nil { framework.Logf("Failed to delete static-ip: %v\n", err) return false, nil @@ -864,9 +864,9 @@ func (j *testJig) deleteIngress() { // Ingress. func (j *testJig) waitForIngress(waitForNodePort bool) { // Wait for the loadbalancer IP. - address, err := framework.WaitForIngressAddress(j.client, j.ing.Namespace, j.ing.Name, lbPollTimeout) + address, err := framework.WaitForIngressAddress(j.client, j.ing.Namespace, j.ing.Name, framework.LoadBalancerPollTimeout) if err != nil { - framework.Failf("Ingress failed to acquire an IP address within %v", lbPollTimeout) + framework.Failf("Ingress failed to acquire an IP address within %v", framework.LoadBalancerPollTimeout) } j.address = address framework.Logf("Found address %v for ingress %v", j.address, j.ing.Name) @@ -889,7 +889,7 @@ func (j *testJig) waitForIngress(waitForNodePort bool) { } route := fmt.Sprintf("%v://%v%v", proto, address, p.Path) framework.Logf("Testing route %v host %v with simple GET", route, rules.Host) - framework.ExpectNoError(pollURL(route, rules.Host, lbPollTimeout, j.pollInterval, timeoutClient, false)) + framework.ExpectNoError(pollURL(route, rules.Host, framework.LoadBalancerPollTimeout, j.pollInterval, timeoutClient, false)) } } } @@ -1011,7 +1011,7 @@ type GCEIngressController struct { } func newTestJig(c clientset.Interface) *testJig { - return &testJig{client: c, rootCAs: map[string][]byte{}, pollInterval: lbPollInterval} + return &testJig{client: c, rootCAs: map[string][]byte{}, pollInterval: framework.LoadBalancerPollInterval} } // NginxIngressController manages implementation details of Ingress on Nginx. 
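For context on how callers consume the newly exported endpoint helpers, here is a minimal usage sketch. It only relies on signatures introduced by this patch (framework.CreatePodOrFail, framework.DeletePodOrFail, framework.ValidateEndpointsOrFail, framework.PortsByPodName); the suite name, service name, and label set are placeholders, and a Service selecting that label is assumed to have been created beforehand, the same way the updated tests in service.go below do.

package e2e

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
)

// Illustrative suite only; names are placeholders, not part of this patch.
var _ = framework.KubeDescribe("Services helper usage sketch", func() {
	f := framework.NewDefaultFramework("services-sketch")

	It("should expose and drop endpoints as labeled pods come and go", func() {
		cs := f.ClientSet
		ns := f.Namespace.Name
		serviceName := "sketch-svc"                      // placeholder name
		labels := map[string]string{"name": serviceName} // placeholder selector

		// A Service selecting the label above is assumed to already exist.
		framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{})

		// Adding a labeled pod should surface its container port as an endpoint.
		framework.CreatePodOrFail(cs, ns, "pod1", labels, []v1.ContainerPort{{ContainerPort: 80}})
		framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{"pod1": {80}})

		// Removing the pod should empty the endpoints again.
		framework.DeletePodOrFail(cs, ns, "pod1")
		framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{})
	})
})

This is the same pattern the call-site updates in service.go, kubectl.go, kube_proxy.go, and resize_nodes.go follow once the helpers live in the framework package.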
diff --git a/test/e2e/kube_proxy.go b/test/e2e/kube_proxy.go index e3070240e90..6c997faca61 100644 --- a/test/e2e/kube_proxy.go +++ b/test/e2e/kube_proxy.go @@ -47,7 +47,7 @@ var _ = framework.KubeDescribe("Network", func() { It("should set TCP CLOSE_WAIT timeout", func() { nodes := framework.GetReadySchedulableNodesOrDie(fr.ClientSet) - ips := collectAddresses(nodes, v1.NodeInternalIP) + ips := framework.CollectAddresses(nodes, v1.NodeInternalIP) if len(nodes.Items) < 2 { framework.Skipf( diff --git a/test/e2e/kubectl.go b/test/e2e/kubectl.go index 0dbbfffa9a8..b6c68589262 100644 --- a/test/e2e/kubectl.go +++ b/test/e2e/kubectl.go @@ -824,7 +824,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() { return false, err } - uidToPort := getContainerPortsByPodUID(endpoints) + uidToPort := framework.GetContainerPortsByPodUID(endpoints) if len(uidToPort) == 0 { framework.Logf("No endpoint found, retrying") return false, nil diff --git a/test/e2e/resize_nodes.go b/test/e2e/resize_nodes.go index 3b12c0a6520..37a236b53ca 100644 --- a/test/e2e/resize_nodes.go +++ b/test/e2e/resize_nodes.go @@ -25,7 +25,6 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/apimachinery/registered" metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/util/intstr" @@ -160,56 +159,10 @@ func newSVCByName(c clientset.Interface, ns, name string) error { return err } -func rcByNamePort(name string, replicas int32, image string, port int, protocol v1.Protocol, - labels map[string]string, gracePeriod *int64) *v1.ReplicationController { - - return rcByNameContainer(name, replicas, image, labels, v1.Container{ - Name: name, - Image: image, - Ports: []v1.ContainerPort{{ContainerPort: int32(port), Protocol: protocol}}, - }, gracePeriod) -} - -func rcByNameContainer(name string, replicas int32, image string, labels map[string]string, c v1.Container, - gracePeriod *int64) *v1.ReplicationController { - - zeroGracePeriod := int64(0) - - // Add "name": name to the labels, overwriting if it exists. - labels["name"] = name - if gracePeriod == nil { - gracePeriod = &zeroGracePeriod - } - return &v1.ReplicationController{ - TypeMeta: metav1.TypeMeta{ - Kind: "ReplicationController", - APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(), - }, - ObjectMeta: v1.ObjectMeta{ - Name: name, - }, - Spec: v1.ReplicationControllerSpec{ - Replicas: func(i int32) *int32 { return &i }(replicas), - Selector: map[string]string{ - "name": name, - }, - Template: &v1.PodTemplateSpec{ - ObjectMeta: v1.ObjectMeta{ - Labels: labels, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{c}, - TerminationGracePeriodSeconds: gracePeriod, - }, - }, - }, - } -} - // newRCByName creates a replication controller with a selector by name of name. 
func newRCByName(c clientset.Interface, ns, name string, replicas int32, gracePeriod *int64) (*v1.ReplicationController, error) { By(fmt.Sprintf("creating replication controller %s", name)) - return c.Core().ReplicationControllers(ns).Create(rcByNamePort( + return c.Core().ReplicationControllers(ns).Create(framework.RcByNamePort( name, replicas, serveHostnameImage, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod)) } diff --git a/test/e2e/service.go b/test/e2e/service.go index a67e322158d..c602000b4c5 100644 --- a/test/e2e/service.go +++ b/test/e2e/service.go @@ -19,68 +19,26 @@ package e2e import ( "bytes" "fmt" - "io/ioutil" "math/rand" - "net" - "net/http" - "sort" - "strconv" "strings" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1/service" metav1 "k8s.io/kubernetes/pkg/apis/meta/v1" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller/endpoint" "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/intstr" - utilnet "k8s.io/kubernetes/pkg/util/net" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" - testutils "k8s.io/kubernetes/test/utils" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" ) -const ( - // Maximum time a kube-proxy daemon on a node is allowed to not - // notice a Service update, such as type=NodePort. - // TODO: This timeout should be O(10s), observed values are O(1m), 5m is very - // liberal. Fix tracked in #20567. - kubeProxyLagTimeout = 5 * time.Minute - - // Maximum time a load balancer is allowed to not respond after creation. - loadBalancerLagTimeoutDefault = 2 * time.Minute - - // On AWS there is a delay between ELB creation and serving traffic; - // a few minutes is typical, so use 10m. - loadBalancerLagTimeoutAWS = 10 * time.Minute - - // How long to wait for a load balancer to be created/modified. - //TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable - loadBalancerCreateTimeoutDefault = 20 * time.Minute - loadBalancerCreateTimeoutLarge = 2 * time.Hour - - largeClusterMinNodesNumber = 100 - - // Don't test with more than 3 nodes. - // Many tests create an endpoint per node, in large clusters, this is - // resource and time intensive. 
- maxNodesForEndpointsTests = 3 - - // timeout is used for most polling/waiting activities - timeout = 60 * time.Second -) - -// This should match whatever the default/configured range is -var ServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768} - var _ = framework.KubeDescribe("Services", func() { f := framework.NewDefaultFramework("services") @@ -95,11 +53,11 @@ var _ = framework.KubeDescribe("Services", func() { AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - describeSvc(f.Namespace.Name) + framework.DescribeSvc(f.Namespace.Name) } for _, lb := range serviceLBNames { framework.Logf("cleaning gce resource for %s", lb) - cleanupServiceGCEResources(lb) + framework.CleanupServiceGCEResources(lb) } //reset serviceLBNames serviceLBNames = []string{} @@ -142,7 +100,7 @@ var _ = framework.KubeDescribe("Services", func() { _, err := cs.Core().Services(ns).Create(service) Expect(err).NotTo(HaveOccurred()) - validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{}) + framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{}) names := map[string]bool{} defer func() { @@ -155,21 +113,21 @@ var _ = framework.KubeDescribe("Services", func() { name1 := "pod1" name2 := "pod2" - createPodOrFail(cs, ns, name1, labels, []v1.ContainerPort{{ContainerPort: 80}}) + framework.CreatePodOrFail(cs, ns, name1, labels, []v1.ContainerPort{{ContainerPort: 80}}) names[name1] = true - validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{name1: {80}}) + framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{name1: {80}}) - createPodOrFail(cs, ns, name2, labels, []v1.ContainerPort{{ContainerPort: 80}}) + framework.CreatePodOrFail(cs, ns, name2, labels, []v1.ContainerPort{{ContainerPort: 80}}) names[name2] = true - validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{name1: {80}, name2: {80}}) + framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{name1: {80}, name2: {80}}) - deletePodOrFail(cs, ns, name1) + framework.DeletePodOrFail(cs, ns, name1) delete(names, name1) - validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{name2: {80}}) + framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{name2: {80}}) - deletePodOrFail(cs, ns, name2) + framework.DeletePodOrFail(cs, ns, name2) delete(names, name2) - validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{}) + framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{}) }) It("should serve multiport endpoints from pods [Conformance]", func() { @@ -213,7 +171,7 @@ var _ = framework.KubeDescribe("Services", func() { Expect(err).NotTo(HaveOccurred()) port1 := 100 port2 := 101 - validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{}) + framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{}) names := map[string]bool{} defer func() { @@ -239,21 +197,21 @@ var _ = framework.KubeDescribe("Services", func() { podname1 := "pod1" podname2 := "pod2" - createPodOrFail(cs, ns, podname1, labels, containerPorts1) + framework.CreatePodOrFail(cs, ns, podname1, labels, containerPorts1) names[podname1] = true - validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{podname1: {port1}}) + framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{podname1: {port1}}) - createPodOrFail(cs, ns, podname2, labels, containerPorts2) + framework.CreatePodOrFail(cs, ns, podname2, labels, containerPorts2) names[podname2] = true - validateEndpointsOrFail(cs, ns, 
serviceName, PortsByPodName{podname1: {port1}, podname2: {port2}}) + framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{podname1: {port1}, podname2: {port2}}) - deletePodOrFail(cs, ns, podname1) + framework.DeletePodOrFail(cs, ns, podname1) delete(names, podname1) - validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{podname2: {port2}}) + framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{podname2: {port2}}) - deletePodOrFail(cs, ns, podname2) + framework.DeletePodOrFail(cs, ns, podname2) delete(names, podname2) - validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{}) + framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{}) }) It("should preserve source pod IP for traffic thru service cluster IP", func() { @@ -262,7 +220,7 @@ var _ = framework.KubeDescribe("Services", func() { ns := f.Namespace.Name By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns) - jig := NewServiceTestJig(cs, serviceName) + jig := framework.NewServiceTestJig(cs, serviceName) servicePort := 8080 tcpService := jig.CreateTCPServiceWithPort(ns, nil, int32(servicePort)) jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP) @@ -286,7 +244,7 @@ var _ = framework.KubeDescribe("Services", func() { By("Creating a webserver pod be part of the TCP service which echoes back source ip") serverPodName := "echoserver-sourceip" - jig.launchEchoserverPodOnNode(f, node1.Name, serverPodName) + jig.LaunchEchoserverPodOnNode(f, node1.Name, serverPodName) defer func() { framework.Logf("Cleaning up the echo server pod") err := cs.Core().Pods(ns).Delete(serverPodName, nil) @@ -294,7 +252,7 @@ var _ = framework.KubeDescribe("Services", func() { }() // Waiting for service to expose endpoint. 
- validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{serverPodName: {servicePort}}) + framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{serverPodName: {servicePort}}) By("Retrieve sourceip from a pod on the same node") sourceIp1, execPodIp1 := execSourceipTest(f, cs, ns, node1.Name, serviceIp, servicePort) @@ -315,10 +273,10 @@ var _ = framework.KubeDescribe("Services", func() { numPods, servicePort := 3, 80 By("creating service1 in namespace " + ns) - podNames1, svc1IP, err := startServeHostnameService(cs, internalClientset, ns, "service1", servicePort, numPods) + podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, "service1", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) By("creating service2 in namespace " + ns) - podNames2, svc2IP, err := startServeHostnameService(cs, internalClientset, ns, "service2", servicePort, numPods) + podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, "service2", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) hosts, err := framework.NodeSSHHosts(cs) @@ -329,23 +287,23 @@ var _ = framework.KubeDescribe("Services", func() { host := hosts[0] By("verifying service1 is up") - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) By("verifying service2 is up") - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) // Stop service 1 and make sure it is gone. By("stopping service1") - framework.ExpectNoError(stopServeHostnameService(f.ClientSet, f.InternalClientset, ns, "service1")) + framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, ns, "service1")) By("verifying service1 is not up") - framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svc1IP, servicePort)) + framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svc1IP, servicePort)) By("verifying service2 is still up") - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) // Start another service and verify both are up. 
By("creating service3 in namespace " + ns) - podNames3, svc3IP, err := startServeHostnameService(cs, internalClientset, ns, "service3", servicePort, numPods) + podNames3, svc3IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, "service3", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) if svc2IP == svc3IP { @@ -353,10 +311,10 @@ var _ = framework.KubeDescribe("Services", func() { } By("verifying service2 is still up") - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) By("verifying service3 is up") - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames3, svc3IP, servicePort)) + framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames3, svc3IP, servicePort)) }) It("should work after restarting kube-proxy [Disruptive]", func() { @@ -369,12 +327,16 @@ var _ = framework.KubeDescribe("Services", func() { svc1 := "service1" svc2 := "service2" - defer func() { framework.ExpectNoError(stopServeHostnameService(f.ClientSet, f.InternalClientset, ns, svc1)) }() - podNames1, svc1IP, err := startServeHostnameService(cs, internalClientset, ns, svc1, servicePort, numPods) + defer func() { + framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, ns, svc1)) + }() + podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, svc1, servicePort, numPods) Expect(err).NotTo(HaveOccurred()) - defer func() { framework.ExpectNoError(stopServeHostnameService(f.ClientSet, f.InternalClientset, ns, svc2)) }() - podNames2, svc2IP, err := startServeHostnameService(cs, internalClientset, ns, svc2, servicePort, numPods) + defer func() { + framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, ns, svc2)) + }() + podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, svc2, servicePort, numPods) Expect(err).NotTo(HaveOccurred()) if svc1IP == svc2IP { @@ -388,15 +350,15 @@ var _ = framework.KubeDescribe("Services", func() { } host := hosts[0] - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) By(fmt.Sprintf("Restarting kube-proxy on %v", host)) if err := framework.RestartKubeProxy(host); err != nil { framework.Failf("error restarting kube-proxy: %v", err) } - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) By("Removing iptable rules") result, err := framework.SSH(` @@ -407,8 +369,8 @@ var _ = framework.KubeDescribe("Services", func() { framework.LogSSHResult(result) framework.Failf("couldn't remove iptable rules: %v", err) } - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, 
svc1IP, servicePort)) - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) }) It("should work after restarting apiserver [Disruptive]", func() { @@ -419,9 +381,9 @@ var _ = framework.KubeDescribe("Services", func() { numPods, servicePort := 3, 80 defer func() { - framework.ExpectNoError(stopServeHostnameService(f.ClientSet, f.InternalClientset, ns, "service1")) + framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, ns, "service1")) }() - podNames1, svc1IP, err := startServeHostnameService(cs, internalClientset, ns, "service1", servicePort, numPods) + podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, "service1", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) hosts, err := framework.NodeSSHHosts(cs) @@ -431,7 +393,7 @@ var _ = framework.KubeDescribe("Services", func() { } host := hosts[0] - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) // Restart apiserver By("Restarting apiserver") @@ -442,20 +404,20 @@ var _ = framework.KubeDescribe("Services", func() { if err := framework.WaitForApiserverUp(cs); err != nil { framework.Failf("error while waiting for apiserver up: %v", err) } - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) // Create a new service and check if it's not reusing IP. 
defer func() { - framework.ExpectNoError(stopServeHostnameService(f.ClientSet, f.InternalClientset, ns, "service2")) + framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, ns, "service2")) }() - podNames2, svc2IP, err := startServeHostnameService(cs, internalClientset, ns, "service2", servicePort, numPods) + podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, ns, "service2", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) if svc1IP == svc2IP { framework.Failf("VIPs conflict: %v", svc1IP) } - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) - framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) + framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) + framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) }) // TODO: Run this test against the userspace proxy and nodes @@ -465,8 +427,8 @@ var _ = framework.KubeDescribe("Services", func() { serviceName := "nodeport-test" ns := f.Namespace.Name - jig := NewServiceTestJig(cs, serviceName) - nodeIP := pickNodeIP(jig.Client) // for later + jig := framework.NewServiceTestJig(cs, serviceName) + nodeIP := framework.PickNodeIP(jig.Client) // for later By("creating service " + serviceName + " with type=NodePort in namespace " + ns) service := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) { @@ -479,7 +441,7 @@ var _ = framework.KubeDescribe("Services", func() { jig.RunOrFail(ns, nil) By("hitting the pod through the service's NodePort") - jig.TestReachableHTTP(nodeIP, nodePort, kubeProxyLagTimeout) + jig.TestReachableHTTP(nodeIP, nodePort, framework.KubeProxyLagTimeout) By("verifying the node port is locked") hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec") @@ -498,14 +460,13 @@ var _ = framework.KubeDescribe("Services", func() { loadBalancerSupportsUDP := !framework.ProviderIs("aws") - loadBalancerLagTimeout := loadBalancerLagTimeoutDefault + loadBalancerLagTimeout := framework.LoadBalancerLagTimeoutDefault if framework.ProviderIs("aws") { - loadBalancerLagTimeout = loadBalancerLagTimeoutAWS + loadBalancerLagTimeout = framework.LoadBalancerLagTimeoutAWS } - loadBalancerCreateTimeout := loadBalancerCreateTimeoutDefault - largeClusterMinNodesNumber := 100 - if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > largeClusterMinNodesNumber { - loadBalancerCreateTimeout = loadBalancerCreateTimeoutLarge + loadBalancerCreateTimeout := framework.LoadBalancerCreateTimeoutDefault + if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > framework.LargeClusterMinNodesNumber { + loadBalancerCreateTimeout = framework.LoadBalancerCreateTimeoutLarge } // This test is more monolithic than we'd like because LB turnup can be @@ -521,8 +482,8 @@ var _ = framework.KubeDescribe("Services", func() { ns2 := namespacePtr.Name // LB2 in ns2 on UDP framework.Logf("namespace for UDP test: %s", ns2) - jig := NewServiceTestJig(cs, serviceName) - nodeIP := pickNodeIP(jig.Client) // for later + jig := framework.NewServiceTestJig(cs, serviceName) + nodeIP := framework.PickNodeIP(jig.Client) // for later // Test TCP and UDP Services. Services with the same name in different // namespaces should get different node ports and load balancers. 
@@ -567,10 +528,10 @@ var _ = framework.KubeDescribe("Services", func() { framework.Logf("UDP node port: %d", udpNodePort) By("hitting the TCP service's NodePort") - jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout) + jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) By("hitting the UDP service's NodePort") - jig.TestReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout) + jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) // Change the services to LoadBalancer. @@ -606,9 +567,9 @@ var _ = framework.KubeDescribe("Services", func() { s.Spec.Type = v1.ServiceTypeLoadBalancer }) } - serviceLBNames = append(serviceLBNames, getLoadBalancerName(tcpService)) + serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(tcpService)) if loadBalancerSupportsUDP { - serviceLBNames = append(serviceLBNames, getLoadBalancerName(udpService)) + serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(udpService)) } By("waiting for the TCP service to have a load balancer") @@ -618,10 +579,10 @@ var _ = framework.KubeDescribe("Services", func() { if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort { framework.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort) } - if requestedIP != "" && getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP { - framework.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + if requestedIP != "" && framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP { + framework.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } - tcpIngressIP := getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) + tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) framework.Logf("TCP load balancer: %s", tcpIngressIP) if framework.ProviderIs("gce", "gke") { @@ -649,20 +610,20 @@ var _ = framework.KubeDescribe("Services", func() { if int(udpService.Spec.Ports[0].NodePort) != udpNodePort { framework.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort) } - udpIngressIP = getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) + udpIngressIP = framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) framework.Logf("UDP load balancer: %s", udpIngressIP) By("verifying that TCP and UDP use different load balancers") if tcpIngressIP == udpIngressIP { - framework.Failf("Load balancers are not different: %s", getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + framework.Failf("Load balancers are not different: %s", framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } } By("hitting the TCP service's NodePort") - jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout) + jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) By("hitting the UDP service's NodePort") - jig.TestReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout) + jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) @@ -682,8 +643,8 @@ var _ = framework.KubeDescribe("Services", func() { if tcpNodePort == tcpNodePortOld { 
framework.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort) } - if getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { - framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + if framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { + framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } framework.Logf("TCP node port: %d", tcpNodePort) @@ -699,22 +660,22 @@ var _ = framework.KubeDescribe("Services", func() { if udpNodePort == udpNodePortOld { framework.Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort) } - if loadBalancerSupportsUDP && getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { - framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) + if loadBalancerSupportsUDP && framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { + framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) } framework.Logf("UDP node port: %d", udpNodePort) By("hitting the TCP service's new NodePort") - jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout) + jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) By("hitting the UDP service's new NodePort") - jig.TestReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout) + jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) By("checking the old TCP NodePort is closed") - jig.TestNotReachableHTTP(nodeIP, tcpNodePortOld, kubeProxyLagTimeout) + jig.TestNotReachableHTTP(nodeIP, tcpNodePortOld, framework.KubeProxyLagTimeout) By("checking the old UDP NodePort is closed") - jig.TestNotReachableUDP(nodeIP, udpNodePortOld, kubeProxyLagTimeout) + jig.TestNotReachableUDP(nodeIP, udpNodePortOld, framework.KubeProxyLagTimeout) By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) @@ -739,8 +700,8 @@ var _ = framework.KubeDescribe("Services", func() { if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort { framework.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort) } - if getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { - framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + if framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { + framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } By("changing the UDP service's port") @@ -758,17 +719,17 @@ var _ = framework.KubeDescribe("Services", func() { if int(udpService.Spec.Ports[0].NodePort) != udpNodePort { framework.Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort) } - if loadBalancerSupportsUDP && getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { - framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", 
udpIngressIP, getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) + if loadBalancerSupportsUDP && framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { + framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) } framework.Logf("service port (TCP and UDP): %d", svcPort) By("hitting the TCP service's NodePort") - jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout) + jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) By("hitting the UDP service's NodePort") - jig.TestReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout) + jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB @@ -801,10 +762,10 @@ var _ = framework.KubeDescribe("Services", func() { } By("checking the TCP NodePort is closed") - jig.TestNotReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout) + jig.TestNotReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) By("checking the UDP NodePort is closed") - jig.TestNotReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout) + jig.TestNotReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) By("checking the TCP LoadBalancer is closed") jig.TestNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) @@ -819,7 +780,7 @@ var _ = framework.KubeDescribe("Services", func() { serviceName := "nodeports" ns := f.Namespace.Name - t := NewServerTest(cs, ns, serviceName) + t := framework.NewServerTest(cs, ns, serviceName) defer func() { defer GinkgoRecover() errs := t.Cleanup() @@ -869,7 +830,7 @@ var _ = framework.KubeDescribe("Services", func() { serviceName2 := baseName + "2" ns := f.Namespace.Name - t := NewServerTest(cs, ns, serviceName1) + t := framework.NewServerTest(cs, ns, serviceName1) defer func() { defer GinkgoRecover() errs := t.Cleanup() @@ -921,7 +882,7 @@ var _ = framework.KubeDescribe("Services", func() { serviceName := "nodeport-range-test" ns := f.Namespace.Name - t := NewServerTest(cs, ns, serviceName) + t := framework.NewServerTest(cs, ns, serviceName) defer func() { defer GinkgoRecover() errs := t.Cleanup() @@ -947,7 +908,7 @@ var _ = framework.KubeDescribe("Services", func() { if port.NodePort == 0 { framework.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service) } - if !ServiceNodePortRange.Contains(int(port.NodePort)) { + if !framework.ServiceNodePortRange.Contains(int(port.NodePort)) { framework.Failf("got unexpected (out-of-range) port for new service: %v", service) } @@ -955,12 +916,12 @@ var _ = framework.KubeDescribe("Services", func() { rand.Seed(time.Now().UTC().UnixNano()) for { outOfRangeNodePort = 1 + rand.Intn(65535) - if !ServiceNodePortRange.Contains(outOfRangeNodePort) { + if !framework.ServiceNodePortRange.Contains(outOfRangeNodePort) { break } } By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort)) - result, err := updateService(cs, ns, serviceName, func(s *v1.Service) { + result, err := framework.UpdateService(cs, ns, serviceName, func(s *v1.Service) { s.Spec.Ports[0].NodePort = int32(outOfRangeNodePort) }) if err == nil { @@ -989,7 +950,7 @@ var _ = framework.KubeDescribe("Services", func() { serviceName := "nodeport-reuse" ns := f.Namespace.Name - t := NewServerTest(cs, ns, serviceName) + t := 
framework.NewServerTest(cs, ns, serviceName) defer func() { defer GinkgoRecover() errs := t.Cleanup() @@ -1015,7 +976,7 @@ var _ = framework.KubeDescribe("Services", func() { if port.NodePort == 0 { framework.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service) } - if !ServiceNodePortRange.Contains(int(port.NodePort)) { + if !framework.ServiceNodePortRange.Contains(int(port.NodePort)) { framework.Failf("got unexpected (out-of-range) port for new service: %v", service) } nodePort := port.NodePort @@ -1027,7 +988,7 @@ var _ = framework.KubeDescribe("Services", func() { hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec") cmd := fmt.Sprintf(`! ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort) var stdout string - if pollErr := wait.PollImmediate(framework.Poll, kubeProxyLagTimeout, func() (bool, error) { + if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { var err error stdout, err = framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) if err != nil { @@ -1036,7 +997,7 @@ var _ = framework.KubeDescribe("Services", func() { } return true, nil }); pollErr != nil { - framework.Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, kubeProxyLagTimeout, stdout) + framework.Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, framework.KubeProxyLagTimeout, stdout) } By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort)) @@ -1051,7 +1012,7 @@ var _ = framework.KubeDescribe("Services", func() { serviceName := "tolerate-unready" ns := f.Namespace.Name - t := NewServerTest(cs, ns, serviceName) + t := framework.NewServerTest(cs, ns, serviceName) defer func() { defer GinkgoRecover() errs := t.Cleanup() @@ -1060,8 +1021,8 @@ var _ = framework.KubeDescribe("Services", func() { } }() - t.name = "slow-terminating-unready-pod" - t.image = "gcr.io/google_containers/netexec:1.7" + t.Name = "slow-terminating-unready-pod" + t.Image = "gcr.io/google_containers/netexec:1.7" port := 80 terminateSeconds := int64(600) @@ -1080,10 +1041,10 @@ var _ = framework.KubeDescribe("Services", func() { }}, }, } - rcSpec := rcByNameContainer(t.name, 1, t.image, t.Labels, v1.Container{ + rcSpec := framework.RcByNameContainer(t.Name, 1, t.Image, t.Labels, v1.Container{ Args: []string{fmt.Sprintf("--http-port=%d", port)}, - Name: t.name, - Image: t.image, + Name: t.Name, + Image: t.Image, Ports: []v1.ContainerPort{{ContainerPort: int32(port), Protocol: v1.ProtocolTCP}}, ReadinessProbe: &v1.Probe{ Handler: v1.Handler{ @@ -1103,75 +1064,75 @@ var _ = framework.KubeDescribe("Services", func() { rcSpec.Spec.Template.Spec.TerminationGracePeriodSeconds = &terminateSeconds By(fmt.Sprintf("creating RC %v with selectors %v", rcSpec.Name, rcSpec.Spec.Selector)) - _, err := t.createRC(rcSpec) + _, err := t.CreateRC(rcSpec) framework.ExpectNoError(err) By(fmt.Sprintf("creating Service %v with selectors %v", service.Name, service.Spec.Selector)) _, err = t.CreateService(service) framework.ExpectNoError(err) - By("Verifying pods for RC " + t.name) - framework.ExpectNoError(framework.VerifyPods(t.Client, t.Namespace, t.name, false, 1)) + By("Verifying pods for RC " + t.Name) + framework.ExpectNoError(framework.VerifyPods(t.Client, t.Namespace, t.Name, false, 1)) svcName := fmt.Sprintf("%v.%v", serviceName, f.Namespace.Name) By("Waiting for endpoints of Service with DNS name " + svcName) - execPodName := createExecPodOrFail(f.ClientSet, 
f.Namespace.Name, "execpod-") + execPodName := framework.CreateExecPodOrFail(f.ClientSet, f.Namespace.Name, "execpod-", nil) cmd := fmt.Sprintf("wget -qO- http://%s:%d/", svcName, port) var stdout string - if pollErr := wait.PollImmediate(framework.Poll, kubeProxyLagTimeout, func() (bool, error) { + if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { var err error stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd) if err != nil { - framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.name, stdout, err) + framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err) return false, nil } return true, nil }); pollErr != nil { - framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.name, kubeProxyLagTimeout, stdout) + framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) } By("Scaling down replication controler to zero") framework.ScaleRC(f.ClientSet, f.InternalClientset, t.Namespace, rcSpec.Name, 0, false) By("Update service to not tolerate unready services") - _, err = updateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { + _, err = framework.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { s.ObjectMeta.Annotations[endpoint.TolerateUnreadyEndpointsAnnotation] = "false" }) framework.ExpectNoError(err) By("Check if pod is unreachable") cmd = fmt.Sprintf("wget -qO- -T 2 http://%s:%d/; test \"$?\" -eq \"1\"", svcName, port) - if pollErr := wait.PollImmediate(framework.Poll, kubeProxyLagTimeout, func() (bool, error) { + if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { var err error stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd) if err != nil { - framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.name, stdout, err) + framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err) return false, nil } return true, nil }); pollErr != nil { - framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.name, kubeProxyLagTimeout, stdout) + framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) } By("Update service to tolerate unready services again") - _, err = updateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { + _, err = framework.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { s.ObjectMeta.Annotations[endpoint.TolerateUnreadyEndpointsAnnotation] = "true" }) framework.ExpectNoError(err) By("Check if terminating pod is available through service") cmd = fmt.Sprintf("wget -qO- http://%s:%d/", svcName, port) - if pollErr := wait.PollImmediate(framework.Poll, kubeProxyLagTimeout, func() (bool, error) { + if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { var err error stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd) if err != nil { - framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.name, stdout, err) + framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err) return false, nil } return true, nil }); pollErr != nil { - framework.Failf("expected un-ready endpoint for Service 
%v within %v, stdout: %v", t.name, kubeProxyLagTimeout, stdout) + framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) } By("Remove pods immediately") @@ -1196,20 +1157,20 @@ var _ = framework.KubeDescribe("Services", func() { // this feature currently supported only on GCE/GKE/AWS framework.SkipUnlessProviderIs("gce", "gke", "aws") - loadBalancerCreateTimeout := loadBalancerCreateTimeoutDefault - if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > largeClusterMinNodesNumber { - loadBalancerCreateTimeout = loadBalancerCreateTimeoutLarge + loadBalancerCreateTimeout := framework.LoadBalancerCreateTimeoutDefault + if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > framework.LargeClusterMinNodesNumber { + loadBalancerCreateTimeout = framework.LoadBalancerCreateTimeoutLarge } namespace := f.Namespace.Name serviceName := "lb-sourcerange" - jig := NewServiceTestJig(cs, serviceName) + jig := framework.NewServiceTestJig(cs, serviceName) By("Prepare allow source ips") // prepare the exec pods // acceptPod are allowed to access the loadbalancer - acceptPodName := createExecPodOrFail(cs, namespace, "execpod-accept") - dropPodName := createExecPodOrFail(cs, namespace, "execpod-drop") + acceptPodName := framework.CreateExecPodOrFail(cs, namespace, "execpod-accept", nil) + dropPodName := framework.CreateExecPodOrFail(cs, namespace, "execpod-drop", nil) accpetPod, err := cs.Core().Pods(namespace).Get(acceptPodName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -1239,30 +1200,30 @@ var _ = framework.KubeDescribe("Services", func() { jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) By("check reachability from different sources") - svcIP := getIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) - checkReachabilityFromPod(true, namespace, acceptPodName, svcIP) - checkReachabilityFromPod(false, namespace, dropPodName, svcIP) + svcIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) + framework.CheckReachabilityFromPod(true, namespace, acceptPodName, svcIP) + framework.CheckReachabilityFromPod(false, namespace, dropPodName, svcIP) By("Update service LoadBalancerSourceRange and check reachability") jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { // only allow access from dropPod svc.Spec.LoadBalancerSourceRanges = []string{dropPod.Status.PodIP + "/32"} }) - checkReachabilityFromPod(false, namespace, acceptPodName, svcIP) - checkReachabilityFromPod(true, namespace, dropPodName, svcIP) + framework.CheckReachabilityFromPod(false, namespace, acceptPodName, svcIP) + framework.CheckReachabilityFromPod(true, namespace, dropPodName, svcIP) By("Delete LoadBalancerSourceRange field and check reachability") jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.LoadBalancerSourceRanges = nil }) - checkReachabilityFromPod(true, namespace, acceptPodName, svcIP) - checkReachabilityFromPod(true, namespace, dropPodName, svcIP) + framework.CheckReachabilityFromPod(true, namespace, acceptPodName, svcIP) + framework.CheckReachabilityFromPod(true, namespace, dropPodName, svcIP) }) }) var _ = framework.KubeDescribe("ESIPP [Slow]", func() { f := framework.NewDefaultFramework("esipp") - loadBalancerCreateTimeout := loadBalancerCreateTimeoutDefault + loadBalancerCreateTimeout := framework.LoadBalancerCreateTimeoutDefault var cs clientset.Interface serviceLBNames := []string{} @@ -1272,18 +1233,18 @@ var _ = 
framework.KubeDescribe("ESIPP [Slow]", func() { framework.SkipUnlessProviderIs("gce", "gke") cs = f.ClientSet - if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > largeClusterMinNodesNumber { - loadBalancerCreateTimeout = loadBalancerCreateTimeoutLarge + if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > framework.LargeClusterMinNodesNumber { + loadBalancerCreateTimeout = framework.LoadBalancerCreateTimeoutLarge } }) AfterEach(func() { if CurrentGinkgoTestDescription().Failed { - describeSvc(f.Namespace.Name) + framework.DescribeSvc(f.Namespace.Name) } for _, lb := range serviceLBNames { framework.Logf("cleaning gce resource for %s", lb) - cleanupServiceGCEResources(lb) + framework.CleanupServiceGCEResources(lb) } //reset serviceLBNames serviceLBNames = []string{} @@ -1292,10 +1253,10 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() { It("should work for type=LoadBalancer", func() { namespace := f.Namespace.Name serviceName := "external-local" - jig := NewServiceTestJig(cs, serviceName) + jig := framework.NewServiceTestJig(cs, serviceName) - svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil) - serviceLBNames = append(serviceLBNames, getLoadBalancerName(svc)) + svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil) + serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc)) healthCheckNodePort := int(service.GetServiceHealthCheckNodePort(svc)) if healthCheckNodePort == 0 { framework.Failf("Service HealthCheck NodePort was not allocated") @@ -1304,7 +1265,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() { jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) // Make sure we didn't leak the health check node port. 
- for name, ips := range jig.getEndpointNodes(svc) { + for name, ips := range jig.GetEndpointNodes(svc) { _, fail, status := jig.TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", 5) if fail < 2 { framework.Failf("Health check node port %v not released on node %v: %v", healthCheckNodePort, name, status) @@ -1315,10 +1276,10 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() { }() svcTCPPort := int(svc.Spec.Ports[0].Port) - ingressIP := getIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) + ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) By("reading clientIP using the TCP service's service port via its external VIP") - content := jig.GetHTTPContent(ingressIP, svcTCPPort, kubeProxyLagTimeout, "/clientip") + content := jig.GetHTTPContent(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout, "/clientip") clientIP := content.String() framework.Logf("ClientIP detected by target pod using VIP:SvcPort is %s", clientIP) @@ -1331,21 +1292,21 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() { It("should work for type=NodePort", func() { namespace := f.Namespace.Name serviceName := "external-local" - jig := NewServiceTestJig(cs, serviceName) + jig := framework.NewServiceTestJig(cs, serviceName) - svc := jig.createOnlyLocalNodePortService(namespace, serviceName, true) + svc := jig.CreateOnlyLocalNodePortService(namespace, serviceName, true) defer func() { Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) }() tcpNodePort := int(svc.Spec.Ports[0].NodePort) - endpointsNodeMap := jig.getEndpointNodes(svc) + endpointsNodeMap := jig.GetEndpointNodes(svc) path := "/clientip" for nodeName, nodeIPs := range endpointsNodeMap { nodeIP := nodeIPs[0] By(fmt.Sprintf("reading clientIP using the TCP service's NodePort, on node %v: %v%v%v", nodeName, nodeIP, tcpNodePort, path)) - content := jig.GetHTTPContent(nodeIP, tcpNodePort, kubeProxyLagTimeout, path) + content := jig.GetHTTPContent(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout, path) clientIP := content.String() framework.Logf("ClientIP detected by target pod using NodePort is %s", clientIP) if strings.HasPrefix(clientIP, "10.") { @@ -1357,11 +1318,11 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() { It("should only target nodes with endpoints [Feature:ExternalTrafficLocalOnly]", func() { namespace := f.Namespace.Name serviceName := "external-local" - jig := NewServiceTestJig(cs, serviceName) - nodes := jig.getNodes(maxNodesForEndpointsTests) + jig := framework.NewServiceTestJig(cs, serviceName) + nodes := jig.GetNodes(framework.MaxNodesForEndpointsTests) - svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, false, nil) - serviceLBNames = append(serviceLBNames, getLoadBalancerName(svc)) + svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, false, nil) + serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc)) defer func() { jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) @@ -1372,12 +1333,12 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() { framework.Failf("Service HealthCheck NodePort was not allocated") } - ips := collectAddresses(nodes, v1.NodeExternalIP) + ips := framework.CollectAddresses(nodes, v1.NodeExternalIP) if len(ips) == 0 { - ips = collectAddresses(nodes, v1.NodeLegacyHostIP) + ips = 
framework.CollectAddresses(nodes, v1.NodeLegacyHostIP) } - ingressIP := getIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) + ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) svcTCPPort := int(svc.Spec.Ports[0].Port) threshold := 2 @@ -1394,7 +1355,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() { }) By(fmt.Sprintf("waiting for service endpoint on node %v", endpointNodeName)) - jig.waitForEndpointOnNode(namespace, serviceName, endpointNodeName) + jig.WaitForEndpointOnNode(namespace, serviceName, endpointNodeName) // HealthCheck should pass only on the node where num(endpoints) > 0 // All other nodes should fail the healthcheck on the service healthCheckNodePort @@ -1408,7 +1369,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() { framework.Failf("Expected %s failures on %v/%v, got %d, err %v", threshold, endpointNodeName, path, fail, err) } // Make sure the loadbalancer picked up the helth check change - jig.TestReachableHTTP(ingressIP, svcTCPPort, kubeProxyLagTimeout) + jig.TestReachableHTTP(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout) } framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, namespace, serviceName)) } @@ -1417,23 +1378,25 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() { It("should work from pods [Feature:ExternalTrafficLocalOnly]", func() { namespace := f.Namespace.Name serviceName := "external-local" - jig := NewServiceTestJig(cs, serviceName) - nodes := jig.getNodes(maxNodesForEndpointsTests) + jig := framework.NewServiceTestJig(cs, serviceName) + nodes := jig.GetNodes(framework.MaxNodesForEndpointsTests) - svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil) - serviceLBNames = append(serviceLBNames, getLoadBalancerName(svc)) + svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil) + serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc)) defer func() { jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) }() - ingressIP := getIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) + ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) path := fmt.Sprintf("%s:%d/clientip", ingressIP, int(svc.Spec.Ports[0].Port)) nodeName := nodes.Items[0].Name podName := "execpod-sourceip" By(fmt.Sprintf("Creating %v on node %v", podName, nodeName)) - execPodName := createExecPodOnNode(f.ClientSet, namespace, nodeName, podName) + execPodName := framework.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) { + pod.Spec.NodeName = nodeName + }) defer func() { err := cs.Core().Pods(namespace).Delete(execPodName, nil) Expect(err).NotTo(HaveOccurred()) @@ -1441,12 +1404,12 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() { execPod, err := f.ClientSet.Core().Pods(namespace).Get(execPodName, metav1.GetOptions{}) framework.ExpectNoError(err) - framework.Logf("Waiting up to %v wget %v", kubeProxyLagTimeout, path) + framework.Logf("Waiting up to %v wget %v", framework.KubeProxyLagTimeout, path) cmd := fmt.Sprintf(`wget -T 30 -qO- %v`, path) var srcIP string By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, podName, nodeName)) - if pollErr := wait.PollImmediate(framework.Poll, loadBalancerCreateTimeoutDefault, func() (bool, error) { + if pollErr := 
wait.PollImmediate(framework.Poll, framework.LoadBalancerCreateTimeoutDefault, func() (bool, error) { stdout, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd) if err != nil { framework.Logf("got err: %v, retry until timeout", err) @@ -1462,15 +1425,15 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() { It("should handle updates to source ip annotation [Feature:ExternalTrafficLocalOnly]", func() { namespace := f.Namespace.Name serviceName := "external-local" - jig := NewServiceTestJig(cs, serviceName) + jig := framework.NewServiceTestJig(cs, serviceName) - nodes := jig.getNodes(maxNodesForEndpointsTests) + nodes := jig.GetNodes(framework.MaxNodesForEndpointsTests) if len(nodes.Items) < 2 { framework.Failf("Need at least 2 nodes to verify source ip from a node without endpoint") } - svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil) - serviceLBNames = append(serviceLBNames, getLoadBalancerName(svc)) + svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil) + serviceLBNames = append(serviceLBNames, cloudprovider.GetLoadBalancerName(svc)) defer func() { jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) @@ -1488,24 +1451,24 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() { framework.Failf("Service HealthCheck NodePort annotation still present") } - endpointNodeMap := jig.getEndpointNodes(svc) + endpointNodeMap := jig.GetEndpointNodes(svc) noEndpointNodeMap := map[string][]string{} for _, n := range nodes.Items { if _, ok := endpointNodeMap[n.Name]; ok { continue } - noEndpointNodeMap[n.Name] = getNodeAddresses(&n, v1.NodeExternalIP) + noEndpointNodeMap[n.Name] = framework.GetNodeAddresses(&n, v1.NodeExternalIP) } svcTCPPort := int(svc.Spec.Ports[0].Port) svcNodePort := int(svc.Spec.Ports[0].NodePort) - ingressIP := getIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) + ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) path := "/clientip" By(fmt.Sprintf("endpoints present on nodes %v, absent on nodes %v", endpointNodeMap, noEndpointNodeMap)) for nodeName, nodeIPs := range noEndpointNodeMap { By(fmt.Sprintf("Checking %v (%v:%v%v) proxies to endpoints on another node", nodeName, nodeIPs[0], svcNodePort, path)) - jig.GetHTTPContent(nodeIPs[0], svcNodePort, kubeProxyLagTimeout, path) + jig.GetHTTPContent(nodeIPs[0], svcNodePort, framework.KubeProxyLagTimeout, path) } for nodeName, nodeIPs := range endpointNodeMap { @@ -1513,8 +1476,8 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() { var body bytes.Buffer var result bool var err error - if pollErr := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { - result, err = testReachableHTTPWithContent(nodeIPs[0], healthCheckNodePort, "/healthz", "", &body) + if pollErr := wait.PollImmediate(framework.Poll, framework.ServiceTestTimeout, func() (bool, error) { + result, err = framework.TestReachableHTTPWithContent(nodeIPs[0], healthCheckNodePort, "/healthz", "", &body) return !result, nil }); pollErr != nil { framework.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. Last err %v, last body %v", @@ -1525,8 +1488,8 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() { // Poll till kube-proxy re-adds the MASQUERADE rule on the node. 
By(fmt.Sprintf("checking source ip is NOT preserved through loadbalancer %v", ingressIP)) var clientIP string - pollErr := wait.PollImmediate(framework.Poll, kubeProxyLagTimeout, func() (bool, error) { - content := jig.GetHTTPContent(ingressIP, svcTCPPort, kubeProxyLagTimeout, "/clientip") + pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { + content := jig.GetHTTPContent(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout, "/clientip") clientIP = content.String() if strings.HasPrefix(clientIP, "10.") { return true, nil @@ -1551,8 +1514,8 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() { svc.ObjectMeta.Annotations[service.BetaAnnotationHealthCheckNodePort] = fmt.Sprintf("%d", healthCheckNodePort) }) - pollErr = wait.PollImmediate(framework.Poll, kubeProxyLagTimeout, func() (bool, error) { - content := jig.GetHTTPContent(ingressIP, svcTCPPort, kubeProxyLagTimeout, path) + pollErr = wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { + content := jig.GetHTTPContent(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout, path) clientIP = content.String() By(fmt.Sprintf("Endpoint %v:%v%v returned client ip %v", ingressIP, svcTCPPort, path, clientIP)) if !strings.HasPrefix(clientIP, "10.") { @@ -1566,1410 +1529,11 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() { }) }) -// updateService fetches a service, calls the update function on it, -// and then attempts to send the updated service. It retries up to 2 -// times in the face of timeouts and conflicts. -func updateService(c clientset.Interface, namespace, serviceName string, update func(*v1.Service)) (*v1.Service, error) { - var service *v1.Service - var err error - for i := 0; i < 3; i++ { - service, err = c.Core().Services(namespace).Get(serviceName, metav1.GetOptions{}) - if err != nil { - return service, err - } - - update(service) - - service, err = c.Core().Services(namespace).Update(service) - - if !errors.IsConflict(err) && !errors.IsServerTimeout(err) { - return service, err - } - } - return service, err -} - -func getContainerPortsByPodUID(endpoints *v1.Endpoints) PortsByPodUID { - m := PortsByPodUID{} - for _, ss := range endpoints.Subsets { - for _, port := range ss.Ports { - for _, addr := range ss.Addresses { - containerPort := port.Port - hostPort := port.Port - - // use endpoint annotations to recover the container port in a Mesos setup - // compare contrib/mesos/pkg/service/endpoints_controller.syncService - key := fmt.Sprintf("k8s.mesosphere.io/containerPort_%s_%s_%d", port.Protocol, addr.IP, hostPort) - mesosContainerPortString := endpoints.Annotations[key] - if mesosContainerPortString != "" { - mesosContainerPort, err := strconv.Atoi(mesosContainerPortString) - if err != nil { - continue - } - containerPort = int32(mesosContainerPort) - framework.Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, mesosContainerPortString) - } - - // framework.Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort) - if _, ok := m[addr.TargetRef.UID]; !ok { - m[addr.TargetRef.UID] = make([]int, 0) - } - m[addr.TargetRef.UID] = append(m[addr.TargetRef.UID], int(containerPort)) - } - } - } - return m -} - -type PortsByPodName map[string][]int -type PortsByPodUID map[types.UID][]int - -func translatePodNameToUIDOrFail(c clientset.Interface, ns string, expectedEndpoints PortsByPodName) PortsByPodUID { - portsByUID := make(PortsByPodUID) - - 
for name, portList := range expectedEndpoints { - pod, err := c.Core().Pods(ns).Get(name, metav1.GetOptions{}) - if err != nil { - framework.Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err) - } - portsByUID[pod.ObjectMeta.UID] = portList - } - // framework.Logf("successfully translated pod names to UIDs: %v -> %v on namespace %s", expectedEndpoints, portsByUID, ns) - return portsByUID -} - -func validatePortsOrFail(endpoints PortsByPodUID, expectedEndpoints PortsByPodUID) { - if len(endpoints) != len(expectedEndpoints) { - // should not happen because we check this condition before - framework.Failf("invalid number of endpoints got %v, expected %v", endpoints, expectedEndpoints) - } - for podUID := range expectedEndpoints { - if _, ok := endpoints[podUID]; !ok { - framework.Failf("endpoint %v not found", podUID) - } - if len(endpoints[podUID]) != len(expectedEndpoints[podUID]) { - framework.Failf("invalid list of ports for uid %v. Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID]) - } - sort.Ints(endpoints[podUID]) - sort.Ints(expectedEndpoints[podUID]) - for index := range endpoints[podUID] { - if endpoints[podUID][index] != expectedEndpoints[podUID][index] { - framework.Failf("invalid list of ports for uid %v. Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID]) - } - } - } -} - -func validateEndpointsOrFail(c clientset.Interface, namespace, serviceName string, expectedEndpoints PortsByPodName) { - By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", framework.ServiceStartTimeout, serviceName, namespace, expectedEndpoints)) - i := 1 - for start := time.Now(); time.Since(start) < framework.ServiceStartTimeout; time.Sleep(1 * time.Second) { - endpoints, err := c.Core().Endpoints(namespace).Get(serviceName, metav1.GetOptions{}) - if err != nil { - framework.Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err) - continue - } - // framework.Logf("Found endpoints %v", endpoints) - - portsByPodUID := getContainerPortsByPodUID(endpoints) - // framework.Logf("Found port by pod UID %v", portsByPodUID) - - expectedPortsByPodUID := translatePodNameToUIDOrFail(c, namespace, expectedEndpoints) - if len(portsByPodUID) == len(expectedEndpoints) { - validatePortsOrFail(portsByPodUID, expectedPortsByPodUID) - framework.Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)", - serviceName, namespace, expectedEndpoints, time.Since(start)) - return - } - - if i%5 == 0 { - framework.Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start)) - } - i++ - } - - if pods, err := c.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{}); err == nil { - for _, pod := range pods.Items { - framework.Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp) - } - } else { - framework.Logf("Can't list pod debug info: %v", err) - } - framework.Failf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, framework.ServiceStartTimeout) -} - -// newExecPodSpec returns the pod spec of exec pod -func newExecPodSpec(ns, generateName string) *v1.Pod { - immediate := int64(0) - pod := &v1.Pod{ - ObjectMeta: v1.ObjectMeta{ - GenerateName: generateName, - Namespace: ns, - }, - Spec: v1.PodSpec{ - TerminationGracePeriodSeconds: &immediate, - Containers: 
[]v1.Container{ - { - Name: "exec", - Image: "gcr.io/google_containers/busybox:1.24", - Command: []string{"sh", "-c", "while true; do sleep 5; done"}, - }, - }, - }, - } - return pod -} - -// createExecPodOrFail creates a simple busybox pod in a sleep loop used as a -// vessel for kubectl exec commands. -// Returns the name of the created pod. -func createExecPodOrFail(client clientset.Interface, ns, generateName string) string { - framework.Logf("Creating new exec pod") - execPod := newExecPodSpec(ns, generateName) - created, err := client.Core().Pods(ns).Create(execPod) - Expect(err).NotTo(HaveOccurred()) - err = wait.PollImmediate(framework.Poll, 5*time.Minute, func() (bool, error) { - retrievedPod, err := client.Core().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{}) - if err != nil { - return false, nil - } - return retrievedPod.Status.Phase == v1.PodRunning, nil - }) - Expect(err).NotTo(HaveOccurred()) - return created.Name -} - -// createExecPodOnNode launches a exec pod in the given namespace and node -// waits until it's Running, created pod name would be returned -func createExecPodOnNode(client clientset.Interface, ns, nodeName, generateName string) string { - framework.Logf("Creating exec pod %q in namespace %q", generateName, ns) - execPod := newExecPodSpec(ns, generateName) - execPod.Spec.NodeName = nodeName - created, err := client.Core().Pods(ns).Create(execPod) - Expect(err).NotTo(HaveOccurred()) - err = wait.PollImmediate(framework.Poll, 5*time.Minute, func() (bool, error) { - retrievedPod, err := client.Core().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{}) - if err != nil { - return false, nil - } - return retrievedPod.Status.Phase == v1.PodRunning, nil - }) - Expect(err).NotTo(HaveOccurred()) - return created.Name -} - -func createPodOrFail(c clientset.Interface, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort) { - By(fmt.Sprintf("creating pod %s in namespace %s", name, ns)) - pod := &v1.Pod{ - ObjectMeta: v1.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "pause", - Image: framework.GetPauseImageName(c), - Ports: containerPorts, - // Add a dummy environment variable to work around a docker issue. - // https://github.com/docker/docker/issues/14203 - Env: []v1.EnvVar{{Name: "FOO", Value: " "}}, - }, - }, - }, - } - _, err := c.Core().Pods(ns).Create(pod) - Expect(err).NotTo(HaveOccurred()) -} - -func deletePodOrFail(c clientset.Interface, ns, name string) { - By(fmt.Sprintf("deleting pod %s in namespace %s", name, ns)) - err := c.Core().Pods(ns).Delete(name, nil) - Expect(err).NotTo(HaveOccurred()) -} - -func getNodeAddresses(node *v1.Node, addressType v1.NodeAddressType) (ips []string) { - for j := range node.Status.Addresses { - nodeAddress := &node.Status.Addresses[j] - if nodeAddress.Type == addressType { - ips = append(ips, nodeAddress.Address) - } - } - return -} - -func collectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []string { - ips := []string{} - for i := range nodes.Items { - ips = append(ips, getNodeAddresses(&nodes.Items[i], addressType)...) 
- } - return ips -} - -func getNodePublicIps(c clientset.Interface) ([]string, error) { - nodes := framework.GetReadySchedulableNodesOrDie(c) - - ips := collectAddresses(nodes, v1.NodeExternalIP) - if len(ips) == 0 { - ips = collectAddresses(nodes, v1.NodeLegacyHostIP) - } - return ips, nil -} - -func pickNodeIP(c clientset.Interface) string { - publicIps, err := getNodePublicIps(c) - Expect(err).NotTo(HaveOccurred()) - if len(publicIps) == 0 { - framework.Failf("got unexpected number (%d) of public IPs", len(publicIps)) - } - ip := publicIps[0] - return ip -} - -func testReachableHTTP(ip string, port int, request string, expect string) (bool, error) { - return testReachableHTTPWithContent(ip, port, request, expect, nil) -} - -func testReachableHTTPWithContent(ip string, port int, request string, expect string, content *bytes.Buffer) (bool, error) { - return testReachableHTTPWithContentTimeout(ip, port, request, expect, content, 5*time.Second) -} - -func testReachableHTTPWithContentTimeout(ip string, port int, request string, expect string, content *bytes.Buffer, timeout time.Duration) (bool, error) { - url := fmt.Sprintf("http://%s:%d%s", ip, port, request) - if ip == "" { - framework.Failf("Got empty IP for reachability check (%s)", url) - return false, nil - } - if port == 0 { - framework.Failf("Got port==0 for reachability check (%s)", url) - return false, nil - } - - framework.Logf("Testing HTTP reachability of %v", url) - - resp, err := httpGetNoConnectionPoolTimeout(url, timeout) - if err != nil { - framework.Logf("Got error testing for reachability of %s: %v", url, err) - return false, nil - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - framework.Logf("Got error reading response from %s: %v", url, err) - return false, nil - } - if resp.StatusCode != 200 { - return false, fmt.Errorf("received non-success return status %q trying to access %s; got body: %s", - resp.Status, url, string(body)) - } - if !strings.Contains(string(body), expect) { - return false, fmt.Errorf("received response body without expected substring %q: %s", expect, string(body)) - } - if content != nil { - content.Write(body) - } - return true, nil -} - -func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, error) { - url := fmt.Sprintf("http://%s:%d%s", ip, port, request) - if ip == "" || port == 0 { - framework.Failf("Got empty IP for reachability check (%s)", url) - return false, fmt.Errorf("Invalid input ip or port") - } - framework.Logf("Testing HTTP health check on %v", url) - resp, err := httpGetNoConnectionPool(url) - if err != nil { - framework.Logf("Got error testing for reachability of %s: %v", url, err) - return false, err - } - defer resp.Body.Close() - if err != nil { - framework.Logf("Got error reading response from %s: %v", url, err) - return false, err - } - // HealthCheck responder returns 503 for no local endpoints - if resp.StatusCode == 503 { - return false, nil - } - // HealthCheck responder returns 200 for non-zero local endpoints - if resp.StatusCode == 200 { - return true, nil - } - return false, fmt.Errorf("Unexpected HTTP response code %s from health check responder at %s", resp.Status, url) -} - -func testNotReachableHTTP(ip string, port int) (bool, error) { - return testNotReachableHTTPTimeout(ip, port, 5*time.Second) -} - -func testNotReachableHTTPTimeout(ip string, port int, timeout time.Duration) (bool, error) { - url := fmt.Sprintf("http://%s:%d", ip, port) - if ip == "" { - framework.Failf("Got empty IP for 
non-reachability check (%s)", url) - return false, nil - } - if port == 0 { - framework.Failf("Got port==0 for non-reachability check (%s)", url) - return false, nil - } - - framework.Logf("Testing HTTP non-reachability of %v", url) - - resp, err := httpGetNoConnectionPoolTimeout(url, timeout) - if err != nil { - framework.Logf("Confirmed that %s is not reachable", url) - return true, nil - } - resp.Body.Close() - return false, nil -} - -func testReachableUDP(ip string, port int, request string, expect string) (bool, error) { - uri := fmt.Sprintf("udp://%s:%d", ip, port) - if ip == "" { - framework.Failf("Got empty IP for reachability check (%s)", uri) - return false, nil - } - if port == 0 { - framework.Failf("Got port==0 for reachability check (%s)", uri) - return false, nil - } - - framework.Logf("Testing UDP reachability of %v", uri) - - con, err := net.Dial("udp", ip+":"+strconv.Itoa(port)) - if err != nil { - return false, fmt.Errorf("Failed to dial %s:%d: %v", ip, port, err) - } - - _, err = con.Write([]byte(fmt.Sprintf("%s\n", request))) - if err != nil { - return false, fmt.Errorf("Failed to send request: %v", err) - } - - var buf []byte = make([]byte, len(expect)+1) - - err = con.SetDeadline(time.Now().Add(3 * time.Second)) - if err != nil { - return false, fmt.Errorf("Failed to set deadline: %v", err) - } - - _, err = con.Read(buf) - if err != nil { - return false, nil - } - - if !strings.Contains(string(buf), expect) { - return false, fmt.Errorf("Failed to retrieve %q, got %q", expect, string(buf)) - } - - framework.Logf("Successfully reached %v", uri) - return true, nil -} - -func testNotReachableUDP(ip string, port int, request string) (bool, error) { - uri := fmt.Sprintf("udp://%s:%d", ip, port) - if ip == "" { - framework.Failf("Got empty IP for reachability check (%s)", uri) - return false, nil - } - if port == 0 { - framework.Failf("Got port==0 for reachability check (%s)", uri) - return false, nil - } - - framework.Logf("Testing UDP non-reachability of %v", uri) - - con, err := net.Dial("udp", ip+":"+strconv.Itoa(port)) - if err != nil { - framework.Logf("Confirmed that %s is not reachable", uri) - return true, nil - } - - _, err = con.Write([]byte(fmt.Sprintf("%s\n", request))) - if err != nil { - framework.Logf("Confirmed that %s is not reachable", uri) - return true, nil - } - - var buf []byte = make([]byte, 1) - - err = con.SetDeadline(time.Now().Add(3 * time.Second)) - if err != nil { - return false, fmt.Errorf("Failed to set deadline: %v", err) - } - - _, err = con.Read(buf) - if err != nil { - framework.Logf("Confirmed that %s is not reachable", uri) - return true, nil - } - - return false, nil -} - -func testHitNodesFromOutside(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String) error { - return testHitNodesFromOutsideWithCount(externalIP, httpPort, timeout, expectedHosts, 1) -} - -func testHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String, countToSucceed int) error { - framework.Logf("Waiting up to %v for satisfying expectedHosts for %v times", timeout, countToSucceed) - hittedHosts := sets.NewString() - count := 0 - condition := func() (bool, error) { - var respBody bytes.Buffer - reached, err := testReachableHTTPWithContentTimeout(externalIP, int(httpPort), "/hostname", "", &respBody, 1*time.Second) - if err != nil || !reached { - return false, nil - } - hittedHost := strings.TrimSpace(respBody.String()) - if !expectedHosts.Has(hittedHost) { - framework.Logf("Error 
hitting unexpected host: %v, reset counter: %v", hittedHost, count) - count = 0 - return false, nil - } - if !hittedHosts.Has(hittedHost) { - hittedHosts.Insert(hittedHost) - framework.Logf("Missing %+v, got %+v", expectedHosts.Difference(hittedHosts), hittedHosts) - } - if hittedHosts.Equal(expectedHosts) { - count++ - if count >= countToSucceed { - return true, nil - } - } - return false, nil - } - - if err := wait.Poll(time.Second, timeout, condition); err != nil { - return fmt.Errorf("error waiting for expectedHosts: %v, hittedHosts: %v, count: %v, expected count: %v", expectedHosts, hittedHosts, count, countToSucceed) - } - return nil -} - -// Creates a replication controller that serves its hostname and a service on top of it. -func startServeHostnameService(c clientset.Interface, internalClient internalclientset.Interface, ns, name string, port, replicas int) ([]string, string, error) { - podNames := make([]string, replicas) - - By("creating service " + name + " in namespace " + ns) - _, err := c.Core().Services(ns).Create(&v1.Service{ - ObjectMeta: v1.ObjectMeta{ - Name: name, - }, - Spec: v1.ServiceSpec{ - Ports: []v1.ServicePort{{ - Port: int32(port), - TargetPort: intstr.FromInt(9376), - Protocol: "TCP", - }}, - Selector: map[string]string{ - "name": name, - }, - }, - }) - if err != nil { - return podNames, "", err - } - - var createdPods []*v1.Pod - maxContainerFailures := 0 - config := testutils.RCConfig{ - Client: c, - InternalClient: internalClient, - Image: "gcr.io/google_containers/serve_hostname:v1.4", - Name: name, - Namespace: ns, - PollInterval: 3 * time.Second, - Timeout: framework.PodReadyBeforeTimeout, - Replicas: replicas, - CreatedPods: &createdPods, - MaxContainerFailures: &maxContainerFailures, - } - err = framework.RunRC(config) - if err != nil { - return podNames, "", err - } - - if len(createdPods) != replicas { - return podNames, "", fmt.Errorf("Incorrect number of running pods: %v", len(createdPods)) - } - - for i := range createdPods { - podNames[i] = createdPods[i].ObjectMeta.Name - } - sort.StringSlice(podNames).Sort() - - service, err := c.Core().Services(ns).Get(name, metav1.GetOptions{}) - if err != nil { - return podNames, "", err - } - if service.Spec.ClusterIP == "" { - return podNames, "", fmt.Errorf("Service IP is blank for %v", name) - } - serviceIP := service.Spec.ClusterIP - return podNames, serviceIP, nil -} - -func stopServeHostnameService(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error { - if err := framework.DeleteRCAndPods(clientset, internalClientset, ns, name); err != nil { - return err - } - if err := clientset.Core().Services(ns).Delete(name, nil); err != nil { - return err - } - return nil -} - -// verifyServeHostnameServiceUp wgets the given serviceIP:servicePort from the -// given host and from within a pod. The host is expected to be an SSH-able node -// in the cluster. Each pod in the service is expected to echo its name. These -// names are compared with the given expectedPods list after a sort | uniq. -func verifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expectedPods []string, serviceIP string, servicePort int) error { - execPodName := createExecPodOrFail(c, ns, "execpod-") - defer func() { - deletePodOrFail(c, ns, execPodName) - }() - - // Loop a bunch of times - the proxy is randomized, so we want a good - // chance of hitting each backend at least once. 
- buildCommand := func(wget string) string { - return fmt.Sprintf("for i in $(seq 1 %d); do %s http://%s:%d 2>&1 || true; echo; done", - 50*len(expectedPods), wget, serviceIP, servicePort) - } - commands := []func() string{ - // verify service from node - func() string { - cmd := "set -e; " + buildCommand("wget -q --timeout=0.2 --tries=1 -O -") - framework.Logf("Executing cmd %q on host %v", cmd, host) - result, err := framework.SSH(cmd, host, framework.TestContext.Provider) - if err != nil || result.Code != 0 { - framework.LogSSHResult(result) - framework.Logf("error while SSH-ing to node: %v", err) - } - return result.Stdout - }, - // verify service from pod - func() string { - cmd := buildCommand("wget -q -T 1 -O -") - framework.Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPodName) - // TODO: Use exec-over-http via the netexec pod instead of kubectl exec. - output, err := framework.RunHostCmd(ns, execPodName, cmd) - if err != nil { - framework.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPodName, err, output) - } - return output - }, - } - - expectedEndpoints := sets.NewString(expectedPods...) - By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods))) - for _, cmdFunc := range commands { - passed := false - gotEndpoints := sets.NewString() - - // Retry cmdFunc for a while - for start := time.Now(); time.Since(start) < kubeProxyLagTimeout; time.Sleep(5 * time.Second) { - for _, endpoint := range strings.Split(cmdFunc(), "\n") { - trimmedEp := strings.TrimSpace(endpoint) - if trimmedEp != "" { - gotEndpoints.Insert(trimmedEp) - } - } - // TODO: simply checking that the retrieved endpoints is a superset - // of the expected allows us to ignore intermitten network flakes that - // result in output like "wget timed out", but these should be rare - // and we need a better way to track how often it occurs. - if gotEndpoints.IsSuperset(expectedEndpoints) { - if !gotEndpoints.Equal(expectedEndpoints) { - framework.Logf("Ignoring unexpected output wgetting endpoints of service %s: %v", serviceIP, gotEndpoints.Difference(expectedEndpoints)) - } - passed = true - break - } - framework.Logf("Unable to reach the following endpoints of service %s: %v", serviceIP, expectedEndpoints.Difference(gotEndpoints)) - } - if !passed { - // Sort the lists so they're easier to visually diff. 
- exp := expectedEndpoints.List() - got := gotEndpoints.List() - sort.StringSlice(exp).Sort() - sort.StringSlice(got).Sort() - return fmt.Errorf("service verification failed for: %s\nexpected %v\nreceived %v", serviceIP, exp, got) - } - } - return nil -} - -func verifyServeHostnameServiceDown(c clientset.Interface, host string, serviceIP string, servicePort int) error { - command := fmt.Sprintf( - "curl -s --connect-timeout 2 http://%s:%d && exit 99", serviceIP, servicePort) - - for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { - result, err := framework.SSH(command, host, framework.TestContext.Provider) - if err != nil { - framework.LogSSHResult(result) - framework.Logf("error while SSH-ing to node: %v", err) - } - if result.Code != 99 { - return nil - } - framework.Logf("service still alive - still waiting") - } - return fmt.Errorf("waiting for service to be down timed out") -} - -// Does an HTTP GET, but does not reuse TCP connections -// This masks problems where the iptables rule has changed, but we don't see it -// This is intended for relatively quick requests (status checks), so we set a short (5 seconds) timeout -func httpGetNoConnectionPool(url string) (*http.Response, error) { - return httpGetNoConnectionPoolTimeout(url, 5*time.Second) -} - -func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Response, error) { - tr := utilnet.SetTransportDefaults(&http.Transport{ - DisableKeepAlives: true, - }) - client := &http.Client{ - Transport: tr, - Timeout: timeout, - } - - return client.Get(url) -} - -// A test jig to help testing. -type ServiceTestJig struct { - ID string - Name string - Client clientset.Interface - Labels map[string]string -} - -// NewServiceTestJig allocates and inits a new ServiceTestJig. -func NewServiceTestJig(client clientset.Interface, name string) *ServiceTestJig { - j := &ServiceTestJig{} - j.Client = client - j.Name = name - j.ID = j.Name + "-" + string(uuid.NewUUID()) - j.Labels = map[string]string{"testid": j.ID} - - return j -} - -// newServiceTemplate returns the default v1.Service template for this jig, but -// does not actually create the Service. The default Service has the same name -// as the jig and exposes the given port. -func (j *ServiceTestJig) newServiceTemplate(namespace string, proto v1.Protocol, port int32) *v1.Service { - service := &v1.Service{ - ObjectMeta: v1.ObjectMeta{ - Namespace: namespace, - Name: j.Name, - Labels: j.Labels, - }, - Spec: v1.ServiceSpec{ - Selector: j.Labels, - Ports: []v1.ServicePort{ - { - Protocol: proto, - Port: port, - }, - }, - }, - } - return service -} - -// CreateTCPServiceWithPort creates a new TCP Service with given port based on the -// jig's defaults. Callers can provide a function to tweak the Service object before -// it is created. -func (j *ServiceTestJig) CreateTCPServiceWithPort(namespace string, tweak func(svc *v1.Service), port int32) *v1.Service { - svc := j.newServiceTemplate(namespace, v1.ProtocolTCP, port) - if tweak != nil { - tweak(svc) - } - result, err := j.Client.Core().Services(namespace).Create(svc) - if err != nil { - framework.Failf("Failed to create TCP Service %q: %v", svc.Name, err) - } - return result -} - -// CreateTCPServiceOrFail creates a new TCP Service based on the jig's -// defaults. Callers can provide a function to tweak the Service object before -// it is created. 
-func (j *ServiceTestJig) CreateTCPServiceOrFail(namespace string, tweak func(svc *v1.Service)) *v1.Service { - svc := j.newServiceTemplate(namespace, v1.ProtocolTCP, 80) - if tweak != nil { - tweak(svc) - } - result, err := j.Client.Core().Services(namespace).Create(svc) - if err != nil { - framework.Failf("Failed to create TCP Service %q: %v", svc.Name, err) - } - return result -} - -// CreateUDPServiceOrFail creates a new UDP Service based on the jig's -// defaults. Callers can provide a function to tweak the Service object before -// it is created. -func (j *ServiceTestJig) CreateUDPServiceOrFail(namespace string, tweak func(svc *v1.Service)) *v1.Service { - svc := j.newServiceTemplate(namespace, v1.ProtocolUDP, 80) - if tweak != nil { - tweak(svc) - } - result, err := j.Client.Core().Services(namespace).Create(svc) - if err != nil { - framework.Failf("Failed to create UDP Service %q: %v", svc.Name, err) - } - return result -} - -func (j *ServiceTestJig) ChangeServiceType(namespace, name string, newType v1.ServiceType, timeout time.Duration) { - ingressIP := "" - svc := j.UpdateServiceOrFail(namespace, name, func(s *v1.Service) { - for _, ing := range s.Status.LoadBalancer.Ingress { - if ing.IP != "" { - ingressIP = ing.IP - } - } - s.Spec.Type = newType - s.Spec.Ports[0].NodePort = 0 - }) - if ingressIP != "" { - j.WaitForLoadBalancerDestroyOrFail(namespace, svc.Name, ingressIP, int(svc.Spec.Ports[0].Port), timeout) - } -} - -// createOnlyLocalNodePortService creates a loadbalancer service and sanity checks its -// nodePort. If createPod is true, it also creates an RC with 1 replica of -// the standard netexec container used everywhere in this test. -func (j *ServiceTestJig) createOnlyLocalNodePortService(namespace, serviceName string, createPod bool) *v1.Service { - By("creating a service " + namespace + "/" + serviceName + " with type=NodePort and annotation for local-traffic-only") - svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) { - svc.Spec.Type = v1.ServiceTypeNodePort - svc.ObjectMeta.Annotations = map[string]string{ - service.BetaAnnotationExternalTraffic: service.AnnotationValueExternalTrafficLocal} - svc.Spec.Ports = []v1.ServicePort{{Protocol: "TCP", Port: 80}} - }) - - if createPod { - By("creating a pod to be part of the service " + serviceName) - j.RunOrFail(namespace, nil) - } - j.SanityCheckService(svc, v1.ServiceTypeNodePort) - return svc -} - -// createOnlyLocalLoadBalancerService creates a loadbalancer service and waits for it to -// acquire an ingress IP. If createPod is true, it also creates an RC with 1 -// replica of the standard netexec container used everywhere in this test. 
-func (j *ServiceTestJig) createOnlyLocalLoadBalancerService(namespace, serviceName string, timeout time.Duration, createPod bool, - tweak func(svc *v1.Service)) *v1.Service { - By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer and annotation for local-traffic-only") - svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) { - svc.Spec.Type = v1.ServiceTypeLoadBalancer - // We need to turn affinity off for our LB distribution tests - svc.Spec.SessionAffinity = v1.ServiceAffinityNone - svc.ObjectMeta.Annotations = map[string]string{ - service.BetaAnnotationExternalTraffic: service.AnnotationValueExternalTrafficLocal} - if tweak != nil { - tweak(svc) - } - }) - - if createPod { - By("creating a pod to be part of the service " + serviceName) - j.RunOrFail(namespace, nil) - } - By("waiting for loadbalancer for service " + namespace + "/" + serviceName) - svc = j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout) - j.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) - return svc -} - -// getEndpointNodes returns a map of nodenames:external-ip on which the -// endpoints of the given Service are running. -func (j *ServiceTestJig) getEndpointNodes(svc *v1.Service) map[string][]string { - nodes := j.getNodes(maxNodesForEndpointsTests) - endpoints, err := j.Client.Core().Endpoints(svc.Namespace).Get(svc.Name, metav1.GetOptions{}) - if err != nil { - framework.Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err) - } - if len(endpoints.Subsets) == 0 { - framework.Failf("Endpoint has no subsets, cannot determine node addresses.") - } - epNodes := sets.NewString() - for _, ss := range endpoints.Subsets { - for _, e := range ss.Addresses { - if e.NodeName != nil { - epNodes.Insert(*e.NodeName) - } - } - } - nodeMap := map[string][]string{} - for _, n := range nodes.Items { - if epNodes.Has(n.Name) { - nodeMap[n.Name] = getNodeAddresses(&n, v1.NodeExternalIP) - } - } - return nodeMap -} - -// getNodes returns the first maxNodesForTest nodes. Useful in large clusters -// where we don't eg: want to create an endpoint per node. 
-func (j *ServiceTestJig) getNodes(maxNodesForTest int) (nodes *v1.NodeList) { - nodes = framework.GetReadySchedulableNodesOrDie(j.Client) - if len(nodes.Items) <= maxNodesForTest { - maxNodesForTest = len(nodes.Items) - } - nodes.Items = nodes.Items[:maxNodesForTest] - return nodes -} - -func (j *ServiceTestJig) GetNodesNames(maxNodesForTest int) []string { - nodes := j.getNodes(maxNodesForTest) - nodesNames := []string{} - for _, node := range nodes.Items { - nodesNames = append(nodesNames, node.Name) - } - return nodesNames -} - -func (j *ServiceTestJig) waitForEndpointOnNode(namespace, serviceName, nodeName string) { - err := wait.PollImmediate(framework.Poll, loadBalancerCreateTimeoutDefault, func() (bool, error) { - endpoints, err := j.Client.Core().Endpoints(namespace).Get(serviceName, metav1.GetOptions{}) - if err != nil { - framework.Logf("Get endpoints for service %s/%s failed (%s)", namespace, serviceName, err) - return false, nil - } - // TODO: Handle multiple endpoints - if len(endpoints.Subsets[0].Addresses) == 0 { - framework.Logf("Expected Ready endpoints - found none") - return false, nil - } - epHostName := *endpoints.Subsets[0].Addresses[0].NodeName - framework.Logf("Pod for service %s/%s is on node %s", namespace, serviceName, epHostName) - if epHostName != nodeName { - framework.Logf("Found endpoint on wrong node, expected %v, got %v", nodeName, epHostName) - return false, nil - } - return true, nil - }) - framework.ExpectNoError(err) -} - -func (j *ServiceTestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceType) { - if svc.Spec.Type != svcType { - framework.Failf("unexpected Spec.Type (%s) for service, expected %s", svc.Spec.Type, svcType) - } - expectNodePorts := false - if svcType != v1.ServiceTypeClusterIP { - expectNodePorts = true - } - for i, port := range svc.Spec.Ports { - hasNodePort := (port.NodePort != 0) - if hasNodePort != expectNodePorts { - framework.Failf("unexpected Spec.Ports[%d].NodePort (%d) for service", i, port.NodePort) - } - if hasNodePort { - if !ServiceNodePortRange.Contains(int(port.NodePort)) { - framework.Failf("out-of-range nodePort (%d) for service", port.NodePort) - } - } - } - expectIngress := false - if svcType == v1.ServiceTypeLoadBalancer { - expectIngress = true - } - hasIngress := len(svc.Status.LoadBalancer.Ingress) != 0 - if hasIngress != expectIngress { - framework.Failf("unexpected number of Status.LoadBalancer.Ingress (%d) for service", len(svc.Status.LoadBalancer.Ingress)) - } - if hasIngress { - for i, ing := range svc.Status.LoadBalancer.Ingress { - if ing.IP == "" && ing.Hostname == "" { - framework.Failf("unexpected Status.LoadBalancer.Ingress[%d] for service: %#v", i, ing) - } - } - } -} - -// UpdateService fetches a service, calls the update function on it, and -// then attempts to send the updated service. It tries up to 3 times in the -// face of timeouts and conflicts. 
-func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*v1.Service)) (*v1.Service, error) { - for i := 0; i < 3; i++ { - service, err := j.Client.Core().Services(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("Failed to get Service %q: %v", name, err) - } - update(service) - service, err = j.Client.Core().Services(namespace).Update(service) - if err == nil { - return service, nil - } - if !errors.IsConflict(err) && !errors.IsServerTimeout(err) { - return nil, fmt.Errorf("Failed to update Service %q: %v", name, err) - } - } - return nil, fmt.Errorf("Too many retries updating Service %q", name) -} - -// UpdateServiceOrFail fetches a service, calls the update function on it, and -// then attempts to send the updated service. It tries up to 3 times in the -// face of timeouts and conflicts. -func (j *ServiceTestJig) UpdateServiceOrFail(namespace, name string, update func(*v1.Service)) *v1.Service { - svc, err := j.UpdateService(namespace, name, update) - if err != nil { - framework.Failf(err.Error()) - } - return svc -} - -func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, initial int) *v1.Service { - var err error - var service *v1.Service - for i := 1; i < ServiceNodePortRange.Size; i++ { - offs1 := initial - ServiceNodePortRange.Base - offs2 := (offs1 + i) % ServiceNodePortRange.Size - newPort := ServiceNodePortRange.Base + offs2 - service, err = j.UpdateService(namespace, name, func(s *v1.Service) { - s.Spec.Ports[0].NodePort = int32(newPort) - }) - if err != nil && strings.Contains(err.Error(), "provided port is already allocated") { - framework.Logf("tried nodePort %d, but it is in use, will try another", newPort) - continue - } - // Otherwise err was nil or err was a real error - break - } - if err != nil { - framework.Failf("Could not change the nodePort: %v", err) - } - return service -} - -func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeout time.Duration) *v1.Service { - var service *v1.Service - framework.Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name) - pollFunc := func() (bool, error) { - svc, err := j.Client.Core().Services(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return false, err - } - if len(svc.Status.LoadBalancer.Ingress) > 0 { - service = svc - return true, nil - } - return false, nil - } - if err := wait.PollImmediate(framework.Poll, timeout, pollFunc); err != nil { - framework.Failf("Timeout waiting for service %q to have a load balancer", name) - } - return service -} - -func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string, ip string, port int, timeout time.Duration) *v1.Service { - // TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable - defer func() { - if err := framework.EnsureLoadBalancerResourcesDeleted(ip, strconv.Itoa(port)); err != nil { - framework.Logf("Failed to delete cloud resources for service: %s %d (%v)", ip, port, err) - } - }() - - var service *v1.Service - framework.Logf("Waiting up to %v for service %q to have no LoadBalancer", timeout, name) - pollFunc := func() (bool, error) { - svc, err := j.Client.Core().Services(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return false, err - } - if len(svc.Status.LoadBalancer.Ingress) == 0 { - service = svc - return true, nil - } - return false, nil - } - if err := wait.PollImmediate(framework.Poll, timeout, pollFunc); err != nil { - framework.Failf("Timeout 
waiting for service %q to have no load balancer", name) - } - return service -} - -func (j *ServiceTestJig) TestReachableHTTP(host string, port int, timeout time.Duration) { - if err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { return testReachableHTTP(host, port, "/echo?msg=hello", "hello") }); err != nil { - framework.Failf("Could not reach HTTP service through %v:%v after %v: %v", host, port, timeout, err) - } -} - -func (j *ServiceTestJig) TestNotReachableHTTP(host string, port int, timeout time.Duration) { - if err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { return testNotReachableHTTP(host, port) }); err != nil { - framework.Failf("Could still reach HTTP service through %v:%v after %v: %v", host, port, timeout, err) - } -} - -func (j *ServiceTestJig) TestReachableUDP(host string, port int, timeout time.Duration) { - if err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { return testReachableUDP(host, port, "echo hello", "hello") }); err != nil { - framework.Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err) - } -} - -func (j *ServiceTestJig) TestNotReachableUDP(host string, port int, timeout time.Duration) { - if err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { return testNotReachableUDP(host, port, "echo hello") }); err != nil { - framework.Failf("Could still reach UDP service through %v:%v after %v: %v", host, port, timeout, err) - } -} - -func (j *ServiceTestJig) GetHTTPContent(host string, port int, timeout time.Duration, url string) bytes.Buffer { - var body bytes.Buffer - var err error - if pollErr := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { - result, err := testReachableHTTPWithContent(host, port, url, "", &body) - if err != nil { - framework.Logf("Error hitting %v:%v%v, retrying: %v", host, port, url, err) - return false, nil - } - return result, nil - }); pollErr != nil { - framework.Failf("Could not reach HTTP service through %v:%v%v after %v: %v", host, port, url, timeout, err) - } - return body -} - -func (j *ServiceTestJig) TestHTTPHealthCheckNodePort(host string, port int, request string, tries int) (pass, fail int, statusMsg string) { - for i := 0; i < tries; i++ { - success, err := testHTTPHealthCheckNodePort(host, port, request) - if success { - pass++ - } else { - fail++ - } - statusMsg += fmt.Sprintf("\nAttempt %d Error %v", i, err) - time.Sleep(1 * time.Second) - } - return pass, fail, statusMsg -} - -func getIngressPoint(ing *v1.LoadBalancerIngress) string { - host := ing.IP - if host == "" { - host = ing.Hostname - } - return host -} - -// newRCTemplate returns the default v1.ReplicationController object for -// this jig, but does not actually create the RC. The default RC has the same -// name as the jig and runs the "netexec" container. 
-func (j *ServiceTestJig) newRCTemplate(namespace string) *v1.ReplicationController { - rc := &v1.ReplicationController{ - ObjectMeta: v1.ObjectMeta{ - Namespace: namespace, - Name: j.Name, - Labels: j.Labels, - }, - Spec: v1.ReplicationControllerSpec{ - Replicas: func(i int) *int32 { x := int32(i); return &x }(1), - Selector: j.Labels, - Template: &v1.PodTemplateSpec{ - ObjectMeta: v1.ObjectMeta{ - Labels: j.Labels, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "netexec", - Image: "gcr.io/google_containers/netexec:1.7", - Args: []string{"--http-port=80", "--udp-port=80"}, - ReadinessProbe: &v1.Probe{ - PeriodSeconds: 3, - Handler: v1.Handler{ - HTTPGet: &v1.HTTPGetAction{ - Port: intstr.FromInt(80), - Path: "/hostName", - }, - }, - }, - }, - }, - TerminationGracePeriodSeconds: new(int64), - }, - }, - }, - } - return rc -} - -// RunOrFail creates a ReplicationController and Pod(s) and waits for the -// Pod(s) to be running. Callers can provide a function to tweak the RC object -// before it is created. -func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *v1.ReplicationController)) *v1.ReplicationController { - rc := j.newRCTemplate(namespace) - if tweak != nil { - tweak(rc) - } - result, err := j.Client.Core().ReplicationControllers(namespace).Create(rc) - if err != nil { - framework.Failf("Failed to created RC %q: %v", rc.Name, err) - } - pods, err := j.waitForPodsCreated(namespace, int(*(rc.Spec.Replicas))) - if err != nil { - framework.Failf("Failed to create pods: %v", err) - } - if err := j.waitForPodsReady(namespace, pods); err != nil { - framework.Failf("Failed waiting for pods to be running: %v", err) - } - return result -} - -func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]string, error) { - timeout := 2 * time.Minute - // List the pods, making sure we observe all the replicas. 
- label := labels.SelectorFromSet(labels.Set(j.Labels)) - framework.Logf("Waiting up to %v for %d pods to be created", timeout, replicas) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { - options := v1.ListOptions{LabelSelector: label.String()} - pods, err := j.Client.Core().Pods(namespace).List(options) - if err != nil { - return nil, err - } - - found := []string{} - for _, pod := range pods.Items { - if pod.DeletionTimestamp != nil { - continue - } - found = append(found, pod.Name) - } - if len(found) == replicas { - framework.Logf("Found all %d pods", replicas) - return found, nil - } - framework.Logf("Found %d/%d pods - will retry", len(found), replicas) - } - return nil, fmt.Errorf("Timeout waiting for %d pods to be created", replicas) -} - -func (j *ServiceTestJig) waitForPodsReady(namespace string, pods []string) error { - timeout := 2 * time.Minute - if !framework.CheckPodsRunningReady(j.Client, namespace, pods, timeout) { - return fmt.Errorf("Timeout waiting for %d pods to be ready", len(pods)) - } - return nil -} - -// Simple helper class to avoid too much boilerplate in tests -type ServiceTestFixture struct { - ServiceName string - Namespace string - Client clientset.Interface - - TestId string - Labels map[string]string - - rcs map[string]bool - services map[string]bool - name string - image string -} - -func NewServerTest(client clientset.Interface, namespace string, serviceName string) *ServiceTestFixture { - t := &ServiceTestFixture{} - t.Client = client - t.Namespace = namespace - t.ServiceName = serviceName - t.TestId = t.ServiceName + "-" + string(uuid.NewUUID()) - t.Labels = map[string]string{ - "testid": t.TestId, - } - - t.rcs = make(map[string]bool) - t.services = make(map[string]bool) - - t.name = "webserver" - t.image = "gcr.io/google_containers/test-webserver:e2e" - - return t -} - -// Build default config for a service (which can then be changed) -func (t *ServiceTestFixture) BuildServiceSpec() *v1.Service { - service := &v1.Service{ - ObjectMeta: v1.ObjectMeta{ - Name: t.ServiceName, - Namespace: t.Namespace, - }, - Spec: v1.ServiceSpec{ - Selector: t.Labels, - Ports: []v1.ServicePort{{ - Port: 80, - TargetPort: intstr.FromInt(80), - }}, - }, - } - return service -} - -// CreateWebserverRC creates rc-backed pods with the well-known webserver -// configuration and records it for cleanup. -func (t *ServiceTestFixture) CreateWebserverRC(replicas int32) *v1.ReplicationController { - rcSpec := rcByNamePort(t.name, replicas, t.image, 80, v1.ProtocolTCP, t.Labels, nil) - rcAct, err := t.createRC(rcSpec) - if err != nil { - framework.Failf("Failed to create rc %s: %v", rcSpec.Name, err) - } - if err := framework.VerifyPods(t.Client, t.Namespace, t.name, false, replicas); err != nil { - framework.Failf("Failed to create %d pods with name %s: %v", replicas, t.name, err) - } - return rcAct -} - -// createRC creates a replication controller and records it for cleanup. 
-func (t *ServiceTestFixture) createRC(rc *v1.ReplicationController) (*v1.ReplicationController, error) { - rc, err := t.Client.Core().ReplicationControllers(t.Namespace).Create(rc) - if err == nil { - t.rcs[rc.Name] = true - } - return rc, err -} - -// Create a service, and record it for cleanup -func (t *ServiceTestFixture) CreateService(service *v1.Service) (*v1.Service, error) { - result, err := t.Client.Core().Services(t.Namespace).Create(service) - if err == nil { - t.services[service.Name] = true - } - return result, err -} - -// Delete a service, and remove it from the cleanup list -func (t *ServiceTestFixture) DeleteService(serviceName string) error { - err := t.Client.Core().Services(t.Namespace).Delete(serviceName, nil) - if err == nil { - delete(t.services, serviceName) - } - return err -} - -func (t *ServiceTestFixture) Cleanup() []error { - var errs []error - for rcName := range t.rcs { - By("stopping RC " + rcName + " in namespace " + t.Namespace) - // First, resize the RC to 0. - old, err := t.Client.Core().ReplicationControllers(t.Namespace).Get(rcName, metav1.GetOptions{}) - if err != nil { - errs = append(errs, err) - } - x := int32(0) - old.Spec.Replicas = &x - if _, err := t.Client.Core().ReplicationControllers(t.Namespace).Update(old); err != nil { - errs = append(errs, err) - } - // TODO(mikedanese): Wait. - - // Then, delete the RC altogether. - if err := t.Client.Core().ReplicationControllers(t.Namespace).Delete(rcName, nil); err != nil { - errs = append(errs, err) - } - } - - for serviceName := range t.services { - By("deleting service " + serviceName + " in namespace " + t.Namespace) - err := t.Client.Core().Services(t.Namespace).Delete(serviceName, nil) - if err != nil { - errs = append(errs, err) - } - } - - return errs -} - -// newNetexecPodSpec returns the pod spec of netexec pod -func newNetexecPodSpec(podName string, httpPort, udpPort int32, hostNetwork bool) *v1.Pod { - pod := &v1.Pod{ - ObjectMeta: v1.ObjectMeta{ - Name: podName, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "netexec", - Image: framework.NetexecImageName, - Command: []string{ - "/netexec", - fmt.Sprintf("--http-port=%d", httpPort), - fmt.Sprintf("--udp-port=%d", udpPort), - }, - Ports: []v1.ContainerPort{ - { - Name: "http", - ContainerPort: httpPort, - }, - { - Name: "udp", - ContainerPort: udpPort, - }, - }, - }, - }, - HostNetwork: hostNetwork, - }, - } - return pod -} - -func (j *ServiceTestJig) LaunchNetexecPodOnNode(f *framework.Framework, nodeName, podName string, httpPort, udpPort int32, hostNetwork bool) { - framework.Logf("Creating netexec pod %q on node %v in namespace %q", podName, nodeName, f.Namespace.Name) - pod := newNetexecPodSpec(podName, httpPort, udpPort, hostNetwork) - pod.Spec.NodeName = nodeName - pod.ObjectMeta.Labels = j.Labels - podClient := f.ClientSet.Core().Pods(f.Namespace.Name) - _, err := podClient.Create(pod) - framework.ExpectNoError(err) - framework.ExpectNoError(f.WaitForPodRunning(podName)) - framework.Logf("Netexec pod %q in namespace %q running", pod.Name, f.Namespace.Name) -} - -// newEchoServerPodSpec returns the pod spec of echo server pod -func newEchoServerPodSpec(podName string) *v1.Pod { - port := 8080 - pod := &v1.Pod{ - ObjectMeta: v1.ObjectMeta{ - Name: podName, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "echoserver", - Image: "gcr.io/google_containers/echoserver:1.4", - Ports: []v1.ContainerPort{{ContainerPort: int32(port)}}, - }, - }, - RestartPolicy: v1.RestartPolicyNever, - }, - } - return 
pod -} - -// launchEchoserverPodOnNode launches a pod serving http on port 8080 to act -// as the target for source IP preservation test. The client's source ip would -// be echoed back by the web server. -func (j *ServiceTestJig) launchEchoserverPodOnNode(f *framework.Framework, nodeName, podName string) { - framework.Logf("Creating echo server pod %q in namespace %q", podName, f.Namespace.Name) - pod := newEchoServerPodSpec(podName) - pod.Spec.NodeName = nodeName - pod.ObjectMeta.Labels = j.Labels - podClient := f.ClientSet.Core().Pods(f.Namespace.Name) - _, err := podClient.Create(pod) - framework.ExpectNoError(err) - framework.ExpectNoError(f.WaitForPodRunning(podName)) - framework.Logf("Echo server pod %q in namespace %q running", pod.Name, f.Namespace.Name) -} - func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeName, serviceIP string, servicePort int) (string, string) { framework.Logf("Creating an exec pod on node %v", nodeName) - execPodName := createExecPodOnNode(f.ClientSet, ns, nodeName, fmt.Sprintf("execpod-sourceip-%s", nodeName)) + execPodName := framework.CreateExecPodOrFail(f.ClientSet, ns, fmt.Sprintf("execpod-sourceip-%s", nodeName), func(pod *v1.Pod) { + pod.Spec.NodeName = nodeName + }) defer func() { framework.Logf("Cleaning up the exec pod") err := c.Core().Pods(ns).Delete(execPodName, nil) @@ -3007,51 +1571,3 @@ func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeNam } return execPod.Status.PodIP, outputs[1] } - -func getLoadBalancerName(service *v1.Service) string { - //GCE requires that the name of a load balancer starts with a lower case letter. - ret := "a" + string(service.UID) - ret = strings.Replace(ret, "-", "", -1) - //AWS requires that the name of a load balancer is shorter than 32 bytes. - if len(ret) > 32 { - ret = ret[:32] - } - return ret -} - -func cleanupServiceGCEResources(loadBalancerName string) { - if pollErr := wait.Poll(5*time.Second, lbCleanupTimeout, func() (bool, error) { - if err := framework.CleanupGCEResources(loadBalancerName); err != nil { - framework.Logf("Still waiting for glbc to cleanup: %v", err) - return false, nil - } - return true, nil - }); pollErr != nil { - framework.Failf("Failed to cleanup service GCE resources.") - } -} - -func describeSvc(ns string) { - framework.Logf("\nOutput of kubectl describe svc:\n") - desc, _ := framework.RunKubectl( - "describe", "svc", fmt.Sprintf("--namespace=%v", ns)) - framework.Logf(desc) -} - -func checkReachabilityFromPod(expectToBeReachable bool, namespace, pod, target string) { - cmd := fmt.Sprintf("wget -T 5 -qO- %q", target) - err := wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) { - _, err := framework.RunHostCmd(namespace, pod, cmd) - if expectToBeReachable && err != nil { - framework.Logf("Expect target to be reachable. But got err: %v. Retry until timeout", err) - return false, nil - } - - if !expectToBeReachable && err == nil { - framework.Logf("Expect target NOT to be reachable. But it is reachable. Retry until timeout") - return false, nil - } - return true, nil - }) - Expect(err).NotTo(HaveOccurred()) -}
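
Note on the last removed helper: checkReachabilityFromPod is the usual retry-until-timeout reachability probe, polling a wget from inside a pod until the observed result matches the expectation. For readers unfamiliar with the pattern, below is a minimal standalone sketch of the same idea in plain Go. It is not part of this patch: the waitForReachability name, the 127.0.0.1:8080/hostName endpoint, and the interval/timeout values are illustrative assumptions, and the probe runs from the test process with net/http instead of wget via framework.RunHostCmd. The control flow, however, mirrors the helper above: succeed as soon as reachability matches the expectation, otherwise log and retry until the deadline.

package main

import (
	"fmt"
	"net/http"
	"time"
)

// waitForReachability polls target until its observed reachability matches
// expectReachable, or the timeout expires. This mirrors the retry loop in
// checkReachabilityFromPod (which wraps wait.PollImmediate around a wget
// executed inside a pod).
func waitForReachability(target string, expectReachable bool, interval, timeout time.Duration) error {
	client := &http.Client{Timeout: 5 * time.Second} // analogous to `wget -T 5`
	deadline := time.Now().Add(timeout)
	for {
		resp, err := client.Get(target)
		if err == nil {
			resp.Body.Close()
		}
		reachable := err == nil
		if reachable == expectReachable {
			// Observed state matches the expectation; stop polling.
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("target %q reachable=%v, expected %v after %v",
				target, reachable, expectReachable, timeout)
		}
		fmt.Printf("target %q reachable=%v, expected %v - will retry\n",
			target, reachable, expectReachable)
		time.Sleep(interval)
	}
}

func main() {
	// Usage example against an assumed local endpoint (hypothetical; the
	// /hostName path is borrowed from the netexec readiness probe above).
	if err := waitForReachability("http://127.0.0.1:8080/hostName", true, 2*time.Second, 2*time.Minute); err != nil {
		fmt.Println(err)
	}
}

The e2e helper differs only in where the probe executes (inside a pod in the target namespace, so that cluster networking rather than the test host is what gets exercised); the polling structure and the reachable/unreachable expectation handling are the same.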