Move a bunch of totally non-jiggy code out of e2eservice.TestJig

When test/e2e/framework/service was first created, a lot of service
test code got crammed into TestJig even though it didn't use any of
TestJig's members. Make that code available outside of TestJig.
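At call sites this is a mechanical change from jig methods to package-level
functions, e.g. (a sketch, using names from the updated tests below):

// before: reachability helper invoked as a TestJig method
jig.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)

// after: the same helper as a plain function in the service package
e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)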
Dan Winship 2019-08-13 08:40:18 -04:00
parent c4c64673d7
commit 0b9fa1146f
7 changed files with 319 additions and 284 deletions

test/e2e/framework/service/BUILD

@@ -9,6 +9,7 @@ go_library(
"hostname.go",
"jig.go",
"resource.go",
"util.go",
"wait.go",
],
importpath = "k8s.io/kubernetes/test/e2e/framework/service",

test/e2e/framework/service/affinity.go

@@ -17,9 +17,67 @@ limitations under the License.
package service
import (
"fmt"
"net"
"strconv"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
)
// CheckAffinity tests whether the service affinity works as expected.
// If affinity is expected, the test returns true once affinityConfirmCount
// identical responses are observed in a row. If affinity is not expected, the
// test keeps polling until differing responses are observed. The function
// returns false only in case of unexpected errors.
func CheckAffinity(execPod *v1.Pod, serviceIP string, servicePort int, shouldHold bool) bool {
serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
cmd := fmt.Sprintf(`curl -q -s --connect-timeout 2 http://%s/`, serviceIPPort)
timeout := TestTimeout
if execPod == nil {
timeout = LoadBalancerPollTimeout
}
var tracker affinityTracker
if pollErr := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
if execPod != nil {
stdout, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
if err != nil {
framework.Logf("Failed to get response from %s. Retry until timeout", serviceIPPort)
return false, nil
}
tracker.recordHost(stdout)
} else {
rawResponse := GetHTTPContent(serviceIP, servicePort, timeout, "")
tracker.recordHost(rawResponse.String())
}
trackerFulfilled, affinityHolds := tracker.checkHostTrace(AffinityConfirmCount)
if !shouldHold && !affinityHolds {
return true, nil
}
if shouldHold && trackerFulfilled && affinityHolds {
return true, nil
}
return false, nil
}); pollErr != nil {
trackerFulfilled, _ := tracker.checkHostTrace(AffinityConfirmCount)
if pollErr != wait.ErrWaitTimeout {
checkAffinityFailed(tracker, pollErr.Error())
return false
}
if !trackerFulfilled {
checkAffinityFailed(tracker, fmt.Sprintf("Connection to %s timed out or not enough responses.", serviceIPPort))
}
if shouldHold {
checkAffinityFailed(tracker, "Affinity should hold but didn't.")
} else {
checkAffinityFailed(tracker, "Affinity shouldn't hold but did.")
}
return true
}
return true
}
// affinityTracker tracks the destination of a request for the affinity tests.
type affinityTracker struct {
hostTrace []string

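For reference, a sketch of how the relocated CheckAffinity is called, mirroring
the call-site updates later in this commit (execPod, svcIP, and servicePort come
from the surrounding test; a nil execPod makes the check go through the load
balancer via GetHTTPContent instead):

// Affinity should hold for a ClientIP-affinity service...
gomega.Expect(e2eservice.CheckAffinity(execPod, svcIP, servicePort, true)).To(gomega.BeTrue())
// ...and should stop holding after SessionAffinity is reset to None.
gomega.Expect(e2eservice.CheckAffinity(execPod, svcIP, servicePort, false)).To(gomega.BeTrue())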
test/e2e/framework/service/jig.go

@@ -17,10 +17,8 @@ limitations under the License.
package service
import (
"bytes"
"fmt"
"net"
"net/http"
"regexp"
"strconv"
"strings"
@@ -884,147 +882,6 @@ func (j *TestJig) CheckServiceReachability(namespace string, svc *v1.Service, po
}
}
// TestReachableHTTP tests that the given host serves HTTP on the given port.
func (j *TestJig) TestReachableHTTP(host string, port int, timeout time.Duration) {
j.TestReachableHTTPWithRetriableErrorCodes(host, port, []int{}, timeout)
}
// TestReachableHTTPWithRetriableErrorCodes tests that the given host serves HTTP on the given port, retrying when the response code is one of retriableErrCodes.
func (j *TestJig) TestReachableHTTPWithRetriableErrorCodes(host string, port int, retriableErrCodes []int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := framework.PokeHTTP(host, port, "/echo?msg=hello",
&framework.HTTPPokeParams{
BodyContains: "hello",
RetriableCodes: retriableErrCodes,
})
if result.Status == framework.HTTPSuccess {
return true, nil
}
return false, nil // caller can retry
}
if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
if err == wait.ErrWaitTimeout {
framework.Failf("Could not reach HTTP service through %v:%v after %v", host, port, timeout)
} else {
framework.Failf("Failed to reach HTTP service through %v:%v: %v", host, port, err)
}
}
}
// TestNotReachableHTTP tests that an HTTP request doesn't connect to the given host and port.
func (j *TestJig) TestNotReachableHTTP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := framework.PokeHTTP(host, port, "/", nil)
if result.Code == 0 {
return true, nil
}
return false, nil // caller can retry
}
if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
framework.Failf("HTTP service %v:%v reachable after %v: %v", host, port, timeout, err)
}
}
// TestRejectedHTTP tests that the given host rejects an HTTP request on the given port.
func (j *TestJig) TestRejectedHTTP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := framework.PokeHTTP(host, port, "/", nil)
if result.Status == framework.HTTPRefused {
return true, nil
}
return false, nil // caller can retry
}
if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
framework.Failf("HTTP service %v:%v not rejected: %v", host, port, err)
}
}
// TestReachableUDP tests that the given host serves UDP on the given port.
func (j *TestJig) TestReachableUDP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := framework.PokeUDP(host, port, "echo hello", &framework.UDPPokeParams{
Timeout: 3 * time.Second,
Response: "hello",
})
if result.Status == framework.UDPSuccess {
return true, nil
}
return false, nil // caller can retry
}
if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
framework.Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err)
}
}
// TestNotReachableUDP tests that the given host doesn't serve UDP on the given port.
func (j *TestJig) TestNotReachableUDP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := framework.PokeUDP(host, port, "echo hello", &framework.UDPPokeParams{Timeout: 3 * time.Second})
if result.Status != framework.UDPSuccess && result.Status != framework.UDPError {
return true, nil
}
return false, nil // caller can retry
}
if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
framework.Failf("UDP service %v:%v reachable after %v: %v", host, port, timeout, err)
}
}
// TestRejectedUDP tests that the given host rejects a UDP request on the given port.
func (j *TestJig) TestRejectedUDP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := framework.PokeUDP(host, port, "echo hello", &framework.UDPPokeParams{Timeout: 3 * time.Second})
if result.Status == framework.UDPRefused {
return true, nil
}
return false, nil // caller can retry
}
if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
framework.Failf("UDP service %v:%v not rejected: %v", host, port, err)
}
}
// GetHTTPContent returns the content of the given URL path via HTTP.
func (j *TestJig) GetHTTPContent(host string, port int, timeout time.Duration, url string) bytes.Buffer {
var body bytes.Buffer
if pollErr := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
result := framework.PokeHTTP(host, port, url, nil)
if result.Status == framework.HTTPSuccess {
body.Write(result.Body)
return true, nil
}
return false, nil
}); pollErr != nil {
framework.Failf("Could not reach HTTP service through %v:%v%v after %v: %v", host, port, url, timeout, pollErr)
}
return body
}
// TestHTTPHealthCheckNodePort tests an HTTP connection via the given request path to the given host and port.
func (j *TestJig) TestHTTPHealthCheckNodePort(host string, port int, request string, timeout time.Duration, expectSucceed bool, threshold int) error {
count := 0
condition := func() (bool, error) {
success, _ := testHTTPHealthCheckNodePort(host, port, request)
if success == expectSucceed {
count++
}
if count >= threshold {
return true, nil
}
return false, nil
}
if err := wait.PollImmediate(time.Second, timeout, condition); err != nil {
return fmt.Errorf("error waiting for healthCheckNodePort: expected at least %d succeed=%v on %v:%v, got %d", threshold, expectSucceed, host, port, count)
}
return nil
}
// CreateServicePods creates a replication controller with the same label as the service
func (j *TestJig) CreateServicePods(c clientset.Interface, ns string, replica int) {
config := testutils.RCConfig{
@@ -1042,100 +899,6 @@ func (j *TestJig) CreateServicePods(c clientset.Interface, ns string, replica in
framework.ExpectNoError(err, "Replica must be created")
}
// CheckAffinity tests whether the service affinity works as expected.
// If affinity is expected, the test returns true once affinityConfirmCount
// identical responses are observed in a row. If affinity is not expected, the
// test keeps polling until differing responses are observed. The function
// returns false only in case of unexpected errors.
func (j *TestJig) CheckAffinity(execPod *v1.Pod, targetIP string, targetPort int, shouldHold bool) bool {
targetIPPort := net.JoinHostPort(targetIP, strconv.Itoa(targetPort))
cmd := fmt.Sprintf(`curl -q -s --connect-timeout 2 http://%s/`, targetIPPort)
timeout := TestTimeout
if execPod == nil {
timeout = LoadBalancerPollTimeout
}
var tracker affinityTracker
if pollErr := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
if execPod != nil {
stdout, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
if err != nil {
framework.Logf("Failed to get response from %s. Retry until timeout", targetIPPort)
return false, nil
}
tracker.recordHost(stdout)
} else {
rawResponse := j.GetHTTPContent(targetIP, targetPort, timeout, "")
tracker.recordHost(rawResponse.String())
}
trackerFulfilled, affinityHolds := tracker.checkHostTrace(AffinityConfirmCount)
if !shouldHold && !affinityHolds {
return true, nil
}
if shouldHold && trackerFulfilled && affinityHolds {
return true, nil
}
return false, nil
}); pollErr != nil {
trackerFulfilled, _ := tracker.checkHostTrace(AffinityConfirmCount)
if pollErr != wait.ErrWaitTimeout {
checkAffinityFailed(tracker, pollErr.Error())
return false
}
if !trackerFulfilled {
checkAffinityFailed(tracker, fmt.Sprintf("Connection to %s timed out or not enough responses.", targetIPPort))
}
if shouldHold {
checkAffinityFailed(tracker, "Affinity should hold but didn't.")
} else {
checkAffinityFailed(tracker, "Affinity shouldn't hold but did.")
}
return true
}
return true
}
func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, error) {
ipPort := net.JoinHostPort(ip, strconv.Itoa(port))
url := fmt.Sprintf("http://%s%s", ipPort, request)
if ip == "" || port == 0 {
framework.Failf("Got empty IP for reachability check (%s)", url)
return false, fmt.Errorf("invalid input ip or port")
}
framework.Logf("Testing HTTP health check on %v", url)
resp, err := httpGetNoConnectionPoolTimeout(url, 5*time.Second)
if err != nil {
framework.Logf("Got error testing for reachability of %s: %v", url, err)
return false, err
}
defer resp.Body.Close()
// HealthCheck responder returns 503 for no local endpoints
if resp.StatusCode == 503 {
return false, nil
}
// HealthCheck responder returns 200 for non-zero local endpoints
if resp.StatusCode == 200 {
return true, nil
}
return false, fmt.Errorf("unexpected HTTP response code %s from health check responder at %s", resp.Status, url)
}
// Does an HTTP GET, but does not reuse TCP connections.
// Connection reuse would mask problems where the iptables rule has changed
// but existing connections keep working.
func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Response, error) {
tr := utilnet.SetTransportDefaults(&http.Transport{
DisableKeepAlives: true,
})
client := &http.Client{
Transport: tr,
Timeout: timeout,
}
return client.Get(url)
}
// CreatePausePodDeployment creates a deployment for the agnhost-pause pod running on different nodes
func (j *TestJig) CreatePausePodDeployment(name, ns string, replica int32) *appsv1.Deployment {
// terminationGracePeriod is set to 0 to reduce deployment deletion time for the infinitely running pause pod.

test/e2e/framework/service/util.go

@@ -0,0 +1,213 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"bytes"
"fmt"
"net"
"net/http"
"strconv"
"time"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
)
// TestReachableHTTP tests that the given host serves HTTP on the given port.
func TestReachableHTTP(host string, port int, timeout time.Duration) {
TestReachableHTTPWithRetriableErrorCodes(host, port, []int{}, timeout)
}
// TestReachableHTTPWithRetriableErrorCodes tests that the given host serves HTTP on the given port, retrying when the response code is one of retriableErrCodes.
func TestReachableHTTPWithRetriableErrorCodes(host string, port int, retriableErrCodes []int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := framework.PokeHTTP(host, port, "/echo?msg=hello",
&framework.HTTPPokeParams{
BodyContains: "hello",
RetriableCodes: retriableErrCodes,
})
if result.Status == framework.HTTPSuccess {
return true, nil
}
return false, nil // caller can retry
}
if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
if err == wait.ErrWaitTimeout {
framework.Failf("Could not reach HTTP service through %v:%v after %v", host, port, timeout)
} else {
framework.Failf("Failed to reach HTTP service through %v:%v: %v", host, port, err)
}
}
}
// TestNotReachableHTTP tests that an HTTP request doesn't connect to the given host and port.
func TestNotReachableHTTP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := framework.PokeHTTP(host, port, "/", nil)
if result.Code == 0 {
return true, nil
}
return false, nil // caller can retry
}
if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
framework.Failf("HTTP service %v:%v reachable after %v: %v", host, port, timeout, err)
}
}
// TestRejectedHTTP tests that the given host rejects an HTTP request on the given port.
func TestRejectedHTTP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := framework.PokeHTTP(host, port, "/", nil)
if result.Status == framework.HTTPRefused {
return true, nil
}
return false, nil // caller can retry
}
if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
framework.Failf("HTTP service %v:%v not rejected: %v", host, port, err)
}
}
// TestReachableUDP tests that the given host serves UDP on the given port.
func TestReachableUDP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := framework.PokeUDP(host, port, "echo hello", &framework.UDPPokeParams{
Timeout: 3 * time.Second,
Response: "hello",
})
if result.Status == framework.UDPSuccess {
return true, nil
}
return false, nil // caller can retry
}
if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
framework.Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err)
}
}
// TestNotReachableUDP tests that the given host doesn't serve UDP on the given port.
func TestNotReachableUDP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := framework.PokeUDP(host, port, "echo hello", &framework.UDPPokeParams{Timeout: 3 * time.Second})
if result.Status != framework.UDPSuccess && result.Status != framework.UDPError {
return true, nil
}
return false, nil // caller can retry
}
if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
framework.Failf("UDP service %v:%v reachable after %v: %v", host, port, timeout, err)
}
}
// TestRejectedUDP tests that the given host rejects a UDP request on the given port.
func TestRejectedUDP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := framework.PokeUDP(host, port, "echo hello", &framework.UDPPokeParams{Timeout: 3 * time.Second})
if result.Status == framework.UDPRefused {
return true, nil
}
return false, nil // caller can retry
}
if err := wait.PollImmediate(framework.Poll, timeout, pollfn); err != nil {
framework.Failf("UDP service %v:%v not rejected: %v", host, port, err)
}
}
// TestHTTPHealthCheckNodePort tests an HTTP connection via the given request path to the given host and port.
func TestHTTPHealthCheckNodePort(host string, port int, request string, timeout time.Duration, expectSucceed bool, threshold int) error {
count := 0
condition := func() (bool, error) {
success, _ := testHTTPHealthCheckNodePort(host, port, request)
if success == expectSucceed {
count++
}
if count >= threshold {
return true, nil
}
return false, nil
}
if err := wait.PollImmediate(time.Second, timeout, condition); err != nil {
return fmt.Errorf("error waiting for healthCheckNodePort: expected at least %d succeed=%v on %v:%v, got %d", threshold, expectSucceed, host, port, count)
}
return nil
}
func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, error) {
ipPort := net.JoinHostPort(ip, strconv.Itoa(port))
url := fmt.Sprintf("http://%s%s", ipPort, request)
if ip == "" || port == 0 {
framework.Failf("Got empty IP for reachability check (%s)", url)
return false, fmt.Errorf("invalid input ip or port")
}
framework.Logf("Testing HTTP health check on %v", url)
resp, err := httpGetNoConnectionPoolTimeout(url, 5*time.Second)
if err != nil {
framework.Logf("Got error testing for reachability of %s: %v", url, err)
return false, err
}
defer resp.Body.Close()
// HealthCheck responder returns 503 for no local endpoints
if resp.StatusCode == 503 {
return false, nil
}
// HealthCheck responder returns 200 for non-zero local endpoints
if resp.StatusCode == 200 {
return true, nil
}
return false, fmt.Errorf("unexpected HTTP response code %s from health check responder at %s", resp.Status, url)
}
// Does an HTTP GET, but does not reuse TCP connections.
// Connection reuse would mask problems where the iptables rule has changed
// but existing connections keep working.
func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Response, error) {
tr := utilnet.SetTransportDefaults(&http.Transport{
DisableKeepAlives: true,
})
client := &http.Client{
Transport: tr,
Timeout: timeout,
}
return client.Get(url)
}
// GetHTTPContent returns the content of the given URL path via HTTP.
func GetHTTPContent(host string, port int, timeout time.Duration, url string) bytes.Buffer {
var body bytes.Buffer
if pollErr := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
result := framework.PokeHTTP(host, port, url, nil)
if result.Status == framework.HTTPSuccess {
body.Write(result.Body)
return true, nil
}
return false, nil
}); pollErr != nil {
framework.Failf("Could not reach HTTP service through %v:%v%v after %v: %v", host, port, url, timeout, pollErr)
}
return body
}

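A sketch of how these helpers compose in a caller (hypothetical function, not
part of this commit; ingressIP, port, and timeout are assumed test values):

// checkClientIPViaLB waits until the LB answers, tolerating lingering 404s
// from a re-used IP, then reads agnhost's /clientip endpoint.
func checkClientIPViaLB(ingressIP string, port int, timeout time.Duration) string {
	e2eservice.TestReachableHTTPWithRetriableErrorCodes(ingressIP, port, []int{http.StatusNotFound}, timeout)
	content := e2eservice.GetHTTPContent(ingressIP, port, timeout, "/clientip")
	return content.String()
}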
test/e2e/network/network_tiers.go

@@ -159,7 +159,7 @@ func waitAndVerifyLBWithTier(jig *e2eservice.TestJig, ns, svcName, existingIP st
// If the IP has been used by a previous test, sometimes we get lingering
// 404 errors even after the LB is long gone. Tolerate and retry until the
// new LB is fully established, since this feature is still Alpha in GCP.
jig.TestReachableHTTPWithRetriableErrorCodes(ingressIP, svcPort, []int{http.StatusNotFound}, checkTimeout)
e2eservice.TestReachableHTTPWithRetriableErrorCodes(ingressIP, svcPort, []int{http.StatusNotFound}, checkTimeout)
// Verify the network tier matches the desired.
svcNetTier, err := gcecloud.GetServiceNetworkTier(svc)

test/e2e/network/service.go

@@ -646,10 +646,10 @@ var _ = SIGDescribe("Services", func() {
e2elog.Logf("UDP node port: %d", udpNodePort)
ginkgo.By("hitting the TCP service's NodePort")
jig.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
ginkgo.By("hitting the UDP service's NodePort")
jig.TestReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
e2eservice.TestReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
// Change the services to LoadBalancer.
@@ -747,17 +747,17 @@ var _ = SIGDescribe("Services", func() {
}
ginkgo.By("hitting the TCP service's NodePort")
jig.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
ginkgo.By("hitting the UDP service's NodePort")
jig.TestReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
e2eservice.TestReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
ginkgo.By("hitting the TCP service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
if loadBalancerSupportsUDP {
ginkgo.By("hitting the UDP service's LoadBalancer")
jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
e2eservice.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
}
// Change the services' node ports.
@@ -793,23 +793,23 @@ var _ = SIGDescribe("Services", func() {
e2elog.Logf("UDP node port: %d", udpNodePort)
ginkgo.By("hitting the TCP service's new NodePort")
jig.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
ginkgo.By("hitting the UDP service's new NodePort")
jig.TestReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
e2eservice.TestReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
ginkgo.By("checking the old TCP NodePort is closed")
jig.TestNotReachableHTTP(nodeIP, tcpNodePortOld, e2eservice.KubeProxyLagTimeout)
e2eservice.TestNotReachableHTTP(nodeIP, tcpNodePortOld, e2eservice.KubeProxyLagTimeout)
ginkgo.By("checking the old UDP NodePort is closed")
jig.TestNotReachableUDP(nodeIP, udpNodePortOld, e2eservice.KubeProxyLagTimeout)
e2eservice.TestNotReachableUDP(nodeIP, udpNodePortOld, e2eservice.KubeProxyLagTimeout)
ginkgo.By("hitting the TCP service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
if loadBalancerSupportsUDP {
ginkgo.By("hitting the UDP service's LoadBalancer")
jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
e2eservice.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
}
// Change the services' main ports.
@@ -853,17 +853,17 @@ var _ = SIGDescribe("Services", func() {
e2elog.Logf("service port (TCP and UDP): %d", svcPort)
ginkgo.By("hitting the TCP service's NodePort")
jig.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
ginkgo.By("hitting the UDP service's NodePort")
jig.TestReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
e2eservice.TestReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
ginkgo.By("hitting the TCP service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
if loadBalancerSupportsUDP {
ginkgo.By("hitting the UDP service's LoadBalancer")
jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
e2eservice.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
}
ginkgo.By("Scaling the pods to 0")
@@ -871,17 +871,17 @@ var _ = SIGDescribe("Services", func() {
jig.Scale(ns2, 0)
ginkgo.By("looking for ICMP REJECT on the TCP service's NodePort")
jig.TestRejectedHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
e2eservice.TestRejectedHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
ginkgo.By("looking for ICMP REJECT on the UDP service's NodePort")
jig.TestRejectedUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
e2eservice.TestRejectedUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
ginkgo.By("looking for ICMP REJECT on the TCP service's LoadBalancer")
jig.TestRejectedHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
e2eservice.TestRejectedHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
if loadBalancerSupportsUDP {
ginkgo.By("looking for ICMP REJECT on the UDP service's LoadBalancer")
jig.TestRejectedUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
e2eservice.TestRejectedUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
}
ginkgo.By("Scaling the pods to 1")
@@ -889,17 +889,17 @@ var _ = SIGDescribe("Services", func() {
jig.Scale(ns2, 1)
ginkgo.By("hitting the TCP service's NodePort")
jig.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
ginkgo.By("hitting the UDP service's NodePort")
jig.TestReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
e2eservice.TestReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
ginkgo.By("hitting the TCP service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
if loadBalancerSupportsUDP {
ginkgo.By("hitting the UDP service's LoadBalancer")
jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
e2eservice.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout)
}
// Change the services back to ClusterIP.
@@ -925,17 +925,17 @@ var _ = SIGDescribe("Services", func() {
}
ginkgo.By("checking the TCP NodePort is closed")
jig.TestNotReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
e2eservice.TestNotReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
ginkgo.By("checking the UDP NodePort is closed")
jig.TestNotReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
e2eservice.TestNotReachableUDP(nodeIP, udpNodePort, e2eservice.KubeProxyLagTimeout)
ginkgo.By("checking the TCP LoadBalancer is closed")
jig.TestNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
e2eservice.TestNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout)
if loadBalancerSupportsUDP {
ginkgo.By("checking the UDP LoadBalancer is closed")
jig.TestNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
e2eservice.TestNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout)
}
})
@@ -1698,7 +1698,7 @@ var _ = SIGDescribe("Services", func() {
ginkgo.By("hitting the external load balancer")
e2elog.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName)
tcpIngressIP = e2eservice.GetIngressPoint(lbIngress)
jig.TestReachableHTTP(tcpIngressIP, svcPort, e2eservice.LoadBalancerLagTimeoutDefault)
e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, e2eservice.LoadBalancerLagTimeoutDefault)
// GCE cannot test a specific IP because the test may not own it. This cloud specific condition
// will be removed when GCP supports similar functionality.
@@ -2122,7 +2122,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
// Make sure we didn't leak the health check node port.
threshold := 2
for _, ips := range jig.GetEndpointNodes(svc) {
err := jig.TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", e2eservice.KubeProxyEndpointLagTimeout, false, threshold)
err := e2eservice.TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", e2eservice.KubeProxyEndpointLagTimeout, false, threshold)
framework.ExpectNoError(err)
}
err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)
@@ -2133,7 +2133,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
ginkgo.By("reading clientIP using the TCP service's service port via its external VIP")
content := jig.GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip")
content := e2eservice.GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip")
clientIP := content.String()
e2elog.Logf("ClientIP detected by target pod using VIP:SvcPort is %s", clientIP)
@@ -2161,7 +2161,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
for nodeName, nodeIPs := range endpointsNodeMap {
nodeIP := nodeIPs[0]
ginkgo.By(fmt.Sprintf("reading clientIP using the TCP service's NodePort, on node %v: %v%v%v", nodeName, nodeIP, tcpNodePort, path))
content := jig.GetHTTPContent(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout, path)
content := e2eservice.GetHTTPContent(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout, path)
clientIP := content.String()
e2elog.Logf("ClientIP detected by target pod using NodePort is %s", clientIP)
if strings.HasPrefix(clientIP, "10.") {
@@ -2224,12 +2224,12 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
for n, publicIP := range ips {
// Make sure the loadbalancer picked up the health check change.
// Confirm traffic can reach backend through LB before checking healthcheck nodeport.
jig.TestReachableHTTP(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout)
e2eservice.TestReachableHTTP(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout)
expectedSuccess := nodes.Items[n].Name == endpointNodeName
port := strconv.Itoa(healthCheckNodePort)
ipPort := net.JoinHostPort(publicIP, port)
e2elog.Logf("Health checking %s, http://%s%s, expectedSuccess %v", nodes.Items[n].Name, ipPort, path, expectedSuccess)
err := jig.TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, e2eservice.KubeProxyEndpointLagTimeout, expectedSuccess, threshold)
err := e2eservice.TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, e2eservice.KubeProxyEndpointLagTimeout, expectedSuccess, threshold)
framework.ExpectNoError(err)
}
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName))
@@ -2338,7 +2338,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
ginkgo.By(fmt.Sprintf("endpoints present on nodes %v, absent on nodes %v", endpointNodeMap, noEndpointNodeMap))
for nodeName, nodeIPs := range noEndpointNodeMap {
ginkgo.By(fmt.Sprintf("Checking %v (%v:%v%v) proxies to endpoints on another node", nodeName, nodeIPs[0], svcNodePort, path))
jig.GetHTTPContent(nodeIPs[0], svcNodePort, e2eservice.KubeProxyLagTimeout, path)
e2eservice.GetHTTPContent(nodeIPs[0], svcNodePort, e2eservice.KubeProxyLagTimeout, path)
}
for nodeName, nodeIPs := range endpointNodeMap {
@@ -2363,7 +2363,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
ginkgo.By(fmt.Sprintf("checking source ip is NOT preserved through loadbalancer %v", ingressIP))
var clientIP string
pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
content := jig.GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip")
content := e2eservice.GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip")
clientIP = content.String()
if strings.HasPrefix(clientIP, "10.") {
return true, nil
@@ -2387,7 +2387,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
svc.Spec.HealthCheckNodePort = int32(healthCheckNodePort)
})
pollErr = wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
content := jig.GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, path)
content := e2eservice.GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, path)
clientIP = content.String()
ginkgo.By(fmt.Sprintf("Endpoint %v:%v%v returned client ip %v", ingressIP, svcTCPPort, path, clientIP))
if !strings.HasPrefix(clientIP, "10.") {
@@ -2481,17 +2481,17 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
framework.ExpectNoError(err, "failed to fetch pod: %s in namespace: %s", execPod.Name, ns)
if !isTransitionTest {
gomega.Expect(jig.CheckAffinity(execPod, svcIP, servicePort, true)).To(gomega.BeTrue())
gomega.Expect(e2eservice.CheckAffinity(execPod, svcIP, servicePort, true)).To(gomega.BeTrue())
}
if isTransitionTest {
svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityNone
})
gomega.Expect(jig.CheckAffinity(execPod, svcIP, servicePort, false)).To(gomega.BeTrue())
gomega.Expect(e2eservice.CheckAffinity(execPod, svcIP, servicePort, false)).To(gomega.BeTrue())
svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
})
gomega.Expect(jig.CheckAffinity(execPod, svcIP, servicePort, true)).To(gomega.BeTrue())
gomega.Expect(e2eservice.CheckAffinity(execPod, svcIP, servicePort, true)).To(gomega.BeTrue())
}
}
@@ -2529,17 +2529,17 @@ func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework,
port := int(svc.Spec.Ports[0].Port)
if !isTransitionTest {
gomega.Expect(jig.CheckAffinity(nil, ingressIP, port, true)).To(gomega.BeTrue())
gomega.Expect(e2eservice.CheckAffinity(nil, ingressIP, port, true)).To(gomega.BeTrue())
}
if isTransitionTest {
svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityNone
})
gomega.Expect(jig.CheckAffinity(nil, ingressIP, port, false)).To(gomega.BeTrue())
gomega.Expect(e2eservice.CheckAffinity(nil, ingressIP, port, false)).To(gomega.BeTrue())
svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
})
gomega.Expect(jig.CheckAffinity(nil, ingressIP, port, true)).To(gomega.BeTrue())
gomega.Expect(e2eservice.CheckAffinity(nil, ingressIP, port, true)).To(gomega.BeTrue())
}
}

test/e2e/upgrades/services.go

@@ -72,7 +72,7 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {
if framework.ProviderIs("aws") {
timeout = e2eservice.LoadBalancerLagTimeoutAWS
}
jig.TestReachableHTTP(tcpIngressIP, svcPort, timeout)
e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, timeout)
t.jig = jig
t.tcpService = tcpService
@@ -103,7 +103,7 @@ func (t *ServiceUpgradeTest) test(f *framework.Framework, done <-chan struct{},
// Continuous validation
ginkgo.By("continuously hitting the pod through the service's LoadBalancer")
wait.Until(func() {
t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, e2eservice.LoadBalancerLagTimeoutDefault)
e2eservice.TestReachableHTTP(t.tcpIngressIP, t.svcPort, e2eservice.LoadBalancerLagTimeoutDefault)
}, framework.Poll, done)
} else {
// Block until upgrade is done
@@ -113,7 +113,7 @@ func (t *ServiceUpgradeTest) test(f *framework.Framework, done <-chan struct{},
// Sanity check and hit it once more
ginkgo.By("hitting the pod through the service's LoadBalancer")
t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, e2eservice.LoadBalancerLagTimeoutDefault)
e2eservice.TestReachableHTTP(t.tcpIngressIP, t.svcPort, e2eservice.LoadBalancerLagTimeoutDefault)
t.jig.SanityCheckService(t.tcpService, v1.ServiceTypeLoadBalancer)
if testFinalizer {