mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-07-20 10:20:51 +00:00

feat: move service_util to separated package

parent 3f1cb97f9a
commit c38ae01f8e
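The change is mechanical at the call sites: helpers that previously hung off the monolithic test/e2e/framework package now live in a dedicated service package, imported under the e2eservice alias that the first hunk below registers. A minimal sketch of the caller-side migration (the wrapper function here is illustrative, not part of the commit):

	package example

	import (
		v1 "k8s.io/api/core/v1"
		e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
	)

	// buildHeadlessService shows the post-commit spelling: what used to be
	// framework.CreateServiceSpec is now e2eservice.CreateServiceSpec; the
	// signature itself is unchanged by the move.
	func buildHeadlessService(name string, labels map[string]string) *v1.Service {
		return e2eservice.CreateServiceSpec(name, "" /* externalName */, true /* isHeadless */, labels)
	}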
@@ -46,5 +46,6 @@
 	"k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1": "kubeletresourcemetricsv1alpha1",
 	"k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1": "kubeletstatsv1alpha1",
 	"k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1": "proxyconfigv1alpha1",
-	"k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1": "schedulerconfigv1alpha1"
+	"k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1": "schedulerconfigv1alpha1",
+	"k8s.io/kubernetes/test/e2e/framework/service": "e2eservice"
 }
@@ -68,6 +68,7 @@ go_library(
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/replicaset:go_default_library",
+        "//test/e2e/framework/service:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",
         "//test/e2e/framework/statefulset:go_default_library",
         "//test/utils:go_default_library",
@@ -298,7 +298,6 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 	})

 	ginkgo.It("Kubelet should not restart containers across restart", func() {
-
 		nodeIPs, err := e2enode.GetPublicIps(f.ClientSet)
 		if err != nil {
 			e2elog.Logf("Unexpected error occurred: %v", err)
@@ -30,7 +30,7 @@ import (

 	"k8s.io/client-go/tools/cache"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	nodepkg "k8s.io/kubernetes/pkg/controller/nodelifecycle"
@@ -40,6 +40,7 @@ import (
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
 	testutils "k8s.io/kubernetes/test/utils"

@@ -360,7 +361,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 			// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
 			framework.SkipUnlessProviderIs("gke")
 			ginkgo.By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name)
-			headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels)
+			headlessService := e2eservice.CreateServiceSpec(headlessSvcName, "", true, labels)
 			_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
 			framework.ExpectNoError(err)
 			c = f.ClientSet
@@ -25,7 +25,7 @@ import (
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	klabels "k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
@@ -36,6 +36,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -80,7 +81,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 			ss = e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)

 			ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
-			headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels)
+			headlessService := e2eservice.CreateServiceSpec(headlessSvcName, "", true, labels)
 			_, err := c.CoreV1().Services(ns).Create(headlessService)
 			framework.ExpectNoError(err)
 		})
@@ -81,6 +81,7 @@ go_library(
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/replicaset:go_default_library",
+        "//test/e2e/framework/service:go_default_library",
        "//test/e2e/framework/volume:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/image:go_default_library",
@@ -35,6 +35,7 @@ import (
 	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/framework/replicaset"
+	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	testutils "k8s.io/kubernetes/test/utils"

 	"github.com/onsi/ginkgo"
@@ -255,7 +256,7 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
 	defer cancel()

 	err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
-		proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
+		proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
 		framework.ExpectNoError(err)
 		req := proxyRequest.Namespace(rc.nsName).
 			Context(ctx).
@@ -282,7 +283,7 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
 	defer cancel()

 	err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
-		proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
+		proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
 		framework.ExpectNoError(err)
 		req := proxyRequest.Namespace(rc.nsName).
 			Context(ctx).
@@ -309,7 +310,7 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
 	defer cancel()

 	err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
-		proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
+		proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
 		framework.ExpectNoError(err)
 		req := proxyRequest.Namespace(rc.nsName).
 			Context(ctx).
@@ -24,7 +24,6 @@ go_library(
         "pv_util.go",
         "rc_util.go",
         "resource_usage_gatherer.go",
-        "service_util.go",
         "size.go",
         "test_context.go",
         "util.go",
@@ -45,7 +44,6 @@ go_library(
         "//pkg/kubelet/metrics:go_default_library",
         "//pkg/kubelet/sysctl:go_default_library",
         "//pkg/master/ports:go_default_library",
-        "//pkg/registry/core/service/portallocator:go_default_library",
         "//pkg/scheduler/algorithm/predicates:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/security/podsecuritypolicy/seccomp:go_default_library",
@@ -92,7 +90,6 @@ go_library(
         "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
         "//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library",
         "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
-        "//staging/src/k8s.io/client-go/util/retry:go_default_library",
         "//staging/src/k8s.io/component-base/cli/flag:go_default_library",
         "//test/e2e/framework/auth:go_default_library",
         "//test/e2e/framework/config:go_default_library",
@@ -153,6 +150,7 @@ filegroup(
         "//test/e2e/framework/providers/vsphere:all-srcs",
         "//test/e2e/framework/replicaset:all-srcs",
         "//test/e2e/framework/resource:all-srcs",
+        "//test/e2e/framework/service:all-srcs",
         "//test/e2e/framework/ssh:all-srcs",
         "//test/e2e/framework/statefulset:all-srcs",
         "//test/e2e/framework/testfiles:all-srcs",
@@ -20,6 +20,7 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/node:go_default_library",
+        "//test/e2e/framework/service:go_default_library",
         "//test/e2e/framework/testfiles:go_default_library",
         "//test/e2e/manifest:go_default_library",
         "//test/utils:go_default_library",
@@ -51,6 +51,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	"k8s.io/kubernetes/test/e2e/framework/testfiles"
 	"k8s.io/kubernetes/test/e2e/manifest"
 	testutils "k8s.io/kubernetes/test/utils"
@@ -215,7 +216,7 @@ func CreateIngressComformanceTests(jig *TestJig, ns string, annotations map[stri
 			})
 			ginkgo.By("Checking that " + pathToFail + " is not exposed by polling for failure")
 			route := fmt.Sprintf("http://%v%v", jig.Address, pathToFail)
-			framework.ExpectNoError(framework.PollURL(route, updateURLMapHost, framework.LoadBalancerCleanupTimeout, jig.PollInterval, &http.Client{Timeout: IngressReqTimeout}, true))
+			framework.ExpectNoError(framework.PollURL(route, updateURLMapHost, e2eservice.LoadBalancerCleanupTimeout, jig.PollInterval, &http.Client{Timeout: IngressReqTimeout}, true))
 		},
 		fmt.Sprintf("Waiting for path updates to reflect in L7"),
 	},
@@ -385,7 +386,7 @@ func NewIngressTestJig(c clientset.Interface) *TestJig {
 	return &TestJig{
 		Client:       c,
 		RootCAs:      map[string][]byte{},
-		PollInterval: framework.LoadBalancerPollInterval,
+		PollInterval: e2eservice.LoadBalancerPollInterval,
 		Logger:       &E2ELogger{},
 	}
 }
@@ -674,14 +675,14 @@ func (j *TestJig) pollIngressWithCert(ing *networkingv1beta1.Ingress, address st
 // WaitForIngress waits for the Ingress to get an address.
 // WaitForIngress returns when it gets the first 200 response
 func (j *TestJig) WaitForIngress(waitForNodePort bool) {
-	if err := j.WaitForGivenIngressWithTimeout(j.Ingress, waitForNodePort, framework.LoadBalancerPollTimeout); err != nil {
+	if err := j.WaitForGivenIngressWithTimeout(j.Ingress, waitForNodePort, e2eservice.LoadBalancerPollTimeout); err != nil {
 		e2elog.Failf("error in waiting for ingress to get an address: %s", err)
 	}
 }

 // WaitForIngressToStable waits for the LB return 100 consecutive 200 responses.
 func (j *TestJig) WaitForIngressToStable() {
-	if err := wait.Poll(10*time.Second, framework.LoadBalancerCreateTimeoutDefault, func() (bool, error) {
+	if err := wait.Poll(10*time.Second, e2eservice.LoadBalancerCreateTimeoutDefault, func() (bool, error) {
 		_, err := j.GetDistinctResponseFromIngress()
 		if err != nil {
 			return false, nil
@@ -720,12 +721,12 @@ func (j *TestJig) WaitForGivenIngressWithTimeout(ing *networkingv1beta1.Ingress,
 // Ingress. Hostnames and certificate need to be explicitly passed in.
 func (j *TestJig) WaitForIngressWithCert(waitForNodePort bool, knownHosts []string, cert []byte) error {
 	// Wait for the loadbalancer IP.
-	address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, framework.LoadBalancerPollTimeout)
+	address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, e2eservice.LoadBalancerPollTimeout)
 	if err != nil {
-		return fmt.Errorf("Ingress failed to acquire an IP address within %v", framework.LoadBalancerPollTimeout)
+		return fmt.Errorf("Ingress failed to acquire an IP address within %v", e2eservice.LoadBalancerPollTimeout)
 	}

-	return j.pollIngressWithCert(j.Ingress, address, knownHosts, cert, waitForNodePort, framework.LoadBalancerPollTimeout)
+	return j.pollIngressWithCert(j.Ingress, address, knownHosts, cert, waitForNodePort, e2eservice.LoadBalancerPollTimeout)
 }

 // VerifyURL polls for the given iterations, in intervals, and fails if the
@@ -812,9 +813,9 @@ func (j *TestJig) ConstructFirewallForIngress(firewallRuleName string, nodeTags
 // GetDistinctResponseFromIngress tries GET call to the ingress VIP and return all distinct responses.
 func (j *TestJig) GetDistinctResponseFromIngress() (sets.String, error) {
 	// Wait for the loadbalancer IP.
-	address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, framework.LoadBalancerPollTimeout)
+	address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, e2eservice.LoadBalancerPollTimeout)
 	if err != nil {
-		e2elog.Failf("Ingress failed to acquire an IP address within %v", framework.LoadBalancerPollTimeout)
+		e2elog.Failf("Ingress failed to acquire an IP address within %v", e2eservice.LoadBalancerPollTimeout)
 	}
 	responses := sets.NewString()
 	timeoutClient := &http.Client{Timeout: IngressReqTimeout}
@@ -27,7 +27,7 @@ import (
 	"time"

 	"github.com/onsi/ginkgo"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/intstr"
@@ -74,7 +74,8 @@ const (
 	RegexIPv6 = "(?:(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){6})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:::(?:(?:(?:[0-9a-fA-F]{1,4})):){5})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){4})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,1}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){3})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,2}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){2})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,3}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:[0-9a-fA-F]{1,4})):)(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,4}(?:(?:[0-9a-fA-F]{1,4})))?::)(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,5}(?:(?:[0-9a-fA-F]{1,4})))?::)(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,6}(?:(?:[0-9a-fA-F]{1,4})))?::))))"
 )

-var netexecImageName = imageutils.GetE2EImage(imageutils.Agnhost)
+// NetexecImageName is the image name for agnhost.
+var NetexecImageName = imageutils.GetE2EImage(imageutils.Agnhost)

 // NewNetworkingTestConfig creates and sets up a new test config helper.
 func NewNetworkingTestConfig(f *Framework) *NetworkingTestConfig {
|
|||||||
Containers: []v1.Container{
|
Containers: []v1.Container{
|
||||||
{
|
{
|
||||||
Name: "webserver",
|
Name: "webserver",
|
||||||
Image: netexecImageName,
|
Image: NetexecImageName,
|
||||||
ImagePullPolicy: v1.PullIfNotPresent,
|
ImagePullPolicy: v1.PullIfNotPresent,
|
||||||
Args: []string{
|
Args: []string{
|
||||||
"netexec",
|
"netexec",
|
||||||
@ -469,7 +470,7 @@ func (config *NetworkingTestConfig) createTestPodSpec() *v1.Pod {
|
|||||||
Containers: []v1.Container{
|
Containers: []v1.Container{
|
||||||
{
|
{
|
||||||
Name: "webserver",
|
Name: "webserver",
|
||||||
Image: netexecImageName,
|
Image: NetexecImageName,
|
||||||
ImagePullPolicy: v1.PullIfNotPresent,
|
ImagePullPolicy: v1.PullIfNotPresent,
|
||||||
Args: []string{
|
Args: []string{
|
||||||
"netexec",
|
"netexec",
|
||||||
|
@@ -26,6 +26,7 @@ go_library(
         "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
+        "//test/e2e/framework/service:go_default_library",
         "//test/utils:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/google.golang.org/api/compute/v1:go_default_library",
@@ -26,13 +26,14 @@ import (

 	compute "google.golang.org/api/compute/v1"
 	"google.golang.org/api/googleapi"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	gcecloud "k8s.io/legacy-cloud-providers/gce"
 )

@@ -169,8 +170,8 @@ func (p *Provider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) erro
 	}

 	return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
-		service := p.gceCloud.ComputeServices().GA
-		list, err := service.ForwardingRules.List(project, region).Do()
+		e2eservice := p.gceCloud.ComputeServices().GA
+		list, err := e2eservice.ForwardingRules.List(project, region).Do()
 		if err != nil {
 			return false, err
 		}
@@ -255,7 +256,7 @@ func (p *Provider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
 // the given name. The name is usually the UUID of the Service prefixed with an
 // alpha-numeric character ('a') to work around cloudprovider rules.
 func (p *Provider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
-	if pollErr := wait.Poll(5*time.Second, framework.LoadBalancerCleanupTimeout, func() (bool, error) {
+	if pollErr := wait.Poll(5*time.Second, e2eservice.LoadBalancerCleanupTimeout, func() (bool, error) {
 		if err := p.cleanupGCEResources(c, loadBalancerName, region, zone); err != nil {
 			e2elog.Logf("Still waiting for glbc to cleanup: %v", err)
 			return false, nil
@@ -29,12 +29,13 @@ import (
 	"github.com/onsi/ginkgo"
 	compute "google.golang.org/api/compute/v1"
 	"google.golang.org/api/googleapi"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	utilexec "k8s.io/utils/exec"
 )

@@ -80,7 +81,7 @@ type IngressController struct {

 // CleanupIngressController calls cont.CleanupIngressControllerWithTimeout with hard-coded timeout
 func (cont *IngressController) CleanupIngressController() error {
-	return cont.CleanupIngressControllerWithTimeout(framework.LoadBalancerCleanupTimeout)
+	return cont.CleanupIngressControllerWithTimeout(e2eservice.LoadBalancerCleanupTimeout)
 }

 // CleanupIngressControllerWithTimeout calls the IngressController.Cleanup(false)
test/e2e/framework/service/BUILD (new file, 55 lines)
@@ -0,0 +1,55 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "affinity_checker.go",
+        "const.go",
+        "fixture.go",
+        "hostname.go",
+        "jig.go",
+        "resource.go",
+        "wait.go",
+    ],
+    importpath = "k8s.io/kubernetes/test/e2e/framework/service",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/apis/core:go_default_library",
+        "//pkg/registry/core/service/portallocator:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/rest:go_default_library",
+        "//staging/src/k8s.io/client-go/util/retry:go_default_library",
+        "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/log:go_default_library",
+        "//test/e2e/framework/node:go_default_library",
+        "//test/e2e/framework/pod:go_default_library",
+        "//test/e2e/framework/ssh:go_default_library",
+        "//test/utils:go_default_library",
+        "//test/utils/image:go_default_library",
+        "//vendor/github.com/onsi/ginkgo:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
test/e2e/framework/service/affinity_checker.go (new file, 56 lines)
@@ -0,0 +1,56 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package service
+
+import (
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+)
+
+// affinityTracker tracks the destination of a request for the affinity tests.
+type affinityTracker struct {
+	hostTrace []string
+}
+
+// Record the response going to a given host.
+func (at *affinityTracker) recordHost(host string) {
+	at.hostTrace = append(at.hostTrace, host)
+	e2elog.Logf("Received response from host: %s", host)
+}
+
+// Check that we got a constant count requests going to the same host.
+func (at *affinityTracker) checkHostTrace(count int) (fulfilled, affinityHolds bool) {
+	fulfilled = (len(at.hostTrace) >= count)
+	if len(at.hostTrace) == 0 {
+		return fulfilled, true
+	}
+	last := at.hostTrace[0:]
+	if len(at.hostTrace)-count >= 0 {
+		last = at.hostTrace[len(at.hostTrace)-count:]
+	}
+	host := at.hostTrace[len(at.hostTrace)-1]
+	for _, h := range last {
+		if h != host {
+			return fulfilled, false
+		}
+	}
+	return fulfilled, true
+}
+
+func checkAffinityFailed(tracker affinityTracker, err string) {
+	e2elog.Logf("%v", tracker.hostTrace)
+	e2elog.Failf(err)
+}
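For orientation, a short in-package illustration of the tracker's contract (hypothetical values, not part of the commit): checkHostTrace(n) reports whether at least n responses were recorded, and whether the last n all came from one host.

	package service

	// exampleAffinityTracker is a hypothetical illustration, not part of the commit.
	func exampleAffinityTracker() (fulfilled, affinityHolds bool) {
		tracker := affinityTracker{}
		tracker.recordHost("10.0.0.1") // appends to hostTrace and logs the host
		tracker.recordHost("10.0.0.1")
		tracker.recordHost("10.0.0.1")
		// Returns (true, true): three responses were seen and the last three
		// share a host. A later recordHost("10.0.0.2") would flip affinityHolds
		// to false for checkHostTrace(3).
		return tracker.checkHostTrace(3)
	}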
test/e2e/framework/service/const.go (new file, 78 lines)
@@ -0,0 +1,78 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package service
+
+import (
+	"time"
+)
+
+const (
+	// RespondingTimeout is how long to wait for a service to be responding.
+	RespondingTimeout = 2 * time.Minute
+
+	// MaxNodesForEndpointsTests is the max number for testing endpoints.
+	// Don't test with more than 3 nodes.
+	// Many tests create an endpoint per node, in large clusters, this is
+	// resource and time intensive.
+	MaxNodesForEndpointsTests = 3
+)
+
+const (
+	// KubeProxyLagTimeout is the maximum time a kube-proxy daemon on a node is allowed
+	// to not notice a Service update, such as type=NodePort.
+	// TODO: This timeout should be O(10s), observed values are O(1m), 5m is very
+	// liberal. Fix tracked in #20567.
+	KubeProxyLagTimeout = 5 * time.Minute
+
+	// KubeProxyEndpointLagTimeout is the maximum time a kube-proxy daemon on a node is allowed
+	// to not notice an Endpoint update.
+	KubeProxyEndpointLagTimeout = 30 * time.Second
+
+	// LoadBalancerLagTimeoutDefault is the maximum time a load balancer is allowed to
+	// not respond after creation.
+	LoadBalancerLagTimeoutDefault = 2 * time.Minute
+
+	// LoadBalancerLagTimeoutAWS is the delay between ELB creation and serving traffic
+	// on AWS. A few minutes is typical, so use 10m.
+	LoadBalancerLagTimeoutAWS = 10 * time.Minute
+
+	// LoadBalancerCreateTimeoutDefault is the default time to wait for a load balancer to be created/modified.
+	// TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
+	LoadBalancerCreateTimeoutDefault = 20 * time.Minute
+	// LoadBalancerCreateTimeoutLarge is the maximum time to wait for a load balancer to be created/modified.
+	LoadBalancerCreateTimeoutLarge = 2 * time.Hour
+
+	// LoadBalancerCleanupTimeout is the time required by the loadbalancer to cleanup, proportional to numApps/Ing.
+	// Bring the cleanup timeout back down to 5m once b/33588344 is resolved.
+	LoadBalancerCleanupTimeout = 15 * time.Minute
+
+	// LoadBalancerPollTimeout is the time required by the loadbalancer to poll.
+	// On average it takes ~6 minutes for a single backend to come online in GCE.
+	LoadBalancerPollTimeout = 15 * time.Minute
+	// LoadBalancerPollInterval is the interval value in which the loadbalancer polls.
+	LoadBalancerPollInterval = 30 * time.Second
+
+	// LargeClusterMinNodesNumber is the number of nodes which a large cluster consists of.
+	LargeClusterMinNodesNumber = 100
+
+	// TestTimeout is used for most polling/waiting activities
+	TestTimeout = 60 * time.Second
+
+	// AffinityConfirmCount is the number of needed continuous requests to confirm that
+	// affinity is enabled.
+	AffinityConfirmCount = 15
+)
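These constants are what the polling loops elsewhere in this diff consume (for example e2eservice.LoadBalancerPollInterval and e2eservice.LoadBalancerCreateTimeoutDefault in the ingress utilities). A sketch of the intended usage pattern; waitForLoadBalancerIP is a hypothetical helper, not part of the commit:

	package example

	import (
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/util/wait"
		clientset "k8s.io/client-go/kubernetes"
		e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
	)

	// waitForLoadBalancerIP polls until a Service of type LoadBalancer reports
	// an ingress endpoint, using the package's poll interval and create timeout.
	func waitForLoadBalancerIP(c clientset.Interface, ns, name string) error {
		return wait.Poll(e2eservice.LoadBalancerPollInterval, e2eservice.LoadBalancerCreateTimeoutDefault, func() (bool, error) {
			svc, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			return len(svc.Status.LoadBalancer.Ingress) > 0, nil
		})
	}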
test/e2e/framework/service/fixture.go (new file, 159 lines)
@@ -0,0 +1,159 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package service
+
+import (
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/uuid"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/util/retry"
+	imageutils "k8s.io/kubernetes/test/utils/image"
+
+	"github.com/onsi/ginkgo"
+)
+
+// TestFixture is a simple helper class to avoid too much boilerplate in tests
+type TestFixture struct {
+	ServiceName string
+	Namespace   string
+	Client      clientset.Interface
+
+	TestID string
+	Labels map[string]string
+
+	rcs      map[string]bool
+	services map[string]bool
+	Name     string
+	Image    string
+}
+
+// NewServerTest creates a new TestFixture for the tests.
+func NewServerTest(client clientset.Interface, namespace string, serviceName string) *TestFixture {
+	t := &TestFixture{}
+	t.Client = client
+	t.Namespace = namespace
+	t.ServiceName = serviceName
+	t.TestID = t.ServiceName + "-" + string(uuid.NewUUID())
+	t.Labels = map[string]string{
+		"testid": t.TestID,
+	}
+
+	t.rcs = make(map[string]bool)
+	t.services = make(map[string]bool)
+
+	t.Name = "webserver"
+	t.Image = imageutils.GetE2EImage(imageutils.TestWebserver)
+
+	return t
+}
+
+// BuildServiceSpec builds default config for a service (which can then be changed)
+func (t *TestFixture) BuildServiceSpec() *v1.Service {
+	service := &v1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      t.ServiceName,
+			Namespace: t.Namespace,
+		},
+		Spec: v1.ServiceSpec{
+			Selector: t.Labels,
+			Ports: []v1.ServicePort{{
+				Port:       80,
+				TargetPort: intstr.FromInt(80),
+			}},
+		},
+	}
+	return service
+}
+
+// CreateRC creates a replication controller and records it for cleanup.
+func (t *TestFixture) CreateRC(rc *v1.ReplicationController) (*v1.ReplicationController, error) {
+	rc, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Create(rc)
+	if err == nil {
+		t.rcs[rc.Name] = true
+	}
+	return rc, err
+}
+
+// CreateService creates a service, and record it for cleanup
+func (t *TestFixture) CreateService(service *v1.Service) (*v1.Service, error) {
+	result, err := t.Client.CoreV1().Services(t.Namespace).Create(service)
+	if err == nil {
+		t.services[service.Name] = true
+	}
+	return result, err
+}
+
+// DeleteService deletes a service, and remove it from the cleanup list
+func (t *TestFixture) DeleteService(serviceName string) error {
+	err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil)
+	if err == nil {
+		delete(t.services, serviceName)
+	}
+	return err
+}
+
+// Cleanup cleans all ReplicationControllers and Services which this object holds.
+func (t *TestFixture) Cleanup() []error {
+	var errs []error
+	for rcName := range t.rcs {
+		ginkgo.By("stopping RC " + rcName + " in namespace " + t.Namespace)
+		err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+			// First, resize the RC to 0.
+			old, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Get(rcName, metav1.GetOptions{})
+			if err != nil {
+				if errors.IsNotFound(err) {
+					return nil
+				}
+				return err
+			}
+			x := int32(0)
+			old.Spec.Replicas = &x
+			if _, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Update(old); err != nil {
+				if errors.IsNotFound(err) {
+					return nil
+				}
+				return err
+			}
+			return nil
+		})
+		if err != nil {
+			errs = append(errs, err)
+		}
+		// TODO(mikedanese): Wait.
+		// Then, delete the RC altogether.
+		if err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Delete(rcName, nil); err != nil {
+			if !errors.IsNotFound(err) {
+				errs = append(errs, err)
+			}
+		}
+	}
+
+	for serviceName := range t.services {
+		ginkgo.By("deleting service " + serviceName + " in namespace " + t.Namespace)
+		err := t.Client.CoreV1().Services(t.Namespace).Delete(serviceName, nil)
+		if err != nil {
+			if !errors.IsNotFound(err) {
+				errs = append(errs, err)
+			}
+		}
+	}
+
+	return errs
+}
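A sketch of how the fixture is meant to be driven (runFixture is a hypothetical helper, not from the commit): create resources through the fixture so they are tracked, then rely on Cleanup to scale the RCs to zero and delete everything it recorded.

	package example

	import (
		clientset "k8s.io/client-go/kubernetes"
		e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
	)

	// runFixture builds the default Service spec (port 80 -> targetPort 80,
	// selector = the fixture's generated labels) and creates it via the fixture.
	func runFixture(c clientset.Interface, ns string) error {
		t := e2eservice.NewServerTest(c, ns, "example-svc")
		defer t.Cleanup() // deletes every RC and Service the fixture recorded

		svc := t.BuildServiceSpec()
		_, err := t.CreateService(svc) // recorded for cleanup only on success
		return err
	}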
test/e2e/framework/service/hostname.go (new file, 202 lines)
@@ -0,0 +1,202 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package service
+
+import (
+	"fmt"
+	"net"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/onsi/ginkgo"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/test/e2e/framework"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
+	testutils "k8s.io/kubernetes/test/utils"
+)
+
+// StartServeHostnameService creates a replication controller that serves its
+// hostname and a service on top of it.
+func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string, replicas int) ([]string, string, error) {
+	podNames := make([]string, replicas)
+	name := svc.ObjectMeta.Name
+	ginkgo.By("creating service " + name + " in namespace " + ns)
+	_, err := c.CoreV1().Services(ns).Create(svc)
+	if err != nil {
+		return podNames, "", err
+	}
+
+	var createdPods []*v1.Pod
+	maxContainerFailures := 0
+	config := testutils.RCConfig{
+		Client:               c,
+		Image:                framework.ServeHostnameImage,
+		Command:              []string{"/agnhost", "serve-hostname"},
+		Name:                 name,
+		Namespace:            ns,
+		PollInterval:         3 * time.Second,
+		Timeout:              framework.PodReadyBeforeTimeout,
+		Replicas:             replicas,
+		CreatedPods:          &createdPods,
+		MaxContainerFailures: &maxContainerFailures,
+	}
+	err = framework.RunRC(config)
+	if err != nil {
+		return podNames, "", err
+	}
+
+	if len(createdPods) != replicas {
+		return podNames, "", fmt.Errorf("incorrect number of running pods: %v", len(createdPods))
+	}
+
+	for i := range createdPods {
+		podNames[i] = createdPods[i].ObjectMeta.Name
+	}
+	sort.StringSlice(podNames).Sort()
+
+	service, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
+	if err != nil {
+		return podNames, "", err
+	}
+	if service.Spec.ClusterIP == "" {
+		return podNames, "", fmt.Errorf("service IP is blank for %v", name)
+	}
+	serviceIP := service.Spec.ClusterIP
+	return podNames, serviceIP, nil
+}
+
+// StopServeHostnameService stops the given service.
+func StopServeHostnameService(clientset clientset.Interface, ns, name string) error {
+	if err := framework.DeleteRCAndWaitForGC(clientset, ns, name); err != nil {
+		return err
+	}
+	if err := clientset.CoreV1().Services(ns).Delete(name, nil); err != nil {
+		return err
+	}
+	return nil
+}
+
+// VerifyServeHostnameServiceUp wgets the given serviceIP:servicePort from the
+// given host and from within a pod. The host is expected to be an SSH-able node
+// in the cluster. Each pod in the service is expected to echo its name. These
+// names are compared with the given expectedPods list after a sort | uniq.
+func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expectedPods []string, serviceIP string, servicePort int) error {
+	execPodName := e2epod.CreateExecPodOrFail(c, ns, "execpod-", nil)
+	defer func() {
+		e2epod.DeletePodOrFail(c, ns, execPodName)
+	}()
+
+	// Loop a bunch of times - the proxy is randomized, so we want a good
+	// chance of hitting each backend at least once.
+	buildCommand := func(wget string) string {
+		serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
+		return fmt.Sprintf("for i in $(seq 1 %d); do %s http://%s 2>&1 || true; echo; done",
+			50*len(expectedPods), wget, serviceIPPort)
+	}
+	commands := []func() string{
+		// verify service from node
+		func() string {
+			cmd := "set -e; " + buildCommand("wget -q --timeout=0.2 --tries=1 -O -")
+			e2elog.Logf("Executing cmd %q on host %v", cmd, host)
+			result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
+			if err != nil || result.Code != 0 {
+				e2essh.LogResult(result)
+				e2elog.Logf("error while SSH-ing to node: %v", err)
+			}
+			return result.Stdout
+		},
+		// verify service from pod
+		func() string {
+			cmd := buildCommand("wget -q -T 1 -O -")
+			e2elog.Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPodName)
+			// TODO: Use exec-over-http via the netexec pod instead of kubectl exec.
+			output, err := framework.RunHostCmd(ns, execPodName, cmd)
+			if err != nil {
+				e2elog.Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPodName, err, output)
+			}
+			return output
+		},
+	}
+
+	expectedEndpoints := sets.NewString(expectedPods...)
+	ginkgo.By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods)))
+	for _, cmdFunc := range commands {
+		passed := false
+		gotEndpoints := sets.NewString()
+
+		// Retry cmdFunc for a while
+		for start := time.Now(); time.Since(start) < KubeProxyLagTimeout; time.Sleep(5 * time.Second) {
+			for _, endpoint := range strings.Split(cmdFunc(), "\n") {
+				trimmedEp := strings.TrimSpace(endpoint)
+				if trimmedEp != "" {
+					gotEndpoints.Insert(trimmedEp)
+				}
+			}
+			// TODO: simply checking that the retrieved endpoints is a superset
+			// of the expected allows us to ignore intermitten network flakes that
+			// result in output like "wget timed out", but these should be rare
+			// and we need a better way to track how often it occurs.
+			if gotEndpoints.IsSuperset(expectedEndpoints) {
+				if !gotEndpoints.Equal(expectedEndpoints) {
+					e2elog.Logf("Ignoring unexpected output wgetting endpoints of service %s: %v", serviceIP, gotEndpoints.Difference(expectedEndpoints))
+				}
+				passed = true
+				break
+			}
+			e2elog.Logf("Unable to reach the following endpoints of service %s: %v", serviceIP, expectedEndpoints.Difference(gotEndpoints))
+		}
+		if !passed {
+			// Sort the lists so they're easier to visually diff.
+			exp := expectedEndpoints.List()
+			got := gotEndpoints.List()
+			sort.StringSlice(exp).Sort()
+			sort.StringSlice(got).Sort()
+			return fmt.Errorf("service verification failed for: %s\nexpected %v\nreceived %v", serviceIP, exp, got)
+		}
+	}
+	return nil
+}
+
+// VerifyServeHostnameServiceDown verifies that the given service isn't served.
+func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceIP string, servicePort int) error {
+	ipPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
+	// The current versions of curl included in CentOS and RHEL distros
+	// misinterpret square brackets around IPv6 as globbing, so use the -g
+	// argument to disable globbing to handle the IPv6 case.
+	command := fmt.Sprintf(
+		"curl -g -s --connect-timeout 2 http://%s && exit 99", ipPort)
+
+	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
+		result, err := e2essh.SSH(command, host, framework.TestContext.Provider)
+		if err != nil {
+			e2essh.LogResult(result)
+			e2elog.Logf("error while SSH-ing to node: %v", err)
+		}
+		if result.Code != 99 {
+			return nil
+		}
+		e2elog.Logf("service still alive - still waiting")
+	}
+	return fmt.Errorf("waiting for service to be down timed out")
+}
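The three hostname helpers compose into the usual up/verify/down flow. A hypothetical sketch (names are illustrative), assuming port 80, which matches the spec CreateServiceSpec builds for non-ExternalName services:

	package example

	import (
		clientset "k8s.io/client-go/kubernetes"
		e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
	)

	// verifyHostnames starts three hostname-echoing pods behind a service,
	// verifies every backend answers both from a node and from a pod, then
	// tears the service down.
	func verifyHostnames(c clientset.Interface, ns, sshHost string) error {
		svc := e2eservice.CreateServiceSpec("hostnames", "", false, map[string]string{"name": "hostnames"})
		podNames, serviceIP, err := e2eservice.StartServeHostnameService(c, svc, ns, 3)
		if err != nil {
			return err
		}
		defer e2eservice.StopServeHostnameService(c, ns, "hostnames")
		return e2eservice.VerifyServeHostnameServiceUp(c, ns, sshHost, podNames, serviceIP, 80)
	}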
File diff suppressed because it is too large
174
test/e2e/framework/service/resource.go
Normal file
174
test/e2e/framework/service/resource.go
Normal file
@ -0,0 +1,174 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package service

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// GetServicesProxyRequest returns a request for a service proxy.
func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) {
	return request.Resource("services").SubResource("proxy"), nil
}

// CreateServiceSpec returns a Service object for testing.
func CreateServiceSpec(serviceName, externalName string, isHeadless bool, selector map[string]string) *v1.Service {
	headlessService := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: serviceName,
		},
		Spec: v1.ServiceSpec{
			Selector: selector,
		},
	}
	if externalName != "" {
		headlessService.Spec.Type = v1.ServiceTypeExternalName
		headlessService.Spec.ExternalName = externalName
	} else {
		headlessService.Spec.Ports = []v1.ServicePort{
			{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
		}
	}
	if isHeadless {
		headlessService.Spec.ClusterIP = "None"
	}
	return headlessService
}

// UpdateService fetches a service, calls the update function on it,
// and then attempts to send the updated service. It retries up to 2
// times in the face of timeouts and conflicts.
func UpdateService(c clientset.Interface, namespace, serviceName string, update func(*v1.Service)) (*v1.Service, error) {
	var service *v1.Service
	var err error
	for i := 0; i < 3; i++ {
		service, err = c.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{})
		if err != nil {
			return service, err
		}

		update(service)

		service, err = c.CoreV1().Services(namespace).Update(service)

		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			return service, err
		}
	}
	return service, err
}

// CleanupServiceResources cleans up service Type=LoadBalancer resources.
func CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
	framework.TestContext.CloudConfig.Provider.CleanupServiceResources(c, loadBalancerName, region, zone)
}

// GetIngressPoint returns a host on which ingress serves.
func GetIngressPoint(ing *v1.LoadBalancerIngress) string {
	host := ing.IP
	if host == "" {
		host = ing.Hostname
	}
	return host
}

// EnableAndDisableInternalLB returns two functions for enabling and disabling the internal load balancer
// setting for the supported cloud providers (currently GCE/GKE and Azure) and empty functions for others.
func EnableAndDisableInternalLB() (enable func(svc *v1.Service), disable func(svc *v1.Service)) {
	return framework.TestContext.CloudConfig.Provider.EnableAndDisableInternalLB()
}

// DescribeSvc logs the output of kubectl describe svc for the given namespace
func DescribeSvc(ns string) {
	e2elog.Logf("\nOutput of kubectl describe svc:\n")
	desc, _ := framework.RunKubectl(
		"describe", "svc", fmt.Sprintf("--namespace=%v", ns))
	e2elog.Logf(desc)
}

// newNetexecPodSpec returns the pod spec of netexec pod
func newNetexecPodSpec(podName string, httpPort, udpPort int32, hostNetwork bool) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "netexec",
					Image: framework.NetexecImageName,
					Args: []string{
						"netexec",
						fmt.Sprintf("--http-port=%d", httpPort),
						fmt.Sprintf("--udp-port=%d", udpPort),
					},
					Ports: []v1.ContainerPort{
						{
							Name:          "http",
							ContainerPort: httpPort,
						},
						{
							Name:          "udp",
							ContainerPort: udpPort,
						},
					},
				},
			},
			HostNetwork: hostNetwork,
		},
	}
	return pod
}

// newEchoServerPodSpec returns the pod spec of echo server pod
func newEchoServerPodSpec(podName string) *v1.Pod {
	port := 8080
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "echoserver",
					Image: imageutils.GetE2EImage(imageutils.EchoServer),
					Ports: []v1.ContainerPort{{ContainerPort: int32(port)}},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
	return pod
}

// GetServiceLoadBalancerCreationTimeout returns a timeout value for creating a load balancer of a service.
func GetServiceLoadBalancerCreationTimeout(cs clientset.Interface) time.Duration {
	if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > LargeClusterMinNodesNumber {
		return LoadBalancerCreateTimeoutLarge
	}
	return LoadBalancerCreateTimeoutDefault
}
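A short usage sketch for the helpers above. This is hedged: the clientset c, namespace ns, and the service name and selector are illustrative placeholders, not values from the diff:

	// create a headless service for testing, then point it at an external name
	svcSpec := e2eservice.CreateServiceSpec("test-svc", "", true, map[string]string{"app": "demo"})
	svc, err := c.CoreV1().Services(ns).Create(svcSpec)
	framework.ExpectNoError(err)

	// UpdateService retries on conflicts and server timeouts internally
	svc, err = e2eservice.UpdateService(c, ns, svc.Name, func(s *v1.Service) {
		s.Spec.Type = v1.ServiceTypeExternalName
		s.Spec.ExternalName = "foo.example.com"
	})
	framework.ExpectNoError(err)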
test/e2e/framework/service/wait.go (new file, 65 lines)
@@ -0,0 +1,65 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package service

import (
	"context"
	"fmt"

	"github.com/onsi/ginkgo"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// WaitForServiceResponding waits for the service to be responding.
func WaitForServiceResponding(c clientset.Interface, ns, name string) error {
	ginkgo.By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name))

	return wait.PollImmediate(framework.Poll, RespondingTimeout, func() (done bool, err error) {
		proxyRequest, errProxy := GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
		if errProxy != nil {
			e2elog.Logf("Failed to get services proxy request: %v:", errProxy)
			return false, nil
		}

		ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
		defer cancel()

		body, err := proxyRequest.Namespace(ns).
			Context(ctx).
			Name(name).
			Do().
			Raw()
		if err != nil {
			if ctx.Err() != nil {
				e2elog.Failf("Failed to GET from service %s: %v", name, err)
				return true, err
			}
			e2elog.Logf("Failed to GET from service %s: %v:", name, err)
			return false, nil
		}
		got := string(body)
		if len(got) == 0 {
			e2elog.Logf("Service %s: expected non-empty response", name)
			return false, err // stop polling
		}
		e2elog.Logf("Service %s: found nonempty answer: %s", name, got)
		return true, nil
	})
}
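For reference, callers invoke this through the new package alias; the snippet below mirrors the call-site shape that appears in the migrated tests later in this diff (c and ns come from the surrounding test):

	err := e2eservice.WaitForServiceResponding(c, ns, "backend-svc")
	framework.ExpectNoError(err, "waiting for the service to respond")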
@@ -24,7 +24,7 @@ import (
 	"strconv"
 
 	appsv1 "k8s.io/api/apps/v1"
-	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
@@ -38,19 +38,19 @@ import (
 // NewStatefulSet creates a new Webserver StatefulSet for testing. The StatefulSet is named name, is in namespace ns,
 // statefulPodsMounts are the mounts that will be backed by PVs. podsMounts are the mounts that are mounted directly
 // to the Pod. labels are the labels that will be used for the StatefulSet selector.
-func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulPodMounts []corev1.VolumeMount, podMounts []corev1.VolumeMount, labels map[string]string) *appsv1.StatefulSet {
+func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulPodMounts []v1.VolumeMount, podMounts []v1.VolumeMount, labels map[string]string) *appsv1.StatefulSet {
 	mounts := append(statefulPodMounts, podMounts...)
-	claims := []corev1.PersistentVolumeClaim{}
+	claims := []v1.PersistentVolumeClaim{}
 	for _, m := range statefulPodMounts {
 		claims = append(claims, NewStatefulSetPVC(m.Name))
 	}
 
-	vols := []corev1.Volume{}
+	vols := []v1.Volume{}
 	for _, m := range podMounts {
-		vols = append(vols, corev1.Volume{
+		vols = append(vols, v1.Volume{
 			Name: m.Name,
-			VolumeSource: corev1.VolumeSource{
-				HostPath: &corev1.HostPathVolumeSource{
+			VolumeSource: v1.VolumeSource{
+				HostPath: &v1.HostPathVolumeSource{
 					Path: fmt.Sprintf("/tmp/%v", m.Name),
 				},
 			},
@@ -71,13 +71,13 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
 			MatchLabels: labels,
 		},
 		Replicas: func(i int32) *int32 { return &i }(replicas),
-		Template: corev1.PodTemplateSpec{
+		Template: v1.PodTemplateSpec{
 			ObjectMeta: metav1.ObjectMeta{
 				Labels:      labels,
 				Annotations: map[string]string{},
 			},
-			Spec: corev1.PodSpec{
-				Containers: []corev1.Container{
+			Spec: v1.PodSpec{
+				Containers: []v1.Container{
 					{
 						Name:  "webserver",
 						Image: imageutils.GetE2EImage(imageutils.Httpd),
@@ -95,18 +95,18 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
 }
 
 // NewStatefulSetPVC returns a PersistentVolumeClaim named name, for testing StatefulSets.
-func NewStatefulSetPVC(name string) corev1.PersistentVolumeClaim {
-	return corev1.PersistentVolumeClaim{
+func NewStatefulSetPVC(name string) v1.PersistentVolumeClaim {
+	return v1.PersistentVolumeClaim{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 		},
-		Spec: corev1.PersistentVolumeClaimSpec{
-			AccessModes: []corev1.PersistentVolumeAccessMode{
-				corev1.ReadWriteOnce,
+		Spec: v1.PersistentVolumeClaimSpec{
+			AccessModes: []v1.PersistentVolumeAccessMode{
+				v1.ReadWriteOnce,
 			},
-			Resources: corev1.ResourceRequirements{
-				Requests: corev1.ResourceList{
-					corev1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI),
+			Resources: v1.ResourceRequirements{
+				Requests: v1.ResourceList{
+					v1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI),
 				},
 			},
 		},
@@ -114,17 +114,17 @@ func NewStatefulSetPVC(name string) corev1.PersistentVolumeClaim {
 }
 
 // CreateStatefulSetService creates a Headless Service with Name name and Selector set to match labels.
-func CreateStatefulSetService(name string, labels map[string]string) *corev1.Service {
-	headlessService := &corev1.Service{
+func CreateStatefulSetService(name string, labels map[string]string) *v1.Service {
+	headlessService := &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 		},
-		Spec: corev1.ServiceSpec{
+		Spec: v1.ServiceSpec{
 			Selector: labels,
 		},
 	}
-	headlessService.Spec.Ports = []corev1.ServicePort{
-		{Port: 80, Name: "http", Protocol: corev1.ProtocolTCP},
+	headlessService.Spec.Ports = []v1.ServicePort{
+		{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
 	}
 	headlessService.Spec.ClusterIP = "None"
 	return headlessService
@@ -149,7 +149,7 @@ func BreakHTTPProbe(c clientset.Interface, ss *appsv1.StatefulSet) error {
 }
 
 // BreakPodHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in one pod.
-func BreakPodHTTPProbe(ss *appsv1.StatefulSet, pod *corev1.Pod) error {
+func BreakPodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error {
 	path := httpProbe.HTTPGet.Path
 	if path == "" {
 		return fmt.Errorf("path expected to be not empty: %v", path)
@@ -173,7 +173,7 @@ func RestoreHTTPProbe(c clientset.Interface, ss *appsv1.StatefulSet) error {
 }
 
 // RestorePodHTTPProbe restores the readiness probe for Nginx StatefulSet containers in pod.
-func RestorePodHTTPProbe(ss *appsv1.StatefulSet, pod *corev1.Pod) error {
+func RestorePodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error {
 	path := httpProbe.HTTPGet.Path
 	if path == "" {
 		return fmt.Errorf("path expected to be not empty: %v", path)
@@ -185,14 +185,14 @@ func RestorePodHTTPProbe(ss *appsv1.StatefulSet, pod *corev1.Pod) error {
 	return err
 }
 
-func hasPauseProbe(pod *corev1.Pod) bool {
+func hasPauseProbe(pod *v1.Pod) bool {
 	probe := pod.Spec.Containers[0].ReadinessProbe
 	return probe != nil && reflect.DeepEqual(probe.Exec.Command, pauseProbe.Exec.Command)
 }
 
-var httpProbe = &corev1.Probe{
-	Handler: corev1.Handler{
-		HTTPGet: &corev1.HTTPGetAction{
+var httpProbe = &v1.Probe{
+	Handler: v1.Handler{
+		HTTPGet: &v1.HTTPGetAction{
 			Path: "/index.html",
 			Port: intstr.IntOrString{IntVal: 80},
 		},
@@ -202,16 +202,16 @@ var httpProbe = &corev1.Probe{
 	FailureThreshold: 1,
 }
 
-var pauseProbe = &corev1.Probe{
-	Handler: corev1.Handler{
-		Exec: &corev1.ExecAction{Command: []string{"test", "-f", "/data/statefulset-continue"}},
+var pauseProbe = &v1.Probe{
+	Handler: v1.Handler{
+		Exec: &v1.ExecAction{Command: []string{"test", "-f", "/data/statefulset-continue"}},
 	},
 	PeriodSeconds:    1,
 	SuccessThreshold: 1,
 	FailureThreshold: 1,
 }
 
-type statefulPodsByOrdinal []corev1.Pod
+type statefulPodsByOrdinal []v1.Pod
 
 func (sp statefulPodsByOrdinal) Len() int {
 	return len(sp)
@@ -242,7 +242,7 @@ func ResumeNextPod(c clientset.Interface, ss *appsv1.StatefulSet) {
 	podList := GetPodList(c, ss)
 	resumedPod := ""
 	for _, pod := range podList.Items {
-		if pod.Status.Phase != corev1.PodRunning {
+		if pod.Status.Phase != v1.PodRunning {
 			e2elog.Failf("Found pod in phase %q, cannot resume", pod.Status.Phase)
 		}
 		if podutil.IsPodReady(&pod) || !hasPauseProbe(&pod) {
@@ -259,13 +259,13 @@ func ResumeNextPod(c clientset.Interface, ss *appsv1.StatefulSet) {
 }
 
 // SortStatefulPods sorts pods by their ordinals
-func SortStatefulPods(pods *corev1.PodList) {
+func SortStatefulPods(pods *v1.PodList) {
 	sort.Sort(statefulPodsByOrdinal(pods.Items))
 }
 
 var statefulPodRegex = regexp.MustCompile("(.*)-([0-9]+)$")
 
-func getStatefulPodOrdinal(pod *corev1.Pod) int {
+func getStatefulPodOrdinal(pod *v1.Pod) int {
 	ordinal := -1
 	subMatches := statefulPodRegex.FindStringSubmatch(pod.Name)
 	if len(subMatches) < 3 {
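The hunks above are a mechanical rename of the core API group alias from corev1 to v1. A minimal sketch of the resulting import style; the package name and helper functions below are chosen for illustration only:

	package example // hypothetical package, for illustration

	import (
		appsv1 "k8s.io/api/apps/v1"
		v1 "k8s.io/api/core/v1" // explicit alias matching the package name, as the diff standardizes
	)

	// podRunning reports whether a pod has reached the Running phase.
	func podRunning(pod *v1.Pod) bool {
		return pod.Status.Phase == v1.PodRunning
	}

	// replicasOf dereferences a StatefulSet's replica count, defaulting to 1.
	func replicasOf(ss *appsv1.StatefulSet) int32 {
		if ss.Spec.Replicas == nil {
			return 1
		}
		return *ss.Spec.Replicas
	}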
@@ -23,7 +23,7 @@ import (
 	"time"
 
 	appsv1 "k8s.io/api/apps/v1"
-	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -62,7 +62,7 @@ func CreateStatefulSet(c clientset.Interface, manifestPath, ns string) *appsv1.S
 }
 
 // GetPodList gets the current Pods in ss.
-func GetPodList(c clientset.Interface, ss *appsv1.StatefulSet) *corev1.PodList {
+func GetPodList(c clientset.Interface, ss *appsv1.StatefulSet) *v1.PodList {
 	selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
 	e2efwk.ExpectNoError(err)
 	podList, err := c.CoreV1().Pods(ss.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
@@ -182,7 +182,7 @@ func Scale(c clientset.Interface, ss *appsv1.StatefulSet, count int32) (*appsv1.
 	e2elog.Logf("Scaling statefulset %s to %d", name, count)
 	ss = update(c, ns, name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = count })
 
-	var statefulPodList *corev1.PodList
+	var statefulPodList *v1.PodList
 	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
 		statefulPodList = GetPodList(c, ss)
 		if int32(len(statefulPodList.Items)) == count {
@@ -194,7 +194,7 @@ func Scale(c clientset.Interface, ss *appsv1.StatefulSet, count int32) (*appsv1.
 		unhealthy := []string{}
 		for _, statefulPod := range statefulPodList.Items {
 			delTs, phase, readiness := statefulPod.DeletionTimestamp, statefulPod.Status.Phase, podutil.IsPodReady(&statefulPod)
-			if delTs != nil || phase != corev1.PodRunning || !readiness {
+			if delTs != nil || phase != v1.PodRunning || !readiness {
 				unhealthy = append(unhealthy, fmt.Sprintf("%v: deletion %v, phase %v, readiness %v", statefulPod.Name, delTs, phase, readiness))
 			}
 		}
@@ -313,7 +313,7 @@ func ExecInStatefulPods(c clientset.Interface, ss *appsv1.StatefulSet, cmd strin
 type updateStatefulSetFunc func(*appsv1.StatefulSet)
 
 // VerifyStatefulPodFunc is a func that examines a StatefulSetPod.
-type VerifyStatefulPodFunc func(*corev1.Pod)
+type VerifyStatefulPodFunc func(*v1.Pod)
 
 // VerifyPodAtIndex applies a visitor pattern to the Pod at index in ss. verify is applied to the Pod to "visit" it.
 func VerifyPodAtIndex(c clientset.Interface, index int, ss *appsv1.StatefulSet, verify VerifyStatefulPodFunc) {
@@ -20,7 +20,7 @@ import (
 	"fmt"
 
 	appsv1 "k8s.io/api/apps/v1"
-	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
@@ -32,8 +32,8 @@ import (
 // a RollingUpdateStatefulSetStrategyType with a non-nil RollingUpdate and Partition. All Pods with ordinals less
 // than or equal to the Partition are expected to be at set's current revision. All other Pods are expected to be
 // at its update revision.
-func WaitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *corev1.PodList) {
-	var pods *corev1.PodList
+func WaitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
+	var pods *v1.PodList
 	if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
 		e2elog.Failf("StatefulSet %s/%s attempt to wait for partitioned update with updateStrategy %s",
 			set.Namespace,
@@ -45,7 +45,7 @@ func WaitForPartitionedRollingUpdate(c clientset.Interface, set *appsv1.Stateful
 			set.Namespace,
 			set.Name)
 	}
-	WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *corev1.PodList) (bool, error) {
+	WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
 		set = set2
 		pods = pods2
 		partition := int(*set.Spec.UpdateStrategy.RollingUpdate.Partition)
@@ -102,8 +102,8 @@ func WaitForRunning(c clientset.Interface, numPodsRunning, numPodsReady int32, s
 			shouldBeReady := getStatefulPodOrdinal(&p) < int(numPodsReady)
 			isReady := podutil.IsPodReady(&p)
 			desiredReadiness := shouldBeReady == isReady
-			e2elog.Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, corev1.PodRunning, shouldBeReady, p.Status.Phase, isReady)
-			if p.Status.Phase != corev1.PodRunning || !desiredReadiness {
+			e2elog.Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady)
+			if p.Status.Phase != v1.PodRunning || !desiredReadiness {
 				return false, nil
 			}
 		}
@@ -115,7 +115,7 @@ func WaitForRunning(c clientset.Interface, numPodsRunning, numPodsReady int32, s
 }
 
 // WaitForState periodically polls for the ss and its pods until the until function returns either true or an error
-func WaitForState(c clientset.Interface, ss *appsv1.StatefulSet, until func(*appsv1.StatefulSet, *corev1.PodList) (bool, error)) {
+func WaitForState(c clientset.Interface, ss *appsv1.StatefulSet, until func(*appsv1.StatefulSet, *v1.PodList) (bool, error)) {
 	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
 		func() (bool, error) {
 			ssGet, err := c.AppsV1().StatefulSets(ss.Namespace).Get(ss.Name, metav1.GetOptions{})
@@ -133,7 +133,7 @@ func WaitForState(c clientset.Interface, ss *appsv1.StatefulSet, until func(*app
 // WaitForStatus waits for the StatefulSetStatus's ObservedGeneration to be greater than or equal to set's Generation.
 // The returned StatefulSet contains such a StatefulSetStatus
 func WaitForStatus(c clientset.Interface, set *appsv1.StatefulSet) *appsv1.StatefulSet {
-	WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods *corev1.PodList) (bool, error) {
+	WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) {
 		if set2.Status.ObservedGeneration >= set.Generation {
 			set = set2
 			return true, nil
@@ -149,9 +149,9 @@ func WaitForRunningAndReady(c clientset.Interface, numStatefulPods int32, ss *ap
 }
 
 // WaitForPodReady waits for the Pod named podName in set to exist and have a Ready condition.
-func WaitForPodReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *corev1.PodList) {
-	var pods *corev1.PodList
-	WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *corev1.PodList) (bool, error) {
+func WaitForPodReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) {
+	var pods *v1.PodList
+	WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
 		set = set2
 		pods = pods2
 		for i := range pods.Items {
@@ -165,9 +165,9 @@ func WaitForPodReady(c clientset.Interface, set *appsv1.StatefulSet, podName str
 }
 
 // WaitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition.
-func WaitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *corev1.PodList) {
-	var pods *corev1.PodList
-	WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *corev1.PodList) (bool, error) {
+func WaitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) {
+	var pods *v1.PodList
+	WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
 		set = set2
 		pods = pods2
 		for i := range pods.Items {
@@ -182,15 +182,15 @@ func WaitForPodNotReady(c clientset.Interface, set *appsv1.StatefulSet, podName
 
 // WaitForRollingUpdate waits for all Pods in set to exist and have the correct revision and for the RollingUpdate to
 // complete. set must have a RollingUpdateStatefulSetStrategyType.
-func WaitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *corev1.PodList) {
-	var pods *corev1.PodList
+func WaitForRollingUpdate(c clientset.Interface, set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
+	var pods *v1.PodList
 	if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
 		e2elog.Failf("StatefulSet %s/%s attempt to wait for rolling update with updateStrategy %s",
 			set.Namespace,
 			set.Name,
 			set.Spec.UpdateStrategy.Type)
 	}
-	WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *corev1.PodList) (bool, error) {
+	WaitForState(c, set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
 		set = set2
 		pods = pods2
 		if len(pods.Items) < int(*set.Spec.Replicas) {
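A hedged sketch of driving WaitForState directly, using the generation-catch-up condition that WaitForStatus applies above; the wrapper function name is hypothetical and e2esset is assumed to alias the statefulset framework package:

	// waitForObservedGeneration blocks until the controller has observed ss's generation.
	func waitForObservedGeneration(c clientset.Interface, ss *appsv1.StatefulSet) {
		e2esset.WaitForState(c, ss, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
			return set2.Status.ObservedGeneration >= ss.Generation, nil
		})
	}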
@@ -149,9 +149,6 @@ const (
 	podScheduledBeforeTimeout = PodListTimeout + (20 * time.Second)
 
 	podRespondingTimeout = 15 * time.Minute
-	// ServiceRespondingTimeout is how long to wait for a service to be responding.
-	ServiceRespondingTimeout = 2 * time.Minute
-
 	// ClaimProvisionTimeout is how long claims have to become dynamically provisioned.
 	ClaimProvisionTimeout = 5 * time.Minute
 
@@ -221,11 +218,6 @@ var (
 	ServeHostnameImage = imageutils.GetE2EImage(imageutils.Agnhost)
 )
 
-// GetServicesProxyRequest returns a request for a service proxy.
-func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) {
-	return request.Resource("services").SubResource("proxy"), nil
-}
-
 // RunID is a unique identifier of the e2e run.
 // Beware that this ID is not the same for all tests in the e2e run, because each Ginkgo node creates it separately.
 var RunID = uuid.NewUUID()
@@ -1254,43 +1246,6 @@ func KubectlVersion() (*utilversion.Version, error) {
 	return utilversion.ParseSemantic(matches[1])
 }
 
-// ServiceResponding waits for the service to be responding.
-func ServiceResponding(c clientset.Interface, ns, name string) error {
-	ginkgo.By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name))
-
-	return wait.PollImmediate(Poll, ServiceRespondingTimeout, func() (done bool, err error) {
-		proxyRequest, errProxy := GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
-		if errProxy != nil {
-			e2elog.Logf("Failed to get services proxy request: %v:", errProxy)
-			return false, nil
-		}
-
-		ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
-		defer cancel()
-
-		body, err := proxyRequest.Namespace(ns).
-			Context(ctx).
-			Name(name).
-			Do().
-			Raw()
-		if err != nil {
-			if ctx.Err() != nil {
-				e2elog.Failf("Failed to GET from service %s: %v", name, err)
-				return true, err
-			}
-			e2elog.Logf("Failed to GET from service %s: %v:", name, err)
-			return false, nil
-		}
-		got := string(body)
-		if len(got) == 0 {
-			e2elog.Logf("Service %s: expected non-empty response", name)
-			return false, err // stop polling
-		}
-		e2elog.Logf("Service %s: found nonempty answer: %s", name, got)
-		return true, nil
-	})
-}
 
 // RestclientConfig returns a config holds the information needed to build connection to kubernetes clusters.
 func RestclientConfig(kubeContext string) (*clientcmdapi.Config, error) {
 	e2elog.Logf(">>> kubeConfig: %s", TestContext.KubeConfig)
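With these removals, callers migrate from the framework package to the new e2eservice package. A hedged sketch of the new proxy-GET call path; the wrapper function is hypothetical, and only the calls visible in this diff are used:

	// proxyGET issues a GET against a service through the API server proxy.
	// Before this commit the request came from framework.GetServicesProxyRequest.
	func proxyGET(c clientset.Interface, ns, name string) ([]byte, error) {
		req, err := e2eservice.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
		if err != nil {
			return nil, err
		}
		return req.Namespace(ns).Name(name).Do().Raw()
	}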
@@ -22,6 +22,7 @@ go_library(
 	"//test/e2e/framework:go_default_library",
 	"//test/e2e/framework/log:go_default_library",
 	"//test/e2e/framework/pod:go_default_library",
+	"//test/e2e/framework/service:go_default_library",
 	"//test/e2e/instrumentation/common:go_default_library",
 	"//test/e2e/instrumentation/logging/utils:go_default_library",
 	"//vendor/github.com/onsi/ginkgo:go_default_library",
@@ -25,6 +25,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
 
 	"github.com/onsi/ginkgo"
@@ -83,7 +84,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 
 	ginkgo.By("Checking to make sure we get a response from the Kibana UI.")
 	err = wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
-		req, err := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
+		req, err := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 		if err != nil {
 			e2elog.Logf("Failed to get services proxy request: %v", err)
 			return false, nil
@@ -28,6 +28,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
 )
 
@@ -92,7 +93,7 @@ func (p *esLogProvider) Init() error {
 	err = nil
 	var body []byte
 	for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
-		proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
+		proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 		if errProxy != nil {
 			e2elog.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
 			continue
@@ -126,7 +127,7 @@ func (p *esLogProvider) Init() error {
 	e2elog.Logf("Checking health of Elasticsearch service.")
 	healthy := false
 	for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
-		proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
+		proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 		if errProxy != nil {
 			e2elog.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
 			continue
@@ -174,7 +175,7 @@ func (p *esLogProvider) Cleanup() {
 func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
 	f := p.Framework
 
-	proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
+	proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 	if errProxy != nil {
 		e2elog.Logf("Failed to get services proxy request: %v", errProxy)
 		return nil
@@ -37,6 +37,7 @@ go_library(
 	"//test/e2e/framework/job:go_default_library",
 	"//test/e2e/framework/log:go_default_library",
 	"//test/e2e/framework/pod:go_default_library",
+	"//test/e2e/framework/service:go_default_library",
 	"//test/e2e/framework/testfiles:go_default_library",
 	"//test/e2e/scheduling:go_default_library",
 	"//test/utils:go_default_library",
@@ -64,6 +64,7 @@ import (
 	jobutil "k8s.io/kubernetes/test/e2e/framework/job"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	"k8s.io/kubernetes/test/e2e/framework/testfiles"
 	"k8s.io/kubernetes/test/e2e/scheduling"
 	testutils "k8s.io/kubernetes/test/utils"
|
|||||||
})
|
})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
|
|
||||||
service, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
|
e2eservice, err := c.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
|
|
||||||
if len(service.Spec.Ports) != 1 {
|
if len(e2eservice.Spec.Ports) != 1 {
|
||||||
e2elog.Failf("1 port is expected")
|
e2elog.Failf("1 port is expected")
|
||||||
}
|
}
|
||||||
port := service.Spec.Ports[0]
|
port := e2eservice.Spec.Ports[0]
|
||||||
if port.Port != int32(servicePort) {
|
if port.Port != int32(servicePort) {
|
||||||
e2elog.Failf("Wrong service port: %d", port.Port)
|
e2elog.Failf("Wrong service port: %d", port.Port)
|
||||||
}
|
}
|
||||||
@ -2180,7 +2181,7 @@ func waitForGuestbookResponse(c clientset.Interface, cmd, arg, expectedResponse
|
|||||||
}
|
}
|
||||||
|
|
||||||
func makeRequestToGuestbook(c clientset.Interface, cmd, value string, ns string) (string, error) {
|
func makeRequestToGuestbook(c clientset.Interface, cmd, value string, ns string) (string, error) {
|
||||||
proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
|
proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
|
||||||
if errProxy != nil {
|
if errProxy != nil {
|
||||||
return "", errProxy
|
return "", errProxy
|
||||||
}
|
}
|
||||||
|
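Note that the renamed local above (service to e2eservice) matches the package alias imported earlier in the same file. Go allows a local variable to shadow a package name inside its scope, so this compiles, though the alias becomes unusable for the rest of that block. A minimal illustration of the rule, with hypothetical names:

	package example

	import "fmt"

	func demo() string {
		greeting := fmt.Sprintf("hello") // the fmt package is still visible here
		fmt := greeting                  // local now shadows the package for the rest of the scope
		return fmt                       // refers to the string, not the package
	}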
@@ -69,6 +69,7 @@ go_library(
 	"//test/e2e/framework/node:go_default_library",
 	"//test/e2e/framework/pod:go_default_library",
 	"//test/e2e/framework/providers/gce:go_default_library",
+	"//test/e2e/framework/service:go_default_library",
 	"//test/e2e/framework/ssh:go_default_library",
 	"//test/e2e/network/scale:go_default_library",
 	"//test/images/agnhost/net/nat:go_default_library",
@@ -26,6 +26,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 
 	"github.com/onsi/ginkgo"
 )
@@ -129,7 +130,7 @@ var _ = SIGDescribe("DNS", func() {
 		testServiceSelector := map[string]string{
 			"dns-test": "true",
 		}
-		headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
+		headlessService := e2eservice.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
 		_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
 		framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName)
 		defer func() {
@@ -139,7 +140,7 @@ var _ = SIGDescribe("DNS", func() {
 		}()
 
 		regularServiceName := "test-service-2"
-		regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector)
+		regularService := e2eservice.CreateServiceSpec(regularServiceName, "", false, testServiceSelector)
 		regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService)
 		framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName)
 
@@ -178,7 +179,7 @@ var _ = SIGDescribe("DNS", func() {
 		testServiceSelector := map[string]string{
 			"dns-test": "true",
 		}
-		headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
+		headlessService := e2eservice.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
 		_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
 		framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName)
 		defer func() {
@@ -188,7 +189,7 @@ var _ = SIGDescribe("DNS", func() {
 		}()
 
 		regularServiceName := "test-service-2"
-		regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector)
+		regularService := e2eservice.CreateServiceSpec(regularServiceName, "", false, testServiceSelector)
 		regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService)
 		framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName)
 		defer func() {
@@ -235,7 +236,7 @@ var _ = SIGDescribe("DNS", func() {
 		}
 		serviceName := "dns-test-service-2"
 		podHostname := "dns-querier-2"
-		headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector)
+		headlessService := e2eservice.CreateServiceSpec(serviceName, "", true, testServiceSelector)
 		_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
 		framework.ExpectNoError(err, "failed to create headless service: %s", serviceName)
 
@@ -276,7 +277,7 @@ var _ = SIGDescribe("DNS", func() {
 		}
 		serviceName := "dns-test-service-2"
 		podHostname := "dns-querier-2"
-		headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector)
+		headlessService := e2eservice.CreateServiceSpec(serviceName, "", true, testServiceSelector)
 		_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
 		framework.ExpectNoError(err, "failed to create headless service: %s", serviceName)
 
@@ -314,7 +315,7 @@ var _ = SIGDescribe("DNS", func() {
 		// Create a test ExternalName service.
 		ginkgo.By("Creating a test externalName service")
 		serviceName := "dns-test-service-3"
-		externalNameService := framework.CreateServiceSpec(serviceName, "foo.example.com", false, nil)
+		externalNameService := e2eservice.CreateServiceSpec(serviceName, "foo.example.com", false, nil)
 		_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService)
 		framework.ExpectNoError(err, "failed to create ExternalName service: %s", serviceName)
 
@@ -337,7 +338,7 @@ var _ = SIGDescribe("DNS", func() {
 
 		// Test changing the externalName field
 		ginkgo.By("changing the externalName to bar.example.com")
-		_, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
+		_, err = e2eservice.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
 			s.Spec.ExternalName = "bar.example.com"
 		})
 		framework.ExpectNoError(err, "failed to change externalName of service: %s", serviceName)
@@ -354,7 +355,7 @@ var _ = SIGDescribe("DNS", func() {
 
 		// Test changing type from ExternalName to ClusterIP
 		ginkgo.By("changing the service to type=ClusterIP")
-		_, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
+		_, err = e2eservice.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
 			s.Spec.Type = v1.ServiceTypeClusterIP
 			s.Spec.Ports = []v1.ServicePort{
 				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
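The final hunk above is cut off at the hunk boundary. For readability, a hedged reconstruction of the full call shape: the closing lines and the error message are inferred from the pattern of the earlier externalName hunk, not shown in this diff:

	_, err = e2eservice.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
		s.Spec.Type = v1.ServiceTypeClusterIP
		s.Spec.Ports = []v1.ServicePort{
			{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
		}
	})
	framework.ExpectNoError(err, "failed to change service type to ClusterIP") // message hypothetical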
@@ -20,9 +20,10 @@ import (
 	"fmt"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 
 	"github.com/onsi/ginkgo"
 )
@@ -405,12 +406,12 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) {
 
 	f := t.f
 	serviceName := "dns-externalname-upstream-test"
-	externalNameService := framework.CreateServiceSpec(serviceName, googleDNSHostname, false, nil)
+	externalNameService := e2eservice.CreateServiceSpec(serviceName, googleDNSHostname, false, nil)
 	if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService); err != nil {
 		ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err))
 	}
 	serviceNameLocal := "dns-externalname-upstream-local"
-	externalNameServiceLocal := framework.CreateServiceSpec(serviceNameLocal, fooHostname, false, nil)
+	externalNameServiceLocal := e2eservice.CreateServiceSpec(serviceNameLocal, fooHostname, false, nil)
 	if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameServiceLocal); err != nil {
 		ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err))
 	}
@ -24,6 +24,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo"
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
@ -32,8 +33,7 @@ import (
|
|||||||
"k8s.io/kubernetes/test/e2e/framework"
|
"k8s.io/kubernetes/test/e2e/framework"
|
||||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||||
|
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
|
||||||
"github.com/onsi/ginkgo"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -111,7 +111,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
|
|||||||
framework.ExpectNoError(err, "waiting for all pods to respond")
|
framework.ExpectNoError(err, "waiting for all pods to respond")
|
||||||
e2elog.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
|
e2elog.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
|
||||||
|
|
||||||
err = framework.ServiceResponding(c, ns.Name, backendSvcName)
|
err = e2eservice.WaitForServiceResponding(c, ns.Name, backendSvcName)
|
||||||
framework.ExpectNoError(err, "waiting for the service to respond")
|
framework.ExpectNoError(err, "waiting for the service to respond")
|
||||||
}
|
}
|
||||||
|
|
||||||
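The ClusterDns example picks up a rename as well as a move: framework.ServiceResponding becomes e2eservice.WaitForServiceResponding. A minimal sketch of the new call, assuming the (client, namespace, name) signature shown in the hunk (wrapper name hypothetical):

package network

import (
	clientset "k8s.io/client-go/kubernetes"
	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
)

// waitForBackend waits until the named service responds, using the renamed
// helper from the new package.
func waitForBackend(c clientset.Interface, ns, svcName string) error {
	return e2eservice.WaitForServiceResponding(c, ns, svcName)
}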
@@ -29,6 +29,7 @@ import (
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 "k8s.io/kubernetes/test/e2e/framework/providers/gce"
+e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 gcecloud "k8s.io/legacy-cloud-providers/gce"

 "github.com/onsi/ginkgo"
@@ -72,17 +73,17 @@ var _ = SIGDescribe("Firewall rule", func() {
 framework.ExpectNoError(err)
 e2elog.Logf("Got cluster ID: %v", clusterID)

-jig := framework.NewServiceTestJig(cs, serviceName)
-nodeList := jig.GetNodes(framework.MaxNodesForEndpointsTests)
+jig := e2eservice.NewTestJig(cs, serviceName)
+nodeList := jig.GetNodes(e2eservice.MaxNodesForEndpointsTests)
 gomega.Expect(nodeList).NotTo(gomega.BeNil())
-nodesNames := jig.GetNodesNames(framework.MaxNodesForEndpointsTests)
+nodesNames := jig.GetNodesNames(e2eservice.MaxNodesForEndpointsTests)
 if len(nodesNames) <= 0 {
 e2elog.Failf("Expect at least 1 node, got: %v", nodesNames)
 }
 nodesSet := sets.NewString(nodesNames...)

 ginkgo.By("Creating a LoadBalancer type service with ExternalTrafficPolicy=Global")
-svc := jig.CreateLoadBalancerService(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) {
+svc := jig.CreateLoadBalancerService(ns, serviceName, e2eservice.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) {
 svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: firewallTestHTTPPort}}
 svc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges
 })
@@ -95,7 +96,7 @@ var _ = SIGDescribe("Firewall rule", func() {
 framework.ExpectNoError(err)
 ginkgo.By("Waiting for the local traffic health check firewall rule to be deleted")
 localHCFwName := gce.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false)
-_, err := gce.WaitForFirewallRule(gceCloud, localHCFwName, false, framework.LoadBalancerCleanupTimeout)
+_, err := gce.WaitForFirewallRule(gceCloud, localHCFwName, false, e2eservice.LoadBalancerCleanupTimeout)
 framework.ExpectNoError(err)
 }()
 svcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP
@@ -121,17 +122,17 @@ var _ = SIGDescribe("Firewall rule", func() {
 })

 ginkgo.By("Waiting for the nodes health check firewall rule to be deleted")
-_, err = gce.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, framework.LoadBalancerCleanupTimeout)
+_, err = gce.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, e2eservice.LoadBalancerCleanupTimeout)
 framework.ExpectNoError(err)

 ginkgo.By("Waiting for the correct local traffic health check firewall rule to be created")
 localHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false)
-fw, err = gce.WaitForFirewallRule(gceCloud, localHCFw.Name, true, framework.LoadBalancerCreateTimeoutDefault)
+fw, err = gce.WaitForFirewallRule(gceCloud, localHCFw.Name, true, e2eservice.LoadBalancerCreateTimeoutDefault)
 framework.ExpectNoError(err)
 err = gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)
 framework.ExpectNoError(err)

-ginkgo.By(fmt.Sprintf("Creating netexec pods on at most %v nodes", framework.MaxNodesForEndpointsTests))
+ginkgo.By(fmt.Sprintf("Creating netexec pods on at most %v nodes", e2eservice.MaxNodesForEndpointsTests))
 for i, nodeName := range nodesNames {
 podName := fmt.Sprintf("netexec%v", i)
 jig.LaunchNetexecPodOnNode(f, nodeName, podName, firewallTestHTTPPort, firewallTestUDPPort, true)
@@ -144,7 +145,7 @@ var _ = SIGDescribe("Firewall rule", func() {

 // Send requests from outside of the cluster because internal traffic is whitelisted
 ginkgo.By("Accessing the external service ip from outside, all non-master nodes should be reached")
-err = framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)
+err = framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, e2eservice.LoadBalancerCreateTimeoutDefault, nodesSet)
 framework.ExpectNoError(err)

 // Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster
@@ -165,12 +166,12 @@ var _ = SIGDescribe("Firewall rule", func() {
 nodesSet.Insert(nodesNames[0])
 gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags)
 // Make sure traffic is recovered before exit
-err = framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)
+err = framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, e2eservice.LoadBalancerCreateTimeoutDefault, nodesSet)
 framework.ExpectNoError(err)
 }()

 ginkgo.By("Accessing serivce through the external ip and examine got no response from the node without tags")
-err = framework.TestHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet, 15)
+err = framework.TestHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, e2eservice.LoadBalancerCreateTimeoutDefault, nodesSet, 15)
 framework.ExpectNoError(err)
 })

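The firewall test shows the jig migration pattern that recurs through the rest of this commit: the constructor and the LoadBalancer timeout constants move together. A sketch under those assumptions (package and helper names hypothetical):

package network

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
)

// createFirewallLB mirrors the call sites above: e2eservice.NewTestJig
// replaces framework.NewServiceTestJig, and the create timeout constant is
// now e2eservice.LoadBalancerCreateTimeoutDefault.
func createFirewallLB(cs clientset.Interface, ns, name string, port int32) *v1.Service {
	jig := e2eservice.NewTestJig(cs, name)
	return jig.CreateLoadBalancerService(ns, name, e2eservice.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) {
		svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: port}}
	})
}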
@@ -39,6 +39,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework/ingress"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/providers/gce"
+e2eservice "k8s.io/kubernetes/test/e2e/framework/service"

 "github.com/onsi/ginkgo"
 )
@@ -170,7 +171,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 }, map[string]string{})

 ginkgo.By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name))
-pollErr := wait.Poll(2*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
+pollErr := wait.Poll(2*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
 ing, err := f.ClientSet.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
 framework.ExpectNoError(err)
 annotations := ing.Annotations
@@ -301,7 +302,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
 framework.ExpectNoError(err)
 }
-err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
+err = wait.Poll(5*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
 if err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false)); err != nil {
 e2elog.Logf("ginkgo.Failed to verify IG backend service: %v", err)
 return false, nil
@@ -319,7 +320,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
 framework.ExpectNoError(err)
 }
-err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
+err = wait.Poll(5*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
 if err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)); err != nil {
 e2elog.Logf("ginkgo.Failed to verify NEG backend service: %v", err)
 return false, nil
@@ -404,7 +405,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale)
 framework.ExpectNoError(err)

-err = wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
+err = wait.Poll(10*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
 res, err := jig.GetDistinctResponseFromIngress()
 if err != nil {
 return false, nil
@@ -421,7 +422,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod
 _, err = f.ClientSet.AppsV1().Deployments(ns).Update(deploy)
 framework.ExpectNoError(err)
-err = wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
+err = wait.Poll(10*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
 res, err := jig.GetDistinctResponseFromIngress()
 framework.ExpectNoError(err)
 deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{})
@@ -787,7 +788,7 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat
 jig.TryDeleteIngress()
 }
 ginkgo.By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName))
-err := wait.Poll(framework.LoadBalancerPollInterval, framework.LoadBalancerCleanupTimeout, func() (bool, error) {
+err := wait.Poll(e2eservice.LoadBalancerPollInterval, e2eservice.LoadBalancerCleanupTimeout, func() (bool, error) {
 if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !errors.IsNotFound(err) {
 e2elog.Logf("ginkgo.Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err)
 return false, nil
@@ -830,10 +831,10 @@ func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.TestJig,

 ginkgo.By("waiting for Ingress to come up with ip: " + ip)
 httpClient := ingress.BuildInsecureClient(ingress.IngressReqTimeout)
-framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false))
+framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false))

 ginkgo.By("should reject HTTP traffic")
-framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true))
+framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true))
 }

 func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) {
@@ -848,12 +849,12 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ
 framework.ExpectNoError(err, "ginkgo.Failed to create re-encryption ingress")

 ginkgo.By(fmt.Sprintf("Waiting for ingress %s to come up", ingCreated.Name))
-ingIP, err := jig.WaitForIngressAddress(f.ClientSet, f.Namespace.Name, ingCreated.Name, framework.LoadBalancerPollTimeout)
+ingIP, err := jig.WaitForIngressAddress(f.ClientSet, f.Namespace.Name, ingCreated.Name, e2eservice.LoadBalancerPollTimeout)
 framework.ExpectNoError(err, "ginkgo.Failed to wait for ingress IP")

 ginkgo.By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP))
 timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout}
-err = wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) {
+err = wait.PollImmediate(e2eservice.LoadBalancerPollInterval, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
 resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "")
 if err != nil {
 e2elog.Logf("SimpleGET failed: %v", err)
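All of the L7 polling loops above swap only the timeout constants; the wait.Poll pattern itself is unchanged. A condensed sketch (function name hypothetical):

package network

import (
	"k8s.io/apimachinery/pkg/util/wait"
	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
)

// pollLoadBalancer retries a caller-supplied check until it passes or the
// relocated e2eservice poll timeout elapses.
func pollLoadBalancer(check func() bool) error {
	return wait.Poll(e2eservice.LoadBalancerPollInterval, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
		return check(), nil
	})
}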
@@ -31,6 +31,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/providers/gce"
+e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 gcecloud "k8s.io/legacy-cloud-providers/gce"

 "github.com/onsi/ginkgo"
@@ -50,7 +51,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {

 ginkgo.AfterEach(func() {
 if ginkgo.CurrentGinkgoTestDescription().Failed {
-framework.DescribeSvc(f.Namespace.Name)
+e2eservice.DescribeSvc(f.Namespace.Name)
 }
 for _, lb := range serviceLBNames {
 e2elog.Logf("cleaning gce resource for %s", lb)
@@ -60,12 +61,12 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
 serviceLBNames = []string{}
 })
 ginkgo.It("should be able to create and tear down a standard-tier load balancer [Slow]", func() {
-lagTimeout := framework.LoadBalancerLagTimeoutDefault
-createTimeout := framework.GetServiceLoadBalancerCreationTimeout(cs)
+lagTimeout := e2eservice.LoadBalancerLagTimeoutDefault
+createTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(cs)

 svcName := "net-tiers-svc"
 ns := f.Namespace.Name
-jig := framework.NewServiceTestJig(cs, svcName)
+jig := e2eservice.NewTestJig(cs, svcName)

 ginkgo.By("creating a pod to be part of the service " + svcName)
 jig.RunOrFail(ns, nil)
@@ -134,7 +135,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
 })
 })

-func waitAndVerifyLBWithTier(jig *framework.ServiceTestJig, ns, svcName, existingIP string, waitTimeout, checkTimeout time.Duration) string {
+func waitAndVerifyLBWithTier(jig *e2eservice.TestJig, ns, svcName, existingIP string, waitTimeout, checkTimeout time.Duration) string {
 var svc *v1.Service
 if existingIP == "" {
 // Creating the LB for the first time; wait for any ingress IP to show
@@ -147,7 +148,7 @@ func waitAndVerifyLBWithTier(jig *framework.ServiceTestJig, ns, svcName, existin

 svcPort := int(svc.Spec.Ports[0].Port)
 lbIngress := &svc.Status.LoadBalancer.Ingress[0]
-ingressIP := framework.GetIngressPoint(lbIngress)
+ingressIP := e2eservice.GetIngressPoint(lbIngress)

 ginkgo.By("running sanity and reachability checks")
 if svc.Spec.LoadBalancerIP != "" {
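Two kinds of timeout appear in the network-tiers test above: a fixed lag constant and a provider-aware creation timeout computed from the cluster. A sketch of reading both from the new package (function name hypothetical):

package network

import (
	"time"

	clientset "k8s.io/client-go/kubernetes"
	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
)

// lbTimeouts returns the relocated lag constant alongside the creation
// timeout, which e2eservice derives from the cluster handed to it.
func lbTimeouts(cs clientset.Interface) (lag, create time.Duration) {
	return e2eservice.LoadBalancerLagTimeoutDefault, e2eservice.GetServiceLoadBalancerCreationTimeout(cs)
}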
[File diff suppressed because it is too large]
@@ -52,6 +52,7 @@ go_library(
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/providers/gce:go_default_library",
 "//test/e2e/framework/replicaset:go_default_library",
+"//test/e2e/framework/service:go_default_library",
 "//test/utils:go_default_library",
 "//test/utils/image:go_default_library",
 "//vendor/github.com/onsi/ginkgo:go_default_library",
@@ -22,7 +22,7 @@ import (
 "strconv"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
@@ -34,6 +34,7 @@ import (
 watchtools "k8s.io/client-go/tools/watch"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2eservice "k8s.io/kubernetes/test/e2e/framework/service"

 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
@@ -113,10 +114,10 @@ var _ = SIGDescribe("LimitRange", func() {
 if event.Type != watch.Added {
 e2elog.Failf("Failed to observe limitRange creation : %v", event)
 }
-case <-time.After(framework.ServiceRespondingTimeout):
+case <-time.After(e2eservice.RespondingTimeout):
 e2elog.Failf("Timeout while waiting for LimitRange creation")
 }
-case <-time.After(framework.ServiceRespondingTimeout):
+case <-time.After(e2eservice.RespondingTimeout):
 e2elog.Failf("Timeout while waiting for LimitRange list complete")
 }

@@ -205,7 +206,7 @@ var _ = SIGDescribe("LimitRange", func() {
 framework.ExpectNoError(err)

 ginkgo.By("Verifying the LimitRange was deleted")
-gomega.Expect(wait.Poll(time.Second*5, framework.ServiceRespondingTimeout, func() (bool, error) {
+gomega.Expect(wait.Poll(time.Second*5, e2eservice.RespondingTimeout, func() (bool, error) {
 selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
 options := metav1.ListOptions{LabelSelector: selector.String()}
 limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
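Note the rename in the LimitRange hunks: framework.ServiceRespondingTimeout becomes e2eservice.RespondingTimeout, with the package alias now carrying the "service" part of the name. A minimal sketch of the same select/time.After shape (package and function names assumed):

package scheduling

import (
	"time"

	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
)

// waitOrTimeout is a hypothetical helper showing the renamed constant in the
// pattern used by the test above.
func waitOrTimeout(done <-chan struct{}) bool {
	select {
	case <-done:
		return true
	case <-time.After(e2eservice.RespondingTimeout):
		return false
	}
}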
@@ -15,6 +15,7 @@ go_library(
 "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/log:go_default_library",
+"//test/e2e/framework/service:go_default_library",
 "//test/utils:go_default_library",
 "//vendor/github.com/onsi/ginkgo:go_default_library",
 ],
@@ -27,6 +27,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 testutils "k8s.io/kubernetes/test/utils"

 "github.com/onsi/ginkgo"
@@ -62,7 +63,7 @@ var _ = SIGDescribe("Kubernetes Dashboard [Feature:Dashboard]", func() {
 ginkgo.By("Checking to make sure we get a response from the kubernetes-dashboard.")
 err = wait.Poll(framework.Poll, serverStartTimeout, func() (bool, error) {
 var status int
-proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
+proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 if errProxy != nil {
 e2elog.Logf("Get services proxy request failed: %v", errProxy)
 }
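The dashboard test only needs the proxy-request builder, which moved with the rest of the service utilities. A sketch assuming the same two-argument shape and return values as the call above (package and wrapper names hypothetical):

package ui

import (
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
)

// servicesProxyGet builds a GET request routed through the API server's
// services proxy, using the relocated helper.
func servicesProxyGet(c clientset.Interface) (*restclient.Request, error) {
	return e2eservice.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
}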
@@ -38,6 +38,7 @@ go_library(
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/job:go_default_library",
 "//test/e2e/framework/log:go_default_library",
+"//test/e2e/framework/service:go_default_library",
 "//test/e2e/framework/statefulset:go_default_library",
 "//test/e2e/framework/testfiles:go_default_library",
 "//test/e2e/scheduling:go_default_library",
@@ -20,6 +20,7 @@ import (
 v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
+e2eservice "k8s.io/kubernetes/test/e2e/framework/service"

 "github.com/onsi/ginkgo"
 )
@@ -28,7 +29,7 @@ import (
 // after a cluster upgrade. During a master-only upgrade, it will test
 // that a service remains available during the upgrade.
 type ServiceUpgradeTest struct {
-jig *framework.ServiceTestJig
+jig *e2eservice.TestJig
 tcpService *v1.Service
 tcpIngressIP string
 svcPort int
@@ -42,7 +43,7 @@ func shouldTestPDBs() bool { return framework.ProviderIs("gce", "gke") }
 // Setup creates a service with a load balancer and makes sure it's reachable.
 func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {
 serviceName := "service-test"
-jig := framework.NewServiceTestJig(f.ClientSet, serviceName)
+jig := e2eservice.NewTestJig(f.ClientSet, serviceName)

 ns := f.Namespace

@@ -50,11 +51,11 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {
 tcpService := jig.CreateTCPServiceOrFail(ns.Name, func(s *v1.Service) {
 s.Spec.Type = v1.ServiceTypeLoadBalancer
 })
-tcpService = jig.WaitForLoadBalancerOrFail(ns.Name, tcpService.Name, framework.LoadBalancerCreateTimeoutDefault)
+tcpService = jig.WaitForLoadBalancerOrFail(ns.Name, tcpService.Name, e2eservice.LoadBalancerCreateTimeoutDefault)
 jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)

 // Get info to hit it with
-tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
+tcpIngressIP := e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
 svcPort := int(tcpService.Spec.Ports[0].Port)

 ginkgo.By("creating pod to be part of service " + serviceName)
@@ -67,9 +68,9 @@ func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {

 // Hit it once before considering ourselves ready
 ginkgo.By("hitting the pod through the service's LoadBalancer")
-timeout := framework.LoadBalancerLagTimeoutDefault
+timeout := e2eservice.LoadBalancerLagTimeoutDefault
 if framework.ProviderIs("aws") {
-timeout = framework.LoadBalancerLagTimeoutAWS
+timeout = e2eservice.LoadBalancerLagTimeoutAWS
 }
 jig.TestReachableHTTP(tcpIngressIP, svcPort, timeout)

@@ -102,7 +103,7 @@ func (t *ServiceUpgradeTest) test(f *framework.Framework, done <-chan struct{},
 // Continuous validation
 ginkgo.By("continuously hitting the pod through the service's LoadBalancer")
 wait.Until(func() {
-t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault)
+t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, e2eservice.LoadBalancerLagTimeoutDefault)
 }, framework.Poll, done)
 } else {
 // Block until upgrade is done
@@ -112,6 +113,6 @@ func (t *ServiceUpgradeTest) test(f *framework.Framework, done <-chan struct{},

 // Sanity check and hit it once more
 ginkgo.By("hitting the pod through the service's LoadBalancer")
-t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault)
+t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, e2eservice.LoadBalancerLagTimeoutDefault)
 t.jig.SanityCheckService(t.tcpService, v1.ServiceTypeLoadBalancer)
 }
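ServiceUpgradeTest stitches several of the moved pieces together. A condensed sketch of the Setup flow after this commit (helper name hypothetical; error handling elided, as in the jig's OrFail style):

package upgrades

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
)

// setupLoadBalancer condenses the Setup changes above: jig construction, the
// load-balancer wait, and ingress-point extraction all come from e2eservice.
func setupLoadBalancer(f *framework.Framework, name string) (ip string, port int) {
	jig := e2eservice.NewTestJig(f.ClientSet, name)
	svc := jig.CreateTCPServiceOrFail(f.Namespace.Name, func(s *v1.Service) {
		s.Spec.Type = v1.ServiceTypeLoadBalancer
	})
	svc = jig.WaitForLoadBalancerOrFail(f.Namespace.Name, svc.Name, e2eservice.LoadBalancerCreateTimeoutDefault)
	return e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]), int(svc.Spec.Ports[0].Port)
}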
@@ -35,6 +35,7 @@ go_library(
 "//test/e2e/framework/metrics:go_default_library",
 "//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
+"//test/e2e/framework/service:go_default_library",
 "//test/utils/image:go_default_library",
 "//vendor/github.com/onsi/ginkgo:go_default_library",
 "//vendor/github.com/onsi/gomega:go_default_library",
@@ -24,6 +24,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+e2eservice "k8s.io/kubernetes/test/e2e/framework/service"

 "github.com/onsi/ginkgo"
 )
@@ -42,7 +43,7 @@ var _ = SIGDescribe("Services", func() {
 serviceName := "nodeport-test"
 ns := f.Namespace.Name

-jig := framework.NewServiceTestJig(cs, serviceName)
+jig := e2eservice.NewTestJig(cs, serviceName)
 nodeIP, err := e2enode.PickIP(jig.Client)
 if err != nil {
 e2elog.Logf("Unexpected error occurred: %v", err)
@@ -51,11 +52,11 @@ var _ = SIGDescribe("Services", func() {
 framework.ExpectNoErrorWithOffset(0, err)

 ginkgo.By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
-service := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) {
+e2eservice := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) {
 svc.Spec.Type = v1.ServiceTypeNodePort
 })
-jig.SanityCheckService(service, v1.ServiceTypeNodePort)
-nodePort := int(service.Spec.Ports[0].NodePort)
+jig.SanityCheckService(e2eservice, v1.ServiceTypeNodePort)
+nodePort := int(e2eservice.Spec.Ports[0].NodePort)

 ginkgo.By("creating Pod to be part of service " + serviceName)
 jig.RunOrFail(ns, nil)
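One wrinkle in the NodePort hunk above: the local variable service was renamed to e2eservice, which shadows the package alias of the same name for the rest of that block. A sketch of the effect (function name hypothetical):

package network

import (
	v1 "k8s.io/api/core/v1"
	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
)

// nodePortOf shows the shadowing: after the assignment, the identifier
// e2eservice refers to the local *v1.Service, so package-level helpers such
// as e2eservice.GetIngressPoint are unreachable in this scope.
func nodePortOf(jig *e2eservice.TestJig, ns string) int32 {
	e2eservice := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeNodePort
	})
	return e2eservice.Spec.Ports[0].NodePort
}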
@@ -34,7 +34,7 @@ import (
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

-resapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
+kubeletpodresourcesv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"

 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
@@ -102,7 +102,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 Expect(devId1).To(Not(Equal("")))

 podResources, err := getNodeDevices()
-var resourcesForOurPod *resapi.PodResources
+var resourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
 e2elog.Logf("pod resources %v", podResources)
 Expect(err).To(BeNil())
 Expect(len(podResources.PodResources)).To(Equal(2))
@@ -26,6 +26,7 @@ go_library(
 "//staging/src/k8s.io/client-go/rest:go_default_library",
 "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
 "//test/e2e/framework:go_default_library",
+"//test/e2e/framework/service:go_default_library",
 "//vendor/k8s.io/klog:go_default_library",
 ],
 )
@@ -29,7 +29,7 @@ import (
 "path/filepath"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
@@ -39,6 +39,7 @@ import (
 "k8s.io/client-go/tools/clientcmd"
 "k8s.io/kubernetes/pkg/api/legacyscheme"
 e2e "k8s.io/kubernetes/test/e2e/framework"
+"k8s.io/kubernetes/test/e2e/framework/service"

 "k8s.io/klog"
 )
@@ -261,7 +262,7 @@ func main() {
 klog.Warningf("Failed to build restclient: %v", err)
 return
 }
-proxyRequest, errProxy := e2e.GetServicesProxyRequest(client, rclient.Get())
+proxyRequest, errProxy := service.GetServicesProxyRequest(client, rclient.Get())
 if errProxy != nil {
 klog.Warningf("Get services proxy request failed: %v", errProxy)
 return