diff --git a/test/e2e/network/proxy.go b/test/e2e/network/proxy.go
index 584ef0a801f..41813427664 100644
--- a/test/e2e/network/proxy.go
+++ b/test/e2e/network/proxy.go
@@ -29,6 +29,7 @@ import (
"sync"
"time"
+ appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -38,11 +39,10 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/transport"
"k8s.io/kubernetes/test/e2e/framework"
+ e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
- e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
"k8s.io/kubernetes/test/e2e/network/common"
- testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@@ -133,52 +133,95 @@ var _ = common.SIGDescribe("Proxy", func() {
}, metav1.CreateOptions{})
framework.ExpectNoError(err)
- // Make an RC with a single pod. The 'porter' image is
+ // Make a deployment with a single pod. The 'porter' image is
// a simple server which serves the values of the
// environmental variables below.
ginkgo.By("starting an echo server on multiple ports")
- pods := []*v1.Pod{}
- cfg := testutils.RCConfig{
- Client: f.ClientSet,
- Image: imageutils.GetE2EImage(imageutils.Agnhost),
- Command: []string{"/agnhost", "porter"},
- Name: service.Name,
- Namespace: f.Namespace.Name,
- Replicas: 1,
- PollInterval: time.Second,
- Env: map[string]string{
- "SERVE_PORT_80": `test`,
- "SERVE_PORT_1080": `test`,
- "SERVE_PORT_160": "foo",
- "SERVE_PORT_162": "bar",
- "SERVE_TLS_PORT_443": `test`,
- "SERVE_TLS_PORT_460": `tls baz`,
- "SERVE_TLS_PORT_462": `tls qux`,
+ deploymentConfig := e2edeployment.NewDeployment(service.Name,
+ 1,
+ labels,
+ service.Name,
+ imageutils.GetE2EImage(imageutils.Agnhost),
+ appsv1.RecreateDeploymentStrategyType)
+ deploymentConfig.Spec.Template.Spec.Containers[0].Command = []string{"/agnhost", "porter"}
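+ // porter answers on each port named by a SERVE_PORT_*/SERVE_TLS_PORT_* variable with that variable's value.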
+ deploymentConfig.Spec.Template.Spec.Containers[0].Env = []v1.EnvVar{
+ {
+ Name: "SERVE_PORT_80",
+ Value: `test`,
},
- Ports: map[string]int{
- "dest1": 160,
- "dest2": 162,
-
- "tlsdest1": 460,
- "tlsdest2": 462,
+ {
+ Name: "SERVE_PORT_1080",
+ Value: `test`,
},
- ReadinessProbe: &v1.Probe{
- ProbeHandler: v1.ProbeHandler{
- HTTPGet: &v1.HTTPGetAction{
- Port: intstr.FromInt32(80),
- },
- },
- InitialDelaySeconds: 1,
- TimeoutSeconds: 5,
- PeriodSeconds: 10,
+ {
+ Name: "SERVE_PORT_160",
+ Value: "foo",
+ },
+ {
+ Name: "SERVE_PORT_162",
+ Value: "bar",
+ },
+ {
+ Name: "SERVE_TLS_PORT_443",
+ Value: `test`,
+ },
+ {
+ Name: "SERVE_TLS_PORT_460",
+ Value: "tls baz",
+ },
+ {
+ Name: "SERVE_TLS_PORT_462",
+ Value: "tls qux",
},
- Labels: labels,
- CreatedPods: &pods,
}
- err = e2erc.RunRC(ctx, cfg)
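+ // Declare port 80 plus the named plain and TLS destination ports on the container.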
+ deploymentConfig.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{
+ {
+ ContainerPort: 80,
+ },
+ {
+ Name: "dest1",
+ ContainerPort: 160,
+ },
+ {
+ Name: "dest2",
+ ContainerPort: 162,
+ },
+ {
+ Name: "tlsdest1",
+ ContainerPort: 460,
+ },
+ {
+ Name: "tlsdest2",
+ ContainerPort: 462,
+ },
+ }
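+ // Only mark the pod ready once the HTTP endpoint on port 80 responds.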
+ deploymentConfig.Spec.Template.Spec.Containers[0].ReadinessProbe = &v1.Probe{
+ ProbeHandler: v1.ProbeHandler{
+ HTTPGet: &v1.HTTPGetAction{
+ Port: intstr.FromInt32(80),
+ },
+ },
+ InitialDelaySeconds: 1,
+ TimeoutSeconds: 5,
+ PeriodSeconds: 10,
+ }
+
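+ // Create the deployment and register its deletion as test cleanup.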
+ deployment, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(ctx,
+ deploymentConfig,
+ metav1.CreateOptions{})
framework.ExpectNoError(err)
- ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, cfg.Name)
+
+ ginkgo.DeferCleanup(func(ctx context.Context, name string) error {
+ return f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Delete(ctx, name, metav1.DeleteOptions{})
+ }, deployment.Name)
+
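+ // Wait for the single replica to roll out before resolving its pod.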
+ err = e2edeployment.WaitForDeploymentComplete(f.ClientSet, deployment)
+ framework.ExpectNoError(err)
+
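+ // List the deployment's pods; they replace the CreatedPods slice the RC config used to populate.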
+ podList, err := e2edeployment.GetPodsForDeployment(ctx, f.ClientSet, deployment)
+ framework.ExpectNoError(err)
+ pods := podList.Items
err = waitForEndpoint(ctx, f.ClientSet, f.Namespace.Name, service.Name)
framework.ExpectNoError(err)
diff --git a/test/e2e/network/service_latency.go b/test/e2e/network/service_latency.go
index 5c1d9738367..9385d977858 100644
--- a/test/e2e/network/service_latency.go
+++ b/test/e2e/network/service_latency.go
@@ -23,6 +23,7 @@ import (
"strings"
"time"
+ appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -32,9 +33,8 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/test/e2e/framework"
- e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
+ e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
"k8s.io/kubernetes/test/e2e/network/common"
- testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@@ -135,18 +135,13 @@ var _ = common.SIGDescribe("Service endpoints latency", func() {
})
func runServiceLatencies(ctx context.Context, f *framework.Framework, inParallel, total int, acceptableFailureRatio float32) (output []time.Duration, err error) {
- cfg := testutils.RCConfig{
- Client: f.ClientSet,
- Image: imageutils.GetPauseImageName(),
- Name: "svc-latency-rc",
- Namespace: f.Namespace.Name,
- Replicas: 1,
- PollInterval: time.Second,
- }
- if err := e2erc.RunRC(ctx, cfg); err != nil {
- return nil, err
- }
+ name := "svc-latency-rc"
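+ // A single-replica pause deployment stands in for the old svc-latency RC.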
+ deploymentConf := e2edeployment.NewDeployment(name, 1, map[string]string{"name": name}, name, imageutils.GetPauseImageName(), appsv1.RecreateDeploymentStrategyType)
+ deployment, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(ctx, deploymentConf, metav1.CreateOptions{})
+ framework.ExpectNoError(err)
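+ // Block until the deployment's replica is available before measuring latencies.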
+ err = e2edeployment.WaitForDeploymentComplete(f.ClientSet, deployment)
+ framework.ExpectNoError(err)
// Run a single watcher, to reduce the number of API calls we have to
// make; this is to minimize the timing error. It's how kube-proxy
// consumes the endpoints data, so it seems like the right thing to
@@ -157,7 +152,7 @@ func runServiceLatencies(ctx context.Context, f *framework.Framework, inParallel
// run one test and throw it away-- this is to make sure that the pod's
// ready status has propagated.
- _, err = singleServiceLatency(ctx, f, cfg.Name, endpointQueries)
+ _, err = singleServiceLatency(ctx, f, name, endpointQueries)
framework.ExpectNoError(err)
// These channels are never closed, and each attempt sends on exactly
@@ -172,7 +167,7 @@ func runServiceLatencies(ctx context.Context, f *framework.Framework, inParallel
defer ginkgo.GinkgoRecover()
blocker <- struct{}{}
defer func() { <-blocker }()
- if d, err := singleServiceLatency(ctx, f, cfg.Name, endpointQueries); err != nil {
+ if d, err := singleServiceLatency(ctx, f, name, endpointQueries); err != nil {
errs <- err
} else {
durations <- d