Merge pull request #130083 from elizabeth-dev/replace-network-e2e-replicationcontrollers-5

test(network): replace calls to e2erc.RunRC with Deployments in SIG Network tests
Kubernetes Prow Robot, 2025-02-18 15:06:26 -08:00 (committed by GitHub)
commit 0e3a247859
2 changed files with 92 additions and 54 deletions
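
The shape of the migration is the same in both files: build the Deployment with the e2edeployment fixture, create it, wait for the rollout to complete, and list the Deployment's pods where the old code relied on RCConfig.CreatedPods. Below is a minimal sketch of that pattern for readers skimming the diff; the helper name and its arguments are illustrative only, not part of this change.

package network

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// startDeploymentAndListPods is a hypothetical helper showing the pattern
// this PR applies: e2edeployment.NewDeployment replaces testutils.RCConfig,
// WaitForDeploymentComplete replaces RunRC's implicit wait for running
// replicas, and GetPodsForDeployment replaces the CreatedPods output field.
func startDeploymentAndListPods(ctx context.Context, f *framework.Framework, name string) ([]v1.Pod, error) {
	labels := map[string]string{"name": name}
	d := e2edeployment.NewDeployment(name, 1, labels, name,
		imageutils.GetE2EImage(imageutils.Agnhost),
		appsv1.RecreateDeploymentStrategyType)

	deployment, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(ctx, d, metav1.CreateOptions{})
	if err != nil {
		return nil, err
	}
	// RunRC polled until every replica was running; here the rollout wait
	// is an explicit, separate step.
	if err := e2edeployment.WaitForDeploymentComplete(f.ClientSet, deployment); err != nil {
		return nil, err
	}
	podList, err := e2edeployment.GetPodsForDeployment(ctx, f.ClientSet, deployment)
	if err != nil {
		return nil, err
	}
	return podList.Items, nil
}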


@@ -29,6 +29,7 @@ import (
 	"sync"
 	"time"
 
+	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -38,11 +39,10 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/transport"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
-	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 	"k8s.io/kubernetes/test/e2e/network/common"
-	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	admissionapi "k8s.io/pod-security-admission/api"
@@ -133,52 +133,95 @@ var _ = common.SIGDescribe("Proxy", func() {
 			}, metav1.CreateOptions{})
 			framework.ExpectNoError(err)
 
-			// Make an RC with a single pod. The 'porter' image is
+			// Make a deployment with a single pod. The 'porter' image is
 			// a simple server which serves the values of the
 			// environmental variables below.
 			ginkgo.By("starting an echo server on multiple ports")
-			pods := []*v1.Pod{}
-			cfg := testutils.RCConfig{
-				Client:       f.ClientSet,
-				Image:        imageutils.GetE2EImage(imageutils.Agnhost),
-				Command:      []string{"/agnhost", "porter"},
-				Name:         service.Name,
-				Namespace:    f.Namespace.Name,
-				Replicas:     1,
-				PollInterval: time.Second,
-				Env: map[string]string{
-					"SERVE_PORT_80":      `<a href="/rewriteme">test</a>`,
-					"SERVE_PORT_1080":    `<a href="/rewriteme">test</a>`,
-					"SERVE_PORT_160":     "foo",
-					"SERVE_PORT_162":     "bar",
-					"SERVE_TLS_PORT_443": `<a href="/tlsrewriteme">test</a>`,
-					"SERVE_TLS_PORT_460": `tls baz`,
-					"SERVE_TLS_PORT_462": `tls qux`,
-				},
-				Ports: map[string]int{
-					"dest1":    160,
-					"dest2":    162,
-					"tlsdest1": 460,
-					"tlsdest2": 462,
-				},
-				ReadinessProbe: &v1.Probe{
-					ProbeHandler: v1.ProbeHandler{
-						HTTPGet: &v1.HTTPGetAction{
-							Port: intstr.FromInt32(80),
-						},
-					},
-					InitialDelaySeconds: 1,
-					TimeoutSeconds:      5,
-					PeriodSeconds:       10,
-				},
-				Labels:      labels,
-				CreatedPods: &pods,
-			}
-			err = e2erc.RunRC(ctx, cfg)
+			deploymentConfig := e2edeployment.NewDeployment(service.Name,
+				1,
+				labels,
+				service.Name,
+				imageutils.GetE2EImage(imageutils.Agnhost),
+				appsv1.RecreateDeploymentStrategyType)
+			deploymentConfig.Spec.Template.Spec.Containers[0].Command = []string{"/agnhost", "porter"}
+			deploymentConfig.Spec.Template.Spec.Containers[0].Env = []v1.EnvVar{
+				{
+					Name:  "SERVE_PORT_80",
+					Value: `<a href="/rewriteme">test</a>`,
+				},
+				{
+					Name:  "SERVE_PORT_1080",
+					Value: `<a href="/rewriteme">test</a>`,
+				},
+				{
+					Name:  "SERVE_PORT_160",
+					Value: "foo",
+				},
+				{
+					Name:  "SERVE_PORT_162",
+					Value: "bar",
+				},
+				{
+					Name:  "SERVE_TLS_PORT_443",
+					Value: `<a href="/tlsrewriteme">test</a>`,
+				},
+				{
+					Name:  "SERVE_TLS_PORT_460",
+					Value: "tls baz",
+				},
+				{
+					Name:  "SERVE_TLS_PORT_462",
+					Value: "tls qux",
+				},
+			}
+			deploymentConfig.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{
+				{
+					ContainerPort: 80,
+				},
+				{
+					Name:          "dest1",
+					ContainerPort: 160,
+				},
+				{
+					Name:          "dest2",
+					ContainerPort: 162,
+				},
+				{
+					Name:          "tlsdest1",
+					ContainerPort: 460,
+				},
+				{
+					Name:          "tlsdest2",
+					ContainerPort: 462,
+				},
+			}
+			deploymentConfig.Spec.Template.Spec.Containers[0].ReadinessProbe = &v1.Probe{
+				ProbeHandler: v1.ProbeHandler{
+					HTTPGet: &v1.HTTPGetAction{
+						Port: intstr.FromInt32(80),
+					},
+				},
+				InitialDelaySeconds: 1,
+				TimeoutSeconds:      5,
+				PeriodSeconds:       10,
+			}
+			deployment, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(ctx,
+				deploymentConfig,
+				metav1.CreateOptions{})
 			framework.ExpectNoError(err)
-			ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, cfg.Name)
+			ginkgo.DeferCleanup(func(ctx context.Context, name string) error {
+				return f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Delete(ctx, name, metav1.DeleteOptions{})
+			}, deployment.Name)
+
+			err = e2edeployment.WaitForDeploymentComplete(f.ClientSet, deployment)
+			framework.ExpectNoError(err)
+
+			podList, err := e2edeployment.GetPodsForDeployment(ctx, f.ClientSet, deployment)
+			framework.ExpectNoError(err)
+			pods := podList.Items
 
 			err = waitForEndpoint(ctx, f.ClientSet, f.Namespace.Name, service.Name)
 			framework.ExpectNoError(err)
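
One behavioral nuance in the hunk above: e2erc.DeleteRCAndWaitForGC blocked until the controller's pods were garbage-collected, whereas the new deferred Delete returns as soon as the API server accepts the request and leaves the dependents to the garbage collector. If a test turned out to need the stricter ordering, foreground cascading is the closest Deployment-side equivalent; a sketch only (f and deployment as in the hunk above), not part of this change:

// Hypothetical stricter cleanup: foreground propagation keeps the
// Deployment object around until its ReplicaSets and Pods are deleted,
// so a caller can poll for the object's disappearance to block.
ginkgo.DeferCleanup(func(ctx context.Context, name string) error {
	fg := metav1.DeletePropagationForeground
	return f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Delete(ctx, name,
		metav1.DeleteOptions{PropagationPolicy: &fg})
}, deployment.Name)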


@@ -23,6 +23,7 @@ import (
 	"strings"
 	"time"
 
+	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -32,9 +33,8 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/flowcontrol"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
+	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
 	"k8s.io/kubernetes/test/e2e/network/common"
-	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	admissionapi "k8s.io/pod-security-admission/api"
@@ -135,18 +135,13 @@ var _ = common.SIGDescribe("Service endpoints latency", func() {
 	})
 
 func runServiceLatencies(ctx context.Context, f *framework.Framework, inParallel, total int, acceptableFailureRatio float32) (output []time.Duration, err error) {
-	cfg := testutils.RCConfig{
-		Client:       f.ClientSet,
-		Image:        imageutils.GetPauseImageName(),
-		Name:         "svc-latency-rc",
-		Namespace:    f.Namespace.Name,
-		Replicas:     1,
-		PollInterval: time.Second,
-	}
-	if err := e2erc.RunRC(ctx, cfg); err != nil {
-		return nil, err
-	}
+	name := "svc-latency-rc"
+	deploymentConf := e2edeployment.NewDeployment(name, 1, map[string]string{"name": name}, name, imageutils.GetPauseImageName(), appsv1.RecreateDeploymentStrategyType)
+	deployment, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(ctx, deploymentConf, metav1.CreateOptions{})
+	framework.ExpectNoError(err)
+	err = e2edeployment.WaitForDeploymentComplete(f.ClientSet, deployment)
+	framework.ExpectNoError(err)
 
 	// Run a single watcher, to reduce the number of API calls we have to
 	// make; this is to minimize the timing error. It's how kube-proxy
 	// consumes the endpoints data, so it seems like the right thing to
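
The "single watcher" that comment describes boils down to one shared watch on Endpoints in the test namespace, feeding every latency measurement from a single event stream instead of polling per service, the same way kube-proxy consumes endpoints. A rough sketch of that idea; the function and callback names are illustrative, and it assumes imports of clientset "k8s.io/client-go/kubernetes", "k8s.io/apimachinery/pkg/fields", "k8s.io/client-go/tools/cache", and v1 "k8s.io/api/core/v1":

// startEndpointsWatcher runs one shared Endpoints watch; the real test
// wires the update events into its latency bookkeeping.
func startEndpointsWatcher(c clientset.Interface, ns string, stopCh <-chan struct{}, onUpdate func(*v1.Endpoints)) {
	lw := cache.NewListWatchFromClient(c.CoreV1().RESTClient(), "endpoints", ns, fields.Everything())
	_, controller := cache.NewInformer(lw, &v1.Endpoints{}, 0, cache.ResourceEventHandlerFuncs{
		UpdateFunc: func(_, cur interface{}) {
			if ep, ok := cur.(*v1.Endpoints); ok {
				onUpdate(ep) // one event stream serves every measurement
			}
		},
	})
	go controller.Run(stopCh)
}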
@@ -157,7 +152,7 @@ func runServiceLatencies(ctx context.Context, f *framework.Framework, inParallel
 	// run one test and throw it away-- this is to make sure that the pod's
 	// ready status has propagated.
-	_, err = singleServiceLatency(ctx, f, cfg.Name, endpointQueries)
+	_, err = singleServiceLatency(ctx, f, name, endpointQueries)
 	framework.ExpectNoError(err)
@@ -172,7 +167,7 @@ func runServiceLatencies(ctx context.Context, f *framework.Framework, inParallel
 			defer ginkgo.GinkgoRecover()
 			blocker <- struct{}{}
 			defer func() { <-blocker }()
-			if d, err := singleServiceLatency(ctx, f, cfg.Name, endpointQueries); err != nil {
+			if d, err := singleServiceLatency(ctx, f, name, endpointQueries); err != nil {
 				errs <- err
 			} else {
 				durations <- d