Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-04 09:49:50 +00:00)
test(network): replace jig.CreateRC with jig.CreateDeployment
See #119021
This commit is contained in:
parent 8ccba7f54e
commit 464fd0765d
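At a glance, the change is mechanical: every ReplicationController-based helper in the network test fixture becomes a Deployment-based one. A minimal before/after sketch of the call pattern, using only names that appear in the hunks below (`container` stands in for the v1.Container literal shown in the -1785 hunk):

    // Before: build and register a ReplicationController.
    rcSpec := e2erc.ByNameContainer(t.Name, 1, t.Labels, container, nil)
    _, err := t.CreateRC(rcSpec)

    // After: build a Deployment with the e2e helper and register it instead.
    deploymentSpec := e2edeployment.NewDeployment(t.Name, 1, t.Labels,
        t.Name, t.Image, appsv1.RecreateDeploymentStrategyType)
    _, err = t.CreateDeployment(deploymentSpec)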
@@ -18,6 +18,7 @@ package network
 
 import (
     "context"
+    appsv1 "k8s.io/api/apps/v1"
 
     v1 "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -25,7 +26,6 @@ import (
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/uuid"
     clientset "k8s.io/client-go/kubernetes"
-    "k8s.io/client-go/util/retry"
     imageutils "k8s.io/kubernetes/test/utils/image"
 
     "github.com/onsi/ginkgo/v2"
@@ -40,10 +40,10 @@ type TestFixture struct {
     TestID string
     Labels map[string]string
 
-    rcs      map[string]bool
-    services map[string]bool
-    Name     string
-    Image    string
+    deployments map[string]bool
+    services    map[string]bool
+    Name        string
+    Image       string
 }
 
 // NewServerTest creates a new TestFixture for the tests.
@@ -57,7 +57,7 @@ func NewServerTest(client clientset.Interface, namespace string, serviceName str
         "testid": t.TestID,
     }
 
-    t.rcs = make(map[string]bool)
+    t.deployments = make(map[string]bool)
     t.services = make(map[string]bool)
 
     t.Name = "webserver"
@@ -84,13 +84,12 @@ func (t *TestFixture) BuildServiceSpec() *v1.Service {
     return service
 }
 
-// CreateRC creates a replication controller and records it for cleanup.
-func (t *TestFixture) CreateRC(rc *v1.ReplicationController) (*v1.ReplicationController, error) {
-    rc, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Create(context.TODO(), rc, metav1.CreateOptions{})
+func (t *TestFixture) CreateDeployment(deployment *appsv1.Deployment) (*appsv1.Deployment, error) {
+    deployment, err := t.Client.AppsV1().Deployments(t.Namespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
     if err == nil {
-        t.rcs[rc.Name] = true
+        t.deployments[deployment.Name] = true
     }
-    return rc, err
+    return deployment, err
 }
 
 // CreateService creates a service, and record it for cleanup
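For reference, a hedged sketch of driving the new fixture end to end. The service name "tolerate-unready" is illustrative, and NewServerTest and Cleanup are assumed to behave as shown elsewhere in this diff:

    t := NewServerTest(f.ClientSet, f.Namespace.Name, "tolerate-unready") // illustrative name
    defer func() {
        // Cleanup deletes every Deployment and Service the fixture recorded.
        for _, cerr := range t.Cleanup() {
            framework.Logf("cleanup error: %v", cerr)
        }
    }()
    deployment := e2edeployment.NewDeployment(t.Name, 1, t.Labels,
        t.Name, t.Image, appsv1.RecreateDeploymentStrategyType)
    _, err := t.CreateDeployment(deployment)
    framework.ExpectNoError(err)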
@@ -114,33 +113,10 @@ func (t *TestFixture) DeleteService(serviceName string) error {
 // Cleanup cleans all ReplicationControllers and Services which this object holds.
 func (t *TestFixture) Cleanup() []error {
     var errs []error
-    for rcName := range t.rcs {
-        ginkgo.By("stopping RC " + rcName + " in namespace " + t.Namespace)
-        err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
-            // First, resize the RC to 0.
-            old, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Get(context.TODO(), rcName, metav1.GetOptions{})
-            if err != nil {
-                if apierrors.IsNotFound(err) {
-                    return nil
-                }
-                return err
-            }
-            x := int32(0)
-            old.Spec.Replicas = &x
-            if _, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Update(context.TODO(), old, metav1.UpdateOptions{}); err != nil {
-                if apierrors.IsNotFound(err) {
-                    return nil
-                }
-                return err
-            }
-            return nil
-        })
+    for deploymentName := range t.deployments {
+        ginkgo.By("deleting deployment " + deploymentName + " in namespace " + t.Namespace)
+        err := t.Client.AppsV1().Deployments(t.Namespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
         if err != nil {
             errs = append(errs, err)
         }
-        // TODO(mikedanese): Wait.
-        // Then, delete the RC altogether.
-        if err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Delete(context.TODO(), rcName, metav1.DeleteOptions{}); err != nil {
-            if !apierrors.IsNotFound(err) {
-                errs = append(errs, err)
-            }
-        }
     }
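The removed scale-to-zero-then-delete dance dates from before server-side cascading deletion, when deleting an RC orphaned its pods. A Deployment can simply be deleted: the API server's garbage collector cascades the delete to the owned ReplicaSets and Pods, which is also why the k8s.io/client-go/util/retry import disappears above. A test that must not return until the pods are actually gone could opt into foreground cascading (a sketch, not part of this commit):

    // Foreground deletion blocks the Deployment's removal until its
    // dependents (ReplicaSets, then Pods) have been deleted first.
    policy := metav1.DeletePropagationForeground
    err := t.Client.AppsV1().Deployments(t.Namespace).Delete(
        context.TODO(), deploymentName, metav1.DeleteOptions{PropagationPolicy: &policy})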
@ -1785,11 +1785,18 @@ var _ = common.SIGDescribe("Services", func() {
|
||||
PublishNotReadyAddresses: true,
|
||||
},
|
||||
}
|
||||
rcSpec := e2erc.ByNameContainer(t.Name, 1, t.Labels, v1.Container{
|
||||
|
||||
deploymentSpec := e2edeployment.NewDeployment(t.Name,
|
||||
1,
|
||||
t.Labels,
|
||||
t.Name,
|
||||
t.Image,
|
||||
appsv1.RecreateDeploymentStrategyType)
|
||||
deploymentSpec.Spec.Template.Spec.Containers[0] = v1.Container{
|
||||
Args: []string{"netexec", fmt.Sprintf("--http-port=%d", port)},
|
||||
Name: t.Name,
|
||||
Image: t.Image,
|
||||
Ports: []v1.ContainerPort{{ContainerPort: int32(port), Protocol: v1.ProtocolTCP}},
|
||||
Ports: []v1.ContainerPort{{ContainerPort: port, Protocol: v1.ProtocolTCP}},
|
||||
ReadinessProbe: &v1.Probe{
|
||||
ProbeHandler: v1.ProbeHandler{
|
||||
Exec: &v1.ExecAction{
|
||||
@ -1804,18 +1811,18 @@ var _ = common.SIGDescribe("Services", func() {
|
||||
},
|
||||
},
|
||||
},
|
||||
}, nil)
|
||||
rcSpec.Spec.Template.Spec.TerminationGracePeriodSeconds = &terminateSeconds
|
||||
}
|
||||
deploymentSpec.Spec.Template.Spec.TerminationGracePeriodSeconds = &terminateSeconds
|
||||
|
||||
ginkgo.By(fmt.Sprintf("creating RC %v with selectors %v", rcSpec.Name, rcSpec.Spec.Selector))
|
||||
_, err := t.CreateRC(rcSpec)
|
||||
ginkgo.By(fmt.Sprintf("creating Deployment %v with selectors %v", deploymentSpec.Name, deploymentSpec.Spec.Selector))
|
||||
_, err := t.CreateDeployment(deploymentSpec)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("creating Service %v with selectors %v", service.Name, service.Spec.Selector))
|
||||
_, err = t.CreateService(service)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Verifying pods for RC " + t.Name)
|
||||
ginkgo.By("Verifying pods for Deployment " + t.Name)
|
||||
framework.ExpectNoError(e2epod.VerifyPods(ctx, t.Client, t.Namespace, t.Name, false, 1))
|
||||
|
||||
svcName := fmt.Sprintf("%v.%v.svc.%v", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
|
||||
@ -1837,8 +1844,11 @@ var _ = common.SIGDescribe("Services", func() {
|
||||
framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, e2eservice.KubeProxyLagTimeout, stdout)
|
||||
}
|
||||
|
||||
ginkgo.By("Scaling down replication controller to zero")
|
||||
e2erc.ScaleRC(ctx, f.ClientSet, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false)
|
||||
ginkgo.By("Scaling down deployment to zero")
|
||||
_, err = e2edeployment.UpdateDeploymentWithRetries(f.ClientSet, t.Namespace, t.Name, func(deployment *appsv1.Deployment) {
|
||||
deployment.Spec.Replicas = ptr.To[int32](0)
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Update service to not tolerate unready services")
|
||||
_, err = e2eservice.UpdateService(ctx, f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) {
|
||||
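The replica count is a *int32, so scaling to zero needs a pointer to a literal. The removed RC cleanup code (see the -114 hunk above) used a named temporary; k8s.io/utils/ptr (assumed to be the ptr package in scope here) does the same in one expression:

    // Old pattern, as in the removed RC cleanup code:
    x := int32(0)
    old.Spec.Replicas = &x

    // New pattern used above:
    deployment.Spec.Replicas = ptr.To[int32](0)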
@ -1881,7 +1891,7 @@ var _ = common.SIGDescribe("Services", func() {
|
||||
}
|
||||
|
||||
ginkgo.By("Remove pods immediately")
|
||||
label := labels.SelectorFromSet(labels.Set(t.Labels))
|
||||
label := labels.SelectorFromSet(t.Labels)
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
podClient := t.Client.CoreV1().Pods(f.Namespace.Name)
|
||||
pods, err := podClient.List(ctx, options)
|
||||
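The dropped conversion works because labels.Set is defined as map[string]string; an unnamed map type is directly assignable to a defined type with the same underlying type, so the wrapper added nothing:

    // type Set map[string]string (k8s.io/apimachinery/pkg/labels)
    label := labels.SelectorFromSet(t.Labels)
    // equivalent to: labels.SelectorFromSet(labels.Set(t.Labels))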