Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-06 18:54:06 +00:00)

commit 78f7217993
Merge pull request #130082 from elizabeth-dev/replace-network-e2e-replicationcontrollers-4

test(network): replace RCs with Deployments in util function jig.Run
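Reviewer note: the caller-facing effect of this change is that the tweak callback passed to jig.Run now receives an *appsv1.Deployment instead of a *v1.ReplicationController, while the call shape stays the same. A minimal sketch against the post-merge API (illustrative only, not code from this PR; the function and node selector mirror the Windows test hunk further down):

package example // illustrative only, not part of the PR

import (
    "context"

    appsv1 "k8s.io/api/apps/v1"
    e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
)

// runOnWindows shows the new callback type: the jig still creates the object
// and waits for its pods before returning.
func runOnWindows(ctx context.Context, jig *e2eservice.TestJig) (*appsv1.Deployment, error) {
    return jig.Run(ctx, func(deployment *appsv1.Deployment) {
        deployment.Spec.Template.Spec.NodeSelector = map[string]string{
            "kubernetes.io/os": "windows",
        }
    })
}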
@@ -28,12 +28,12 @@ import (
     "time"
 
     "github.com/onsi/ginkgo/v2"
+    appsv1 "k8s.io/api/apps/v1"
     v1 "k8s.io/api/core/v1"
     discoveryv1 "k8s.io/api/discovery/v1"
     policyv1 "k8s.io/api/policy/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/util/intstr"
     utilnet "k8s.io/apimachinery/pkg/util/net"
     "k8s.io/apimachinery/pkg/util/sets"
@@ -41,13 +41,14 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
-    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
     e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
     testutils "k8s.io/kubernetes/test/utils"
     imageutils "k8s.io/kubernetes/test/utils/image"
     netutils "k8s.io/utils/net"
+    "k8s.io/utils/ptr"
 )
 
 // NodePortRange should match whatever the default/configured range is
@@ -655,23 +656,25 @@ func (j *TestJig) waitForCondition(ctx context.Context, timeout time.Duration, m
     return service, nil
 }
 
-// newRCTemplate returns the default v1.ReplicationController object for
-// this j, but does not actually create the RC. The default RC has the same
+// newDeploymentTemplate returns the default appsv1.Deployment object for
+// this j, but does not actually create the Deployment. The default Deployment has the same
 // name as the j and runs the "netexec" container.
-func (j *TestJig) newRCTemplate() *v1.ReplicationController {
+func (j *TestJig) newDeploymentTemplate() *appsv1.Deployment {
     var replicas int32 = 1
     var grace int64 = 3 // so we don't race with kube-proxy when scaling up/down
 
-    rc := &v1.ReplicationController{
+    deployment := &appsv1.Deployment{
         ObjectMeta: metav1.ObjectMeta{
             Namespace: j.Namespace,
             Name:      j.Name,
             Labels:    j.Labels,
         },
-        Spec: v1.ReplicationControllerSpec{
+        Spec: appsv1.DeploymentSpec{
             Replicas: &replicas,
-            Selector: j.Labels,
-            Template: &v1.PodTemplateSpec{
+            Selector: &metav1.LabelSelector{
+                MatchLabels: j.Labels,
+            },
+            Template: v1.PodTemplateSpec{
                 ObjectMeta: metav1.ObjectMeta{
                     Labels: j.Labels,
                 },
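One detail worth calling out in the template hunk above: a ReplicationController selector is a bare label map (and may be defaulted from the pod template labels), whereas appsv1.DeploymentSpec requires an explicit, immutable LabelSelector that matches the template labels, and the template itself is a value rather than a pointer. A small illustrative sketch, not code from the PR:

package example // illustrative fragment only

import (
    appsv1 "k8s.io/api/apps/v1"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/utils/ptr"
)

// deploymentSpecFor mirrors the shape produced by newDeploymentTemplate:
// the selector is mandatory for a Deployment and must match the pod labels.
func deploymentSpecFor(labels map[string]string) appsv1.DeploymentSpec {
    return appsv1.DeploymentSpec{
        Replicas: ptr.To[int32](1),
        Selector: &metav1.LabelSelector{MatchLabels: labels},
        Template: v1.PodTemplateSpec{
            ObjectMeta: metav1.ObjectMeta{Labels: labels},
        },
    }
}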
@@ -697,22 +700,22 @@ func (j *TestJig) newRCTemplate() *v1.ReplicationController {
             },
         },
     }
-    return rc
+    return deployment
 }
 
-// AddRCAntiAffinity adds AntiAffinity to the given ReplicationController.
-func (j *TestJig) AddRCAntiAffinity(rc *v1.ReplicationController) {
+// AddDeploymentAntiAffinity adds AntiAffinity to the given Deployment.
+func (j *TestJig) AddDeploymentAntiAffinity(deployment *appsv1.Deployment) {
     var replicas int32 = 2
 
-    rc.Spec.Replicas = &replicas
-    if rc.Spec.Template.Spec.Affinity == nil {
-        rc.Spec.Template.Spec.Affinity = &v1.Affinity{}
+    deployment.Spec.Replicas = &replicas
+    if deployment.Spec.Template.Spec.Affinity == nil {
+        deployment.Spec.Template.Spec.Affinity = &v1.Affinity{}
     }
-    if rc.Spec.Template.Spec.Affinity.PodAntiAffinity == nil {
-        rc.Spec.Template.Spec.Affinity.PodAntiAffinity = &v1.PodAntiAffinity{}
+    if deployment.Spec.Template.Spec.Affinity.PodAntiAffinity == nil {
+        deployment.Spec.Template.Spec.Affinity.PodAntiAffinity = &v1.PodAntiAffinity{}
     }
-    rc.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
-        rc.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
+    deployment.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
+        deployment.Spec.Template.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
         v1.PodAffinityTerm{
             LabelSelector: &metav1.LabelSelector{MatchLabels: j.Labels},
             Namespaces:    nil,
@@ -720,9 +723,9 @@ func (j *TestJig) AddRCAntiAffinity(rc *v1.ReplicationController) {
         })
 }
 
-// CreatePDB returns a PodDisruptionBudget for the given ReplicationController, or returns an error if a PodDisruptionBudget isn't ready
-func (j *TestJig) CreatePDB(ctx context.Context, rc *v1.ReplicationController) (*policyv1.PodDisruptionBudget, error) {
-    pdb := j.newPDBTemplate(rc)
+// CreatePDB returns a PodDisruptionBudget for the given Deployment, or returns an error if a PodDisruptionBudget isn't ready
+func (j *TestJig) CreatePDB(ctx context.Context, deployment *appsv1.Deployment) (*policyv1.PodDisruptionBudget, error) {
+    pdb := j.newPDBTemplate(deployment)
     newPdb, err := j.Client.PolicyV1().PodDisruptionBudgets(j.Namespace).Create(ctx, pdb, metav1.CreateOptions{})
     if err != nil {
         return nil, fmt.Errorf("failed to create PDB %q %v", pdb.Name, err)
@@ -736,8 +739,8 @@ func (j *TestJig) CreatePDB(ctx context.Context, rc *v1.ReplicationController) (
 
 // newPDBTemplate returns the default policyv1.PodDisruptionBudget object for
 // this j, but does not actually create the PDB. The default PDB specifies a
-// MinAvailable of N-1 and matches the pods created by the RC.
-func (j *TestJig) newPDBTemplate(rc *v1.ReplicationController) *policyv1.PodDisruptionBudget {
+// MinAvailable of N-1 and matches the pods created by the Deployment.
+func (j *TestJig) newPDBTemplate(rc *appsv1.Deployment) *policyv1.PodDisruptionBudget {
     minAvailable := intstr.FromInt32(*rc.Spec.Replicas - 1)
 
     pdb := &policyv1.PodDisruptionBudget{
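A quick worked check of the "MinAvailable of N-1" comment above, using the two-replica default set by AddDeploymentAntiAffinity (illustrative arithmetic only, not code from the PR):

replicas := int32(2)                           // default applied by AddDeploymentAntiAffinity
minAvailable := intstr.FromInt32(replicas - 1) // MinAvailable = 1: the PDB tolerates one voluntary disruption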
@@ -755,49 +758,43 @@ func (j *TestJig) newPDBTemplate(rc *v1.ReplicationController) *policyv1.PodDisr
     return pdb
 }
 
-// Run creates a ReplicationController and Pod(s) and waits for the
-// Pod(s) to be running. Callers can provide a function to tweak the RC object
+// Run creates a Deployment and Pod(s) and waits for the
+// Pod(s) to be running. Callers can provide a function to tweak the Deployment object
 // before it is created.
-func (j *TestJig) Run(ctx context.Context, tweak func(rc *v1.ReplicationController)) (*v1.ReplicationController, error) {
-    rc := j.newRCTemplate()
+func (j *TestJig) Run(ctx context.Context, tweak func(rc *appsv1.Deployment)) (*appsv1.Deployment, error) {
+    deployment := j.newDeploymentTemplate()
     if tweak != nil {
-        tweak(rc)
+        tweak(deployment)
     }
-    result, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).Create(ctx, rc, metav1.CreateOptions{})
+    result, err := j.Client.AppsV1().Deployments(j.Namespace).Create(ctx, deployment, metav1.CreateOptions{})
     if err != nil {
-        return nil, fmt.Errorf("failed to create RC %q: %w", rc.Name, err)
+        return nil, fmt.Errorf("failed to create Deployment %q: %w", deployment.Name, err)
     }
-    pods, err := j.waitForPodsCreated(ctx, int(*(rc.Spec.Replicas)))
+    err = e2edeployment.WaitForDeploymentComplete(j.Client, result)
     if err != nil {
-        return nil, fmt.Errorf("failed to create pods: %w", err)
-    }
-    if err := j.waitForPodsReady(ctx, pods); err != nil {
-        return nil, fmt.Errorf("failed waiting for pods to be running: %w", err)
+        return nil, fmt.Errorf("failed waiting for Deployment %q: %w", deployment.Name, err)
     }
 
     return result, nil
 }
 
 // Scale scales pods to the given replicas
-func (j *TestJig) Scale(ctx context.Context, replicas int) error {
-    rc := j.Name
-    scale, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).GetScale(ctx, rc, metav1.GetOptions{})
+func (j *TestJig) Scale(replicas int) error {
+    deployment, err := e2edeployment.UpdateDeploymentWithRetries(j.Client, j.Namespace, j.Name, func(deployment *appsv1.Deployment) {
+        deployment.Spec.Replicas = ptr.To(int32(replicas))
+    })
     if err != nil {
-        return fmt.Errorf("failed to get scale for RC %q: %w", rc, err)
+        return fmt.Errorf("failed to scale Deployment %q: %w", j.Name, err)
     }
 
-    scale.ResourceVersion = "" // indicate the scale update should be unconditional
-    scale.Spec.Replicas = int32(replicas)
-    _, err = j.Client.CoreV1().ReplicationControllers(j.Namespace).UpdateScale(ctx, rc, scale, metav1.UpdateOptions{})
+    err = e2edeployment.WaitForDeploymentComplete(j.Client, deployment)
     if err != nil {
-        return fmt.Errorf("failed to scale RC %q: %w", rc, err)
-    }
-    pods, err := j.waitForPodsCreated(ctx, replicas)
-    if err != nil {
-        return fmt.Errorf("failed waiting for pods: %w", err)
-    }
-    if err := j.waitForPodsReady(ctx, pods); err != nil {
-        return fmt.Errorf("failed waiting for pods to be running: %w", err)
+        return fmt.Errorf("failed waiting for Deployment %q: %w", j.Name, err)
     }
 
     return nil
 }
 
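Taken together, the new Run and Scale flow for a caller looks roughly like the following (a sketch against the post-merge API; the jig construction and service name are assumptions, error handling is simplified):

package example // illustrative only, not part of the PR

import (
    "context"

    appsv1 "k8s.io/api/apps/v1"
    clientset "k8s.io/client-go/kubernetes"
    e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
    "k8s.io/utils/ptr"
)

func runAndScale(ctx context.Context, cs clientset.Interface, ns string) error {
    jig := e2eservice.NewTestJig(cs, ns, "svc-test") // name is illustrative

    // Run creates the Deployment and waits for the rollout to complete.
    if _, err := jig.Run(ctx, func(d *appsv1.Deployment) {
        d.Spec.Replicas = ptr.To[int32](2)
    }); err != nil {
        return err
    }

    // Scale no longer takes a context: it updates .spec.replicas with retries
    // and then waits via e2edeployment.WaitForDeploymentComplete.
    if err := jig.Scale(0); err != nil {
        return err
    }
    return jig.Scale(1)
}

The remaining hunks update the callers of these helpers, in the LoadBalancer tests, the service upgrade test, and the Windows service test, to the new signatures.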
@@ -816,43 +813,6 @@ func (j *TestJig) waitForPdbReady(ctx context.Context) error {
     return fmt.Errorf("timeout waiting for PDB %q to be ready", j.Name)
 }
 
-func (j *TestJig) waitForPodsCreated(ctx context.Context, replicas int) ([]string, error) {
-    // TODO (pohly): replace with gomega.Eventually
-    timeout := 2 * time.Minute
-    // List the pods, making sure we observe all the replicas.
-    label := labels.SelectorFromSet(labels.Set(j.Labels))
-    framework.Logf("Waiting up to %v for %d pods to be created", timeout, replicas)
-    for start := time.Now(); time.Since(start) < timeout && ctx.Err() == nil; time.Sleep(2 * time.Second) {
-        options := metav1.ListOptions{LabelSelector: label.String()}
-        pods, err := j.Client.CoreV1().Pods(j.Namespace).List(ctx, options)
-        if err != nil {
-            return nil, err
-        }
-
-        found := []string{}
-        for _, pod := range pods.Items {
-            if pod.DeletionTimestamp != nil {
-                continue
-            }
-            found = append(found, pod.Name)
-        }
-        if len(found) == replicas {
-            framework.Logf("Found all %d pods", replicas)
-            return found, nil
-        }
-        framework.Logf("Found %d/%d pods - will retry", len(found), replicas)
-    }
-    return nil, fmt.Errorf("timeout waiting for %d pods to be created", replicas)
-}
-
-func (j *TestJig) waitForPodsReady(ctx context.Context, pods []string) error {
-    timeout := 2 * time.Minute
-    if !e2epod.CheckPodsRunningReady(ctx, j.Client, j.Namespace, pods, timeout) {
-        return fmt.Errorf("timeout waiting for %d pods to be ready", len(pods))
-    }
-    return nil
-}
-
 func testReachabilityOverServiceName(ctx context.Context, serviceName string, sp v1.ServicePort, execPod *v1.Pod) error {
     return testEndpointReachability(ctx, serviceName, sp.Port, sp.Protocol, execPod)
 }
@@ -47,7 +47,6 @@ import (
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
-    e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
     e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
     e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
     "k8s.io/kubernetes/test/e2e/network/common"
@@ -236,14 +235,14 @@ var _ = common.SIGDescribe("LoadBalancers", feature.LoadBalancer, func() {
         e2eservice.TestReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerLagTimeout)
 
         ginkgo.By("Scaling the pods to 0")
-        err = tcpJig.Scale(ctx, 0)
+        err = tcpJig.Scale(0)
         framework.ExpectNoError(err)
 
         ginkgo.By("hitting the TCP service's LoadBalancer with no backends, no answer expected")
         testNotReachableHTTP(ctx, tcpIngressIP, svcPort, loadBalancerLagTimeout)
 
         ginkgo.By("Scaling the pods to 1")
-        err = tcpJig.Scale(ctx, 1)
+        err = tcpJig.Scale(1)
         framework.ExpectNoError(err)
 
         ginkgo.By("hitting the TCP service's LoadBalancer")
@@ -384,14 +383,14 @@ var _ = common.SIGDescribe("LoadBalancers", feature.LoadBalancer, func() {
         testReachableUDP(ctx, udpIngressIP, svcPort, loadBalancerCreateTimeout)
 
         ginkgo.By("Scaling the pods to 0")
-        err = udpJig.Scale(ctx, 0)
+        err = udpJig.Scale(0)
         framework.ExpectNoError(err)
 
         ginkgo.By("checking that the UDP service's LoadBalancer is not reachable")
         testNotReachableUDP(ctx, udpIngressIP, svcPort, loadBalancerCreateTimeout)
 
         ginkgo.By("Scaling the pods to 1")
-        err = udpJig.Scale(ctx, 1)
+        err = udpJig.Scale(1)
         framework.ExpectNoError(err)
 
         ginkgo.By("hitting the UDP service's NodePort")
@@ -1115,10 +1114,10 @@ var _ = common.SIGDescribe("LoadBalancers ExternalTrafficPolicy: Local", feature
             endpointNodeName := nodes.Items[i].Name
 
             ginkgo.By("creating a pod to be part of the service " + serviceName + " on node " + endpointNodeName)
-            _, err = jig.Run(ctx, func(rc *v1.ReplicationController) {
-                rc.Name = serviceName
+            _, err = jig.Run(ctx, func(deployment *appsv1.Deployment) {
+                deployment.Name = serviceName
                 if endpointNodeName != "" {
-                    rc.Spec.Template.Spec.NodeName = endpointNodeName
+                    deployment.Spec.Template.Spec.NodeName = endpointNodeName
                 }
             })
             framework.ExpectNoError(err)
@@ -1146,7 +1145,9 @@ var _ = common.SIGDescribe("LoadBalancers ExternalTrafficPolicy: Local", feature
                 threshold)
             framework.ExpectNoError(err)
         }
-        framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, namespace, serviceName))
+        err = f.ClientSet.AppsV1().Deployments(namespace).Delete(ctx, serviceName, metav1.DeleteOptions{})
+        framework.ExpectNoError(err)
     }
 })
 
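A hedged aside on the cleanup change above: e2erc.DeleteRCAndWaitForGC waited for the controller's pods to be garbage-collected, whereas a plain Deployments().Delete returns as soon as the delete is accepted and cascades in the background. If a test ever needs the old blocking behavior, one option (purely illustrative, not part of this PR; gomega, apierrors and time imports assumed) is foreground cascading deletion plus a poll on the object:

// Foreground propagation keeps the Deployment around (via a finalizer) until
// the garbage collector has removed its ReplicaSets and Pods.
policy := metav1.DeletePropagationForeground
err := f.ClientSet.AppsV1().Deployments(namespace).Delete(ctx, serviceName,
    metav1.DeleteOptions{PropagationPolicy: &policy})
framework.ExpectNoError(err)

// The Delete call itself does not block, so poll until the object is gone.
gomega.Eventually(func() bool {
    _, err := f.ClientSet.AppsV1().Deployments(namespace).Get(ctx, serviceName, metav1.GetOptions{})
    return apierrors.IsNotFound(err)
}, 2*time.Minute, 2*time.Second).Should(gomega.BeTrue())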
@@ -1172,9 +1173,9 @@ var _ = common.SIGDescribe("LoadBalancers ExternalTrafficPolicy: Local", feature
         framework.Logf("ingress is %s:%d", ingress, svcPort)
 
         ginkgo.By("creating endpoints on multiple nodes")
-        _, err = jig.Run(ctx, func(rc *v1.ReplicationController) {
-            rc.Spec.Replicas = ptr.To[int32](2)
-            rc.Spec.Template.Spec.Affinity = &v1.Affinity{
+        _, err = jig.Run(ctx, func(deployment *appsv1.Deployment) {
+            deployment.Spec.Replicas = ptr.To[int32](2)
+            deployment.Spec.Template.Spec.Affinity = &v1.Affinity{
                 PodAntiAffinity: &v1.PodAntiAffinity{
                     RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
                         {
@@ -62,11 +62,11 @@ func (t *ServiceUpgradeTest) Setup(ctx context.Context, f *framework.Framework)
     svcPort := int(tcpService.Spec.Ports[0].Port)
 
     ginkgo.By("creating pod to be part of service " + serviceName)
-    rc, err := jig.Run(ctx, jig.AddRCAntiAffinity)
+    deployment, err := jig.Run(ctx, jig.AddDeploymentAntiAffinity)
     framework.ExpectNoError(err)
 
-    ginkgo.By("creating a PodDisruptionBudget to cover the ReplicationController")
-    _, err = jig.CreatePDB(ctx, rc)
+    ginkgo.By("creating a PodDisruptionBudget to cover the Deployment")
+    _, err = jig.CreatePDB(ctx, deployment)
     framework.ExpectNoError(err)
 
     // Hit it once before considering ourselves ready
@@ -19,6 +19,7 @@ package windows
 import (
     "context"
     "fmt"
+    appsv1 "k8s.io/api/apps/v1"
     "net"
     "strconv"
 
@@ -66,7 +67,7 @@ var _ = sigDescribe("Services", skipUnlessWindows(func() {
 
         ginkgo.By("creating Pod to be part of service " + serviceName)
         // tweak the Jig to use windows...
-        windowsNodeSelectorTweak := func(rc *v1.ReplicationController) {
+        windowsNodeSelectorTweak := func(rc *appsv1.Deployment) {
             rc.Spec.Template.Spec.NodeSelector = map[string]string{
                 "kubernetes.io/os": "windows",
             }