Merge pull request #119494 from carlory/cleanup-e2e-network-framework-equal

e2e_network: stop using deprecated framework.ExpectEqual
Kubernetes Prow Robot 2023-08-18 03:06:28 -07:00 committed by GitHub
commit 5929d49f87
12 changed files with 91 additions and 70 deletions
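The replacements follow a handful of recurring patterns: equality checks become gomega.Expect(...).To(gomega.Equal(...)), length checks become HaveLen or BeEmpty, map-key checks become HaveKeyWithValue, and boolean checks become an explicit if plus framework.Failf. The sketch below is illustrative only and is not part of the diff; the package name, test description, and sample values are invented, while the gomega matchers and framework.Failf are the APIs actually used in the changes that follow.

package migration_example_test

import (
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("ExpectEqual migration patterns", func() {
	ginkgo.It("asserts with gomega matchers instead of framework.ExpectEqual", func() {
		items := []string{"a", "b", "c"}
		annotations := map[string]string{"patched": "true"}
		var failed []string

		// framework.ExpectEqual(len(items), 3, "...") becomes:
		gomega.Expect(items).To(gomega.HaveLen(3), "filtered list should have 3 items")

		// framework.ExpectEqual(len(failed), 0) becomes:
		gomega.Expect(failed).To(gomega.BeEmpty())

		// framework.ExpectEqual(annotations["patched"], "true", "...") becomes:
		gomega.Expect(annotations).To(gomega.HaveKeyWithValue("patched", "true"))

		// framework.ExpectEqual(a, b) for plain values becomes:
		gomega.Expect(items[0]).To(gomega.Equal("a"))

		// framework.ExpectEqual(someCheck(), true) becomes an explicit check
		// with framework.Failf, which gives a descriptive failure message:
		if len(items) != 3 {
			framework.Failf("expected 3 items, got %d", len(items))
		}
	})
})

Matching on the whole slice or map (HaveLen, BeEmpty, HaveKeyWithValue) rather than on len() or a single lookup also means a failed expectation prints the full collection instead of two bare numbers or strings.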

View File

@ -497,7 +497,7 @@ func assertFilesContain(ctx context.Context, fileNames []string, fileDir string,
return false, nil
}))
-framework.ExpectEqual(len(failed), 0)
+gomega.Expect(failed).To(gomega.BeEmpty())
}
func validateDNSResults(ctx context.Context, f *framework.Framework, pod *v1.Pod, fileNames []string) {

View File

@ -65,7 +65,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() {
// get all internal ips for node
internalIPs := e2enode.GetAddresses(&node, v1.NodeInternalIP)
-framework.ExpectEqual(len(internalIPs), 2)
+gomega.Expect(internalIPs).To(gomega.HaveLen(2))
// assert 2 ips belong to different families
if netutils.IsIPv4String(internalIPs[0]) == netutils.IsIPv4String(internalIPs[1]) {
framework.Failf("both internalIPs %s and %s belong to the same families", internalIPs[0], internalIPs[1])
@ -98,9 +98,9 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() {
gomega.Expect(p.Status.PodIPs).ShouldNot(gomega.BeNil())
// validate there are 2 ips in podIPs
-framework.ExpectEqual(len(p.Status.PodIPs), 2)
+gomega.Expect(p.Status.PodIPs).To(gomega.HaveLen(2))
// validate first ip in PodIPs is same as PodIP
-framework.ExpectEqual(p.Status.PodIP, p.Status.PodIPs[0].IP)
+gomega.Expect(p.Status.PodIP).To(gomega.Equal(p.Status.PodIPs[0].IP))
// assert 2 pod ips belong to different families
if netutils.IsIPv4String(p.Status.PodIPs[0].IP) == netutils.IsIPv4String(p.Status.PodIPs[1].IP) {
framework.Failf("both internalIPs %s and %s belong to the same families", p.Status.PodIPs[0].IP, p.Status.PodIPs[1].IP)

View File

@ -42,6 +42,7 @@ import (
"k8s.io/utils/pointer"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
)
var _ = common.SIGDescribe("EndpointSlice", func() {
@ -435,12 +436,12 @@ var _ = common.SIGDescribe("EndpointSlice", func() {
ginkgo.By("getting")
queriedEPS, err := epsClient.Get(ctx, createdEPS.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(queriedEPS.UID, createdEPS.UID)
+gomega.Expect(queriedEPS.UID).To(gomega.Equal(createdEPS.UID))
ginkgo.By("listing")
epsList, err := epsClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err)
-framework.ExpectEqual(len(epsList.Items), 3, "filtered list should have 3 items")
+gomega.Expect(epsList.Items).To(gomega.HaveLen(3), "filtered list should have 3 items")
ginkgo.By("watching")
framework.Logf("starting watch")
@ -452,7 +453,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() {
ginkgo.By("cluster-wide listing")
clusterEPSList, err := clusterEPSClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err)
-framework.ExpectEqual(len(clusterEPSList.Items), 3, "filtered list should have 3 items")
+gomega.Expect(clusterEPSList.Items).To(gomega.HaveLen(3), "filtered list should have 3 items")
ginkgo.By("cluster-wide watching")
framework.Logf("starting watch")
@ -462,7 +463,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() {
ginkgo.By("patching")
patchedEPS, err := epsClient.Patch(ctx, createdEPS.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(patchedEPS.Annotations["patched"], "true", "patched object should have the applied annotation")
+gomega.Expect(patchedEPS.Annotations).To(gomega.HaveKeyWithValue("patched", "true"), "patched object should have the applied annotation")
ginkgo.By("updating")
var epsToUpdate, updatedEPS *discoveryv1.EndpointSlice
@ -476,7 +477,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() {
return err
})
framework.ExpectNoError(err)
-framework.ExpectEqual(updatedEPS.Annotations["updated"], "true", "updated object should have the applied annotation")
+gomega.Expect(updatedEPS.Annotations).To(gomega.HaveKeyWithValue("updated", "true"), "updated object should have the applied annotation")
framework.Logf("waiting for watch events with expected annotations")
for sawAnnotations := false; !sawAnnotations; {
@ -485,7 +486,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() {
if !ok {
framework.Fail("watch channel should not close")
}
-framework.ExpectEqual(evt.Type, watch.Modified)
+gomega.Expect(evt.Type).To(gomega.Equal(watch.Modified))
watchedEPS, isEPS := evt.Object.(*discoveryv1.EndpointSlice)
if !isEPS {
framework.Failf("expected EndpointSlice, got %T", evt.Object)
@ -512,7 +513,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() {
}
epsList, err = epsClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err)
-framework.ExpectEqual(len(epsList.Items), 2, "filtered list should have 2 items")
+gomega.Expect(epsList.Items).To(gomega.HaveLen(2), "filtered list should have 2 items")
for _, eps := range epsList.Items {
if eps.Namespace == createdEPS.Namespace && eps.Name == createdEPS.Name {
framework.Fail("listing after deleting createdEPS")
@ -524,7 +525,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() {
framework.ExpectNoError(err)
epsList, err = epsClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err)
-framework.ExpectEqual(len(epsList.Items), 0, "filtered list should have 0 items")
+gomega.Expect(epsList.Items).To(gomega.BeEmpty(), "filtered list should have 0 items")
})
ginkgo.It("should support a Service with multiple ports specified in multiple EndpointSlices", func(ctx context.Context) {

View File

@ -45,6 +45,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
)
const (
@ -233,7 +234,7 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() {
// ClusterIP ServicePorts have no NodePort
for _, sp := range svcPorts {
-framework.ExpectEqual(sp.NodePort, int32(0))
+gomega.Expect(sp.NodePort).To(gomega.Equal(int32(0)))
}
})
@ -676,12 +677,12 @@ var _ = common.SIGDescribe("Ingress API", func() {
ginkgo.By("getting")
gottenIngress, err := ingClient.Get(ctx, createdIngress.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(gottenIngress.UID, createdIngress.UID)
+gomega.Expect(gottenIngress.UID).To(gomega.Equal(createdIngress.UID))
ginkgo.By("listing")
ings, err := ingClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err)
-framework.ExpectEqual(len(ings.Items), 3, "filtered list should have 3 items")
+gomega.Expect(ings.Items).To(gomega.HaveLen(3), "filtered list should have 3 items")
ginkgo.By("watching")
framework.Logf("starting watch")
@ -693,7 +694,7 @@ var _ = common.SIGDescribe("Ingress API", func() {
ginkgo.By("cluster-wide listing")
clusterIngs, err := clusterIngClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err)
-framework.ExpectEqual(len(clusterIngs.Items), 3, "filtered list should have 3 items")
+gomega.Expect(clusterIngs.Items).To(gomega.HaveLen(3), "filtered list should have 3 items")
ginkgo.By("cluster-wide watching")
framework.Logf("starting watch")
@ -703,7 +704,7 @@ var _ = common.SIGDescribe("Ingress API", func() {
ginkgo.By("patching")
patchedIngress, err := ingClient.Patch(ctx, createdIngress.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(patchedIngress.Annotations["patched"], "true", "patched object should have the applied annotation")
+gomega.Expect(patchedIngress.Annotations).To(gomega.HaveKeyWithValue("patched", "true"), "patched object should have the applied annotation")
ginkgo.By("updating")
var ingToUpdate, updatedIngress *networkingv1.Ingress
@ -717,7 +718,7 @@ var _ = common.SIGDescribe("Ingress API", func() {
return err
})
framework.ExpectNoError(err)
-framework.ExpectEqual(updatedIngress.Annotations["updated"], "true", "updated object should have the applied annotation")
+gomega.Expect(updatedIngress.Annotations).To(gomega.HaveKeyWithValue("updated", "true"), "updated object should have the applied annotation")
framework.Logf("waiting for watch events with expected annotations")
for sawAnnotations := false; !sawAnnotations; {
@ -726,7 +727,7 @@ var _ = common.SIGDescribe("Ingress API", func() {
if !ok {
framework.Fail("watch channel should not close")
}
-framework.ExpectEqual(evt.Type, watch.Modified)
+gomega.Expect(evt.Type).To(gomega.Equal(watch.Modified))
watchedIngress, isIngress := evt.Object.(*networkingv1.Ingress)
if !isIngress {
framework.Failf("expected Ingress, got %T", evt.Object)
@ -754,8 +755,8 @@ var _ = common.SIGDescribe("Ingress API", func() {
[]byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":{"loadBalancer":`+string(lbStatusJSON)+`}}`),
metav1.PatchOptions{}, "status")
framework.ExpectNoError(err)
-framework.ExpectEqual(patchedStatus.Status.LoadBalancer, lbStatus, "patched object should have the applied loadBalancer status")
-framework.ExpectEqual(patchedStatus.Annotations["patchedstatus"], "true", "patched object should have the applied annotation")
+gomega.Expect(patchedStatus.Status.LoadBalancer).To(gomega.Equal(lbStatus), "patched object should have the applied loadBalancer status")
+gomega.Expect(patchedStatus.Annotations).To(gomega.HaveKeyWithValue("patchedstatus", "true"), "patched object should have the applied annotation")
ginkgo.By("updating /status")
var statusToUpdate, updatedStatus *networkingv1.Ingress
@ -771,7 +772,7 @@ var _ = common.SIGDescribe("Ingress API", func() {
return err
})
framework.ExpectNoError(err)
-framework.ExpectEqual(updatedStatus.Status.LoadBalancer, statusToUpdate.Status.LoadBalancer, fmt.Sprintf("updated object expected to have updated loadbalancer status %#v, got %#v", statusToUpdate.Status.LoadBalancer, updatedStatus.Status.LoadBalancer))
+gomega.Expect(updatedStatus.Status.LoadBalancer).To(gomega.Equal(statusToUpdate.Status.LoadBalancer), "updated object expected to have updated loadbalancer status %#v, got %#v", statusToUpdate.Status.LoadBalancer, updatedStatus.Status.LoadBalancer)
ginkgo.By("get /status")
ingResource := schema.GroupVersionResource{Group: "networking.k8s.io", Version: ingVersion, Resource: "ingresses"}
@ -779,7 +780,7 @@ var _ = common.SIGDescribe("Ingress API", func() {
framework.ExpectNoError(err)
statusUID, _, err := unstructured.NestedFieldCopy(gottenStatus.Object, "metadata", "uid")
framework.ExpectNoError(err)
-framework.ExpectEqual(string(createdIngress.UID), statusUID, fmt.Sprintf("createdIngress.UID: %v expected to match statusUID: %v ", createdIngress.UID, statusUID))
+gomega.Expect(string(createdIngress.UID)).To(gomega.Equal(statusUID), "createdIngress.UID: %v expected to match statusUID: %v ", createdIngress.UID, statusUID)
// Ingress resource delete operations
ginkgo.By("deleting")

View File

@ -35,6 +35,7 @@ import (
utilpointer "k8s.io/utils/pointer"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
)
var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() {
@ -334,13 +335,13 @@ var _ = common.SIGDescribe("IngressClass API", func() {
ginkgo.By("getting")
gottenIC, err := icClient.Get(ctx, ingressClass1.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(gottenIC.UID, ingressClass1.UID)
-framework.ExpectEqual(gottenIC.UID, ingressClass1.UID)
+gomega.Expect(gottenIC.UID).To(gomega.Equal(ingressClass1.UID))
+gomega.Expect(gottenIC.UID).To(gomega.Equal(ingressClass1.UID))
ginkgo.By("listing")
ics, err := icClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=generic"})
framework.ExpectNoError(err)
-framework.ExpectEqual(len(ics.Items), 3, "filtered list should have 3 items")
+gomega.Expect(ics.Items).To(gomega.HaveLen(3), "filtered list should have 3 items")
ginkgo.By("watching")
framework.Logf("starting watch")
@ -350,14 +351,14 @@ var _ = common.SIGDescribe("IngressClass API", func() {
ginkgo.By("patching")
patchedIC, err := icClient.Patch(ctx, ingressClass1.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(patchedIC.Annotations["patched"], "true", "patched object should have the applied annotation")
+gomega.Expect(patchedIC.Annotations).To(gomega.HaveKeyWithValue("patched", "true"), "patched object should have the applied annotation")
ginkgo.By("updating")
icToUpdate := patchedIC.DeepCopy()
icToUpdate.Annotations["updated"] = "true"
updatedIC, err := icClient.Update(ctx, icToUpdate, metav1.UpdateOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(updatedIC.Annotations["updated"], "true", "updated object should have the applied annotation")
+gomega.Expect(updatedIC.Annotations).To(gomega.HaveKeyWithValue("updated", "true"), "updated object should have the applied annotation")
framework.Logf("waiting for watch events with expected annotations")
for sawAnnotations := false; !sawAnnotations; {
@ -366,7 +367,7 @@ var _ = common.SIGDescribe("IngressClass API", func() {
if !ok {
framework.Fail("watch channel should not close")
}
-framework.ExpectEqual(evt.Type, watch.Modified)
+gomega.Expect(evt.Type).To(gomega.Equal(watch.Modified))
watchedIngress, isIngress := evt.Object.(*networkingv1.IngressClass)
if !isIngress {
framework.Failf("expected Ingress, got %T", evt.Object)
@ -393,14 +394,14 @@ var _ = common.SIGDescribe("IngressClass API", func() {
}
ics, err = icClient.List(ctx, metav1.ListOptions{LabelSelector: "ingressclass=" + f.UniqueName})
framework.ExpectNoError(err)
-framework.ExpectEqual(len(ics.Items), 2, "filtered list should have 2 items")
+gomega.Expect(ics.Items).To(gomega.HaveLen(2), "filtered list should have 2 items")
ginkgo.By("deleting a collection")
err = icClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "ingressclass=" + f.UniqueName})
framework.ExpectNoError(err)
ics, err = icClient.List(ctx, metav1.ListOptions{LabelSelector: "ingressclass=" + f.UniqueName})
framework.ExpectNoError(err)
-framework.ExpectEqual(len(ics.Items), 0, "filtered list should have 0 items")
+gomega.Expect(ics.Items).To(gomega.BeEmpty(), "filtered list should have 0 items")
})
})

View File

@ -550,7 +550,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
// Make sure acceptPod is running. There are certain chances that pod might be terminated due to unexpected reasons.
acceptPod, err = cs.CoreV1().Pods(namespace).Get(ctx, acceptPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Unable to get pod %s", acceptPod.Name)
-framework.ExpectEqual(acceptPod.Status.Phase, v1.PodRunning)
+gomega.Expect(acceptPod.Status.Phase).To(gomega.Equal(v1.PodRunning))
framework.ExpectNotEqual(acceptPod.Status.PodIP, "")
// Create loadbalancer service with source range from node[0] and podAccept
@ -580,7 +580,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
// Make sure dropPod is running. There are certain chances that the pod might be terminated due to unexpected reasons.
dropPod, err = cs.CoreV1().Pods(namespace).Get(ctx, dropPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Unable to get pod %s", dropPod.Name)
-framework.ExpectEqual(acceptPod.Status.Phase, v1.PodRunning)
+gomega.Expect(acceptPod.Status.Phase).To(gomega.Equal(v1.PodRunning))
framework.ExpectNotEqual(acceptPod.Status.PodIP, "")
ginkgo.By("Update service LoadBalancerSourceRange and check reachability")
@ -735,7 +735,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
framework.Failf("Loadbalancer IP not changed to internal.")
}
// should have the given static internal IP.
-framework.ExpectEqual(e2eservice.GetIngressPoint(lbIngress), internalStaticIP)
+gomega.Expect(e2eservice.GetIngressPoint(lbIngress)).To(gomega.Equal(internalStaticIP))
}
})
@ -782,7 +782,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
if err != nil {
framework.Failf("gceCloud.GetHttpHealthCheck(%q) = _, %v; want nil", hcName, err)
}
-framework.ExpectEqual(hc.CheckIntervalSec, gceHcCheckIntervalSeconds)
+gomega.Expect(hc.CheckIntervalSec).To(gomega.Equal(gceHcCheckIntervalSeconds))
ginkgo.By("modify the health check interval")
hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1

View File

@ -28,6 +28,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
@ -133,12 +134,12 @@ var _ = common.SIGDescribe("Netpol API", func() {
ginkgo.By("getting")
gottenNetPol, err := npClient.Get(ctx, createdNetPol.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(gottenNetPol.UID, createdNetPol.UID)
+gomega.Expect(gottenNetPol.UID).To(gomega.Equal(createdNetPol.UID))
ginkgo.By("listing")
nps, err := npClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err)
-framework.ExpectEqual(len(nps.Items), 3, "filtered list should have 3 items")
+gomega.Expect(nps.Items).To(gomega.HaveLen(3), "filtered list should have 3 items")
ginkgo.By("watching")
framework.Logf("starting watch")
@ -149,7 +150,7 @@ var _ = common.SIGDescribe("Netpol API", func() {
ginkgo.By("cluster-wide listing")
clusterNPs, err := clusterNPClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err)
-framework.ExpectEqual(len(clusterNPs.Items), 3, "filtered list should have 3 items")
+gomega.Expect(clusterNPs.Items).To(gomega.HaveLen(3), "filtered list should have 3 items")
ginkgo.By("cluster-wide watching")
framework.Logf("starting watch")
@ -159,14 +160,14 @@ var _ = common.SIGDescribe("Netpol API", func() {
ginkgo.By("patching")
patchedNetPols, err := npClient.Patch(ctx, createdNetPol.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(patchedNetPols.Annotations["patched"], "true", "patched object should have the applied annotation")
+gomega.Expect(patchedNetPols.Annotations).To(gomega.HaveKeyWithValue("patched", "true"), "patched object should have the applied annotation")
ginkgo.By("updating")
npToUpdate := patchedNetPols.DeepCopy()
npToUpdate.Annotations["updated"] = "true"
updatedNetPols, err := npClient.Update(ctx, npToUpdate, metav1.UpdateOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(updatedNetPols.Annotations["updated"], "true", "updated object should have the applied annotation")
+gomega.Expect(updatedNetPols.Annotations).To(gomega.HaveKeyWithValue("updated", "true"), "updated object should have the applied annotation")
framework.Logf("waiting for watch events with expected annotations")
for sawAnnotations := false; !sawAnnotations; {
@ -175,7 +176,7 @@ var _ = common.SIGDescribe("Netpol API", func() {
if !ok {
framework.Fail("watch channel should not close")
}
-framework.ExpectEqual(evt.Type, watch.Modified)
+gomega.Expect(evt.Type).To(gomega.Equal(watch.Modified))
watchedNetPol, isNetPol := evt.Object.(*networkingv1.NetworkPolicy)
if !isNetPol {
framework.Failf("expected NetworkPolicy, got %T", evt.Object)
@ -201,14 +202,14 @@ var _ = common.SIGDescribe("Netpol API", func() {
}
nps, err = npClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err)
-framework.ExpectEqual(len(nps.Items), 2, "filtered list should have 2 items")
+gomega.Expect(nps.Items).To(gomega.HaveLen(2), "filtered list should have 2 items")
ginkgo.By("deleting a collection")
err = npClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err)
nps, err = npClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err)
-framework.ExpectEqual(len(nps.Items), 0, "filtered list should have 0 items")
+gomega.Expect(nps.Items).To(gomega.BeEmpty(), "filtered list should have 0 items")
})
/*
@ -267,6 +268,6 @@ var _ = common.SIGDescribe("Netpol API", func() {
framework.ExpectNoError(err)
nps, err := npClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err)
-framework.ExpectEqual(len(nps.Items), 0, "filtered list should be 0 items")
+gomega.Expect(nps.Items).To(gomega.BeEmpty(), "filtered list should be 0 items")
})
})

View File

@ -38,6 +38,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
)
var _ = common.SIGDescribe("Services GCE [Slow]", func() {
@ -86,7 +87,7 @@ var _ = common.SIGDescribe("Services GCE [Slow]", func() {
// Verify that service has been updated properly.
svcTier, err := gcecloud.GetServiceNetworkTier(svc)
framework.ExpectNoError(err)
-framework.ExpectEqual(svcTier, cloud.NetworkTierStandard)
+gomega.Expect(svcTier).To(gomega.Equal(cloud.NetworkTierStandard))
// Record the LB name for test cleanup.
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
@ -102,7 +103,7 @@ var _ = common.SIGDescribe("Services GCE [Slow]", func() {
// Verify that service has been updated properly.
svcTier, err = gcecloud.GetServiceNetworkTier(svc)
framework.ExpectNoError(err)
-framework.ExpectEqual(svcTier, cloud.NetworkTierDefault)
+gomega.Expect(svcTier).To(gomega.Equal(cloud.NetworkTierDefault))
// Wait until the ingress IP changes. Each tier has its own pool of
// IPs, so changing tiers implies changing IPs.
@ -133,10 +134,10 @@ var _ = common.SIGDescribe("Services GCE [Slow]", func() {
})
framework.ExpectNoError(err)
// Verify that service has been updated properly.
-framework.ExpectEqual(svc.Spec.LoadBalancerIP, requestedIP)
+gomega.Expect(svc.Spec.LoadBalancerIP).To(gomega.Equal(requestedIP))
svcTier, err = gcecloud.GetServiceNetworkTier(svc)
framework.ExpectNoError(err)
-framework.ExpectEqual(svcTier, cloud.NetworkTierStandard)
+gomega.Expect(svcTier).To(gomega.Equal(cloud.NetworkTierStandard))
// Wait until the ingress IP changes and verifies the LB.
waitAndVerifyLBWithTier(ctx, jig, ingressIP, createTimeout, lagTimeout)
@ -156,7 +157,7 @@ func waitAndVerifyLBWithTier(ctx context.Context, jig *e2eservice.TestJig, exist
ginkgo.By("running sanity and reachability checks")
if svc.Spec.LoadBalancerIP != "" {
// Verify that the new ingress IP is the requested IP if it's set.
-framework.ExpectEqual(ingressIP, svc.Spec.LoadBalancerIP)
+gomega.Expect(ingressIP).To(gomega.Equal(svc.Spec.LoadBalancerIP))
}
// If the IP has been used by previous test, sometimes we get the lingering
// 404 errors even after the LB is long gone. Tolerate and retry until the
@ -168,7 +169,7 @@ func waitAndVerifyLBWithTier(ctx context.Context, jig *e2eservice.TestJig, exist
framework.ExpectNoError(err)
netTier, err := getLBNetworkTierByIP(ingressIP)
framework.ExpectNoError(err, "failed to get the network tier of the load balancer")
-framework.ExpectEqual(netTier, svcNetTier)
+gomega.Expect(netTier).To(gomega.Equal(svcNetTier))
return ingressIP
}

View File

@ -23,6 +23,7 @@ import (
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
@ -116,7 +117,7 @@ var _ = common.SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() {
targetAddr := net.JoinHostPort(targetPod.Status.PodIP, testPodPort)
sourceIP, execPodIP := execSourceIPTest(sourcePod, targetAddr)
ginkgo.By("Verifying the preserved source ip")
-framework.ExpectEqual(sourceIP, execPodIP)
+gomega.Expect(sourceIP).To(gomega.Equal(execPodIP))
}
}
})

View File

@ -485,7 +485,7 @@ func validateRedirectRequest(client *http.Client, redirectVerb string, urlString
defer resp.Body.Close()
framework.Logf("http.Client request:%s StatusCode:%d", redirectVerb, resp.StatusCode)
-framework.ExpectEqual(resp.StatusCode, 301, "The resp.StatusCode returned: %d", resp.StatusCode)
+gomega.Expect(resp.StatusCode).To(gomega.Equal(301), "The resp.StatusCode returned: %d", resp.StatusCode)
}
// validateProxyVerbRequest checks that a http request to a pod
@ -587,7 +587,7 @@ func nodeProxyTest(ctx context.Context, f *framework.Framework, prefix, nodeDest
serviceUnavailableErrors++
} else {
framework.ExpectNoError(err)
-framework.ExpectEqual(status, http.StatusOK)
+gomega.Expect(status).To(gomega.Equal(http.StatusOK))
gomega.Expect(d).To(gomega.BeNumerically("<", proxyHTTPCallTimeout))
}
}

View File

@ -1080,7 +1080,7 @@ var _ = common.SIGDescribe("Services", func() {
for _, pausePod := range pausePods.Items {
sourceIP, execPodIP := execSourceIPTest(pausePod, serviceAddress)
ginkgo.By("Verifying the preserved source ip")
-framework.ExpectEqual(sourceIP, execPodIP)
+gomega.Expect(sourceIP).To(gomega.Equal(execPodIP))
}
})
@ -1398,7 +1398,7 @@ var _ = common.SIGDescribe("Services", func() {
err = jig.CheckServiceReachability(ctx, nodePortService, execPod)
framework.ExpectNoError(err)
nodePortCounts := len(nodePortService.Spec.Ports)
-framework.ExpectEqual(nodePortCounts, 2, "updated service should have two Ports but found %d Ports", nodePortCounts)
+gomega.Expect(nodePortCounts).To(gomega.Equal(2), "updated service should have two Ports but found %d Ports", nodePortCounts)
for _, port := range nodePortService.Spec.Ports {
framework.ExpectNotEqual(port.NodePort, 0, "NodePort service failed to allocate NodePort for Port %s", port.Name)
@ -3231,7 +3231,7 @@ var _ = common.SIGDescribe("Services", func() {
ginkgo.By("fetching the Endpoint")
endpoints, err := f.ClientSet.CoreV1().Endpoints(testNamespaceName).Get(ctx, testEndpointName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to fetch Endpoint")
-framework.ExpectEqual(foundEndpoint.ObjectMeta.Labels["test-service"], "updated", "failed to update Endpoint %v in namespace %v label not updated", testEndpointName, testNamespaceName)
+gomega.Expect(foundEndpoint.ObjectMeta.Labels).To(gomega.HaveKeyWithValue("test-service", "updated"), "failed to update Endpoint %v in namespace %v label not updated", testEndpointName, testNamespaceName)
endpointPatch, err := json.Marshal(map[string]interface{}{
"metadata": map[string]interface{}{
@ -3279,13 +3279,13 @@ var _ = common.SIGDescribe("Services", func() {
ginkgo.By("fetching the Endpoint")
endpoints, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Get(ctx, testEndpointName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to fetch Endpoint")
-framework.ExpectEqual(endpoints.ObjectMeta.Labels["test-service"], "patched", "failed to patch Endpoint with Label")
+gomega.Expect(endpoints.ObjectMeta.Labels).To(gomega.HaveKeyWithValue("test-service", "patched"), "failed to patch Endpoint with Label")
endpointSubsetOne := endpoints.Subsets[0]
endpointSubsetOneAddresses := endpointSubsetOne.Addresses[0]
endpointSubsetOnePorts := endpointSubsetOne.Ports[0]
-framework.ExpectEqual(endpointSubsetOneAddresses.IP, "10.0.0.25", "failed to patch Endpoint")
-framework.ExpectEqual(endpointSubsetOnePorts.Name, "http-test", "failed to patch Endpoint")
-framework.ExpectEqual(endpointSubsetOnePorts.Port, int32(8080), "failed to patch Endpoint")
+gomega.Expect(endpointSubsetOneAddresses.IP).To(gomega.Equal("10.0.0.25"), "failed to patch Endpoint")
+gomega.Expect(endpointSubsetOnePorts.Name).To(gomega.Equal("http-test"), "failed to patch Endpoint")
+gomega.Expect(endpointSubsetOnePorts.Port).To(gomega.Equal(int32(8080)), "failed to patch Endpoint")
ginkgo.By("deleting the Endpoint by Collection")
err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-endpoint-static=true"})
@ -3610,7 +3610,7 @@ var _ = common.SIGDescribe("Services", func() {
svcList, err := cs.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list Services")
-framework.ExpectEqual(len(svcList.Items), 3, "Required count of services out of sync")
+gomega.Expect(svcList.Items).To(gomega.HaveLen(3), "Required count of services out of sync")
ginkgo.By("deleting service collection")
err = svcDynamicClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: deleteLabel})
@ -3618,7 +3618,7 @@ var _ = common.SIGDescribe("Services", func() {
svcList, err = cs.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list Services")
-framework.ExpectEqual(len(svcList.Items), 1, "Required count of services out of sync")
+gomega.Expect(svcList.Items).To(gomega.HaveLen(1), "Required count of services out of sync")
framework.Logf("Collection of services has been deleted")
})
@ -3918,7 +3918,9 @@ func execAffinityTestForSessionAffinityTimeout(ctx context.Context, f *framework
framework.ExpectNoError(err)
// the service should be sticky until the timeout expires
-framework.ExpectEqual(checkAffinity(ctx, cs, execPod, svcIP, servicePort, true), true)
+if !checkAffinity(ctx, cs, execPod, svcIP, servicePort, true) {
+framework.Failf("the service %s (%s:%d) should be sticky until the timeout expires", svc.Name, svcIP, servicePort)
+}
// but it should return different hostnames after the timeout expires
// try several times to avoid the probability that we hit the same pod twice
hosts := sets.NewString()
@ -3999,19 +4001,25 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(ctx context.Context,
framework.ExpectNoError(err)
if !isTransitionTest {
-framework.ExpectEqual(checkAffinity(ctx, cs, execPod, svcIP, servicePort, true), true)
+if !checkAffinity(ctx, cs, execPod, svcIP, servicePort, true) {
+framework.Failf("Failed to check affinity for service %s/%s", ns, svc.Name)
+}
}
if isTransitionTest {
_, err = jig.UpdateService(ctx, func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityNone
})
framework.ExpectNoError(err)
-framework.ExpectEqual(checkAffinity(ctx, cs, execPod, svcIP, servicePort, false), true)
+if !checkAffinity(ctx, cs, execPod, svcIP, servicePort, false) {
+framework.Failf("Failed to check affinity for service %s/%s without session affinity", ns, svc.Name)
+}
_, err = jig.UpdateService(ctx, func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
})
framework.ExpectNoError(err)
-framework.ExpectEqual(checkAffinity(ctx, cs, execPod, svcIP, servicePort, true), true)
+if !checkAffinity(ctx, cs, execPod, svcIP, servicePort, true) {
+framework.Failf("Failed to check affinity for service %s/%s with session affinity", ns, svc.Name)
+}
}
}
@ -4049,19 +4057,25 @@ func execAffinityTestForLBServiceWithOptionalTransition(ctx context.Context, f *
port := int(svc.Spec.Ports[0].Port)
if !isTransitionTest {
-framework.ExpectEqual(checkAffinity(ctx, cs, nil, ingressIP, port, true), true)
+if !checkAffinity(ctx, cs, nil, ingressIP, port, true) {
+framework.Failf("Failed to verify affinity for loadbalance service %s/%s", ns, serviceName)
+}
}
if isTransitionTest {
svc, err = jig.UpdateService(ctx, func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityNone
})
framework.ExpectNoError(err)
-framework.ExpectEqual(checkAffinity(ctx, cs, nil, ingressIP, port, false), true)
+if !checkAffinity(ctx, cs, nil, ingressIP, port, false) {
+framework.Failf("Failed to verify affinity for loadbalance service %s/%s without session affinity ", ns, serviceName)
+}
svc, err = jig.UpdateService(ctx, func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
})
framework.ExpectNoError(err)
-framework.ExpectEqual(checkAffinity(ctx, cs, nil, ingressIP, port, true), true)
+if !checkAffinity(ctx, cs, nil, ingressIP, port, true) {
+framework.Failf("Failed to verify affinity for loadbalance service %s/%s with session affinity ", ns, serviceName)
+}
}
}

View File

@ -26,6 +26,7 @@ import (
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -178,7 +179,7 @@ func execHostnameTest(sourcePod v1.Pod, targetAddr, targetHostname string) {
hostname := strings.TrimSpace(strings.Split(stdout, ".")[0])
framework.ExpectNoError(err)
-framework.ExpectEqual(hostname, targetHostname)
+gomega.Expect(hostname).To(gomega.Equal(targetHostname))
}
// createSecondNodePortService creates a service with the same selector as config.NodePortService and same HTTP Port