Merge pull request #120403 from wlq1212/cheanup/notwork/expectnotequal

e2e_network: stop using deprecated framework.ExpectNotEqual
This commit is contained in:
Kubernetes Prow Robot 2023-09-14 01:00:16 -07:00 committed by GitHub
commit 8eaaf2f9b9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 15 additions and 12 deletions

View File

@ -22,7 +22,6 @@ import (
"strings" "strings"
"time" "time"
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/intstr"
@ -36,6 +35,9 @@ import (
"k8s.io/kubernetes/test/e2e/network/common" "k8s.io/kubernetes/test/e2e/network/common"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api" admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
) )
const ( const (
@ -113,7 +115,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
} }
ips := e2enode.GetAddressesByTypeAndFamily(&nodes.Items[0], v1.NodeInternalIP, family) ips := e2enode.GetAddressesByTypeAndFamily(&nodes.Items[0], v1.NodeInternalIP, family)
framework.ExpectNotEqual(len(ips), 0) gomega.Expect(ips).ToNot(gomega.BeEmpty())
clientNodeInfo = nodeInfo{ clientNodeInfo = nodeInfo{
name: nodes.Items[0].Name, name: nodes.Items[0].Name,
@ -121,7 +123,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
} }
ips = e2enode.GetAddressesByTypeAndFamily(&nodes.Items[1], v1.NodeInternalIP, family) ips = e2enode.GetAddressesByTypeAndFamily(&nodes.Items[1], v1.NodeInternalIP, family)
framework.ExpectNotEqual(len(ips), 0) gomega.Expect(ips).ToNot(gomega.BeEmpty())
serverNodeInfo = nodeInfo{ serverNodeInfo = nodeInfo{
name: nodes.Items[1].Name, name: nodes.Items[1].Name,

View File

@ -786,7 +786,7 @@ var _ = common.SIGDescribe("Ingress API", func() {
ginkgo.By("deleting") ginkgo.By("deleting")
expectFinalizer := func(ing *networkingv1.Ingress, msg string) { expectFinalizer := func(ing *networkingv1.Ingress, msg string) {
framework.ExpectNotEqual(ing.DeletionTimestamp, nil, fmt.Sprintf("expected deletionTimestamp, got nil on step: %q, ingress: %+v", msg, ing)) gomega.Expect(ing.DeletionTimestamp).ToNot(gomega.BeNil(), "expected deletionTimestamp, got nil on step: %q, ingress: %+v", msg, ing)
if len(ing.Finalizers) == 0 { if len(ing.Finalizers) == 0 {
framework.Failf("expected finalizers on ingress, got none on step: %q, ingress: %+v", msg, ing) framework.Failf("expected finalizers on ingress, got none on step: %q, ingress: %+v", msg, ing)
} }

View File

@ -40,6 +40,7 @@ import (
netutils "k8s.io/utils/net" netutils "k8s.io/utils/net"
"github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
) )
var kubeProxyE2eImage = imageutils.GetE2EImage(imageutils.Agnhost) var kubeProxyE2eImage = imageutils.GetE2EImage(imageutils.Agnhost)
@ -76,7 +77,7 @@ var _ = common.SIGDescribe("KubeProxy", func() {
} }
ips := e2enode.GetAddressesByTypeAndFamily(&nodes.Items[0], v1.NodeInternalIP, family) ips := e2enode.GetAddressesByTypeAndFamily(&nodes.Items[0], v1.NodeInternalIP, family)
framework.ExpectNotEqual(len(ips), 0) gomega.Expect(ips).ToNot(gomega.BeEmpty())
clientNodeInfo := NodeInfo{ clientNodeInfo := NodeInfo{
node: &nodes.Items[0], node: &nodes.Items[0],
@ -85,7 +86,7 @@ var _ = common.SIGDescribe("KubeProxy", func() {
} }
ips = e2enode.GetAddressesByTypeAndFamily(&nodes.Items[1], v1.NodeInternalIP, family) ips = e2enode.GetAddressesByTypeAndFamily(&nodes.Items[1], v1.NodeInternalIP, family)
framework.ExpectNotEqual(len(ips), 0) gomega.Expect(ips).ToNot(gomega.BeEmpty())
serverNodeInfo := NodeInfo{ serverNodeInfo := NodeInfo{
node: &nodes.Items[1], node: &nodes.Items[1],

View File

@ -551,7 +551,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
acceptPod, err = cs.CoreV1().Pods(namespace).Get(ctx, acceptPod.Name, metav1.GetOptions{}) acceptPod, err = cs.CoreV1().Pods(namespace).Get(ctx, acceptPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Unable to get pod %s", acceptPod.Name) framework.ExpectNoError(err, "Unable to get pod %s", acceptPod.Name)
gomega.Expect(acceptPod.Status.Phase).To(gomega.Equal(v1.PodRunning)) gomega.Expect(acceptPod.Status.Phase).To(gomega.Equal(v1.PodRunning))
framework.ExpectNotEqual(acceptPod.Status.PodIP, "") gomega.Expect(acceptPod.Status.PodIP).ToNot(gomega.BeEmpty())
// Create loadbalancer service with source range from node[0] and podAccept // Create loadbalancer service with source range from node[0] and podAccept
svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) { svc, err := jig.CreateTCPService(ctx, func(svc *v1.Service) {
@ -581,7 +581,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() {
dropPod, err = cs.CoreV1().Pods(namespace).Get(ctx, dropPod.Name, metav1.GetOptions{}) dropPod, err = cs.CoreV1().Pods(namespace).Get(ctx, dropPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Unable to get pod %s", dropPod.Name) framework.ExpectNoError(err, "Unable to get pod %s", dropPod.Name)
gomega.Expect(acceptPod.Status.Phase).To(gomega.Equal(v1.PodRunning)) gomega.Expect(acceptPod.Status.Phase).To(gomega.Equal(v1.PodRunning))
framework.ExpectNotEqual(acceptPod.Status.PodIP, "") gomega.Expect(acceptPod.Status.PodIP).ToNot(gomega.BeEmpty())
ginkgo.By("Update service LoadBalancerSourceRange and check reachability") ginkgo.By("Update service LoadBalancerSourceRange and check reachability")
_, err = jig.UpdateService(ctx, func(svc *v1.Service) { _, err = jig.UpdateService(ctx, func(svc *v1.Service) {
@ -1602,8 +1602,8 @@ var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() {
noEndpointNodeMap[n.Name] = ips[0] noEndpointNodeMap[n.Name] = ips[0]
} }
} }
framework.ExpectNotEqual(len(endpointNodeMap), 0) gomega.Expect(endpointNodeMap).ToNot(gomega.BeEmpty())
framework.ExpectNotEqual(len(noEndpointNodeMap), 0) gomega.Expect(noEndpointNodeMap).ToNot(gomega.BeEmpty())
svcTCPPort := int(svc.Spec.Ports[0].Port) svcTCPPort := int(svc.Spec.Ports[0].Port)
svcNodePort := int(svc.Spec.Ports[0].NodePort) svcNodePort := int(svc.Spec.Ports[0].NodePort)

View File

@ -73,7 +73,7 @@ var _ = common.SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() {
ginkgo.By("creating a test pod on each Node") ginkgo.By("creating a test pod on each Node")
nodes, err := e2enode.GetReadySchedulableNodes(ctx, cs) nodes, err := e2enode.GetReadySchedulableNodes(ctx, cs)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectNotEqual(len(nodes.Items), 0, "no Nodes in the cluster") gomega.Expect(nodes.Items).ToNot(gomega.BeEmpty(), "no Nodes in the cluster")
for _, node := range nodes.Items { for _, node := range nodes.Items {
// target Pod at Node // target Pod at Node

View File

@ -1401,7 +1401,7 @@ var _ = common.SIGDescribe("Services", func() {
gomega.Expect(nodePortCounts).To(gomega.Equal(2), "updated service should have two Ports but found %d Ports", nodePortCounts) gomega.Expect(nodePortCounts).To(gomega.Equal(2), "updated service should have two Ports but found %d Ports", nodePortCounts)
for _, port := range nodePortService.Spec.Ports { for _, port := range nodePortService.Spec.Ports {
framework.ExpectNotEqual(port.NodePort, 0, "NodePort service failed to allocate NodePort for Port %s", port.Name) gomega.Expect(port.NodePort).ToNot(gomega.BeZero(), "NodePort service failed to allocate NodePort for Port %s", port.Name)
framework.Logf("NodePort service allocates NodePort: %d for Port: %s over Protocol: %s", port.NodePort, port.Name, port.Protocol) framework.Logf("NodePort service allocates NodePort: %d for Port: %s over Protocol: %s", port.NodePort, port.Name, port.Protocol)
} }
}) })