From e9b69d1958d5df380096c9813870d7cd716d2695 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=AE=8B=E6=96=87=E6=9D=B0?=
Date: Fri, 21 Jul 2023 17:35:16 +0800
Subject: [PATCH] e2e_scheduling: stop using deprecated framework.ExpectEqual

---
 test/e2e/scheduling/limit_range.go |  9 +++++----
 test/e2e/scheduling/predicates.go  | 15 ++++++++-------
 test/e2e/scheduling/preemption.go  |  4 ++--
 test/e2e/scheduling/priorities.go  |  5 +++--
 4 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go
index 5cceb80c89a..1321f10cb1c 100644
--- a/test/e2e/scheduling/limit_range.go
+++ b/test/e2e/scheduling/limit_range.go
@@ -43,6 +43,7 @@ import (
 	admissionapi "k8s.io/pod-security-admission/api"
 
 	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
 )
 
 const (
@@ -77,7 +78,7 @@ var _ = SIGDescribe("LimitRange", func() {
 		options := metav1.ListOptions{LabelSelector: selector.String()}
 		limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(ctx, options)
 		framework.ExpectNoError(err, "failed to query for limitRanges")
-		framework.ExpectEqual(len(limitRanges.Items), 0)
+		gomega.Expect(limitRanges.Items).To(gomega.BeEmpty())
 
 		lw := &cache.ListWatch{
 			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
@@ -291,7 +292,7 @@ var _ = SIGDescribe("LimitRange", func() {
 		ginkgo.By(fmt.Sprintf("Listing all LimitRanges with label %q", e2eLabelSelector))
 		limitRangeList, err := f.ClientSet.CoreV1().LimitRanges("").List(ctx, metav1.ListOptions{LabelSelector: e2eLabelSelector})
 		framework.ExpectNoError(err, "Failed to list any limitRanges: %v", err)
-		framework.ExpectEqual(len(limitRangeList.Items), 2, "Failed to find the correct limitRange count")
+		gomega.Expect(limitRangeList.Items).To(gomega.HaveLen(2), "Failed to find the correct limitRange count")
 		framework.Logf("Found %d limitRanges", len(limitRangeList.Items))
 
 		ginkgo.By(fmt.Sprintf("Patching LimitRange %q in %q namespace", lrName, ns))
@@ -313,7 +314,7 @@ var _ = SIGDescribe("LimitRange", func() {
 
 		patchedLimitRange, err := lrClient.Patch(ctx, lrName, types.StrategicMergePatchType, []byte(limitRangePayload), metav1.PatchOptions{})
 		framework.ExpectNoError(err, "Failed to patch limitRange %q", lrName)
-		framework.ExpectEqual(patchedLimitRange.Labels[lrName], "patched", "%q label didn't have value 'patched' for this limitRange. Current labels: %v", lrName, patchedLimitRange.Labels)
+		gomega.Expect(patchedLimitRange.Labels[lrName]).To(gomega.Equal("patched"), "%q label didn't have value 'patched' for this limitRange. Current labels: %v", lrName, patchedLimitRange.Labels)
 		checkMinLimitRange := apiequality.Semantic.DeepEqual(patchedLimitRange.Spec.Limits[0].Min, newMin)
 		if !checkMinLimitRange {
 			framework.Failf("LimitRange does not have the correct min limitRange. Currently is %#v ", patchedLimitRange.Spec.Limits[0].Min)
@@ -332,7 +333,7 @@ var _ = SIGDescribe("LimitRange", func() {
 		ginkgo.By(fmt.Sprintf("Confirm that a single LimitRange still exists with label %q", e2eLabelSelector))
 		limitRangeList, err = f.ClientSet.CoreV1().LimitRanges("").List(ctx, metav1.ListOptions{LabelSelector: e2eLabelSelector})
 		framework.ExpectNoError(err, "Failed to list any limitRanges: %v", err)
-		framework.ExpectEqual(len(limitRangeList.Items), 1, "Failed to find the correct limitRange count")
+		gomega.Expect(limitRangeList.Items).To(gomega.HaveLen(1), "Failed to find the correct limitRange count")
 		framework.Logf("Found %d limitRange", len(limitRangeList.Items))
 	})
 })
diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go
index bbd9e78b7ac..92df41bfa0f 100644
--- a/test/e2e/scheduling/predicates.go
+++ b/test/e2e/scheduling/predicates.go
@@ -45,6 +45,7 @@ import (
 	admissionapi "k8s.io/pod-security-admission/api"
 
 	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
 
 	// ensure libs have a chance to initialize
 	_ "github.com/stretchr/testify/assert"
@@ -491,7 +492,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, cs, ns, labelPodName))
 		labelPod, err := cs.CoreV1().Pods(ns).Get(ctx, labelPodName, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(labelPod.Spec.NodeName, nodeName)
+		gomega.Expect(labelPod.Spec.NodeName).To(gomega.Equal(nodeName))
 	})
 
 	// Test Nodes does not have any label, hence it should be impossible to schedule Pod with
@@ -578,7 +579,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, cs, ns, labelPodName))
 		labelPod, err := cs.CoreV1().Pods(ns).Get(ctx, labelPodName, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(labelPod.Spec.NodeName, nodeName)
+		gomega.Expect(labelPod.Spec.NodeName).To(gomega.Equal(nodeName))
 	})
 
 	// 1. Run a pod to get an available node, then delete the pod
@@ -621,7 +622,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, cs, ns, tolerationPodName))
 		deployedPod, err := cs.CoreV1().Pods(ns).Get(ctx, tolerationPodName, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(deployedPod.Spec.NodeName, nodeName)
+		gomega.Expect(deployedPod.Spec.NodeName).To(gomega.Equal(nodeName))
 	})
 
 	// 1. Run a pod to get an available node, then delete the pod
@@ -801,8 +802,8 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 			}
 		}
 		expected := replicas / len(nodeNames)
-		framework.ExpectEqual(numInNode1, expected, fmt.Sprintf("Pods are not distributed as expected on node %q", nodeNames[0]))
-		framework.ExpectEqual(numInNode2, expected, fmt.Sprintf("Pods are not distributed as expected on node %q", nodeNames[1]))
+		gomega.Expect(numInNode1).To(gomega.Equal(expected), fmt.Sprintf("Pods are not distributed as expected on node %q", nodeNames[0]))
+		gomega.Expect(numInNode2).To(gomega.Equal(expected), fmt.Sprintf("Pods are not distributed as expected on node %q", nodeNames[1]))
 	})
 })
 
@@ -1029,8 +1030,8 @@ func verifyResult(ctx context.Context, c clientset.Interface, expectedScheduled
 	framework.ExpectNoError(err)
 	scheduledPods, notScheduledPods := GetPodsScheduled(workerNodes, allPods)
 
-	framework.ExpectEqual(len(notScheduledPods), expectedNotScheduled, fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))
-	framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
+	gomega.Expect(notScheduledPods).To(gomega.HaveLen(expectedNotScheduled), fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))
+	gomega.Expect(scheduledPods).To(gomega.HaveLen(expectedScheduled), fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
 }
 
 // GetNodeThatCanRunPod trying to launch a pod without a label to get a node which can launch it
diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go
index 137198a42b3..5fccfb52e91 100644
--- a/test/e2e/scheduling/preemption.go
+++ b/test/e2e/scheduling/preemption.go
@@ -843,8 +843,8 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 		for _, pc := range pcs {
 			livePC, err := cs.SchedulingV1().PriorityClasses().Get(ctx, pc.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)
-			framework.ExpectEqual(livePC.Value, pc.Value)
-			framework.ExpectEqual(livePC.Description, newDesc)
+			gomega.Expect(livePC.Value).To(gomega.Equal(pc.Value))
+			gomega.Expect(livePC.Description).To(gomega.Equal(newDesc))
 		}
 	})
 })
diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go
index 63197bc67fa..dd86a4c49f1 100644
--- a/test/e2e/scheduling/priorities.go
+++ b/test/e2e/scheduling/priorities.go
@@ -24,6 +24,7 @@ import (
 	"time"
 
 	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
 
 	// ensure libs have a chance to initialize
 	_ "github.com/stretchr/testify/assert"
@@ -256,7 +257,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 		ginkgo.By("Pod should prefer scheduled to the node that pod can tolerate.")
 		tolePod, err := cs.CoreV1().Pods(ns).Get(ctx, tolerationPodName, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(tolePod.Spec.NodeName, nodeName)
+		gomega.Expect(tolePod.Spec.NodeName).To(gomega.Equal(nodeName))
 	})
 
 	ginkgo.Context("PodTopologySpread Scoring", func() {
@@ -348,7 +349,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 			}
 			testPod := runPausePod(ctx, f, podCfg)
 			ginkgo.By(fmt.Sprintf("Verifying if the test-pod lands on node %q", nodeNames[1]))
-			framework.ExpectEqual(nodeNames[1], testPod.Spec.NodeName)
+			gomega.Expect(testPod.Spec.NodeName).To(gomega.Equal(nodeNames[1]))
 		})
 	})
 })
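Note for reviewers: every rewrite in this patch follows the same mechanical mapping from the deprecated framework.ExpectEqual helper to native gomega matchers. The sketch below shows that mapping as a minimal, self-contained Go test, outside the e2e framework; the values (nodeName, items) are hypothetical stand-ins, not code from this patch. The main readability win is asserting on the collection itself (HaveLen, BeEmpty) rather than on len(...): on failure gomega then prints the elements, not just two integers.

package scheduling_test

import (
	"testing"

	"github.com/onsi/gomega"
)

// TestMatcherEquivalents demonstrates the assertion rewrites used in this
// patch. All values are hypothetical examples.
func TestMatcherEquivalents(t *testing.T) {
	g := gomega.NewWithT(t)

	nodeName := "node-1"
	scheduledNode := "node-1"
	items := []string{"lr-1", "lr-2"}
	var none []string

	// Before: framework.ExpectEqual(scheduledNode, nodeName)
	g.Expect(scheduledNode).To(gomega.Equal(nodeName))

	// Before: framework.ExpectEqual(len(items), 2, "wrong count")
	// HaveLen asserts on the slice, so a failure message includes its contents.
	g.Expect(items).To(gomega.HaveLen(2), "wrong count")

	// Before: framework.ExpectEqual(len(none), 0)
	g.Expect(none).To(gomega.BeEmpty())
}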