Merge pull request #119502 from Songjoy/cleanup-e2e-scheduling-framework-equal

e2e_scheduling: stop using deprecated framework.ExpectEqual
Kubernetes Prow Robot 2023-08-15 19:42:43 -07:00 committed by GitHub
commit de8c36cb56
4 changed files with 18 additions and 15 deletions
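
Every change in this PR follows the same pattern: an assertion written with the deprecated framework.ExpectEqual helper is rewritten with the equivalent gomega matcher (gomega.Equal for value comparisons, gomega.HaveLen or gomega.BeEmpty for length checks), and "github.com/onsi/gomega" is added to the imports where it was not already present. The standalone sketch below is not part of this PR; the package, test, and variable names are made up for illustration, and it only shows how the old calls map onto gomega matchers inside a minimal ginkgo/gomega suite:

package scheduling_sketch

import (
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

// TestSketch wires gomega's failure handler into ginkgo and runs the spec below.
func TestSketch(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "ExpectEqual migration sketch")
}

var _ = ginkgo.Describe("assertion style", func() {
	ginkgo.It("maps framework.ExpectEqual onto gomega matchers", func() {
		items := []string{} // stands in for limitRanges.Items
		got := "node-1"     // stands in for pod.Spec.NodeName
		want := "node-1"    // stands in for the expected node name

		// framework.ExpectEqual(len(items), 0)            -> BeEmpty()
		gomega.Expect(items).To(gomega.BeEmpty())

		// framework.ExpectEqual(got, want)                -> Equal(want)
		gomega.Expect(got).To(gomega.Equal(want))

		// framework.ExpectEqual(len(items), 2, "message") -> HaveLen(2), "message"
		// (gomega matchers accept an optional failure description as extra arguments)
	})
})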

View File

@@ -43,6 +43,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
)
const (
@@ -77,7 +78,7 @@ var _ = SIGDescribe("LimitRange", func() {
options := metav1.ListOptions{LabelSelector: selector.String()}
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(ctx, options)
framework.ExpectNoError(err, "failed to query for limitRanges")
-framework.ExpectEqual(len(limitRanges.Items), 0)
+gomega.Expect(limitRanges.Items).To(gomega.BeEmpty())
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
@@ -291,7 +292,7 @@ var _ = SIGDescribe("LimitRange", func() {
ginkgo.By(fmt.Sprintf("Listing all LimitRanges with label %q", e2eLabelSelector))
limitRangeList, err := f.ClientSet.CoreV1().LimitRanges("").List(ctx, metav1.ListOptions{LabelSelector: e2eLabelSelector})
framework.ExpectNoError(err, "Failed to list any limitRanges: %v", err)
-framework.ExpectEqual(len(limitRangeList.Items), 2, "Failed to find the correct limitRange count")
+gomega.Expect(limitRangeList.Items).To(gomega.HaveLen(2), "Failed to find the correct limitRange count")
framework.Logf("Found %d limitRanges", len(limitRangeList.Items))
ginkgo.By(fmt.Sprintf("Patching LimitRange %q in %q namespace", lrName, ns))
@@ -313,7 +314,7 @@ var _ = SIGDescribe("LimitRange", func() {
patchedLimitRange, err := lrClient.Patch(ctx, lrName, types.StrategicMergePatchType, []byte(limitRangePayload), metav1.PatchOptions{})
framework.ExpectNoError(err, "Failed to patch limitRange %q", lrName)
-framework.ExpectEqual(patchedLimitRange.Labels[lrName], "patched", "%q label didn't have value 'patched' for this limitRange. Current labels: %v", lrName, patchedLimitRange.Labels)
+gomega.Expect(patchedLimitRange.Labels[lrName]).To(gomega.Equal("patched"), "%q label didn't have value 'patched' for this limitRange. Current labels: %v", lrName, patchedLimitRange.Labels)
checkMinLimitRange := apiequality.Semantic.DeepEqual(patchedLimitRange.Spec.Limits[0].Min, newMin)
if !checkMinLimitRange {
framework.Failf("LimitRange does not have the correct min limitRange. Currently is %#v ", patchedLimitRange.Spec.Limits[0].Min)
@@ -332,7 +333,7 @@ var _ = SIGDescribe("LimitRange", func() {
ginkgo.By(fmt.Sprintf("Confirm that a single LimitRange still exists with label %q", e2eLabelSelector))
limitRangeList, err = f.ClientSet.CoreV1().LimitRanges("").List(ctx, metav1.ListOptions{LabelSelector: e2eLabelSelector})
framework.ExpectNoError(err, "Failed to list any limitRanges: %v", err)
-framework.ExpectEqual(len(limitRangeList.Items), 1, "Failed to find the correct limitRange count")
+gomega.Expect(limitRangeList.Items).To(gomega.HaveLen(1), "Failed to find the correct limitRange count")
framework.Logf("Found %d limitRange", len(limitRangeList.Items))
})
})

View File

@@ -45,6 +45,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
// ensure libs have a chance to initialize
_ "github.com/stretchr/testify/assert"
@@ -491,7 +492,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, cs, ns, labelPodName))
labelPod, err := cs.CoreV1().Pods(ns).Get(ctx, labelPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(labelPod.Spec.NodeName, nodeName)
+gomega.Expect(labelPod.Spec.NodeName).To(gomega.Equal(nodeName))
})
// Test Nodes does not have any label, hence it should be impossible to schedule Pod with
@@ -578,7 +579,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, cs, ns, labelPodName))
labelPod, err := cs.CoreV1().Pods(ns).Get(ctx, labelPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(labelPod.Spec.NodeName, nodeName)
+gomega.Expect(labelPod.Spec.NodeName).To(gomega.Equal(nodeName))
})
// 1. Run a pod to get an available node, then delete the pod
@@ -621,7 +622,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
framework.ExpectNoError(e2epod.WaitForPodNotPending(ctx, cs, ns, tolerationPodName))
deployedPod, err := cs.CoreV1().Pods(ns).Get(ctx, tolerationPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(deployedPod.Spec.NodeName, nodeName)
+gomega.Expect(deployedPod.Spec.NodeName).To(gomega.Equal(nodeName))
})
// 1. Run a pod to get an available node, then delete the pod
@@ -801,8 +802,8 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
}
}
expected := replicas / len(nodeNames)
-framework.ExpectEqual(numInNode1, expected, fmt.Sprintf("Pods are not distributed as expected on node %q", nodeNames[0]))
-framework.ExpectEqual(numInNode2, expected, fmt.Sprintf("Pods are not distributed as expected on node %q", nodeNames[1]))
+gomega.Expect(numInNode1).To(gomega.Equal(expected), fmt.Sprintf("Pods are not distributed as expected on node %q", nodeNames[0]))
+gomega.Expect(numInNode2).To(gomega.Equal(expected), fmt.Sprintf("Pods are not distributed as expected on node %q", nodeNames[1]))
})
})
@@ -1029,8 +1030,8 @@ func verifyResult(ctx context.Context, c clientset.Interface, expectedScheduled
framework.ExpectNoError(err)
scheduledPods, notScheduledPods := GetPodsScheduled(workerNodes, allPods)
-framework.ExpectEqual(len(notScheduledPods), expectedNotScheduled, fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))
-framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
+gomega.Expect(notScheduledPods).To(gomega.HaveLen(expectedNotScheduled), fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))
+gomega.Expect(scheduledPods).To(gomega.HaveLen(expectedScheduled), fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
}
// GetNodeThatCanRunPod trying to launch a pod without a label to get a node which can launch it

View File

@@ -843,8 +843,8 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
for _, pc := range pcs {
livePC, err := cs.SchedulingV1().PriorityClasses().Get(ctx, pc.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(livePC.Value, pc.Value)
-framework.ExpectEqual(livePC.Description, newDesc)
+gomega.Expect(livePC.Value).To(gomega.Equal(pc.Value))
+gomega.Expect(livePC.Description).To(gomega.Equal(newDesc))
}
})
})

View File

@@ -24,6 +24,7 @@ import (
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
// ensure libs have a chance to initialize
_ "github.com/stretchr/testify/assert"
@@ -256,7 +257,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
ginkgo.By("Pod should prefer scheduled to the node that pod can tolerate.")
tolePod, err := cs.CoreV1().Pods(ns).Get(ctx, tolerationPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(tolePod.Spec.NodeName, nodeName)
+gomega.Expect(tolePod.Spec.NodeName).To(gomega.Equal(nodeName))
})
ginkgo.Context("PodTopologySpread Scoring", func() {
@@ -348,7 +349,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
}
testPod := runPausePod(ctx, f, podCfg)
ginkgo.By(fmt.Sprintf("Verifying if the test-pod lands on node %q", nodeNames[1]))
-framework.ExpectEqual(nodeNames[1], testPod.Spec.NodeName)
+gomega.Expect(testPod.Spec.NodeName).To(gomega.Equal(nodeNames[1]))
})
})
})