remove deprecated framework.ExpectEqual
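
The replacements below follow a few recurring patterns. A minimal sketch of those patterns in a plain go test harness (names and values here are illustrative, not taken from the commit):

package example_test

import (
	"testing"

	"github.com/onsi/gomega"
)

// Each assertion shows the gomega form this commit substitutes for a
// deprecated framework.ExpectEqual call.
func TestExpectEqualReplacements(t *testing.T) {
	g := gomega.NewWithT(t)

	kubeletPids := []int{1234}
	watchTimes := map[string]int{"pod-0": 1}

	// ExpectEqual(len(xs), n, ...) becomes HaveLen, which prints the
	// slice itself on failure rather than two bare integers.
	g.Expect(kubeletPids).To(gomega.HaveLen(1), "expected only one kubelet process; found %d", len(kubeletPids))

	// ExpectEqual(ok, true) after a map lookup becomes HaveKey.
	g.Expect(watchTimes).To(gomega.HaveKey("pod-0"))

	// ExpectEqual(a, b) on plain values becomes Equal.
	g.Expect([]int{0, 1}).To(gomega.Equal([]int{0, 1}))

	// ExpectEqual(x > 1, false, ...) becomes a numeric matcher, so the
	// failure message shows the actual count instead of a bare boolean.
	g.Expect(len(watchTimes)).To(gomega.BeNumerically("<=", 1))
}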
@@ -94,7 +94,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() {
 		ginkgo.It("Kubelet's oom-score-adj should be -999", func(ctx context.Context) {
 			kubeletPids, err := getPidsForProcess(kubeletProcessName, "")
 			framework.ExpectNoError(err, "failed to get list of kubelet pids")
-			framework.ExpectEqual(len(kubeletPids), 1, "expected only one kubelet process; found %d", len(kubeletPids))
+			gomega.Expect(kubeletPids).To(gomega.HaveLen(1), "expected only one kubelet process; found %d", len(kubeletPids))
 			gomega.Eventually(ctx, func() error {
 				return validateOOMScoreAdjSetting(kubeletPids[0], -999)
 			}, 5*time.Minute, 30*time.Second).Should(gomega.BeNil())

@@ -378,8 +378,8 @@ func runDensityBatchTest(ctx context.Context, f *framework.Framework, rc *Resour
 	)

 	for name, create := range createTimes {
-		watch, ok := watchTimes[name]
-		framework.ExpectEqual(ok, true)
+		watch := watchTimes[name]
+		gomega.Expect(watchTimes).To(gomega.HaveKey(name))

 		e2eLags = append(e2eLags,
 			e2emetrics.PodLatencyData{Name: name, Latency: watch.Time.Sub(create.Time)})

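The comma-ok lookup disappears because gomega.HaveKey carries the same information. One subtlety: the plain index form silently yields the zero value for a missing key, which is why the rewritten loop still asserts HaveKey before watch is used. A small standalone sketch of the lookup semantics (illustrative names only):

package main

import "fmt"

func main() {
	watchTimes := map[string]int{"pod-0": 7}

	// Comma-ok form: presence is reported explicitly.
	w, ok := watchTimes["pod-1"]
	fmt.Println(w, ok) // 0 false

	// Plain index form: a missing key yields the zero value with no error,
	// so correctness now rests on the HaveKey assertion failing the test.
	fmt.Println(watchTimes["pod-1"]) // 0
}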
@@ -513,12 +513,16 @@ func newInformerWatchPod(ctx context.Context, f *framework.Framework, mutex *syn
 		cache.ResourceEventHandlerFuncs{
 			AddFunc: func(obj interface{}) {
 				p, ok := obj.(*v1.Pod)
-				framework.ExpectEqual(ok, true)
+				if !ok {
+					framework.Failf("Failed to cast object %T to Pod", obj)
+				}
 				go checkPodRunning(p)
 			},
 			UpdateFunc: func(oldObj, newObj interface{}) {
 				p, ok := newObj.(*v1.Pod)
-				framework.ExpectEqual(ok, true)
+				if !ok {
+					framework.Failf("Failed to cast object %T to Pod", newObj)
+				}
 				go checkPodRunning(p)
 			},
 		},

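In the informer callbacks a failed type assertion previously tripped ExpectEqual(ok, true); the commit replaces it with an explicit guard so the failure message names the unexpected concrete type. A minimal sketch of the same guard outside the e2e framework (the fail parameter stands in for framework.Failf, which logs and fails the test; Pod stands in for *v1.Pod):

package main

import "fmt"

type Pod struct{ Name string }

// onAdd guards the cast instead of asserting ok == true: on a bad object
// it reports the concrete type via %T and returns before p is dereferenced.
func onAdd(obj interface{}, fail func(format string, args ...interface{})) {
	p, ok := obj.(*Pod)
	if !ok {
		fail("Failed to cast object %T to Pod", obj)
		return
	}
	fmt.Println("checking pod", p.Name)
}

func main() {
	fail := func(format string, args ...interface{}) { fmt.Printf(format+"\n", args...) }
	onAdd(&Pod{Name: "pod-0"}, fail) // checking pod pod-0
	onAdd(42, fail)                  // Failed to cast object int to Pod
}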
@@ -554,8 +558,8 @@ func createBatchPodSequential(ctx context.Context, f *framework.Framework, pods
 	}

 	for name, create := range createTimes {
-		watch, ok := watchTimes[name]
-		framework.ExpectEqual(ok, true)
+		watch := watchTimes[name]
+		gomega.Expect(watchTimes).To(gomega.HaveKey(name))
 		if !init {
 			if firstCreate.Time.After(create.Time) {
 				firstCreate = create

@@ -635,8 +639,9 @@ func logAndVerifyLatency(ctx context.Context, batchLag time.Duration, e2eLags []

 		// check batch pod creation latency
 		if podBatchStartupLimit > 0 {
-			framework.ExpectEqual(batchLag <= podBatchStartupLimit, true, "Batch creation startup time %v exceed limit %v",
-				batchLag, podBatchStartupLimit)
+			if batchLag > podBatchStartupLimit {
+				framework.Failf("Batch creation startup time %v exceed limit %v", batchLag, podBatchStartupLimit)
+			}
 		}
 	}
 }

@@ -355,7 +355,7 @@ func getNode(c *clientset.Clientset) (*v1.Node, error) {
 	if nodes == nil {
 		return nil, fmt.Errorf("the node list is nil")
 	}
-	framework.ExpectEqual(len(nodes.Items) > 1, false, "the number of nodes is more than 1.")
+	gomega.Expect(len(nodes.Items)).To(gomega.BeNumerically("<=", 1), "the number of nodes is more than 1.")
 	if len(nodes.Items) == 0 {
 		return nil, fmt.Errorf("empty node list: %+v", nodes)
 	}

@@ -280,7 +280,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
 		currentNUMANodeIDs, err := cpuset.Parse(strings.Trim(output, "\n"))
 		framework.ExpectNoError(err)

-		framework.ExpectEqual(numaNodeIDs, currentNUMANodeIDs.List())
+		gomega.Expect(numaNodeIDs).To(gomega.Equal(currentNUMANodeIDs.List()))
 	}

 	waitingForHugepages := func(ctx context.Context, hugepagesCount int) {

@@ -394,16 +394,16 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
 		framework.ExpectNoError(err)

 		stateAllocatableMemory := getAllocatableMemoryFromStateFile(stateData)
-		framework.ExpectEqual(len(resp.Memory), len(stateAllocatableMemory))
+		gomega.Expect(resp.Memory).To(gomega.HaveLen(len(stateAllocatableMemory)))

 		for _, containerMemory := range resp.Memory {
 			gomega.Expect(containerMemory.Topology).NotTo(gomega.BeNil())
-			framework.ExpectEqual(len(containerMemory.Topology.Nodes), 1)
+			gomega.Expect(containerMemory.Topology.Nodes).To(gomega.HaveLen(1))
 			gomega.Expect(containerMemory.Topology.Nodes[0]).NotTo(gomega.BeNil())

 			numaNodeID := int(containerMemory.Topology.Nodes[0].ID)
 			for _, numaStateMemory := range stateAllocatableMemory {
-				framework.ExpectEqual(len(numaStateMemory.NUMAAffinity), 1)
+				gomega.Expect(numaStateMemory.NUMAAffinity).To(gomega.HaveLen(1))
 				if numaNodeID != numaStateMemory.NUMAAffinity[0] {
 					continue
 				}

@@ -134,7 +134,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
 			})

 			framework.ExpectNoError(err)
-			framework.ExpectEqual(len(list.Items), len(pods), "the number of pods is not as expected")
+			gomega.Expect(list.Items).To(gomega.HaveLen(len(pods)), "the number of pods is not as expected")

 			list, err = e2epod.NewPodClient(f).List(ctx, metav1.ListOptions{
 				FieldSelector: nodeSelector,

@@ -142,7 +142,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
 			if err != nil {
 				framework.Failf("Failed to start batch pod: %q", err)
 			}
-			framework.ExpectEqual(len(list.Items), len(pods), "the number of pods is not as expected")
+			gomega.Expect(list.Items).To(gomega.HaveLen(len(pods)), "the number of pods is not as expected")

 			for _, pod := range list.Items {
 				framework.Logf("Pod (%v/%v) status conditions: %q", pod.Namespace, pod.Name, &pod.Status.Conditions)

@@ -168,7 +168,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
 				if err != nil {
 					return err
 				}
-				framework.ExpectEqual(len(list.Items), len(pods), "the number of pods is not as expected")
+				gomega.Expect(list.Items).To(gomega.HaveLen(len(pods)), "the number of pods is not as expected")

 				for _, pod := range list.Items {
 					if !isPodShutdown(&pod) {

@@ -236,7 +236,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
 				FieldSelector: nodeSelector,
 			})
 			framework.ExpectNoError(err)
-			framework.ExpectEqual(len(list.Items), len(pods), "the number of pods is not as expected")
+			gomega.Expect(list.Items).To(gomega.HaveLen(len(pods)), "the number of pods is not as expected")

 			ctx, cancel := context.WithCancel(ctx)
 			defer cancel()

@@ -286,7 +286,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
 				if err != nil {
 					return err
 				}
-				framework.ExpectEqual(len(list.Items), len(pods), "the number of pods is not as expected")
+				gomega.Expect(list.Items).To(gomega.HaveLen(len(pods)), "the number of pods is not as expected")

 				for _, pod := range list.Items {
 					if kubelettypes.IsCriticalPod(&pod) {

@@ -313,7 +313,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
 				if err != nil {
 					return err
 				}
-				framework.ExpectEqual(len(list.Items), len(pods), "the number of pods is not as expected")
+				gomega.Expect(list.Items).To(gomega.HaveLen(len(pods)), "the number of pods is not as expected")

 				for _, pod := range list.Items {
 					if !isPodShutdown(&pod) {

@@ -514,7 +514,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
 				FieldSelector: nodeSelector,
 			})
 			framework.ExpectNoError(err)
-			framework.ExpectEqual(len(list.Items), len(pods), "the number of pods is not as expected")
+			gomega.Expect(list.Items).To(gomega.HaveLen(len(pods)), "the number of pods is not as expected")

 			ginkgo.By("Verifying batch pods are running")
 			for _, pod := range list.Items {

@@ -537,7 +537,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut
 				if err != nil {
 					return err
 				}
-				framework.ExpectEqual(len(list.Items), len(pods), "the number of pods is not as expected")
+				gomega.Expect(list.Items).To(gomega.HaveLen(len(pods)), "the number of pods is not as expected")
 				for _, pod := range list.Items {
 					shouldShutdown := false
 					for _, podName := range step {

@@ -628,10 +628,14 @@ func getGracePeriodOverrideTestPod(name string, node string, gracePeriod int64,
 			kubelettypes.ConfigSourceAnnotationKey: kubelettypes.FileSource,
 		}
 		pod.Spec.PriorityClassName = priorityClassName
-		framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), true, "pod should be a critical pod")
+		if !kubelettypes.IsCriticalPod(pod) {
+			framework.Failf("pod %q should be a critical pod", pod.Name)
+		}
 	} else {
 		pod.Spec.PriorityClassName = priorityClassName
-		framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), false, "pod should not be a critical pod")
+		if kubelettypes.IsCriticalPod(pod) {
+			framework.Failf("pod %q should not be a critical pod", pod.Name)
+		}
 	}
 	return pod
 }

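The critical branch pairs the priority class with the file-source annotation because kubelettypes.IsCriticalPod treats static (file-source) pods as critical, while a bare PriorityClassName does not by itself make a pod critical (the check reads the resolved pod.Spec.Priority, not the class name). A small sketch of that property, assuming the vendored k8s.io packages (illustrative, not the commit's code):

package example

import (
	v1 "k8s.io/api/core/v1"
	kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
)

// criticality returns (true, false): the annotated pod is a static pod and
// therefore critical; the ordinary pod is not, since only its class name is
// set and its numeric priority was never resolved by the admission plugin.
func criticality() (bool, bool) {
	static := &v1.Pod{}
	static.Annotations = map[string]string{
		kubelettypes.ConfigSourceAnnotationKey: kubelettypes.FileSource,
	}

	ordinary := &v1.Pod{}
	ordinary.Spec.PriorityClassName = "test-priority-class"

	return kubelettypes.IsCriticalPod(static), kubelettypes.IsCriticalPod(ordinary)
}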
@@ -650,7 +654,7 @@ func getNodeReadyStatus(ctx context.Context, f *framework.Framework) bool {
 	nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
 	framework.ExpectNoError(err)
 	// Assuming that there is only one node, because this is a node e2e test.
-	framework.ExpectEqual(len(nodeList.Items), 1)
+	gomega.Expect(nodeList.Items).To(gomega.HaveLen(1), "the number of nodes is not as expected")
 	return isNodeReady(&nodeList.Items[0])
 }