Merge pull request #121067 from carlory/cleanup-e2enode-framework-equal

e2e_node: stop using deprecated framework.ExpectEqual
Authored by Kubernetes Prow Robot on 2023-10-10 14:44:55 +02:00, committed by GitHub
commit 09edfe4ebb
17 changed files with 91 additions and 73 deletions
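For readers skimming the diff below: throughout this PR the deprecated framework.ExpectEqual helper is replaced either with plain Gomega matchers (Equal, HaveLen, BeNil, HaveKeyWithValue) or with an explicit if + framework.Failf check. A minimal standalone sketch of the Gomega style follows; it is not part of the PR, sits outside the Kubernetes test framework, and uses illustrative names and values only.

// Minimal sketch (not from the PR): the assertion style adopted here,
// using only the public Gomega API with a standard *testing.T.
package example

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestGomegaStyle(t *testing.T) {
	g := gomega.NewWithT(t)

	containers := []string{"app"}

	// Before (deprecated): framework.ExpectEqual(len(containers), 1, "pod should have one container")
	g.Expect(containers).To(gomega.HaveLen(1), "pod should have one container")

	// Before (deprecated): framework.ExpectEqual(containers[0], "app")
	g.Expect(containers[0]).To(gomega.Equal("app"))
}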

View File

@@ -229,10 +229,10 @@ func createPodWithAppArmor(ctx context.Context, f *framework.Framework, profile
func expectSoftRejection(status v1.PodStatus) {
args := []interface{}{"PodStatus: %+v", status}
-framework.ExpectEqual(status.Phase, v1.PodPending, args...)
-framework.ExpectEqual(status.Reason, "AppArmor", args...)
+gomega.Expect(status.Phase).To(gomega.Equal(v1.PodPending), args...)
+gomega.Expect(status.Reason).To(gomega.Equal("AppArmor"), args...)
gomega.Expect(status.Message).To(gomega.ContainSubstring("AppArmor"), args...)
-framework.ExpectEqual(status.ContainerStatuses[0].State.Waiting.Reason, "Blocked", args...)
+gomega.Expect(status.ContainerStatuses[0].State.Waiting.Reason).To(gomega.Equal("Blocked"), args...)
}
func isAppArmorEnabled() bool {

View File

@@ -103,11 +103,9 @@ var _ = SIGDescribe("Checkpoint Container [NodeFeature:CheckpointContainer]", fu
framework.ExpectNoError(err)
isReady, err := testutils.PodRunningReady(p)
framework.ExpectNoError(err)
-framework.ExpectEqual(
-isReady,
-true,
-"pod should be ready",
-)
+if !isReady {
+framework.Failf("pod %q should be ready", p.Name)
+}
framework.Logf(
"About to checkpoint container %q on %q",
@@ -199,7 +197,9 @@ var _ = SIGDescribe("Checkpoint Container [NodeFeature:CheckpointContainer]", fu
}
}
for fileName := range checkForFiles {
-framework.ExpectEqual(checkForFiles[fileName], true)
+if !checkForFiles[fileName] {
+framework.Failf("File %q not found in checkpoint archive %q", fileName, item)
+}
}
// cleanup checkpoint archive
os.RemoveAll(item)

View File

@@ -80,7 +80,7 @@ var _ = SIGDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func()
ginkgo.It("should be rotated and limited to a fixed amount of files", func(ctx context.Context) {
ginkgo.By("get container log path")
-framework.ExpectEqual(len(logRotationPod.Status.ContainerStatuses), 1, "log rotation pod should have one container")
+gomega.Expect(logRotationPod.Status.ContainerStatuses).To(gomega.HaveLen(1), "log rotation pod should have one container")
id := kubecontainer.ParseContainerID(logRotationPod.Status.ContainerStatuses[0].ContainerID).ID
r, _, err := getCRIClient()
framework.ExpectNoError(err, "should connect to CRI and obtain runtime service clients and image service client")

View File

@@ -241,7 +241,7 @@ func runGuPodTest(ctx context.Context, f *framework.Framework, cpuCount int) {
cpus, err := cpuset.Parse(strings.TrimSpace(logs))
framework.ExpectNoError(err, "parsing cpuset from logs for [%s] of pod [%s]", cnt.Name, pod.Name)
-framework.ExpectEqual(cpus.Size(), cpuCount, "expected cpu set size == %d, got %q", cpuCount, cpus.String())
+gomega.Expect(cpus.Size()).To(gomega.Equal(cpuCount), "expected cpu set size == %d, got %q", cpuCount, cpus.String())
}
ginkgo.By("by deleting the pods and waiting for container removal")

View File

@@ -33,6 +33,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
+"github.com/onsi/gomega"
)
const (
@@ -82,9 +83,9 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]
framework.ExpectNoError(err)
for _, p := range updatedPodList.Items {
if p.Name == nonCriticalBestEffort.Name {
-framework.ExpectEqual(p.Status.Phase, v1.PodRunning, fmt.Sprintf("pod: %v should not be preempted with status: %#v", p.Name, p.Status))
+gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodRunning), "pod: %v should not be preempted with status: %#v", p.Name, p.Status)
} else {
-framework.ExpectEqual(p.Status.Phase, v1.PodSucceeded, fmt.Sprintf("pod: %v should be preempted with status: %#v", p.Name, p.Status))
+gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodSucceeded), "pod: %v should be preempted with status: %#v", p.Name, p.Status)
}
}
})
@@ -125,7 +126,7 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]
framework.ExpectNoError(err)
for _, p := range updatedPodList.Items {
ginkgo.By(fmt.Sprintf("verify that the non-critical pod %q is preempted and has the DisruptionTarget condition", klog.KObj(&p)))
-framework.ExpectEqual(p.Status.Phase, v1.PodSucceeded, fmt.Sprintf("pod: %v should be preempted with status: %#v", p.Name, p.Status))
+gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodSucceeded), "pod: %v should be preempted with status: %#v", p.Name, p.Status)
if condition := e2epod.FindPodConditionByType(&p.Status, v1.DisruptionTarget); condition == nil {
framework.Failf("pod %q should have the condition: %q, pod status: %v", klog.KObj(&p), v1.DisruptionTarget, p.Status)
}
@@ -149,7 +150,7 @@ func getNodeCPUAndMemoryCapacity(ctx context.Context, f *framework.Framework) v1
nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err)
// Assuming that there is only one node, because this is a node e2e test.
-framework.ExpectEqual(len(nodeList.Items), 1)
+gomega.Expect(nodeList.Items).To(gomega.HaveLen(1))
capacity := nodeList.Items[0].Status.Allocatable
return v1.ResourceList{
v1.ResourceCPU: capacity[v1.ResourceCPU],
@@ -161,7 +162,7 @@ func getNodeName(ctx context.Context, f *framework.Framework) string {
nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err)
// Assuming that there is only one node, because this is a node e2e test.
-framework.ExpectEqual(len(nodeList.Items), 1)
+gomega.Expect(nodeList.Items).To(gomega.HaveLen(1))
return nodeList.Items[0].GetName()
}
@@ -190,9 +191,13 @@ func getTestPod(critical bool, name string, resources v1.ResourceRequirements, n
}
pod.Spec.PriorityClassName = scheduling.SystemNodeCritical
-framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), true, "pod should be a critical pod")
+if !kubelettypes.IsCriticalPod(pod) {
+framework.Failf("pod %q should be a critical pod", pod.Name)
+}
} else {
-framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), false, "pod should not be a critical pod")
+if kubelettypes.IsCriticalPod(pod) {
+framework.Failf("pod %q should not be a critical pod", pod.Name)
+}
}
return pod
}

View File

@@ -197,8 +197,8 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
framework.Logf("len(v1alphaPodResources.PodResources):%+v", len(v1alphaPodResources.PodResources))
framework.Logf("len(v1PodResources.PodResources):%+v", len(v1PodResources.PodResources))
-framework.ExpectEqual(len(v1alphaPodResources.PodResources), 2)
-framework.ExpectEqual(len(v1PodResources.PodResources), 2)
+gomega.Expect(v1alphaPodResources.PodResources).To(gomega.HaveLen(2))
+gomega.Expect(v1PodResources.PodResources).To(gomega.HaveLen(2))
var v1alphaResourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
for _, res := range v1alphaPodResources.GetPodResources() {
@@ -217,26 +217,26 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
gomega.Expect(v1alphaResourcesForOurPod).NotTo(gomega.BeNil())
gomega.Expect(v1ResourcesForOurPod).NotTo(gomega.BeNil())
-framework.ExpectEqual(v1alphaResourcesForOurPod.Name, pod1.Name)
-framework.ExpectEqual(v1ResourcesForOurPod.Name, pod1.Name)
+gomega.Expect(v1alphaResourcesForOurPod.Name).To(gomega.Equal(pod1.Name))
+gomega.Expect(v1ResourcesForOurPod.Name).To(gomega.Equal(pod1.Name))
-framework.ExpectEqual(v1alphaResourcesForOurPod.Namespace, pod1.Namespace)
-framework.ExpectEqual(v1ResourcesForOurPod.Namespace, pod1.Namespace)
+gomega.Expect(v1alphaResourcesForOurPod.Namespace).To(gomega.Equal(pod1.Namespace))
+gomega.Expect(v1ResourcesForOurPod.Namespace).To(gomega.Equal(pod1.Namespace))
-framework.ExpectEqual(len(v1alphaResourcesForOurPod.Containers), 1)
-framework.ExpectEqual(len(v1ResourcesForOurPod.Containers), 1)
+gomega.Expect(v1alphaResourcesForOurPod.Containers).To(gomega.HaveLen(1))
+gomega.Expect(v1ResourcesForOurPod.Containers).To(gomega.HaveLen(1))
-framework.ExpectEqual(v1alphaResourcesForOurPod.Containers[0].Name, pod1.Spec.Containers[0].Name)
-framework.ExpectEqual(v1ResourcesForOurPod.Containers[0].Name, pod1.Spec.Containers[0].Name)
+gomega.Expect(v1alphaResourcesForOurPod.Containers[0].Name).To(gomega.Equal(pod1.Spec.Containers[0].Name))
+gomega.Expect(v1ResourcesForOurPod.Containers[0].Name).To(gomega.Equal(pod1.Spec.Containers[0].Name))
-framework.ExpectEqual(len(v1alphaResourcesForOurPod.Containers[0].Devices), 1)
-framework.ExpectEqual(len(v1ResourcesForOurPod.Containers[0].Devices), 1)
+gomega.Expect(v1alphaResourcesForOurPod.Containers[0].Devices).To(gomega.HaveLen(1))
+gomega.Expect(v1ResourcesForOurPod.Containers[0].Devices).To(gomega.HaveLen(1))
-framework.ExpectEqual(v1alphaResourcesForOurPod.Containers[0].Devices[0].ResourceName, SampleDeviceResourceName)
-framework.ExpectEqual(v1ResourcesForOurPod.Containers[0].Devices[0].ResourceName, SampleDeviceResourceName)
+gomega.Expect(v1alphaResourcesForOurPod.Containers[0].Devices[0].ResourceName).To(gomega.Equal(SampleDeviceResourceName))
+gomega.Expect(v1ResourcesForOurPod.Containers[0].Devices[0].ResourceName).To(gomega.Equal(SampleDeviceResourceName))
-framework.ExpectEqual(len(v1alphaResourcesForOurPod.Containers[0].Devices[0].DeviceIds), 1)
-framework.ExpectEqual(len(v1ResourcesForOurPod.Containers[0].Devices[0].DeviceIds), 1)
+gomega.Expect(v1alphaResourcesForOurPod.Containers[0].Devices[0].DeviceIds).To(gomega.HaveLen(1))
+gomega.Expect(v1ResourcesForOurPod.Containers[0].Devices[0].DeviceIds).To(gomega.HaveLen(1))
})
// simulate container restart, while all other involved components (kubelet, device plugin) stay stable. To do so, in the container
@@ -260,7 +260,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
ginkgo.By("Confirming that after a container restart, fake-device assignment is kept")
devIDRestart1, err := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE)
framework.ExpectNoError(err, "getting logs for pod %q", pod1.Name)
-framework.ExpectEqual(devIDRestart1, devID1)
+gomega.Expect(devIDRestart1).To(gomega.Equal(devID1))
// crosscheck from the device assignment is preserved and stable from perspective of the kubelet.
// needs to match the container perspective.
@@ -373,7 +373,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
ginkgo.By("Confirming that after a container restart, fake-device assignment is kept")
devIDRestart1, err := parseLog(ctx, f, pod1.Name, pod1.Name, deviceIDRE)
framework.ExpectNoError(err, "getting logs for pod %q", pod1.Name)
-framework.ExpectEqual(devIDRestart1, devID1)
+gomega.Expect(devIDRestart1).To(gomega.Equal(devID1))
ginkgo.By("Restarting Kubelet")
restartKubelet(true)

View File

@@ -368,7 +368,9 @@ var _ = SIGDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive]
})
ginkgo.BeforeEach(func(ctx context.Context) {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{})
-framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
+if err != nil && !apierrors.IsAlreadyExists(err) {
+framework.ExpectNoError(err, "failed to create priority class")
+}
})
ginkgo.AfterEach(func(ctx context.Context) {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(ctx, highPriorityClassName, metav1.DeleteOptions{})
@@ -426,7 +428,9 @@ var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disru
})
ginkgo.BeforeEach(func(ctx context.Context) {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{})
-framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
+if err != nil && !apierrors.IsAlreadyExists(err) {
+framework.ExpectNoError(err, "failed to create priority class")
+}
})
ginkgo.AfterEach(func(ctx context.Context) {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(ctx, highPriorityClassName, metav1.DeleteOptions{})
@@ -480,7 +484,9 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Disruptive][No
})
ginkgo.BeforeEach(func(ctx context.Context) {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority}, metav1.CreateOptions{})
-framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
+if err != nil && !apierrors.IsAlreadyExists(err) {
+framework.ExpectNoError(err, "failed to create priority class")
+}
})
ginkgo.AfterEach(func(ctx context.Context) {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(ctx, highPriorityClassName, metav1.DeleteOptions{})
@@ -731,7 +737,7 @@ func verifyEvictionOrdering(ctx context.Context, f *framework.Framework, testSpe
}
if priorityPod.Status.Phase == v1.PodFailed {
-framework.ExpectEqual(priorityPod.Status.Reason, eviction.Reason, "pod %s failed; expected Status.Reason to be %s, but got %s",
+gomega.Expect(priorityPod.Status.Reason).To(gomega.Equal(eviction.Reason), "pod %s failed; expected Status.Reason to be %s, but got %s",
priorityPod.Name, eviction.Reason, priorityPod.Status.Reason)
}
@@ -779,41 +785,47 @@ func verifyEvictionEvents(ctx context.Context, f *framework.Framework, testSpecs
}.AsSelector().String()
podEvictEvents, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{FieldSelector: selector})
framework.ExpectNoError(err, "getting events")
-framework.ExpectEqual(len(podEvictEvents.Items), 1, "Expected to find 1 eviction event for pod %s, got %d", pod.Name, len(podEvictEvents.Items))
+gomega.Expect(podEvictEvents.Items).To(gomega.HaveLen(1), "Expected to find 1 eviction event for pod %s, got %d", pod.Name, len(podEvictEvents.Items))
event := podEvictEvents.Items[0]
if expectedStarvedResource != noStarvedResource {
// Check the eviction.StarvedResourceKey
starved, found := event.Annotations[eviction.StarvedResourceKey]
-framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
-pod.Name, expectedStarvedResource)
+if !found {
+framework.Failf("Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
+pod.Name, expectedStarvedResource)
+}
starvedResource := v1.ResourceName(starved)
-framework.ExpectEqual(starvedResource, expectedStarvedResource, "Expected to the starved_resource annotation on pod %s to contain %s, but got %s instead",
+gomega.Expect(starvedResource).To(gomega.Equal(expectedStarvedResource), "Expected to the starved_resource annotation on pod %s to contain %s, but got %s instead",
pod.Name, expectedStarvedResource, starvedResource)
// We only check these keys for memory, because ephemeral storage evictions may be due to volume usage, in which case these values are not present
if expectedStarvedResource == v1.ResourceMemory {
// Check the eviction.OffendingContainersKey
offendersString, found := event.Annotations[eviction.OffendingContainersKey]
-framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
-pod.Name)
+if !found {
+framework.Failf("Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
+pod.Name)
+}
offendingContainers := strings.Split(offendersString, ",")
-framework.ExpectEqual(len(offendingContainers), 1, "Expected to find the offending container's usage in the %s annotation, but no container was found",
+gomega.Expect(offendingContainers).To(gomega.HaveLen(1), "Expected to find the offending container's usage in the %s annotation, but no container was found",
eviction.OffendingContainersKey)
-framework.ExpectEqual(offendingContainers[0], pod.Spec.Containers[0].Name, "Expected to find the offending container: %s's usage in the %s annotation, but found %s instead",
+gomega.Expect(offendingContainers[0]).To(gomega.Equal(pod.Spec.Containers[0].Name), "Expected to find the offending container: %s's usage in the %s annotation, but found %s instead",
pod.Spec.Containers[0].Name, eviction.OffendingContainersKey, offendingContainers[0])
// Check the eviction.OffendingContainersUsageKey
offendingUsageString, found := event.Annotations[eviction.OffendingContainersUsageKey]
-framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the offending containers' usage, but it was not found",
-pod.Name)
+if !found {
+framework.Failf("Expected to find an annotation on the eviction event for pod %s containing the offending containers' usage, but it was not found",
+pod.Name)
+}
offendingContainersUsage := strings.Split(offendingUsageString, ",")
-framework.ExpectEqual(len(offendingContainersUsage), 1, "Expected to find the offending container's usage in the %s annotation, but found %+v",
+gomega.Expect(offendingContainersUsage).To(gomega.HaveLen(1), "Expected to find the offending container's usage in the %s annotation, but found %+v",
eviction.OffendingContainersUsageKey, offendingContainersUsage)
usageQuantity, err := resource.ParseQuantity(offendingContainersUsage[0])
framework.ExpectNoError(err, "parsing pod %s's %s annotation as a quantity", pod.Name, eviction.OffendingContainersUsageKey)
request := pod.Spec.Containers[0].Resources.Requests[starvedResource]
-framework.ExpectEqual(usageQuantity.Cmp(request), 1, "Expected usage of offending container: %s in pod %s to exceed its request %s",
+gomega.Expect(usageQuantity.Cmp(request)).To(gomega.Equal(1), "Expected usage of offending container: %s in pod %s to exceed its request %s",
usageQuantity.String(), pod.Name, request.String())
}
}

View File

@@ -214,9 +214,7 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H
framework.ExpectNoError(err, "while getting node status")
ginkgo.By("Verifying that the node now supports huge pages with size 3Mi")
-value, ok := node.Status.Capacity["hugepages-3Mi"]
-framework.ExpectEqual(ok, true, "capacity should contain resource hugepages-3Mi")
-framework.ExpectEqual(value.String(), "9Mi", "huge pages with size 3Mi should be supported")
+gomega.Expect(node.Status.Capacity).To(gomega.HaveKeyWithValue("hugepages-3Mi", resource.MustParse("9Mi")), "capacity should contain resource hugepages-3Mi and huge pages with size 3Mi should be supported")
ginkgo.By("restarting the node and verifying that huge pages with size 3Mi are not supported")
restartKubelet(true)

View File

@@ -29,6 +29,7 @@ import (
"strconv"
"time"
+"github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/wait"
kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
"k8s.io/kubernetes/pkg/cluster/ports"
@@ -139,7 +140,7 @@ func pollConfigz(ctx context.Context, timeout time.Duration, pollInterval time.D
output := string(buf[:n])
proxyRegexp := regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
match := proxyRegexp.FindStringSubmatch(output)
-framework.ExpectEqual(len(match), 2)
+gomega.Expect(match).To(gomega.HaveLen(2))
port, err := strconv.Atoi(match[1])
framework.ExpectNoError(err)
framework.Logf("http requesting node kubelet /configz")

View File

@@ -102,8 +102,8 @@ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() {
ginkgo.By("check the mirror pod container image is updated")
pod, err = f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(len(pod.Spec.Containers), 1)
-framework.ExpectEqual(pod.Spec.Containers[0].Image, image)
+gomega.Expect(pod.Spec.Containers).To(gomega.HaveLen(1))
+gomega.Expect(pod.Spec.Containers[0].Image).To(gomega.Equal(image))
})
ginkgo.It("should update a static pod when the static pod is updated multiple times during the graceful termination period [NodeConformance]", func(ctx context.Context) {
@@ -131,8 +131,8 @@ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() {
ginkgo.By("check the mirror pod container image is updated")
pod, err = f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(len(pod.Spec.Containers), 1)
-framework.ExpectEqual(pod.Spec.Containers[0].Image, image)
+gomega.Expect(pod.Spec.Containers).To(gomega.HaveLen(1))
+gomega.Expect(pod.Spec.Containers[0].Image).To(gomega.Equal(image))
})
ginkgo.Context("and the container runtime is temporarily down during pod termination [NodeConformance] [Serial] [Disruptive]", func() {

View File

@@ -89,8 +89,8 @@ var _ = SIGDescribe("MirrorPod", func() {
ginkgo.By("check the mirror pod container image is updated")
pod, err = f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
-framework.ExpectEqual(len(pod.Spec.Containers), 1)
-framework.ExpectEqual(pod.Spec.Containers[0].Image, image)
+gomega.Expect(pod.Spec.Containers).To(gomega.HaveLen(1))
+gomega.Expect(pod.Spec.Containers[0].Image).To(gomega.Equal(image))
})
/*
Release: v1.9

View File

@@ -150,7 +150,9 @@ func runPodReadyConditionsTest(f *framework.Framework, hasInitContainers, checkP
framework.ExpectNoError(err)
isReady, err := testutils.PodRunningReady(p)
framework.ExpectNoError(err)
-framework.ExpectEqual(isReady, true, "pod should be ready")
+if !isReady {
+framework.Failf("pod %q should be ready", p.Name)
+}
ginkgo.By("checking order of pod condition transitions for a pod with no container/sandbox restarts")

View File

@@ -146,7 +146,7 @@ var _ = common.SIGDescribe("Dual Stack Host IP [Feature:PodHostIPs]", func() {
gomega.Expect(p.Status.HostIPs).ShouldNot(gomega.BeNil())
// validate first ip in HostIPs is same as HostIP
-framework.ExpectEqual(p.Status.HostIP, p.Status.HostIPs[0].IP)
+gomega.Expect(p.Status.HostIP).Should(gomega.Equal(p.Status.HostIPs[0].IP))
if len(p.Status.HostIPs) > 1 {
// assert 2 host ips belong to different families
if netutils.IsIPv4String(p.Status.HostIPs[0].IP) == netutils.IsIPv4String(p.Status.HostIPs[1].IP) {
@@ -202,7 +202,7 @@ var _ = common.SIGDescribe("Dual Stack Host IP [Feature:PodHostIPs]", func() {
gomega.Expect(p.Status.HostIPs).ShouldNot(gomega.BeNil())
// validate first ip in HostIPs is same as HostIP
-framework.ExpectEqual(p.Status.HostIP, p.Status.HostIPs[0].IP)
+gomega.Expect(p.Status.HostIP).Should(gomega.Equal(p.Status.HostIPs[0].IP))
if len(p.Status.HostIPs) > 1 {
// assert 2 host ips belong to different families
if netutils.IsIPv4String(p.Status.HostIPs[0].IP) == netutils.IsIPv4String(p.Status.HostIPs[1].IP) {

View File

@@ -40,6 +40,7 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo/v2"
+"github.com/onsi/gomega"
)
func generatePodName(base string) string {
@@ -143,7 +144,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() {
hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", pod.ObjectMeta.Name, subdomain, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
// Fail if FQDN is longer than 64 characters, otherwise the Pod will remain pending until test timeout.
// In Linux, 64 characters is the limit of the hostname kernel field, which this test sets to the pod FQDN.
-framework.ExpectEqual(len(hostFQDN) < 65, true, fmt.Sprintf("The FQDN of the Pod cannot be longer than 64 characters, requested %s which is %d characters long.", hostFQDN, len(hostFQDN)))
+gomega.Expect(len(hostFQDN)).Should(gomega.BeNumerically("<=", 64), "The FQDN of the Pod cannot be longer than 64 characters, requested %s which is %d characters long.", hostFQDN, len(hostFQDN))
output := []string{fmt.Sprintf("%s;%s;", hostFQDN, hostFQDN)}
// Create Pod
e2eoutput.TestContainerOutput(ctx, f, "fqdn and fqdn", pod, 0, output)

View File

@@ -566,7 +566,7 @@ func podresourcesGetAllocatableResourcesTests(ctx context.Context, cli kubeletpo
ginkgo.By(fmt.Sprintf("expecting some %q devices reported", sd.resourceName))
gomega.ExpectWithOffset(1, devs).ToNot(gomega.BeEmpty())
for _, dev := range devs {
-framework.ExpectEqual(dev.ResourceName, sd.resourceName)
+gomega.Expect(dev.ResourceName).To(gomega.Equal(sd.resourceName))
gomega.ExpectWithOffset(1, dev.DeviceIds).ToNot(gomega.BeEmpty())
}
}
@@ -859,8 +859,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
if dev.ResourceName != defaultTopologyUnawareResourceName {
continue
}
-framework.ExpectEqual(dev.Topology == nil, true, "Topology is expected to be empty for topology unaware resources")
+gomega.Expect(dev.Topology).To(gomega.BeNil(), "Topology is expected to be empty for topology unaware resources")
}
desc := podDesc{

View File

@@ -79,7 +79,7 @@ var _ = SIGDescribe("Pod SIGKILL [LinuxOnly] [NodeConformance]", func() {
gomega.Expect(containerStatus.State.Terminated.ExitCode).Should(gomega.Equal(int32(137)))
ginkgo.By(fmt.Sprintf("Verify exit reason of the pod (%v/%v) container", f.Namespace.Name, podSpec.Name))
-framework.ExpectEqual(containerStatus.State.Terminated.Reason, "Error", fmt.Sprintf("Container terminated by sigkill expect Error but got %v", containerStatus.State.Terminated.Reason))
+gomega.Expect(containerStatus.State.Terminated.Reason).Should(gomega.Equal("Error"), "Container terminated by sigkill expect Error but got %v", containerStatus.State.Terminated.Reason)
})
ginkgo.AfterEach(func() {

View File

@@ -279,7 +279,7 @@ func logNodeEvents(ctx context.Context, f *framework.Framework) {
func getLocalNode(ctx context.Context, f *framework.Framework) *v1.Node {
nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
framework.ExpectNoError(err)
-framework.ExpectEqual(len(nodeList.Items), 1, "Unexpected number of node objects for node e2e. Expects only one node.")
+gomega.Expect(nodeList.Items).Should(gomega.HaveLen(1), "Unexpected number of node objects for node e2e. Expects only one node.")
return &nodeList.Items[0]
}
@@ -375,7 +375,7 @@ func findContainerRuntimeServiceName() (string, error) {
runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
framework.ExpectNoError(err, "failed to get list of container runtime pids")
-framework.ExpectEqual(len(runtimePids), 1, "Unexpected number of container runtime pids. Expected 1 but got %v", len(runtimePids))
+gomega.Expect(runtimePids).To(gomega.HaveLen(1), "Unexpected number of container runtime pids. Expected 1 but got %v", len(runtimePids))
containerRuntimePid := runtimePids[0]
@@ -418,7 +418,7 @@ func performContainerRuntimeUnitOp(op containerRuntimeUnitOp) error {
framework.ExpectNoError(err, "dbus connection error")
job := <-reschan
-framework.ExpectEqual(job, "done", "Expected job to complete with done")
+gomega.Expect(job).To(gomega.Equal("done"), "Expected job to complete with done")
return nil
}