test/e2e_node/: use framework.ExpectEqual() instead of gomega.Expect(bool).To(gomega.BeTrue(), explain)
commit 35b0f1f7dd (parent eef4c00ae9)
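For context: framework.ExpectEqual in the e2e test framework effectively wraps gomega.Expect(actual).To(gomega.Equal(expected), explain...), so the rewritten assertions report both the actual and the expected value on failure instead of a bare "expected true", and files that no longer call gomega directly can drop the import (as the two import hunks below do). The sketch below is illustrative only and is not part of this diff; the expectEqual helper, the test name, and the ready variable are hypothetical, and it assumes only that github.com/onsi/gomega is available.

package example

import (
	"testing"

	"github.com/onsi/gomega"
)

// expectEqual mimics the shape of the e2e framework.ExpectEqual helper used in
// this commit (a hypothetical stand-in, not the real framework function): it
// asserts actual == expected through gomega's Equal matcher, so a failure shows
// both values rather than just "expected true, got false".
func expectEqual(t *testing.T, actual, expected interface{}, explain ...interface{}) {
	gomega.NewWithT(t).Expect(actual).To(gomega.Equal(expected), explain...)
}

func TestReadyAssertionStyles(t *testing.T) {
	ready := true // hypothetical value, e.g. an init container's Ready status

	// Old style, as on the left-hand side of the hunks below: a bare boolean matcher.
	gomega.NewWithT(t).Expect(ready).To(gomega.BeTrue(), "container should be ready")

	// New style, as on the right-hand side: an equality assertion with an explanation.
	expectEqual(t, ready, true, "container should be ready")
}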
@@ -215,7 +215,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
 
 framework.ExpectEqual(len(endPod.Status.InitContainerStatuses), 2)
 for _, status := range endPod.Status.InitContainerStatuses {
-gomega.Expect(status.Ready).To(gomega.BeTrue())
+framework.ExpectEqual(status.Ready, true)
 gomega.Expect(status.State.Terminated).NotTo(gomega.BeNil())
 gomega.Expect(status.State.Terminated.ExitCode).To(gomega.BeZero())
 }
@@ -285,7 +285,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
 
 framework.ExpectEqual(len(endPod.Status.InitContainerStatuses), 2)
 for _, status := range endPod.Status.InitContainerStatuses {
-gomega.Expect(status.Ready).To(gomega.BeTrue())
+framework.ExpectEqual(status.Ready, true)
 gomega.Expect(status.State.Terminated).NotTo(gomega.BeNil())
 gomega.Expect(status.State.Terminated.ExitCode).To(gomega.BeZero())
 }
@@ -153,7 +153,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
 framework.ExpectNoError(err)
 
 ginkgo.By("Verify Disk Format")
-gomega.Expect(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(gomega.BeTrue(), "DiskFormat Verification Failed")
+framework.ExpectEqual(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat), true, "DiskFormat Verification Failed")
 
 var volumePaths []string
 volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath)
@@ -32,7 +32,6 @@ import (
 imageutils "k8s.io/kubernetes/test/utils/image"
 
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 )
 
 const (
@@ -86,7 +85,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
 })
 
 _, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(systemCriticalPriority)
-gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue(), "failed to create PriorityClasses with an error: %v", err)
+framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true, "failed to create PriorityClasses with an error: %v", err)
 
 // Create pods, starting with non-critical so that the critical preempts the other pods.
 f.PodClient().CreateBatch([]*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed})
@@ -157,9 +156,9 @@ func getTestPod(critical bool, name string, resources v1.ResourceRequirements) *
 pod.Spec.PriorityClassName = systemCriticalPriorityName
 pod.Spec.Priority = &value
 
-gomega.Expect(kubelettypes.IsCriticalPod(pod)).To(gomega.BeTrue(), "pod should be a critical pod")
+framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), true, "pod should be a critical pod")
 } else {
-gomega.Expect(kubelettypes.IsCriticalPod(pod)).To(gomega.BeFalse(), "pod should not be a critical pod")
+framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), false, "pod should not be a critical pod")
 }
 return pod
 }
@@ -307,7 +307,7 @@ func getNode(c *clientset.Clientset) (*v1.Node, error) {
 if nodes == nil {
 return nil, fmt.Errorf("the node list is nil")
 }
-gomega.Expect(len(nodes.Items) > 1).NotTo(gomega.BeTrue(), "the number of nodes is more than 1.")
+framework.ExpectEqual(len(nodes.Items) > 1, false, "the number of nodes is more than 1.")
 if len(nodes.Items) == 0 {
 return nil, fmt.Errorf("empty node list: %+v", nodes)
 }
@@ -23,7 +23,7 @@ import (
 "strings"
 "time"
 
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 schedulingv1 "k8s.io/api/scheduling/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/resource"
@@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
 })
 ginkgo.BeforeEach(func() {
 _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
+framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
 })
 ginkgo.AfterEach(func() {
 err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -359,7 +359,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
 })
 ginkgo.BeforeEach(func() {
 _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
+framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
 })
 ginkgo.AfterEach(func() {
 err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -412,7 +412,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis
 })
 ginkgo.BeforeEach(func() {
 _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
+framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
 })
 ginkgo.AfterEach(func() {
 err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -661,7 +661,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
 if expectedStarvedResource != noStarvedResource {
 // Check the eviction.StarvedResourceKey
 starved, found := event.Annotations[eviction.StarvedResourceKey]
-gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
+framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
 pod.Name, expectedStarvedResource)
 starvedResource := v1.ResourceName(starved)
 framework.ExpectEqual(starvedResource, expectedStarvedResource, "Expected to the starved_resource annotation on pod %s to contain %s, but got %s instead",
@@ -671,7 +671,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
 if expectedStarvedResource == v1.ResourceMemory {
 // Check the eviction.OffendingContainersKey
 offendersString, found := event.Annotations[eviction.OffendingContainersKey]
-gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
+framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
 pod.Name)
 offendingContainers := strings.Split(offendersString, ",")
 framework.ExpectEqual(len(offendingContainers), 1, "Expected to find the offending container's usage in the %s annotation, but no container was found",
@@ -30,7 +30,6 @@ import (
 imageutils "k8s.io/kubernetes/test/utils/image"
 
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 )
 
 const (
@@ -179,7 +178,7 @@ var _ = framework.KubeDescribe("StartupProbe [Serial] [Disruptive] [NodeAlphaFea
 
 isReady, err := testutils.PodRunningReady(p)
 framework.ExpectNoError(err)
-gomega.Expect(isReady).To(gomega.BeTrue(), "pod should be ready")
+framework.ExpectEqual(isReady, true, "pod should be ready")
 
 // We assume the pod became ready when the container became ready. This
 // is true for a single container pod.