Revert "Use ExpectEqual test/e2e_node"

This reverts commit 561ee6ece9.
Lantao Liu 2019-12-04 18:14:13 -08:00
parent e8bc121341
commit 32850dc47d
8 changed files with 29 additions and 26 deletions
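
Note for readers skimming the hunks below: the revert swaps mechanical framework.ExpectEqual/ExpectNotEqual assertions back to the original gomega matchers. The standalone sketch that follows is illustrative only and not part of the commit; it contrasts the two assertion directions visible in the AppArmor hunk, where the reinstated matcher requires a non-zero exit code rather than equality with zero. The test name and exit-code values are hypothetical; the matchers come from github.com/onsi/gomega, which this diff re-imports.

// assertion_direction_sketch_test.go -- illustrative only, not part of this commit.
// Contrasts the equality check used by the reverted commit with the negated
// matcher this revert restores in the AppArmor write-blocking test.
package example

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestAssertionDirectionSketch(t *testing.T) {
	g := gomega.NewGomegaWithT(t)

	// Reverted style checked the exit code against zero:
	//   framework.ExpectEqual(state.ExitCode, 0, ...)
	g.Expect(int32(0)).To(gomega.BeZero())

	// Restored style asserts the opposite direction: the container under a
	// write-blocking profile must terminate with a non-zero exit code.
	g.Expect(int32(1)).To(gomega.Not(gomega.BeZero()))
}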


@@ -27,7 +27,7 @@ import (
 "strconv"
 "strings"
-v1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime/schema"
@@ -63,8 +63,9 @@ var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor
 return
 }
 state := status.ContainerStatuses[0].State.Terminated
-framework.ExpectNotEqual(state, nil, "ContainerState: %+v", status.ContainerStatuses[0].State)
-framework.ExpectEqual(state.ExitCode, 0, "ContainerStateTerminated: %+v", state)
+gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
+gomega.Expect(state.ExitCode).To(gomega.Not(gomega.BeZero()), "ContainerStateTerminated: %+v", state)
 })
 ginkgo.It("should enforce a permissive profile", func() {
 status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"audit-write")
@@ -73,8 +74,8 @@ var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor
 return
 }
 state := status.ContainerStatuses[0].State.Terminated
-framework.ExpectNotEqual(state, nil, "ContainerState: %+v", status.ContainerStatuses[0].State)
-framework.ExpectEqual(state.ExitCode, 0, "ContainerStateTerminated: %+v", state)
+gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
+gomega.Expect(state.ExitCode).To(gomega.BeZero(), "ContainerStateTerminated: %+v", state)
 })
 })
 } else {


@@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 ginkgo.Context("once the node is setup", func() {
 ginkgo.It("container runtime's oom-score-adj should be -999", func() {
 runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
-framework.ExpectEqual(err, nil, "failed to get list of container runtime pids")
+gomega.Expect(err).To(gomega.BeNil(), "failed to get list of container runtime pids")
 for _, pid := range runtimePids {
 gomega.Eventually(func() error {
 return validateOOMScoreAdjSetting(pid, -999)
@@ -88,7 +88,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 })
 ginkgo.It("Kubelet's oom-score-adj should be -999", func() {
 kubeletPids, err := getPidsForProcess(kubeletProcessName, "")
-framework.ExpectEqual(err, nil, "failed to get list of kubelet pids")
+gomega.Expect(err).To(gomega.BeNil(), "failed to get list of kubelet pids")
 framework.ExpectEqual(len(kubeletPids), 1, "expected only one kubelet process; found %d", len(kubeletPids))
 gomega.Eventually(func() error {
 return validateOOMScoreAdjSetting(kubeletPids[0], -999)
@@ -100,7 +100,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 // created before this test, and may not be infra
 // containers. They should be excluded from the test.
 existingPausePIDs, err := getPidsForProcess("pause", "")
-framework.ExpectEqual(err, nil, "failed to list all pause processes on the node")
+gomega.Expect(err).To(gomega.BeNil(), "failed to list all pause processes on the node")
 existingPausePIDSet := sets.NewInt(existingPausePIDs...)
 podClient := f.PodClient()


@@ -32,6 +32,7 @@ import (
 imageutils "k8s.io/kubernetes/test/utils/image"
 "github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 )
 const (
@@ -85,7 +86,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
 })
 _, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(systemCriticalPriority)
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true, "failed to create PriorityClasses with an error: %v", err)
+gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue(), "failed to create PriorityClasses with an error: %v", err)
 // Create pods, starting with non-critical so that the critical preempts the other pods.
 f.PodClient().CreateBatch([]*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed})
@@ -156,9 +157,9 @@ func getTestPod(critical bool, name string, resources v1.ResourceRequirements) *
 pod.Spec.PriorityClassName = systemCriticalPriorityName
 pod.Spec.Priority = &value
-framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), true, "pod should be a critical pod")
+gomega.Expect(kubelettypes.IsCriticalPod(pod)).To(gomega.BeTrue(), "pod should be a critical pod")
 } else {
-framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), false, "pod should not be a critical pod")
+gomega.Expect(kubelettypes.IsCriticalPod(pod)).To(gomega.BeFalse(), "pod should not be a critical pod")
 }
 return pod
 }


@@ -22,7 +22,7 @@ import (
 "regexp"
-v1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
@@ -97,12 +97,12 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
 deviceIDRE := "stub devices: (Dev-[0-9]+)"
 devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
-framework.ExpectNotEqual(devID1, "")
+gomega.Expect(devID1).To(gomega.Not(gomega.Equal("")))
 podResources, err := getNodeDevices()
 var resourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
 framework.Logf("pod resources %v", podResources)
-framework.ExpectEqual(err, nil)
+gomega.Expect(err).To(gomega.BeNil())
 framework.ExpectEqual(len(podResources.PodResources), 2)
 for _, res := range podResources.GetPodResources() {
 if res.Name == pod1.Name {
@@ -110,7 +110,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 }
 }
 framework.Logf("resourcesForOurPod %v", resourcesForOurPod)
-framework.ExpectNotEqual(resourcesForOurPod, nil)
+gomega.Expect(resourcesForOurPod).NotTo(gomega.BeNil())
 framework.ExpectEqual(resourcesForOurPod.Name, pod1.Name)
 framework.ExpectEqual(resourcesForOurPod.Namespace, pod1.Namespace)
 framework.ExpectEqual(len(resourcesForOurPod.Containers), 1)
@@ -181,7 +181,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 ginkgo.By("Checking that pod got a different fake device")
 devID2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
-framework.ExpectNotEqual(devID1, devID2)
+gomega.Expect(devID1).To(gomega.Not(gomega.Equal(devID2)))
 ginkgo.By("By deleting the pods and waiting for container removal")
 err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions)


@@ -307,7 +307,7 @@ func getNode(c *clientset.Clientset) (*v1.Node, error) {
 if nodes == nil {
 return nil, fmt.Errorf("the node list is nil")
 }
-framework.ExpectNotEqual(len(nodes.Items) > 1, true, "the number of nodes is more than 1.")
+gomega.Expect(len(nodes.Items) > 1).NotTo(gomega.BeTrue(), "the number of nodes is more than 1.")
 if len(nodes.Items) == 0 {
 return nil, fmt.Errorf("empty node list: %+v", nodes)
 }


@@ -23,7 +23,7 @@ import (
 "strings"
 "time"
-v1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 schedulingv1 "k8s.io/api/scheduling/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/resource"
@@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
 })
 ginkgo.BeforeEach(func() {
 _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
 })
 ginkgo.AfterEach(func() {
 err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -359,7 +359,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
 })
 ginkgo.BeforeEach(func() {
 _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
 })
 ginkgo.AfterEach(func() {
 err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -412,7 +412,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis
 })
 ginkgo.BeforeEach(func() {
 _, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
 })
 ginkgo.AfterEach(func() {
 err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -661,7 +661,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
 if expectedStarvedResource != noStarvedResource {
 // Check the eviction.StarvedResourceKey
 starved, found := event.Annotations[eviction.StarvedResourceKey]
-framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
+gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
 pod.Name, expectedStarvedResource)
 starvedResource := v1.ResourceName(starved)
 framework.ExpectEqual(starvedResource, expectedStarvedResource, "Expected to the starved_resource annotation on pod %s to contain %s, but got %s instead",
@@ -671,7 +671,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
 if expectedStarvedResource == v1.ResourceMemory {
 // Check the eviction.OffendingContainersKey
 offendersString, found := event.Annotations[eviction.OffendingContainersKey]
-framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
+gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
 pod.Name)
 offendingContainers := strings.Split(offendersString, ",")
 framework.ExpectEqual(len(offendingContainers), 1, "Expected to find the offending container's usage in the %s annotation, but no container was found",


@@ -24,7 +24,7 @@ import (
 "path"
 "time"
-v1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/fields"
 "k8s.io/apimachinery/pkg/labels"
@@ -104,7 +104,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
 nodeTime = time.Now()
 bootTime, err = util.GetBootTime()
-framework.ExpectEqual(err, nil)
+gomega.Expect(err).To(gomega.BeNil())
 // Set lookback duration longer than node up time.
 // Assume the test won't take more than 1 hour, in fact it usually only takes 90 seconds.


@@ -30,6 +30,7 @@ import (
 imageutils "k8s.io/kubernetes/test/utils/image"
 "github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 )
 const (
@@ -178,7 +179,7 @@ var _ = framework.KubeDescribe("StartupProbe [Serial] [Disruptive] [NodeAlphaFea
 isReady, err := testutils.PodRunningReady(p)
 framework.ExpectNoError(err)
-framework.ExpectEqual(isReady, true, "pod should be ready")
+gomega.Expect(isReady).To(gomega.BeTrue(), "pod should be ready")
 // We assume the pod became ready when the container became ready. This
 // is true for a single container pod.