diff --git a/test/e2e_node/apparmor_test.go b/test/e2e_node/apparmor_test.go
index 49c64f2e9ea..053fb59d152 100644
--- a/test/e2e_node/apparmor_test.go
+++ b/test/e2e_node/apparmor_test.go
@@ -27,7 +27,7 @@ import (
 	"strconv"
 	"strings"

-	v1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -63,8 +63,9 @@ var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor
 			return
 		}
 		state := status.ContainerStatuses[0].State.Terminated
-		framework.ExpectNotEqual(state, nil, "ContainerState: %+v", status.ContainerStatuses[0].State)
-		framework.ExpectEqual(state.ExitCode, 0, "ContainerStateTerminated: %+v", state)
+		gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
+		gomega.Expect(state.ExitCode).To(gomega.Not(gomega.BeZero()), "ContainerStateTerminated: %+v", state)
+
 	})
 	ginkgo.It("should enforce a permissive profile", func() {
 		status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"audit-write")
@@ -73,8 +74,8 @@
 			return
 		}
 		state := status.ContainerStatuses[0].State.Terminated
-		framework.ExpectNotEqual(state, nil, "ContainerState: %+v", status.ContainerStatuses[0].State)
-		framework.ExpectEqual(state.ExitCode, 0, "ContainerStateTerminated: %+v", state)
+		gomega.Expect(state).ToNot(gomega.BeNil(), "ContainerState: %+v", status.ContainerStatuses[0].State)
+		gomega.Expect(state.ExitCode).To(gomega.BeZero(), "ContainerStateTerminated: %+v", state)
 	})
 })
 } else {
diff --git a/test/e2e_node/container_manager_test.go b/test/e2e_node/container_manager_test.go
index 27c27038940..3cec8138657 100644
--- a/test/e2e_node/container_manager_test.go
+++ b/test/e2e_node/container_manager_test.go
@@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 	ginkgo.Context("once the node is setup", func() {
 		ginkgo.It("container runtime's oom-score-adj should be -999", func() {
 			runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
-			framework.ExpectEqual(err, nil, "failed to get list of container runtime pids")
+			gomega.Expect(err).To(gomega.BeNil(), "failed to get list of container runtime pids")
 			for _, pid := range runtimePids {
 				gomega.Eventually(func() error {
 					return validateOOMScoreAdjSetting(pid, -999)
@@ -88,7 +88,7 @@
 		})
 		ginkgo.It("Kubelet's oom-score-adj should be -999", func() {
 			kubeletPids, err := getPidsForProcess(kubeletProcessName, "")
-			framework.ExpectEqual(err, nil, "failed to get list of kubelet pids")
+			gomega.Expect(err).To(gomega.BeNil(), "failed to get list of kubelet pids")
 			framework.ExpectEqual(len(kubeletPids), 1, "expected only one kubelet process; found %d", len(kubeletPids))
 			gomega.Eventually(func() error {
 				return validateOOMScoreAdjSetting(kubeletPids[0], -999)
@@ -100,7 +100,7 @@
 			// created before this test, and may not be infra
 			// containers. They should be excluded from the test.
 			existingPausePIDs, err := getPidsForProcess("pause", "")
-			framework.ExpectEqual(err, nil, "failed to list all pause processes on the node")
+			gomega.Expect(err).To(gomega.BeNil(), "failed to list all pause processes on the node")
 			existingPausePIDSet := sets.NewInt(existingPausePIDs...)

 			podClient := f.PodClient()
diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go
index 1670a5abf76..f374e4e6440 100644
--- a/test/e2e_node/critical_pod_test.go
+++ b/test/e2e_node/critical_pod_test.go
@@ -32,6 +32,7 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"

 	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 )

 const (
@@ -85,7 +86,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
 	})

 	_, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(systemCriticalPriority)
-	framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true, "failed to create PriorityClasses with an error: %v", err)
+	gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue(), "failed to create PriorityClasses with an error: %v", err)

 	// Create pods, starting with non-critical so that the critical preempts the other pods.
 	f.PodClient().CreateBatch([]*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed})
@@ -156,9 +157,9 @@ func getTestPod(critical bool, name string, resources v1.ResourceRequirements) *
 		pod.Spec.PriorityClassName = systemCriticalPriorityName
 		pod.Spec.Priority = &value

-		framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), true, "pod should be a critical pod")
+		gomega.Expect(kubelettypes.IsCriticalPod(pod)).To(gomega.BeTrue(), "pod should be a critical pod")
 	} else {
-		framework.ExpectEqual(kubelettypes.IsCriticalPod(pod), false, "pod should not be a critical pod")
+		gomega.Expect(kubelettypes.IsCriticalPod(pod)).To(gomega.BeFalse(), "pod should not be a critical pod")
 	}
 	return pod
 }
diff --git a/test/e2e_node/device_plugin_test.go b/test/e2e_node/device_plugin_test.go
index 188f97088b8..7cf4a934023 100644
--- a/test/e2e_node/device_plugin_test.go
+++ b/test/e2e_node/device_plugin_test.go
@@ -22,7 +22,7 @@ import (
 	"regexp"

-	v1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
@@ -97,12 +97,12 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 	pod1 := f.PodClient().CreateSync(makeBusyboxPod(resourceName, podRECMD))
 	deviceIDRE := "stub devices: (Dev-[0-9]+)"
 	devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
-	framework.ExpectNotEqual(devID1, "")
+	gomega.Expect(devID1).To(gomega.Not(gomega.Equal("")))

 	podResources, err := getNodeDevices()
 	var resourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
 	framework.Logf("pod resources %v", podResources)
-	framework.ExpectEqual(err, nil)
+	gomega.Expect(err).To(gomega.BeNil())
 	framework.ExpectEqual(len(podResources.PodResources), 2)
 	for _, res := range podResources.GetPodResources() {
 		if res.Name == pod1.Name {
@@ -110,7 +110,7 @@
 		}
 	}
 	framework.Logf("resourcesForOurPod %v", resourcesForOurPod)
-	framework.ExpectNotEqual(resourcesForOurPod, nil)
+	gomega.Expect(resourcesForOurPod).NotTo(gomega.BeNil())
 	framework.ExpectEqual(resourcesForOurPod.Name, pod1.Name)
 	framework.ExpectEqual(resourcesForOurPod.Namespace, pod1.Namespace)
 	framework.ExpectEqual(len(resourcesForOurPod.Containers), 1)
@@ -181,7 +181,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {

 	ginkgo.By("Checking that pod got a different fake device")
 	devID2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
-	framework.ExpectNotEqual(devID1, devID2)
+	gomega.Expect(devID1).To(gomega.Not(gomega.Equal(devID2)))

 	ginkgo.By("By deleting the pods and waiting for container removal")
 	err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(dp.Name, &deleteOptions)
diff --git a/test/e2e_node/e2e_node_suite_test.go b/test/e2e_node/e2e_node_suite_test.go
index 44176d532e0..0a34fba0402 100644
--- a/test/e2e_node/e2e_node_suite_test.go
+++ b/test/e2e_node/e2e_node_suite_test.go
@@ -307,7 +307,7 @@ func getNode(c *clientset.Clientset) (*v1.Node, error) {
 	if nodes == nil {
 		return nil, fmt.Errorf("the node list is nil")
 	}
-	framework.ExpectNotEqual(len(nodes.Items) > 1, true, "the number of nodes is more than 1.")
+	gomega.Expect(len(nodes.Items) > 1).NotTo(gomega.BeTrue(), "the number of nodes is more than 1.")
 	if len(nodes.Items) == 0 {
 		return nil, fmt.Errorf("empty node list: %+v", nodes)
 	}
diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go
index d398b491c28..cd6e8dac3ff 100644
--- a/test/e2e_node/eviction_test.go
+++ b/test/e2e_node/eviction_test.go
@@ -23,7 +23,7 @@ import (
 	"strings"
 	"time"

-	v1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
 	schedulingv1 "k8s.io/api/scheduling/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
 	})
 	ginkgo.BeforeEach(func() {
 		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-		framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+		gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
 	})
 	ginkgo.AfterEach(func() {
 		err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -359,7 +359,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
 	})
 	ginkgo.BeforeEach(func() {
 		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-		framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+		gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
 	})
 	ginkgo.AfterEach(func() {
 		err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -412,7 +412,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis
 	})
 	ginkgo.BeforeEach(func() {
 		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
-		framework.ExpectEqual(err == nil || errors.IsAlreadyExists(err), true)
+		gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())
 	})
 	ginkgo.AfterEach(func() {
 		err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
@@ -661,7 +661,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
 				if expectedStarvedResource != noStarvedResource {
 					// Check the eviction.StarvedResourceKey
 					starved, found := event.Annotations[eviction.StarvedResourceKey]
-					framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
+					gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
 						pod.Name, expectedStarvedResource)
 					starvedResource := v1.ResourceName(starved)
 					framework.ExpectEqual(starvedResource, expectedStarvedResource, "Expected to the starved_resource annotation on pod %s to contain %s, but got %s instead",
@@ -671,7 +671,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
 				if expectedStarvedResource == v1.ResourceMemory {
 					// Check the eviction.OffendingContainersKey
 					offendersString, found := event.Annotations[eviction.OffendingContainersKey]
-					framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
+					gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
 						pod.Name)
 					offendingContainers := strings.Split(offendersString, ",")
 					framework.ExpectEqual(len(offendingContainers), 1, "Expected to find the offending container's usage in the %s annotation, but no container was found",
diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go
index 0b0ba434136..5c84248e385 100644
--- a/test/e2e_node/node_problem_detector_linux.go
+++ b/test/e2e_node/node_problem_detector_linux.go
@@ -24,7 +24,7 @@ import (
 	"path"
 	"time"

-	v1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
@@ -104,7 +104,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete

 		nodeTime = time.Now()
 		bootTime, err = util.GetBootTime()
-		framework.ExpectEqual(err, nil)
+		gomega.Expect(err).To(gomega.BeNil())

 		// Set lookback duration longer than node up time.
 		// Assume the test won't take more than 1 hour, in fact it usually only takes 90 seconds.
diff --git a/test/e2e_node/startup_probe_test.go b/test/e2e_node/startup_probe_test.go
index 336bbd5f141..2ff7cad0b4f 100644
--- a/test/e2e_node/startup_probe_test.go
+++ b/test/e2e_node/startup_probe_test.go
@@ -30,6 +30,7 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"

 	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 )

 const (
@@ -178,7 +179,7 @@ var _ = framework.KubeDescribe("StartupProbe [Serial] [Disruptive] [NodeAlphaFea

 			isReady, err := testutils.PodRunningReady(p)
 			framework.ExpectNoError(err)
-			framework.ExpectEqual(isReady, true, "pod should be ready")
+			gomega.Expect(isReady).To(gomega.BeTrue(), "pod should be ready")

 			// We assume the pod became ready when the container became ready. This
 			// is true for a single container pod.
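
Not part of the patch: the snippet below is a minimal standalone sketch of the gomega matcher patterns this diff switches to (BeNil, BeTrue/BeFalse, BeZero, Not(Equal)). It uses gomega.NewWithT so it runs under plain `go test`; variable names and values are illustrative only, not taken from the e2e suite.

```go
package example

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestGomegaMatcherPatterns(t *testing.T) {
	// In the e2e suite the package-level gomega.Expect is used directly because
	// gomega.RegisterFailHandler(ginkgo.Fail) is called once for the whole suite;
	// NewWithT is the standalone equivalent for a plain Go test.
	g := gomega.NewWithT(t)

	// framework.ExpectEqual(err, nil, "msg")  ->  gomega.Expect(err).To(gomega.BeNil(), "msg")
	var err error
	g.Expect(err).To(gomega.BeNil(), "failed to get list of container runtime pids")

	// framework.ExpectEqual(ok, true, "msg")  ->  gomega.Expect(ok).To(gomega.BeTrue(), "msg")
	isReady := true
	g.Expect(isReady).To(gomega.BeTrue(), "pod should be ready")

	// framework.ExpectEqual(code, 0, "msg: %+v", v)  ->  gomega.Expect(code).To(gomega.BeZero(), "msg: %+v", v)
	exitCode := 0
	g.Expect(exitCode).To(gomega.BeZero(), "ContainerStateTerminated: %+v", exitCode)

	// framework.ExpectNotEqual(a, b)  ->  gomega.Expect(a).To(gomega.Not(gomega.Equal(b)))
	devID1, devID2 := "Dev-1", "Dev-2"
	g.Expect(devID1).To(gomega.Not(gomega.Equal(devID2)))
}
```

The optional trailing arguments to To/ToNot act as a Printf-style failure description, which is why the hunks above can carry the old assertion messages over unchanged.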