Merge pull request #119501 from Songjoy/cleanup-e2e-node-framework-equal

e2e_node: stop using deprecated framework.ExpectEqual
This commit is contained in:
Kubernetes Prow Robot 2023-08-16 16:36:30 -07:00 committed by GitHub
commit 9ee7185be6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 31 additions and 29 deletions

View File

@@ -32,6 +32,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
)
var _ = SIGDescribe("Events", func() {
@@ -81,7 +82,7 @@ var _ = SIGDescribe("Events", func() {
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(ctx, options)
framework.ExpectNoError(err)
framework.ExpectEqual(len(pods.Items), 1)
gomega.Expect(pods.Items).To(gomega.HaveLen(1))
ginkgo.By("retrieving the pod")
podWithUID, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})

View File

@@ -32,6 +32,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
)
func preparePod(name string, node *v1.Node, propagation *v1.MountPropagationMode, hostDir string) *v1.Pod {
@@ -178,7 +179,7 @@ var _ = SIGDescribe("Mount propagation", func() {
shouldBeVisible := mounts.Has(mountName)
if shouldBeVisible {
framework.ExpectNoError(err, "%s: failed to run %q", msg, cmd)
framework.ExpectEqual(stdout, mountName, msg)
gomega.Expect(stdout).To(gomega.Equal(mountName), msg)
} else {
// We *expect* cat to return error here
framework.ExpectError(err, msg)
@@ -191,7 +192,7 @@ var _ = SIGDescribe("Mount propagation", func() {
kubeletPid, err := hostExec.IssueCommandWithResult(ctx, cmd, node)
framework.ExpectNoError(err, "Checking kubelet pid")
kubeletPid = strings.TrimSuffix(kubeletPid, "\n")
framework.ExpectEqual(strings.Count(kubeletPid, " "), 0, "kubelet should only have a single PID in the system (pidof returned %q)", kubeletPid)
gomega.Expect(strings.Count(kubeletPid, " ")).To(gomega.Equal(0), "kubelet should only have a single PID in the system (pidof returned %q)", kubeletPid)
enterKubeletMountNS := fmt.Sprintf("nsenter -t %s -m", kubeletPid)
// Check that the master and host mounts are propagated to the container runtime's mount namespace
@@ -200,7 +201,7 @@ var _ = SIGDescribe("Mount propagation", func() {
output, err := hostExec.IssueCommandWithResult(ctx, cmd, node)
framework.ExpectNoError(err, "host container namespace should see mount from %s: %s", mountName, output)
output = strings.TrimSuffix(output, "\n")
framework.ExpectEqual(output, mountName, "host container namespace should see mount contents from %s", mountName)
gomega.Expect(output).To(gomega.Equal(mountName), "host container namespace should see mount contents from %s", mountName)
}
// Check that the slave, private, and default mounts are not propagated to the container runtime's mount namespace

View File

@@ -121,14 +121,14 @@ var _ = SIGDescribe("NodeProblemDetector", func() {
psCmd := "ps aux | grep [n]ode-problem-detector"
result, err = e2essh.SSH(ctx, psCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
framework.ExpectEqual(result.Code, 0)
gomega.Expect(result.Code).To(gomega.Equal(0))
gomega.Expect(result.Stdout).To(gomega.ContainSubstring("node-problem-detector"))
ginkgo.By(fmt.Sprintf("Check node-problem-detector is running fine on node %q", host))
journalctlCmd := "sudo journalctl -r -u node-problem-detector"
result, err = e2essh.SSH(ctx, journalctlCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
framework.ExpectEqual(result.Code, 0)
gomega.Expect(result.Code).To(gomega.Equal(0))
gomega.Expect(result.Stdout).NotTo(gomega.ContainSubstring("node-problem-detector.service: Failed"))
// We only will check for the KubeletStart even if parsing of date here succeeded.
@@ -136,7 +136,7 @@ var _ = SIGDescribe("NodeProblemDetector", func() {
npdStartTimeCommand := "sudo systemctl show --timestamp=utc node-problem-detector -P ActiveEnterTimestamp"
result, err = e2essh.SSH(ctx, npdStartTimeCommand, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
framework.ExpectEqual(result.Code, 0)
gomega.Expect(result.Code).To(gomega.Equal(0))
// The time format matches the systemd format.
// 'utc': 'Day YYYY-MM-DD HH:MM:SS UTC (see https://www.freedesktop.org/software/systemd/man/systemd.time.html)
@@ -157,7 +157,7 @@ var _ = SIGDescribe("NodeProblemDetector", func() {
injectLogCmd := "sudo sh -c \"echo 'kernel: " + log + "' >> /dev/kmsg\""
result, err = e2essh.SSH(ctx, injectLogCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
framework.ExpectEqual(result.Code, 0)
gomega.Expect(result.Code).To(gomega.Equal(0))
}
ginkgo.By("Check node-problem-detector can post conditions and events to API server")
@@ -297,7 +297,7 @@ func getMemoryStat(ctx context.Context, f *framework.Framework, host string) (rs
result, err := e2essh.SSH(ctx, memCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
framework.ExpectEqual(result.Code, 0)
gomega.Expect(result.Code).To(gomega.Equal(0))
lines := strings.Split(result.Stdout, "\n")
memoryUsage, err := strconv.ParseFloat(lines[0], 64)
@@ -351,7 +351,7 @@ func getCPUStat(ctx context.Context, f *framework.Framework, host string) (usage
result, err := e2essh.SSH(ctx, cpuCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
framework.ExpectEqual(result.Code, 0)
gomega.Expect(result.Code).To(gomega.Equal(0))
lines := strings.Split(result.Stdout, "\n")
usage, err = strconv.ParseFloat(lines[0], 64)
@@ -367,7 +367,7 @@ func getCPUStat(ctx context.Context, f *framework.Framework, host string) (usage
func isHostRunningCgroupV2(ctx context.Context, f *framework.Framework, host string) bool {
result, err := e2essh.SSH(ctx, "stat -fc %T /sys/fs/cgroup/", host, framework.TestContext.Provider)
framework.ExpectNoError(err)
framework.ExpectEqual(result.Code, 0)
gomega.Expect(result.Code).To(gomega.Equal(0))
// 0x63677270 == CGROUP2_SUPER_MAGIC
// https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html

View File

@@ -241,7 +241,7 @@ func verifyPodResizePolicy(pod *v1.Pod, tcInfo []TestContainerInfo) {
gomega.Expect(cMap).Should(gomega.HaveKey(ci.Name))
c := cMap[ci.Name]
tc, _ := makeTestContainer(ci)
framework.ExpectEqual(tc.ResizePolicy, c.ResizePolicy)
gomega.Expect(tc.ResizePolicy).To(gomega.Equal(c.ResizePolicy))
}
}
@@ -254,7 +254,7 @@ func verifyPodResources(pod *v1.Pod, tcInfo []TestContainerInfo) {
gomega.Expect(cMap).Should(gomega.HaveKey(ci.Name))
c := cMap[ci.Name]
tc, _ := makeTestContainer(ci)
framework.ExpectEqual(tc.Resources, c.Resources)
gomega.Expect(tc.Resources).To(gomega.Equal(c.Resources))
}
}
@@ -279,7 +279,7 @@ func verifyPodAllocations(pod *v1.Pod, tcInfo []TestContainerInfo, flagError boo
_, tcStatus := makeTestContainer(ci)
if flagError {
framework.ExpectEqual(tcStatus.AllocatedResources, cStatus.AllocatedResources)
gomega.Expect(tcStatus.AllocatedResources).To(gomega.Equal(cStatus.AllocatedResources))
}
if !cmp.Equal(cStatus.AllocatedResources, tcStatus.AllocatedResources) {
return false
@@ -297,8 +297,8 @@ func verifyPodStatusResources(pod *v1.Pod, tcInfo []TestContainerInfo) {
gomega.Expect(csMap).Should(gomega.HaveKey(ci.Name))
cs := csMap[ci.Name]
tc, _ := makeTestContainer(ci)
framework.ExpectEqual(tc.Resources, *cs.Resources)
//framework.ExpectEqual(cs.RestartCount, ci.RestartCount)
gomega.Expect(tc.Resources).To(gomega.Equal(*cs.Resources))
//gomega.Expect(cs.RestartCount).To(gomega.Equal(ci.RestartCount))
}
}
@@ -1555,13 +1555,13 @@ func doPodResizeSchedulerTests() {
ginkgo.By(fmt.Sprintf("TEST1: Create pod '%s' that fits the node '%s'", testPod1.Name, node.Name))
testPod1 = podClient.CreateSync(ctx, testPod1)
framework.ExpectEqual(testPod1.Status.Phase, v1.PodRunning)
gomega.Expect(testPod1.Status.Phase).To(gomega.Equal(v1.PodRunning))
ginkgo.By(fmt.Sprintf("TEST1: Create pod '%s' that won't fit node '%s' with pod '%s' on it", testPod2.Name, node.Name, testPod1.Name))
testPod2 = podClient.Create(ctx, testPod2)
err = e2epod.WaitForPodNameUnschedulableInNamespace(ctx, f.ClientSet, testPod2.Name, testPod2.Namespace)
framework.ExpectNoError(err)
framework.ExpectEqual(testPod2.Status.Phase, v1.PodPending)
gomega.Expect(testPod2.Status.Phase).To(gomega.Equal(v1.PodPending))
ginkgo.By(fmt.Sprintf("TEST1: Resize pod '%s' to fit in node '%s'", testPod2.Name, node.Name))
testPod2, pErr := f.ClientSet.CoreV1().Pods(testPod2.Namespace).Patch(ctx,
@@ -1610,7 +1610,7 @@ func doPodResizeSchedulerTests() {
testPod3 = podClient.Create(ctx, testPod3)
p3Err := e2epod.WaitForPodNameUnschedulableInNamespace(ctx, f.ClientSet, testPod3.Name, testPod3.Namespace)
framework.ExpectNoError(p3Err, "failed to create pod3 or pod3 did not become pending!")
framework.ExpectEqual(testPod3.Status.Phase, v1.PodPending)
gomega.Expect(testPod3.Status.Phase).To(gomega.Equal(v1.PodPending))
ginkgo.By(fmt.Sprintf("TEST2: Resize pod '%s' to make enough space for pod '%s'", testPod1.Name, testPod3.Name))
testPod1, p1Err := f.ClientSet.CoreV1().Pods(testPod1.Namespace).Patch(context.TODO(),

View File

@@ -48,6 +48,7 @@ import (
utilpointer "k8s.io/utils/pointer"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/expfmt"
)
@@ -82,7 +83,7 @@ var _ = SIGDescribe("Pods Extended", func() {
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to query for pod")
framework.ExpectEqual(len(pods.Items), 0)
gomega.Expect(pods.Items).To(gomega.BeEmpty())
ginkgo.By("submitting the pod to kubernetes")
podClient.Create(ctx, pod)
@@ -92,7 +93,7 @@ var _ = SIGDescribe("Pods Extended", func() {
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to query for pod")
framework.ExpectEqual(len(pods.Items), 1)
gomega.Expect(pods.Items).To(gomega.HaveLen(1))
// We need to wait for the pod to be running, otherwise the deletion
// may be carried out immediately rather than gracefully.
@@ -106,7 +107,7 @@ var _ = SIGDescribe("Pods Extended", func() {
var statusCode int
err = f.ClientSet.CoreV1().RESTClient().Delete().AbsPath("/api/v1/namespaces", pod.Namespace, "pods", pod.Name).Param("gracePeriodSeconds", "30").Do(ctx).StatusCode(&statusCode).Into(&lastPod)
framework.ExpectNoError(err, "failed to use http client to send delete")
framework.ExpectEqual(statusCode, http.StatusOK, "failed to delete gracefully by client request")
gomega.Expect(statusCode).To(gomega.Equal(http.StatusOK), "failed to delete gracefully by client request")
ginkgo.By("verifying the kubelet observed the termination notice")
@@ -144,8 +145,7 @@ var _ = SIGDescribe("Pods Extended", func() {
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 0)
gomega.Expect(pods.Items).To(gomega.BeEmpty())
})
})
@@ -197,7 +197,7 @@ var _ = SIGDescribe("Pods Extended", func() {
ginkgo.By("verifying QOS class is set on the pod")
pod, err := podClient.Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to query for pod")
framework.ExpectEqual(pod.Status.QOSClass, v1.PodQOSGuaranteed)
gomega.Expect(pod.Status.QOSClass).To(gomega.Equal(v1.PodQOSGuaranteed))
})
})

View File

@@ -123,8 +123,8 @@ var _ = SIGDescribe("RuntimeClass", func() {
// check that pod got scheduled on specified node.
scheduledPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(nodeName, scheduledPod.Spec.NodeName)
framework.ExpectEqual(nodeSelector, pod.Spec.NodeSelector)
gomega.Expect(nodeName).To(gomega.Equal(scheduledPod.Spec.NodeName))
gomega.Expect(nodeSelector).To(gomega.Equal(pod.Spec.NodeSelector))
gomega.Expect(pod.Spec.Tolerations).To(gomega.ContainElement(tolerations[0]))
})
@@ -169,8 +169,8 @@ var _ = SIGDescribe("RuntimeClass", func() {
// check that pod got scheduled on specified node.
scheduledPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(nodeName, scheduledPod.Spec.NodeName)
framework.ExpectEqual(nodeSelector, pod.Spec.NodeSelector)
gomega.Expect(nodeName).To(gomega.Equal(scheduledPod.Spec.NodeName))
gomega.Expect(nodeSelector).To(gomega.Equal(pod.Spec.NodeSelector))
})
})