Merge pull request #117962 from humblec/gomega-equal

e2e: use gomega.Expect instead of framework.ExpectEqual in windows/*
commit 3588d091ff
Authored by Kubernetes Prow Robot on 2023-05-16 09:53:47 -07:00, committed via GitHub
7 changed files with 26 additions and 25 deletions
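The conversion applied across all seven files is mechanical: framework.ExpectEqual(actual, expected) becomes gomega.Expect(actual).To(gomega.Equal(expected)), optionally with a printf-style failure annotation. A minimal sketch of the pattern (the assertRunning helper is illustrative, not part of this PR, and assumes it runs inside a ginkgo spec where gomega's fail handler is registered):

package windows

import (
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
)

// assertRunning is a hypothetical helper showing the old and new styles.
func assertRunning(pod *v1.Pod) {
	// Old style, now removed:
	//   framework.ExpectEqual(pod.Status.Phase, v1.PodRunning)
	// New style: call gomega directly; the trailing arguments are an
	// optional printf-style annotation printed when the assertion fails.
	gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning),
		"pod %q should be running", pod.Name)
}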

View File

@@ -32,6 +32,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
+"github.com/onsi/gomega"
)
var _ = SIGDescribe("[Feature:Windows] Cpu Resources [Serial]", func() {
@@ -59,7 +60,7 @@ var _ = SIGDescribe("[Feature:Windows] Cpu Resources [Serial]", func() {
p.Name,
metav1.GetOptions{})
framework.ExpectNoError(err, "Error retrieving pod")
-framework.ExpectEqual(pod.Status.Phase, v1.PodRunning)
+gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning))
allPods = append(allPods, pod)
}
for _, p := range podsMilli {
@@ -68,7 +69,7 @@ var _ = SIGDescribe("[Feature:Windows] Cpu Resources [Serial]", func() {
p.Name,
metav1.GetOptions{})
framework.ExpectNoError(err, "Error retrieving pod")
-framework.ExpectEqual(pod.Status.Phase, v1.PodRunning)
+gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning))
allPods = append(allPods, pod)
}
ginkgo.By("Ensuring cpu doesn't exceed limit by >5%")

View File

@@ -28,6 +28,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
+"github.com/onsi/gomega"
)
var _ = SIGDescribe("[Feature:Windows] DNS", func() {
@@ -64,7 +65,8 @@ var _ = SIGDescribe("[Feature:Windows] DNS", func() {
framework.ExpectNoError(err)
ginkgo.By("confirming that the pod has a windows label")
-framework.ExpectEqual(testPod.Spec.NodeSelector["kubernetes.io/os"], "windows")
+gomega.Expect(testPod.Spec.NodeSelector).To(gomega.HaveKeyWithValue("kubernetes.io/os", "windows"), "pod.spec.nodeSelector")
framework.Logf("Created pod %v", testPod)
defer func() {
framework.Logf("Deleting pod %s...", testPod.Name)

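Note the nodeSelector check above also switches matchers: gomega.HaveKeyWithValue asserts that the key exists with the given value, whereas indexing the map and comparing would silently turn a missing key into an empty-string comparison. A standalone sketch (using an explicit Gomega instance instead of ginkgo's registered fail handler):

package main

import (
	"fmt"

	"github.com/onsi/gomega"
)

func main() {
	// Route assertion failures to stdout for this self-contained example.
	g := gomega.NewGomega(func(message string, _ ...int) {
		fmt.Println("assertion failed:", message)
	})
	nodeSelector := map[string]string{"kubernetes.io/os": "windows"}

	// Fails with a clear "missing key" message if the label is absent.
	g.Expect(nodeSelector).To(gomega.HaveKeyWithValue("kubernetes.io/os", "windows"))

	// The old equivalent: a missing key yields "" and a less helpful failure.
	g.Expect(nodeSelector["kubernetes.io/os"]).To(gomega.Equal("windows"))
}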
View File

@@ -22,10 +22,9 @@ import (
"strings"
"time"
-"github.com/onsi/gomega"
semver "github.com/blang/semver/v4"
"github.com/onsi/ginkgo/v2"
+"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@@ -142,7 +141,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
metav1.GetOptions{})
framework.ExpectNoError(err, "Error retrieving pod")
-framework.ExpectEqual(p.Status.Phase, v1.PodSucceeded)
+gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodSucceeded))
})
ginkgo.It("should support init containers", func(ctx context.Context) {
@@ -202,7 +201,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
}
framework.Logf("Pod phase: %v\nlogs:\n%s", p.Status.Phase, logs)
}
-framework.ExpectEqual(p.Status.Phase, v1.PodSucceeded)
+gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodSucceeded))
})
ginkgo.It("container command path validation", func(ctx context.Context) {
@@ -511,7 +510,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
metav1.GetOptions{})
framework.ExpectNoError(err, "Error retrieving pod")
-framework.ExpectEqual(p.Status.Phase, v1.PodSucceeded)
+gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodSucceeded))
})
ginkgo.It("metrics should report count of started and failed to start HostProcess containers", func(ctx context.Context) {
@@ -793,7 +792,7 @@ var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersi
metav1.GetOptions{})
framework.ExpectNoError(err, "error retrieving pod")
-framework.ExpectEqual(p.Status.Phase, v1.PodSucceeded)
+gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodSucceeded))
// whoami will output %COMPUTER_NAME%/{randomly generated username} here.
// It is sufficient to just check that the logs do not contain `nt authority`

View File

@@ -19,7 +19,6 @@ package windows
import (
"context"
"encoding/json"
"fmt"
"time"
kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
@@ -93,10 +92,8 @@ func checkNodeAllocatableTest(ctx context.Context, f *framework.Framework) {
calculatedNodeAlloc.Sub(nodeMem.softEviction)
calculatedNodeAlloc.Sub(nodeMem.hardEviction)
ginkgo.By(fmt.Sprintf("Checking stated allocatable memory %v against calculated allocatable memory %v", &nodeMem.allocatable, calculatedNodeAlloc))
-// sanity check against stated allocatable
-framework.ExpectEqual(calculatedNodeAlloc.Cmp(nodeMem.allocatable), 0)
+gomega.Expect(calculatedNodeAlloc.Cmp(nodeMem.allocatable)).To(gomega.Equal(0), "calculated allocatable memory %+v and stated allocatable memory %+v should match", calculatedNodeAlloc, nodeMem.allocatable)
}
// Deploys `allocatablePods + 1` pods, each with a memory limit of `1/allocatablePods` of the total allocatable
@@ -185,7 +182,7 @@ func getNodeMemory(ctx context.Context, f *framework.Framework) nodeMemory {
// Assuming that agent nodes have the same config
// Make sure there is >0 agent nodes, then use the first one for info
-framework.ExpectNotEqual(nodeList.Size(), 0)
+gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty())
ginkgo.By("Getting memory details from node status and kubelet config")
status := nodeList.Items[0].Status

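One conversion in this file deserves care: framework.ExpectNotEqual(nodeList.Size(), 0) asserts the cluster has at least one agent node, so the direct gomega form must negate BeEmpty and target the Items slice (BeEmpty is defined for strings, slices, maps, and channels, not for the NodeList struct itself). A sketch of the intended assertion, assuming a typical client-go List call:

nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err)
// Assert at least one node exists; a positive BeEmpty would invert the check.
gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "expected at least one agent node")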
View File

@@ -21,6 +21,7 @@ import (
"time"
"github.com/onsi/ginkgo/v2"
+"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -168,7 +169,7 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV
metav1.GetOptions{})
framework.ExpectNoError(err, "Error retrieving pod")
-framework.ExpectEqual(p.Status.Phase, v1.PodSucceeded)
+gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodSucceeded))
ginkgo.By("Waiting for Windows worker rebooting")
@@ -195,10 +196,10 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV
}
ginkgo.By("Checking whether agn-test-pod is rebooted")
-framework.ExpectEqual(restartCount, 1)
+gomega.Expect(restartCount).To(gomega.Equal(1), "restart count of agn-test-pod should be 1")
agnPodOut, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, agnPod.Name, metav1.GetOptions{})
-framework.ExpectEqual(agnPodOut.Status.Phase, v1.PodRunning)
+gomega.Expect(agnPodOut.Status.Phase).To(gomega.Equal(v1.PodRunning))
framework.ExpectNoError(err, "getting pod info after reboot")
assertConsistentConnectivity(ctx, f, nginxPod.ObjectMeta.Name, "linux", linuxCheck(agnPodOut.Status.PodIP, 80))
@@ -251,6 +252,6 @@ var _ = SIGDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletV
metav1.GetOptions{})
framework.ExpectNoError(err, "Error retrieving pod")
-framework.ExpectEqual(p.Status.Phase, v1.PodSucceeded)
+gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodSucceeded))
})
})

View File

@@ -33,6 +33,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
+"github.com/onsi/gomega"
)
var _ = SIGDescribe("Services", func() {
@@ -79,8 +80,7 @@ var _ = SIGDescribe("Services", func() {
ginkgo.By("verifying that pod has the correct nodeSelector")
// Admission controllers may sometimes do the wrong thing
-framework.ExpectEqual(testPod.Spec.NodeSelector["kubernetes.io/os"], "windows")
+gomega.Expect(testPod.Spec.NodeSelector).To(gomega.HaveKeyWithValue("kubernetes.io/os", "windows"), "pod.spec.nodeSelector")
ginkgo.By(fmt.Sprintf("checking connectivity Pod to curl http://%s:%d", nodeIP, nodePort))
assertConsistentConnectivity(ctx, f, testPod.ObjectMeta.Name, windowsOS, windowsCheck(fmt.Sprintf("http://%s", net.JoinHostPort(nodeIP, strconv.Itoa(nodePort)))))

View File

@@ -30,6 +30,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2"
+"github.com/onsi/gomega"
)
const (
@@ -100,13 +101,13 @@ func doReadOnlyTest(ctx context.Context, f *framework.Framework, source v1.VolumeSource, volumePath string) {
pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
ginkgo.By("verifying that pod has the correct nodeSelector")
-framework.ExpectEqual(pod.Spec.NodeSelector["kubernetes.io/os"], "windows")
+gomega.Expect(pod.Spec.NodeSelector).To(gomega.HaveKeyWithValue("kubernetes.io/os", "windows"), "pod.spec.nodeSelector")
cmd := []string{"cmd", "/c", "echo windows-volume-test", ">", filePath}
ginkgo.By("verifying that pod will get an error when writing to a volume that is readonly")
_, stderr, _ := e2epod.ExecCommandInContainerWithFullOutput(f, podName, containerName, cmd...)
-framework.ExpectEqual(stderr, "Access is denied.")
+gomega.Expect(stderr).To(gomega.Equal("Access is denied."))
}
func doReadWriteReadOnlyTest(ctx context.Context, f *framework.Framework, source v1.VolumeSource, volumePath string) {
@@ -135,7 +136,7 @@ func doReadWriteReadOnlyTest(ctx context.Context, f *framework.Framework, source v1.VolumeSource, volumePath string) {
pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
ginkgo.By("verifying that pod has the correct nodeSelector")
-framework.ExpectEqual(pod.Spec.NodeSelector["kubernetes.io/os"], "windows")
+gomega.Expect(pod.Spec.NodeSelector).To(gomega.HaveKeyWithValue("kubernetes.io/os", "windows"), "pod.spec.nodeSelector")
ginkgo.By("verifying that pod can write to a volume with read/write access")
writecmd := []string{"cmd", "/c", "echo windows-volume-test", ">", filePath}
@@ -145,13 +146,13 @@ func doReadWriteReadOnlyTest(ctx context.Context, f *framework.Framework, source v1.VolumeSource, volumePath string) {
ginkgo.By("verifying that pod will get an error when writing to a volume that is readonly")
_, stderr, _ := e2epod.ExecCommandInContainerWithFullOutput(f, podName, containerName, writecmd...)
-framework.ExpectEqual(stderr, "Access is denied.")
+gomega.Expect(stderr).To(gomega.Equal("Access is denied."))
ginkgo.By("verifying that pod can read from a volume that is readonly")
readcmd := []string{"cmd", "/c", "type", filePath}
readout, readerr, err := e2epod.ExecCommandInContainerWithFullOutput(f, podName, containerName, readcmd...)
readmsg := fmt.Sprintf("cmd: %v, stdout: %q, stderr: %q", readcmd, readout, readerr)
-framework.ExpectEqual(readout, "windows-volume-test")
+gomega.Expect(readout).To(gomega.Equal("windows-volume-test"))
framework.ExpectNoError(err, readmsg)
}
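A design note on the volume assertions above: gomega.Equal compares the exec output byte-for-byte, which is what these tests want for the fixed "Access is denied." and "windows-volume-test" strings. If a runtime ever appends trailing whitespace, gomega.ContainSubstring is the usual relaxation (illustrative, not part of this PR):

// Exact match, as in the tests above: any extra whitespace fails.
gomega.Expect(stderr).To(gomega.Equal("Access is denied."))
// Relaxed variant tolerating trailing newlines or surrounding output.
gomega.Expect(stderr).To(gomega.ContainSubstring("Access is denied."))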