enhance assertions in test/e2e/common/node

Wantong Jiang 2022-05-17 06:54:03 +00:00
parent c84d0864dd
commit 93692ef57d
7 changed files with 74 additions and 24 deletions
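
The commit applies one pattern across all seven files: boolean assertions of the form framework.ExpectEqual(value, true, msg), whose failure output is a static message, are rewritten as explicit if checks that call framework.Failf with a formatted message naming the specific object under test (namespace, pod, container, or API group). A minimal sketch of the before/after shape, with illustrative variable names:

// Before: on failure, only the static message is printed.
framework.ExpectEqual(isReady, true, "pod should be ready")

// After: on failure, the message identifies the exact namespace and pod.
if !isReady {
	framework.Failf("pod %s/%s should be ready", f.Namespace.Name, p.Name)
}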

test/e2e/common/node/configmap.go

@@ -223,7 +223,9 @@ var _ = SIGDescribe("ConfigMap", func() {
 				break
 			}
 		}
-		framework.ExpectEqual(testConfigMapFound, true, "failed to find ConfigMap by label selector")
+		if !testConfigMapFound {
+			framework.Failf("failed to find ConfigMap %s/%s by label selector", testNamespaceName, testConfigMap.ObjectMeta.Name)
+		}
 		ginkgo.By("deleting the ConfigMap by collection with a label selector")
 		err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{

test/e2e/common/node/container_probe.go

@@ -71,7 +71,9 @@ var _ = SIGDescribe("Probing container", func() {
 		framework.ExpectNoError(err)
 		isReady, err := testutils.PodRunningReady(p)
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(isReady, true, "pod should be ready")
+		if !isReady {
+			framework.Failf("pod %s/%s should be ready", f.Namespace.Name, p.Name)
+		}
 		// We assume the pod became ready when the container became ready. This
 		// is true for a single container pod.
@@ -110,7 +112,9 @@ var _ = SIGDescribe("Probing container", func() {
 		framework.ExpectNoError(err)
 		isReady, _ := testutils.PodRunningReady(p)
-		framework.ExpectNotEqual(isReady, true, "pod should be not ready")
+		if isReady {
+			framework.Failf("pod %s/%s should be not ready", f.Namespace.Name, p.Name)
+		}
 		restartCount := getRestartCount(p)
 		framework.ExpectEqual(restartCount, 0, "pod should have a restart count of 0 but got %v", restartCount)
@@ -430,7 +434,9 @@ var _ = SIGDescribe("Probing container", func() {
 		isReady, err := testutils.PodRunningReady(p)
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(isReady, true, "pod should be ready")
+		if !isReady {
+			framework.Failf("pod %s/%s should be ready", f.Namespace.Name, p.Name)
+		}
 		readyIn := readyTime.Sub(startedTime)
 		framework.Logf("Container started at %v, pod became ready at %v, %v after startupProbe succeeded", startedTime, readyTime, readyIn)

test/e2e/common/node/init_container.go

@@ -235,7 +235,9 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 		framework.ExpectEqual(len(endPod.Status.InitContainerStatuses), 2)
 		for _, status := range endPod.Status.InitContainerStatuses {
-			framework.ExpectEqual(status.Ready, true)
+			if !status.Ready {
+				framework.Failf("init container %s should be in Ready status", status.Name)
+			}
 			gomega.Expect(status.State.Terminated).NotTo(gomega.BeNil())
 			gomega.Expect(status.State.Terminated.ExitCode).To(gomega.BeZero())
 		}
@@ -312,7 +314,9 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 		framework.ExpectEqual(len(endPod.Status.InitContainerStatuses), 2)
 		for _, status := range endPod.Status.InitContainerStatuses {
-			framework.ExpectEqual(status.Ready, true)
+			if !status.Ready {
+				framework.Failf("init container %s should be in Ready status", status.Name)
+			}
 			gomega.Expect(status.State.Terminated).NotTo(gomega.BeNil())
 			gomega.Expect(status.State.Terminated.ExitCode).To(gomega.BeZero())
 		}

test/e2e/common/node/lease.go

@@ -32,6 +32,8 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	admissionapi "k8s.io/pod-security-admission/api"
 	"k8s.io/utils/pointer"
+	"github.com/google/go-cmp/cmp"
 )

 func getPatchBytes(oldLease, newLease *coordinationv1.Lease) ([]byte, error) {
@@ -89,7 +91,9 @@ var _ = SIGDescribe("Lease", func() {
 		readLease, err := leaseClient.Get(context.TODO(), name, metav1.GetOptions{})
 		framework.ExpectNoError(err, "couldn't read Lease")
-		framework.ExpectEqual(apiequality.Semantic.DeepEqual(lease.Spec, readLease.Spec), true)
+		if !apiequality.Semantic.DeepEqual(lease.Spec, readLease.Spec) {
+			framework.Failf("Leases don't match. Diff (- for expected, + for actual):\n%s", cmp.Diff(lease.Spec, readLease.Spec))
+		}
 		createdLease.Spec = coordinationv1.LeaseSpec{
 			HolderIdentity: pointer.StringPtr("holder2"),
@@ -104,7 +108,9 @@ var _ = SIGDescribe("Lease", func() {
 		readLease, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{})
 		framework.ExpectNoError(err, "couldn't read Lease")
-		framework.ExpectEqual(apiequality.Semantic.DeepEqual(createdLease.Spec, readLease.Spec), true)
+		if !apiequality.Semantic.DeepEqual(createdLease.Spec, readLease.Spec) {
+			framework.Failf("Leases don't match. Diff (- for expected, + for actual):\n%s", cmp.Diff(createdLease.Spec, readLease.Spec))
+		}
 		patchedLease := readLease.DeepCopy()
 		patchedLease.Spec = coordinationv1.LeaseSpec{
@@ -122,7 +128,9 @@ var _ = SIGDescribe("Lease", func() {
 		readLease, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{})
 		framework.ExpectNoError(err, "couldn't read Lease")
-		framework.ExpectEqual(apiequality.Semantic.DeepEqual(patchedLease.Spec, readLease.Spec), true)
+		if !apiequality.Semantic.DeepEqual(patchedLease.Spec, readLease.Spec) {
+			framework.Failf("Leases don't match. Diff (- for expected, + for actual):\n%s", cmp.Diff(patchedLease.Spec, readLease.Spec))
+		}
 		name2 := "lease2"
 		lease2 := &coordinationv1.Lease{
@@ -157,7 +165,9 @@ var _ = SIGDescribe("Lease", func() {
 		framework.ExpectNoError(err, "deleting Lease failed")
 		_, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{})
-		framework.ExpectEqual(apierrors.IsNotFound(err), true)
+		if !apierrors.IsNotFound(err) {
+			framework.Failf("expected IsNotFound error, got %#v", err)
+		}
 		leaseClient = f.ClientSet.CoordinationV1().Leases(metav1.NamespaceAll)
 		// Number of leases may be high in large clusters, as Lease object is
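
lease.go also gains a dependency on github.com/google/go-cmp/cmp so that spec mismatches fail with a readable diff rather than a bare false. cmp.Diff(x, y) returns the empty string when the two values are equal; otherwise it returns a report in which lines from x are prefixed with - and lines from y with +, which is what the "Diff (- for expected, + for actual)" wording in the new messages refers to. A self-contained sketch, using an illustrative Spec struct rather than the real coordinationv1.LeaseSpec:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// Spec stands in for coordinationv1.LeaseSpec in this example.
type Spec struct {
	HolderIdentity string
}

func main() {
	expected := Spec{HolderIdentity: "holder"}
	actual := Spec{HolderIdentity: "holder2"}
	// cmp.Diff returns "" for equal values, otherwise a line-by-line diff.
	if diff := cmp.Diff(expected, actual); diff != "" {
		fmt.Printf("Specs don't match. Diff (- for expected, + for actual):\n%s", diff)
	}
}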

test/e2e/common/node/pods.go

@@ -816,7 +816,9 @@ var _ = SIGDescribe("Pods", func() {
 		ginkgo.By("submitting the pod to kubernetes")
 		f.PodClient().Create(pod)
 		e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
-		framework.ExpectEqual(podClient.PodIsReady(podName), false, "Expect pod's Ready condition to be false initially.")
+		if podClient.PodIsReady(podName) {
+			framework.Failf("Expect pod(%s/%s)'s Ready condition to be false initially.", f.Namespace.Name, pod.Name)
+		}
 		ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1))
 		_, err := podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), metav1.PatchOptions{}, "status")
@@ -824,7 +826,9 @@ var _ = SIGDescribe("Pods", func() {
 		// Sleep for 10 seconds.
 		time.Sleep(syncLoopFrequency)
 		// Verify the pod is still not ready
-		framework.ExpectEqual(podClient.PodIsReady(podName), false, "Expect pod's Ready condition to be false with only one condition in readinessGates equal to True")
+		if podClient.PodIsReady(podName) {
+			framework.Failf("Expect pod(%s/%s)'s Ready condition to be false with only one condition in readinessGates equal to True", f.Namespace.Name, pod.Name)
+		}
 		ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2))
 		_, err = podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), metav1.PatchOptions{}, "status")
@@ -1067,7 +1071,9 @@ var _ = SIGDescribe("Pods", func() {
 			postDeletePodJSON, _ = json.Marshal(postDeletePod)
 		}
 		framework.ExpectError(err, "pod %v found in namespace %v, but it should be deleted: %s", testPodName, testNamespaceName, string(postDeletePodJSON))
-		framework.ExpectEqual(apierrors.IsNotFound(err), true, fmt.Sprintf("expected IsNotFound error, got %#v", err))
+		if !apierrors.IsNotFound(err) {
+			framework.Failf("expected IsNotFound error, got %#v", err)
+		}
 	})
 })

test/e2e/common/node/runtimeclass.go

@@ -219,7 +219,9 @@ var _ = SIGDescribe("RuntimeClass", func() {
 					}
 				}
 			}
-			framework.ExpectEqual(found, true, fmt.Sprintf("expected RuntimeClass API group/version, got %#v", discoveryGroups.Groups))
+			if !found {
+				framework.Failf("expected RuntimeClass API group/version, got %#v", discoveryGroups.Groups)
+			}
 		}
 		ginkgo.By("getting /apis/node.k8s.io")
@@ -234,7 +236,9 @@ var _ = SIGDescribe("RuntimeClass", func() {
 					break
 				}
 			}
-			framework.ExpectEqual(found, true, fmt.Sprintf("expected RuntimeClass API version, got %#v", group.Versions))
+			if !found {
+				framework.Failf("expected RuntimeClass API version, got %#v", group.Versions)
+			}
 		}
 		ginkgo.By("getting /apis/node.k8s.io/" + rcVersion)
@@ -248,7 +252,9 @@ var _ = SIGDescribe("RuntimeClass", func() {
 					found = true
 				}
 			}
-			framework.ExpectEqual(found, true, fmt.Sprintf("expected runtimeclasses, got %#v", resources.APIResources))
+			if !found {
+				framework.Failf("expected runtimeclasses, got %#v", resources.APIResources)
+			}
 		}
 		// Main resource create/read/update/watch operations
@@ -257,7 +263,9 @@ var _ = SIGDescribe("RuntimeClass", func() {
 		createdRC, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 		_, err = rcClient.Create(context.TODO(), rc, metav1.CreateOptions{})
-		framework.ExpectEqual(apierrors.IsAlreadyExists(err), true, fmt.Sprintf("expected 409, got %#v", err))
+		if !apierrors.IsAlreadyExists(err) {
+			framework.Failf("expected 409, got %#v", err)
+		}
 		_, err = rcClient.Create(context.TODO(), rc2, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
@@ -296,10 +304,14 @@ var _ = SIGDescribe("RuntimeClass", func() {
 		for sawAdded, sawPatched, sawUpdated := false, false, false; !sawAdded && !sawPatched && !sawUpdated; {
 			select {
 			case evt, ok := <-rcWatch.ResultChan():
-				framework.ExpectEqual(ok, true, "watch channel should not close")
+				if !ok {
+					framework.Fail("watch channel should not close")
+				}
 				if evt.Type == watch.Modified {
 					watchedRC, isRC := evt.Object.(*nodev1.RuntimeClass)
-					framework.ExpectEqual(isRC, true, fmt.Sprintf("expected RC, got %T", evt.Object))
+					if !isRC {
+						framework.Failf("expected RC, got %T", evt.Object)
+					}
 					if watchedRC.Annotations["patched"] == "true" {
 						framework.Logf("saw patched annotations")
 						sawPatched = true
@@ -311,7 +323,9 @@ var _ = SIGDescribe("RuntimeClass", func() {
 					}
 				} else if evt.Type == watch.Added {
 					_, isRC := evt.Object.(*nodev1.RuntimeClass)
-					framework.ExpectEqual(isRC, true, fmt.Sprintf("expected RC, got %T", evt.Object))
+					if !isRC {
+						framework.Failf("expected RC, got %T", evt.Object)
+					}
 					sawAdded = true
 				}
@@ -327,7 +341,9 @@ var _ = SIGDescribe("RuntimeClass", func() {
 		err = rcClient.Delete(context.TODO(), createdRC.Name, metav1.DeleteOptions{})
 		framework.ExpectNoError(err)
 		_, err = rcClient.Get(context.TODO(), createdRC.Name, metav1.GetOptions{})
-		framework.ExpectEqual(apierrors.IsNotFound(err), true, fmt.Sprintf("expected 404, got %#v", err))
+		if !apierrors.IsNotFound(err) {
+			framework.Failf("expected 404, got %#v", err)
+		}
 		rcs, err = rcClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
 		framework.ExpectNoError(err)
 		framework.ExpectEqual(len(rcs.Items), 2, "filtered list should have 2 items")
@@ -360,7 +376,9 @@ func createRuntimeClass(f *framework.Framework, name, handler string, overhead *
 func expectPodRejection(f *framework.Framework, pod *v1.Pod) {
 	_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
 	framework.ExpectError(err, "should be forbidden")
-	framework.ExpectEqual(apierrors.IsForbidden(err), true, "should be forbidden error")
+	if !apierrors.IsForbidden(err) {
+		framework.Failf("expected forbidden error, got %#v", err)
+	}
 }
 // expectPodSuccess waits for the given pod to terminate successfully.

test/e2e/common/node/secrets.go

@@ -187,7 +187,9 @@ var _ = SIGDescribe("Secrets", func() {
 				break
 			}
 		}
-		framework.ExpectEqual(foundCreatedSecret, true, "unable to find secret by its value")
+		if !foundCreatedSecret {
+			framework.Failf("unable to find secret %s/%s by name", f.Namespace.Name, secretTestName)
+		}
 		ginkgo.By("patching the secret")
 		// patch the secret in the test namespace
@@ -230,7 +232,9 @@ var _ = SIGDescribe("Secrets", func() {
 				break
 			}
 		}
-		framework.ExpectEqual(foundCreatedSecret, false, "secret was not deleted successfully")
+		if foundCreatedSecret {
+			framework.Failf("secret %s/%s was not deleted successfully", f.Namespace.Name, secretTestName)
+		}
 	})
 })