Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-19 01:40:13 +00:00)
Changed code to improve output messages on error for files under test/e2e/apps (#109944)
* Improving the output of tests in case of error
* Better error message. Also, the condition in the second case was reversed.
* Fixing 2 tests whose condition was inverted
* Again I got the conditions wrong
* Sorry for the confusion
* Improved error messages on failures
This commit is contained in:
parent cfa6ad50e6
commit a887a3b4fd
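The change throughout the diff below follows one pattern: a bare framework.ExpectEqual(cond, true) reports little beyond a boolean mismatch when it fails, while an explicit if-branch can call framework.Failf with the names and values that actually matter. As a rough sketch of the same idea using only Go's standard testing package (the helper checkJobDeleted and the errNotFound sentinel are hypothetical illustrations, not part of this commit or the e2e framework):

package example

import (
	"errors"
	"testing"
)

// errNotFound is a hypothetical sentinel error standing in for an
// apierrors.IsNotFound-style check; it is not part of the commit.
var errNotFound = errors.New("not found")

// checkJobDeleted fails the test with a descriptive message when the Get
// error does not indicate the job is gone, mirroring the
// "if !cond { framework.Failf(...) }" shape used throughout this commit.
func checkJobDeleted(t *testing.T, name, namespace string, getErr error) {
	t.Helper()
	// Before (paraphrased): framework.ExpectEqual(isNotFound, true)
	// only tells you that a boolean was false.
	// After: an explicit branch carries the job name, namespace, and error.
	if !errors.Is(getErr, errNotFound) {
		t.Fatalf("failed to ensure job %s was deleted in namespace %s: got %v", name, namespace, getErr)
	}
}

In a _test.go file this would be called right after the Get that is expected to return a not-found error, e.g. checkJobDeleted(t, job.Name, ns, err); the gain is the same as in the commit, since the failure log names the job and namespace instead of reporting only a boolean mismatch.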
@@ -261,7 +261,9 @@ var _ = SIGDescribe("CronJob", func() {
 	ginkgo.By("Ensuring job was deleted")
 	_, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
 	framework.ExpectError(err)
-	framework.ExpectEqual(apierrors.IsNotFound(err), true)
+	if !apierrors.IsNotFound(err) {
+		framework.Failf("Failed to delete %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
+	}

 	ginkgo.By("Ensuring the job is not in the cronjob active list")
 	err = waitForJobNotActive(ctx, f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name)
@@ -385,10 +387,14 @@ var _ = SIGDescribe("CronJob", func() {
 	for sawAnnotations := false; !sawAnnotations; {
 		select {
 		case evt, ok := <-cjWatch.ResultChan():
-			framework.ExpectEqual(ok, true, "watch channel should not close")
+			if !ok {
+				framework.Fail("watch channel should not close")
+			}
 			framework.ExpectEqual(evt.Type, watch.Modified)
 			watchedCronJob, isCronJob := evt.Object.(*batchv1.CronJob)
-			framework.ExpectEqual(isCronJob, true, fmt.Sprintf("expected CronJob, got %T", evt.Object))
+			if !isCronJob {
+				framework.Failf("expected CronJob, got %T", evt.Object)
+			}
 			if watchedCronJob.Annotations["patched"] == "true" {
 				framework.Logf("saw patched and updated annotations")
 				sawAnnotations = true
@@ -414,7 +420,9 @@ var _ = SIGDescribe("CronJob", func() {
 		[]byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":`+string(cjStatusJSON)+`}`),
 		metav1.PatchOptions{}, "status")
 	framework.ExpectNoError(err)
-	framework.ExpectEqual(patchedStatus.Status.LastScheduleTime.Equal(&now1), true, "patched object should have the applied lastScheduleTime status")
+	if !patchedStatus.Status.LastScheduleTime.Equal(&now1) {
+		framework.Failf("patched object should have the applied lastScheduleTime %#v, got %#v instead", cjStatus.LastScheduleTime, patchedStatus.Status.LastScheduleTime)
+	}
 	framework.ExpectEqual(patchedStatus.Annotations["patchedstatus"], "true", "patched object should have the applied annotation")

 	ginkgo.By("updating /status")
@@ -431,7 +439,9 @@ var _ = SIGDescribe("CronJob", func() {
 		return err
 	})
 	framework.ExpectNoError(err)
-	framework.ExpectEqual(updatedStatus.Status.LastScheduleTime.Equal(&now2), true, fmt.Sprintf("updated object status expected to have updated lastScheduleTime %#v, got %#v", statusToUpdate.Status.LastScheduleTime, updatedStatus.Status.LastScheduleTime))
+	if !updatedStatus.Status.LastScheduleTime.Equal(&now2) {
+		framework.Failf("updated object status expected to have updated lastScheduleTime %#v, got %#v", statusToUpdate.Status.LastScheduleTime, updatedStatus.Status.LastScheduleTime)
+	}

 	ginkgo.By("get /status")
 	cjResource := schema.GroupVersionResource{Group: "batch", Version: cjVersion, Resource: "cronjobs"}
@@ -444,7 +454,9 @@ var _ = SIGDescribe("CronJob", func() {
 	// CronJob resource delete operations
 	expectFinalizer := func(cj *batchv1.CronJob, msg string) {
 		framework.ExpectNotEqual(cj.DeletionTimestamp, nil, fmt.Sprintf("expected deletionTimestamp, got nil on step: %q, cronjob: %+v", msg, cj))
-		framework.ExpectEqual(len(cj.Finalizers) > 0, true, fmt.Sprintf("expected finalizers on cronjob, got none on step: %q, cronjob: %+v", msg, cj))
+		if len(cj.Finalizers) == 0 {
+			framework.Failf("expected finalizers on cronjob, got none on step: %q, cronjob: %+v", msg, cj)
+		}
 	}

 	ginkgo.By("deleting")
@@ -458,7 +470,9 @@ var _ = SIGDescribe("CronJob", func() {
 	if err == nil {
 		expectFinalizer(cj, "deleting cronjob")
 	} else {
-		framework.ExpectEqual(apierrors.IsNotFound(err), true, fmt.Sprintf("expected 404, got %v", err))
+		if !apierrors.IsNotFound(err) {
+			framework.Failf("expected 404, got %v", err)
+		}
 	}

 	ginkgo.By("deleting a collection")
@@ -467,7 +481,10 @@ var _ = SIGDescribe("CronJob", func() {
 	cjs, err = cjClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
 	framework.ExpectNoError(err)
 	// Should have <= 2 items since some cronjobs might not have been deleted yet due to finalizers
-	framework.ExpectEqual(len(cjs.Items) <= 2, true, "filtered list should be <= 2")
+	if len(cjs.Items) > 2 {
+		framework.Logf("got unexpected filtered list: %v", cjs.Items)
+		framework.Fail("filtered list should be <= 2")
+	}
 	// Validate finalizers
 	for _, cj := range cjs.Items {
 		expectFinalizer(&cj, "deleting cronjob collection")
@@ -496,7 +496,9 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		rollbackPods[pod.Name] = true
 	}
 	for _, pod := range existingPods {
-		framework.ExpectEqual(rollbackPods[pod.Name], true, fmt.Sprintf("unexpected pod %s be restarted", pod.Name))
+		if !rollbackPods[pod.Name] {
+			framework.Failf("unexpected pod %s be restarted", pod.Name)
+		}
 	}
 })

@@ -325,7 +325,9 @@ var _ = SIGDescribe("Deployment", func() {
 			break
 		}
 	}
-	framework.ExpectEqual(foundDeployment, true, "unable to find the Deployment in list", deploymentsList)
+	if !foundDeployment {
+		framework.Failf("unable to find the Deployment in the following list %v", deploymentsList)
+	}

 	ginkgo.By("updating the Deployment")
 	testDeploymentUpdate := testDeployment
@@ -681,7 +683,9 @@ func stopDeployment(ctx context.Context, c clientset.Interface, ns, deploymentNa
 	framework.Logf("Ensuring deployment %s was deleted", deploymentName)
 	_, err = c.AppsV1().Deployments(ns).Get(ctx, deployment.Name, metav1.GetOptions{})
 	framework.ExpectError(err)
-	framework.ExpectEqual(apierrors.IsNotFound(err), true)
+	if !apierrors.IsNotFound(err) {
+		framework.Failf("Expected deployment %s to be deleted", deploymentName)
+	}
 	framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
 	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
 	framework.ExpectNoError(err)
@@ -318,7 +318,9 @@ var _ = SIGDescribe("DisruptionController", func() {
 	if c.shouldDeny {
 		err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
 		framework.ExpectError(err, "pod eviction should fail")
-		framework.ExpectEqual(apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause), true, "pod eviction should fail with DisruptionBudget cause")
+		if !apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause) {
+			framework.Fail("pod eviction should fail with DisruptionBudget cause")
+		}
 	} else {
 		// Only wait for running pods in the "allow" case
 		// because one of shouldDeny cases relies on the
@@ -362,7 +364,9 @@ var _ = SIGDescribe("DisruptionController", func() {
 	}
 	err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
 	framework.ExpectError(err, "pod eviction should fail")
-	framework.ExpectEqual(apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause), true, "pod eviction should fail with DisruptionBudget cause")
+	if !apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause) {
+		framework.Failf("pod eviction should fail with DisruptionBudget cause. The error was \"%v\"", err)
+	}

 	ginkgo.By("Updating the pdb to allow a pod to be evicted")
 	updatePDBOrDie(ctx, cs, ns, defaultName, func(pdb *policyv1.PodDisruptionBudget) *policyv1.PodDisruptionBudget {
@@ -400,7 +404,9 @@ var _ = SIGDescribe("DisruptionController", func() {
 	}
 	err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
 	framework.ExpectError(err, "pod eviction should fail")
-	framework.ExpectEqual(apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause), true, "pod eviction should fail with DisruptionBudget cause")
+	if !apierrors.HasStatusCause(err, policyv1.DisruptionBudgetCause) {
+		framework.Failf("pod eviction should fail with DisruptionBudget cause. The error was \"%v\"", err)
+	}

 	ginkgo.By("Deleting the pdb to allow a pod to be evicted")
 	deletePDBOrDie(ctx, cs, ns, defaultName)
@@ -298,7 +298,9 @@ var _ = SIGDescribe("Job", func() {
 			break
 		}
 	}
-	framework.ExpectEqual(exists, true)
+	if !exists {
+		framework.Failf("Expected suspended job to exist. It was not found.")
+	}

 	ginkgo.By("Updating the job with suspend=false")
 	job.Spec.Suspend = pointer.BoolPtr(false)
@@ -354,7 +356,9 @@ var _ = SIGDescribe("Job", func() {
 			break
 		}
 	}
-	framework.ExpectEqual(exists, true)
+	if !exists {
+		framework.Failf("Expected suspended job to exist. It was not found.")
+	}
 })

 /*
@@ -494,7 +498,9 @@ var _ = SIGDescribe("Job", func() {
 	ginkgo.By("Ensuring job was deleted")
 	_, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
 	framework.ExpectError(err, "failed to ensure job %s was deleted in namespace: %s", job.Name, f.Namespace.Name)
-	framework.ExpectEqual(apierrors.IsNotFound(err), true)
+	if !apierrors.IsNotFound(err) {
+		framework.Failf("failed to ensure job %s was deleted in namespace: %s", job.Name, f.Namespace.Name)
+	}
 })

 /*
@@ -661,7 +667,9 @@ var _ = SIGDescribe("Job", func() {
 		[]byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":`+string(jStatusJSON)+`}`),
 		metav1.PatchOptions{}, "status")
 	framework.ExpectNoError(err)
-	framework.ExpectEqual(patchedStatus.Status.StartTime.Equal(&now1), true, "patched object should have the applied StartTime status")
+	if !patchedStatus.Status.StartTime.Equal(&now1) {
+		framework.Failf("patched object should have the applied StartTime %#v, got %#v instead", jStatus.StartTime, patchedStatus.Status.StartTime)
+	}
 	framework.ExpectEqual(patchedStatus.Annotations["patchedstatus"], "true", "patched object should have the applied annotation")

 	ginkgo.By("updating /status")
@@ -678,7 +686,9 @@ var _ = SIGDescribe("Job", func() {
 		return err
 	})
 	framework.ExpectNoError(err)
-	framework.ExpectEqual(updatedStatus.Status.StartTime.Equal(&now2), true, fmt.Sprintf("updated object status expected to have updated StartTime %#v, got %#v", statusToUpdate.Status.StartTime, updatedStatus.Status.StartTime))
+	if !updatedStatus.Status.StartTime.Equal(&now2) {
+		framework.Failf("updated object status expected to have updated StartTime %#v, got %#v", statusToUpdate.Status.StartTime, updatedStatus.Status.StartTime)
+	}

 	ginkgo.By("get /status")
 	jResource := schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"}
@@ -164,7 +164,9 @@ var _ = SIGDescribe("ReplicationController", func() {
 		return true, nil
 	})
 	framework.ExpectNoError(err, "Wait until condition with watch events should not return an error")
-	framework.ExpectEqual(eventFound, true, "failed to find RC %v event", watch.Added)
+	if !eventFound {
+		framework.Failf("failed to find RC %v event", watch.Added)
+	}

 	ginkgo.By("waiting for available Replicas")
 	eventFound = false
@@ -187,7 +189,9 @@ var _ = SIGDescribe("ReplicationController", func() {
 		return true, nil
 	})
 	framework.ExpectNoError(err, "Wait for condition with watch events should not return an error")
-	framework.ExpectEqual(eventFound, true, "RC has not reached ReadyReplicas count of %v", testRcInitialReplicaCount)
+	if !eventFound {
+		framework.Failf("RC has not reached ReadyReplicas count of %v", testRcInitialReplicaCount)
+	}

 	rcLabelPatchPayload, err := json.Marshal(v1.ReplicationController{
 		ObjectMeta: metav1.ObjectMeta{
@@ -213,7 +217,9 @@ var _ = SIGDescribe("ReplicationController", func() {
 		return true, nil
 	})
 	framework.ExpectNoError(err, "Wait until condition with watch events should not return an error")
-	framework.ExpectEqual(eventFound, true, "failed to find RC %v event", watch.Added)
+	if !eventFound {
+		framework.Failf("failed to find RC %v event", watch.Added)
+	}

 	rcStatusPatchPayload, err := json.Marshal(map[string]interface{}{
 		"status": map[string]interface{}{
@@ -241,7 +247,9 @@ var _ = SIGDescribe("ReplicationController", func() {
 		return true, nil
 	})
 	framework.ExpectNoError(err, "Wait until condition with watch events should not return an error")
-	framework.ExpectEqual(eventFound, true, "failed to find RC %v event", watch.Added)
+	if !eventFound {
+		framework.Failf("failed to find RC %v event", watch.Added)
+	}

 	ginkgo.By("waiting for available Replicas")
 	_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) {
@@ -260,7 +268,9 @@ var _ = SIGDescribe("ReplicationController", func() {
 		return true, nil
 	})
 	framework.ExpectNoError(err, "Failed to find updated ready replica count")
-	framework.ExpectEqual(eventFound, true, "Failed to find updated ready replica count")
+	if !eventFound {
+		framework.Fail("Failed to find updated ready replica count")
+	}

 	ginkgo.By("fetching ReplicationController status")
 	rcStatusUnstructured, err := dc.Resource(rcResource).Namespace(testRcNamespace).Get(ctx, testRcName, metav1.GetOptions{}, "status")
@@ -295,7 +305,9 @@ var _ = SIGDescribe("ReplicationController", func() {
 		return true, nil
 	})
 	framework.ExpectNoError(err, "Wait until condition with watch events should not return an error")
-	framework.ExpectEqual(eventFound, true, "failed to find RC %v event", watch.Added)
+	if !eventFound {
+		framework.Failf("failed to find RC %v event", watch.Added)
+	}

 	ginkgo.By("waiting for ReplicationController's scale to be the max amount")
 	eventFound = false
@@ -316,7 +328,9 @@ var _ = SIGDescribe("ReplicationController", func() {
 		return true, nil
 	})
 	framework.ExpectNoError(err, "Wait until condition with watch events should not return an error")
-	framework.ExpectEqual(eventFound, true, "Failed to find updated ready replica count")
+	if !eventFound {
+		framework.Fail("Failed to find updated ready replica count")
+	}

 	// Get the ReplicationController
 	ginkgo.By("fetching ReplicationController; ensuring that it's patched")
@@ -346,12 +360,16 @@ var _ = SIGDescribe("ReplicationController", func() {
 		return true, nil
 	})
 	framework.ExpectNoError(err, "Wait until condition with watch events should not return an error")
-	framework.ExpectEqual(eventFound, true, "failed to find RC %v event", watch.Added)
+	if !eventFound {
+		framework.Failf("failed to find RC %v event", watch.Added)
+	}

 	ginkgo.By("listing all ReplicationControllers")
 	rcs, err := f.ClientSet.CoreV1().ReplicationControllers("").List(ctx, metav1.ListOptions{LabelSelector: "test-rc-static=true"})
 	framework.ExpectNoError(err, "failed to list ReplicationController")
-	framework.ExpectEqual(len(rcs.Items) > 0, true)
+	if len(rcs.Items) == 0 {
+		framework.Fail("Expected to find a ReplicationController but none was found")
+	}

 	ginkgo.By("checking that ReplicationController has expected values")
 	foundRc := false
@@ -363,7 +381,10 @@ var _ = SIGDescribe("ReplicationController", func() {
 			foundRc = true
 		}
 	}
-	framework.ExpectEqual(foundRc, true)
+	if !foundRc {
+		framework.Logf("Got unexpected replication controller list %v", rcs.Items)
+		framework.Failf("could not find ReplicationController %s", testRcName)
+	}

 	// Delete ReplicationController
 	ginkgo.By("deleting ReplicationControllers by collection")
@@ -383,7 +404,9 @@ var _ = SIGDescribe("ReplicationController", func() {
 		return true, nil
 	})
 	framework.ExpectNoError(err, "Wait until condition with watch events should not return an error")
-	framework.ExpectEqual(eventFound, true, "failed to find RC %v event", watch.Added)
+	if !eventFound {
+		framework.Failf("failed to find RC %v event", watch.Added)
+	}

 	return actualWatchEvents
 }, func() (err error) {
@@ -97,13 +97,17 @@ func testFinishedJob(ctx context.Context, f *framework.Framework) {
 	framework.ExpectNoError(err)
 	jobFinishTime := finishTime(job)
 	finishTimeUTC := jobFinishTime.UTC()
-	framework.ExpectNotEqual(jobFinishTime.IsZero(), true)
+	if jobFinishTime.IsZero() {
+		framework.Fail("Expected job finish time not to be zero.")
+	}

 	deleteAtUTC := job.ObjectMeta.DeletionTimestamp.UTC()
 	framework.ExpectNotEqual(deleteAtUTC, nil)

 	expireAtUTC := finishTimeUTC.Add(time.Duration(ttl) * time.Second)
-	framework.ExpectEqual(deleteAtUTC.Before(expireAtUTC), false)
+	if deleteAtUTC.Before(expireAtUTC) {
+		framework.Fail("Expected job's deletion time to be after expiration time.")
+	}
 }

 // finishTime returns finish time of the specified job.