Merge pull request #88930 from oomichi/Failf

Replace ExpectNoError(fmt.Errorf(..)) calls with dedicated framework expect helpers (ExpectEqual / ExpectNotEqual / ExpectNoError with an explanation)
This commit is contained in:
Kubernetes Prow Robot 2020-03-17 19:28:20 -07:00 committed by GitHub
commit b88ea8d2bb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 8 additions and 21 deletions

View File

@ -157,9 +157,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
}
// Sanity check
if running != replicas {
framework.ExpectNoError(fmt.Errorf("unexpected number of running pods: %+v", pods.Items))
}
framework.ExpectEqual(running, replicas, "unexpected number of running pods: %+v", pods.Items)
// Verify that something is listening.
framework.Logf("Trying to dial the pod")

View File

@ -159,9 +159,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
}
// Sanity check
if running != replicas {
framework.ExpectNoError(fmt.Errorf("unexpected number of running pods: %+v", pods.Items))
}
framework.ExpectEqual(running, replicas, "unexpected number of running pods: %+v", pods.Items)
// Verify that something is listening.
framework.Logf("Trying to dial the pod")

View File

@ -182,9 +182,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
}
return true, nil
})
if pollErr != nil {
framework.ExpectNoError(fmt.Errorf("timed out waiting for ingress %s to get %s annotation", name, instanceGroupAnnotation))
}
framework.ExpectNoError(pollErr, "timed out waiting for ingress %s to get %s annotation", name, instanceGroupAnnotation)
// Verify that the ingress does not get other annotations like url-map, target-proxy, backends, etc.
// Note: All resources except the firewall rule have an annotation.

View File

@ -141,9 +141,8 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() {
ginkgo.By("creating a test pod on each Node")
nodes, err := nc.List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
if len(nodes.Items) == 0 {
framework.ExpectNoError(fmt.Errorf("no Nodes in the cluster"))
}
framework.ExpectNotEqual(len(nodes.Items), 0, "no Nodes in the cluster")
for _, node := range nodes.Items {
// find the Node's internal ip address to feed to the Pod
inIP, err := getIP(v1.NodeInternalIP, &node)

View File

@ -79,9 +79,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
rsList, err := rsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: rsSelector.String()})
framework.ExpectNoError(err)
rss := rsList.Items
if len(rss) != 1 {
framework.ExpectNoError(fmt.Errorf("expected one replicaset, got %d", len(rss)))
}
framework.ExpectEqual(len(rss), 1, "expected one replicaset, got %d", len(rss))
t.oldRSUID = rss[0].UID
ginkgo.By(fmt.Sprintf("Waiting for revision of the deployment %q to become 1", deploymentName))
@ -101,9 +99,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
rsList, err = rsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: rsSelector.String()})
framework.ExpectNoError(err)
rss = rsList.Items
if len(rss) != 2 {
framework.ExpectNoError(fmt.Errorf("expected 2 replicaset, got %d", len(rss)))
}
framework.ExpectEqual(len(rss), 2, "expected 2 replicaset, got %d", len(rss))
ginkgo.By(fmt.Sprintf("Checking replicaset of deployment %q that is created before rollout survives the rollout", deploymentName))
switch t.oldRSUID {
@ -144,9 +140,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
rsList, err := rsClient.List(context.TODO(), metav1.ListOptions{LabelSelector: rsSelector.String()})
framework.ExpectNoError(err)
rss := rsList.Items
if len(rss) != 2 {
framework.ExpectNoError(fmt.Errorf("expected 2 replicaset, got %d", len(rss)))
}
framework.ExpectEqual(len(rss), 2, "expected 2 replicaset, got %d", len(rss))
switch t.oldRSUID {
case rss[0].UID: