From 9d1948a2f52b29880046337accd05a029f225cc2 Mon Sep 17 00:00:00 2001
From: wawa0210
Date: Thu, 2 Jul 2020 18:14:12 +0800
Subject: [PATCH] fix test/e2e/apps staticcheck

---
 hack/.staticcheck_failures         |  1 -
 test/e2e/apps/cronjob.go           |  2 +-
 test/e2e/apps/daemon_set.go        |  6 +++---
 test/e2e/apps/deployment.go        |  6 ++++--
 test/e2e/apps/disruption.go        |  2 ++
 test/e2e/apps/network_partition.go |  2 +-
 test/e2e/apps/rc.go                |  5 +++--
 test/e2e/apps/replica_set.go       |  4 ++--
 test/e2e/apps/statefulset.go       | 11 +++++------
 9 files changed, 21 insertions(+), 18 deletions(-)

diff --git a/hack/.staticcheck_failures b/hack/.staticcheck_failures
index 1e170220eaf..35aca4bec05 100644
--- a/hack/.staticcheck_failures
+++ b/hack/.staticcheck_failures
@@ -9,7 +9,6 @@ pkg/volume/azure_dd
 pkg/volume/gcepd
 pkg/volume/rbd
 pkg/volume/testing
-test/e2e/apps
 test/e2e/autoscaling
 test/e2e_node
 test/integration/examples
diff --git a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go
index d24872dabd3..228bf6944ff 100644
--- a/test/e2e/apps/cronjob.go
+++ b/test/e2e/apps/cronjob.go
@@ -453,7 +453,7 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error
 		// Ignore Jobs pending deletion, since deletion of Jobs is now asynchronous.
 		aliveJobs := filterNotDeletedJobs(jobs)
 		if len(aliveJobs) > 1 {
-			return false, fmt.Errorf("More than one job is running %+v", jobs.Items)
+			return false, fmt.Errorf("more than one job is running %+v", jobs.Items)
 		} else if len(aliveJobs) == 0 {
 			framework.Logf("Warning: Found 0 jobs in namespace %v", ns)
 			return false, nil
diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go
index 89e20b3ae42..0d49405fb27 100644
--- a/test/e2e/apps/daemon_set.go
+++ b/test/e2e/apps/daemon_set.go
@@ -606,7 +606,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
 	if err != nil {
 		return nil, err
 	} else if len(newLabels) != len(labels) {
-		return nil, fmt.Errorf("Could not set daemon set test labels as expected")
+		return nil, fmt.Errorf("could not set daemon set test labels as expected")
 	}
 
 	return newNode, nil
@@ -698,11 +698,11 @@ func checkRunningOnNoNodes(f *framework.Framework, ds *appsv1.DaemonSet) func()
 func checkDaemonStatus(f *framework.Framework, dsName string) error {
 	ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(context.TODO(), dsName, metav1.GetOptions{})
 	if err != nil {
-		return fmt.Errorf("Could not get daemon set from v1")
+		return fmt.Errorf("could not get daemon set from v1")
 	}
 	desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
 	if desired != scheduled && desired != ready {
-		return fmt.Errorf("Error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
+		return fmt.Errorf("error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
 	}
 	return nil
 }
diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go
index eb7726dbd0d..f153bbb20b2 100644
--- a/test/e2e/apps/deployment.go
+++ b/test/e2e/apps/deployment.go
@@ -378,7 +378,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
 	numPodCreation := 1
 	for {
 		select {
-		case event, _ := <-w.ResultChan():
+		case event := <-w.ResultChan():
 			if event.Type != watch.Added {
 				continue
 			}
@@ -455,6 +455,7 @@ func testRolloverDeployment(f *framework.Framework) {
 	framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
 	// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
 	err = waitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
+	framework.ExpectNoError(err)
 	// Check if it's updated to revision 1 correctly
 	framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
 	err = checkDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
@@ -626,6 +627,7 @@ func testIterativeDeployments(f *framework.Framework) {
 			deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
 				update.Spec.Paused = false
 			})
+			framework.ExpectNoError(err)
 		}
 
 	framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
@@ -799,7 +801,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
 	// Scale the deployment to 30 replicas.
 	newReplicas = int32(30)
 	framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
-	deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
+	_, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
 		update.Spec.Replicas = &newReplicas
 	})
 	framework.ExpectNoError(err)
diff --git a/test/e2e/apps/disruption.go b/test/e2e/apps/disruption.go
index 2c27add159a..0062f901479 100644
--- a/test/e2e/apps/disruption.go
+++ b/test/e2e/apps/disruption.go
@@ -396,9 +396,11 @@ func patchPDBOrDie(cs kubernetes.Interface, dc dynamic.Interface, ns string, nam
 	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
 		old := getPDBStatusOrDie(dc, ns, name)
 		patchBytes, err := f(old)
+		framework.ExpectNoError(err)
 		if updated, err = cs.PolicyV1beta1().PodDisruptionBudgets(ns).Patch(context.TODO(), old.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, subresources...); err != nil {
 			return err
 		}
+		framework.ExpectNoError(err)
 		return nil
 	})
 
diff --git a/test/e2e/apps/network_partition.go b/test/e2e/apps/network_partition.go
index 03b72bb387b..c4737eb9513 100644
--- a/test/e2e/apps/network_partition.go
+++ b/test/e2e/apps/network_partition.go
@@ -123,7 +123,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 		// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
 		e2eskipper.SkipUnlessProviderIs("gke", "aws")
 
-		if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
+		if strings.Contains(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
 			framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
 		}
 	})
diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go
index ae433f8e1f0..08573c293a0 100644
--- a/test/e2e/apps/rc.go
+++ b/test/e2e/apps/rc.go
@@ -251,6 +251,7 @@ var _ = SIGDescribe("ReplicationController", func() {
 			}
 			return true, nil
 		})
+		framework.ExpectNoError(err, "Failed to find updated ready replica count")
 		framework.ExpectEqual(eventFound, true, "Failed to find updated ready replica count")
 
 		ginkgo.By("fetching ReplicationController status")
@@ -445,9 +446,9 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
 	if err != nil {
 		updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 		if getErr == nil {
-			err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
+			err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
 		} else {
-			err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
+			err = fmt.Errorf("pod %q never run: %v", pod.Name, err)
 		}
 	}
 	framework.ExpectNoError(err)
diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go
index f6fb4d8bbd7..9b01d8b3207 100644
--- a/test/e2e/apps/replica_set.go
+++ b/test/e2e/apps/replica_set.go
@@ -148,9 +148,9 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
 	if err != nil {
 		updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 		if getErr == nil {
-			err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
+			err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
 		} else {
-			err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
+			err = fmt.Errorf("pod %q never run: %v", pod.Name, err)
 		}
 	}
 	framework.ExpectNoError(err)
diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go
index a956f863bd9..29bf7d1dbb5 100644
--- a/test/e2e/apps/statefulset.go
+++ b/test/e2e/apps/statefulset.go
@@ -843,7 +843,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 		ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 		e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
-		ss = waitForStatus(c, ss)
+		waitForStatus(c, ss)
 
 		ginkgo.By("getting scale subresource")
 		scale, err := c.AppsV1().StatefulSets(ns).GetScale(context.TODO(), ssName, metav1.GetOptions{})
@@ -1151,7 +1151,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 	e2estatefulset.SortStatefulPods(pods)
 	err = breakPodHTTPProbe(ss, &pods.Items[1])
 	framework.ExpectNoError(err)
-	ss, pods = waitForPodNotReady(c, ss, pods.Items[1].Name)
+	ss, _ = waitForPodNotReady(c, ss, pods.Items[1].Name)
 	newImage := NewWebserverImage
 	oldImage := ss.Spec.Template.Spec.Containers[0].Image
 
@@ -1172,7 +1172,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 	e2estatefulset.SortStatefulPods(pods)
 	err = restorePodHTTPProbe(ss, &pods.Items[1])
 	framework.ExpectNoError(err)
-	ss, pods = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
+	ss, _ = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
 	ss, pods = waitForRollingUpdate(c, ss)
 	framework.ExpectEqual(ss.Status.CurrentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
 		ss.Namespace,
@@ -1195,9 +1195,8 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 	ginkgo.By("Rolling back to a previous revision")
 	err = breakPodHTTPProbe(ss, &pods.Items[1])
 	framework.ExpectNoError(err)
-	ss, pods = waitForPodNotReady(c, ss, pods.Items[1].Name)
+	ss, _ = waitForPodNotReady(c, ss, pods.Items[1].Name)
 	priorRevision := currentRevision
-	currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
 	ss, err = updateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
 		update.Spec.Template.Spec.Containers[0].Image = oldImage
 	})
@@ -1211,7 +1210,7 @@
 	pods = e2estatefulset.GetPodList(c, ss)
 	e2estatefulset.SortStatefulPods(pods)
 	restorePodHTTPProbe(ss, &pods.Items[1])
-	ss, pods = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
+	ss, _ = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
 	ss, pods = waitForRollingUpdate(c, ss)
 	framework.ExpectEqual(ss.Status.CurrentRevision, priorRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
 		ss.Namespace,