fix test/e2e/apps staticcheck

wawa0210 2020-07-02 18:14:12 +08:00
parent a138be8722
commit 9d1948a2f5
9 changed files with 21 additions and 18 deletions
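
The findings fixed here fall into a few common staticcheck categories: capitalized error strings (ST1005), strings.Index(s, sub) >= 0 where strings.Contains(s, sub) suffices (S1003), and values assigned to variables that are never read afterwards, such as an unchecked err or an unused second return value (SA4006 and related checks). The snippet below is a minimal, hypothetical before/after sketch of those patterns, added only to summarize the commit; the function and variable names are made up and do not appear in the diffs.

package main

import (
	"fmt"
	"strings"
)

// findInstanceGroup is a made-up helper showing the ST1005 and S1003 fixes:
// the error string starts lowercase, and strings.Contains replaces the older
// strings.Index(...) >= 0 form.
func findInstanceGroup(groups, name string) error {
	if !strings.Contains(groups, name) { // was: strings.Index(groups, name) < 0
		return fmt.Errorf("could not find instance group %q in %q", name, groups) // was: "Could not find ..."
	}
	return nil
}

func main() {
	// SA4006-style fix: rather than assigning to err and never reading it,
	// check the error; the e2e tests below do this with framework.ExpectNoError.
	if err := findInstanceGroup("group-a,group-b", "group-a"); err != nil {
		fmt.Println(err)
	}
}

In the hunks that follow, the same shapes appear as lowercased fmt.Errorf strings, a strings.Contains rewrite, added framework.ExpectNoError(err) calls after previously unchecked errors, and blank identifiers for unused return values.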

@@ -9,7 +9,6 @@ pkg/volume/azure_dd
 pkg/volume/gcepd
 pkg/volume/rbd
 pkg/volume/testing
-test/e2e/apps
 test/e2e/autoscaling
 test/e2e_node
 test/integration/examples

@@ -453,7 +453,7 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error
 		// Ignore Jobs pending deletion, since deletion of Jobs is now asynchronous.
 		aliveJobs := filterNotDeletedJobs(jobs)
 		if len(aliveJobs) > 1 {
-			return false, fmt.Errorf("More than one job is running %+v", jobs.Items)
+			return false, fmt.Errorf("more than one job is running %+v", jobs.Items)
 		} else if len(aliveJobs) == 0 {
 			framework.Logf("Warning: Found 0 jobs in namespace %v", ns)
 			return false, nil

@@ -606,7 +606,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
 		if err != nil {
 			return nil, err
 		} else if len(newLabels) != len(labels) {
-			return nil, fmt.Errorf("Could not set daemon set test labels as expected")
+			return nil, fmt.Errorf("could not set daemon set test labels as expected")
 		}
 		return newNode, nil
@@ -698,11 +698,11 @@ func checkRunningOnNoNodes(f *framework.Framework, ds *appsv1.DaemonSet) func()
 func checkDaemonStatus(f *framework.Framework, dsName string) error {
 	ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(context.TODO(), dsName, metav1.GetOptions{})
 	if err != nil {
-		return fmt.Errorf("Could not get daemon set from v1")
+		return fmt.Errorf("could not get daemon set from v1")
 	}
 	desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
 	if desired != scheduled && desired != ready {
-		return fmt.Errorf("Error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
+		return fmt.Errorf("error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
 	}
 	return nil
 }

@@ -378,7 +378,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
 	numPodCreation := 1
 	for {
 		select {
-		case event, _ := <-w.ResultChan():
+		case event := <-w.ResultChan():
 			if event.Type != watch.Added {
 				continue
 			}
@@ -455,6 +455,7 @@ func testRolloverDeployment(f *framework.Framework) {
 	framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
 	// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
 	err = waitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
+	framework.ExpectNoError(err)
 	// Check if it's updated to revision 1 correctly
 	framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
 	err = checkDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
@@ -626,6 +627,7 @@ func testIterativeDeployments(f *framework.Framework) {
 		deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
 			update.Spec.Paused = false
 		})
+		framework.ExpectNoError(err)
 	}
 	framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
@@ -799,7 +801,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
 	// Scale the deployment to 30 replicas.
 	newReplicas = int32(30)
 	framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
-	deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
+	_, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
 		update.Spec.Replicas = &newReplicas
 	})
 	framework.ExpectNoError(err)

@@ -396,9 +396,11 @@ func patchPDBOrDie(cs kubernetes.Interface, dc dynamic.Interface, ns string, nam
 	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
 		old := getPDBStatusOrDie(dc, ns, name)
 		patchBytes, err := f(old)
+		framework.ExpectNoError(err)
 		if updated, err = cs.PolicyV1beta1().PodDisruptionBudgets(ns).Patch(context.TODO(), old.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, subresources...); err != nil {
 			return err
 		}
+		framework.ExpectNoError(err)
 		return nil
 	})

@@ -123,7 +123,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 		// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
 		e2eskipper.SkipUnlessProviderIs("gke", "aws")
-		if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
+		if strings.Contains(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
 			framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
 		}
 	})

@@ -251,6 +251,7 @@ var _ = SIGDescribe("ReplicationController", func() {
 			}
 			return true, nil
 		})
+		framework.ExpectNoError(err, "Failed to find updated ready replica count")
 		framework.ExpectEqual(eventFound, true, "Failed to find updated ready replica count")
 		ginkgo.By("fetching ReplicationController status")
@@ -445,9 +446,9 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
 	if err != nil {
 		updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 		if getErr == nil {
-			err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
+			err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
 		} else {
-			err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
+			err = fmt.Errorf("pod %q never run: %v", pod.Name, err)
 		}
 	}
 	framework.ExpectNoError(err)

@@ -148,9 +148,9 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
 	if err != nil {
 		updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 		if getErr == nil {
-			err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
+			err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
 		} else {
-			err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
+			err = fmt.Errorf("pod %q never run: %v", pod.Name, err)
 		}
 	}
 	framework.ExpectNoError(err)

@@ -843,7 +843,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 			ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
 			framework.ExpectNoError(err)
 			e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
-			ss = waitForStatus(c, ss)
+			waitForStatus(c, ss)
 			ginkgo.By("getting scale subresource")
 			scale, err := c.AppsV1().StatefulSets(ns).GetScale(context.TODO(), ssName, metav1.GetOptions{})
@@ -1151,7 +1151,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 	e2estatefulset.SortStatefulPods(pods)
 	err = breakPodHTTPProbe(ss, &pods.Items[1])
 	framework.ExpectNoError(err)
-	ss, pods = waitForPodNotReady(c, ss, pods.Items[1].Name)
+	ss, _ = waitForPodNotReady(c, ss, pods.Items[1].Name)
 	newImage := NewWebserverImage
 	oldImage := ss.Spec.Template.Spec.Containers[0].Image
@@ -1172,7 +1172,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 	e2estatefulset.SortStatefulPods(pods)
 	err = restorePodHTTPProbe(ss, &pods.Items[1])
 	framework.ExpectNoError(err)
-	ss, pods = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
+	ss, _ = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
 	ss, pods = waitForRollingUpdate(c, ss)
 	framework.ExpectEqual(ss.Status.CurrentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
 		ss.Namespace,
@@ -1195,9 +1195,8 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 	ginkgo.By("Rolling back to a previous revision")
 	err = breakPodHTTPProbe(ss, &pods.Items[1])
 	framework.ExpectNoError(err)
-	ss, pods = waitForPodNotReady(c, ss, pods.Items[1].Name)
+	ss, _ = waitForPodNotReady(c, ss, pods.Items[1].Name)
 	priorRevision := currentRevision
-	currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
 	ss, err = updateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
 		update.Spec.Template.Spec.Containers[0].Image = oldImage
 	})
@@ -1211,7 +1210,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 	pods = e2estatefulset.GetPodList(c, ss)
 	e2estatefulset.SortStatefulPods(pods)
 	restorePodHTTPProbe(ss, &pods.Items[1])
-	ss, pods = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
+	ss, _ = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
 	ss, pods = waitForRollingUpdate(c, ss)
 	framework.ExpectEqual(ss.Status.CurrentRevision, priorRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
 		ss.Namespace,