mirror of https://github.com/k3s-io/kubernetes.git
fix test/e2e/apps staticcheck
commit 9d1948a2f5
parent a138be8722
@@ -9,7 +9,6 @@ pkg/volume/azure_dd
 pkg/volume/gcepd
 pkg/volume/rbd
 pkg/volume/testing
-test/e2e/apps
 test/e2e/autoscaling
 test/e2e_node
 test/integration/examples
@@ -453,7 +453,7 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error
 		// Ignore Jobs pending deletion, since deletion of Jobs is now asynchronous.
 		aliveJobs := filterNotDeletedJobs(jobs)
 		if len(aliveJobs) > 1 {
-			return false, fmt.Errorf("More than one job is running %+v", jobs.Items)
+			return false, fmt.Errorf("more than one job is running %+v", jobs.Items)
 		} else if len(aliveJobs) == 0 {
 			framework.Logf("Warning: Found 0 jobs in namespace %v", ns)
 			return false, nil
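The hunk above shows the commit's most common pattern: staticcheck's ST1005 check wants error strings to start lower-case, because they usually end up wrapped into a longer message by the caller. A minimal standalone sketch of the rule; the findJob helper and its message are illustrative, not taken from the e2e suite:

```go
package main

import "fmt"

// findJob is a hypothetical helper, used only to illustrate staticcheck ST1005.
func findJob(name string) error {
	// ST1005 flags capitalized error strings such as:
	//   return fmt.Errorf("More than one job matches %q", name)
	// A lower-case message reads correctly once the error is wrapped:
	return fmt.Errorf("more than one job matches %q", name)
}

func main() {
	if err := findJob("demo"); err != nil {
		// Wrapping shows why the lower-case form reads better mid-sentence.
		fmt.Println(fmt.Errorf("waiting for job replacement: %w", err))
	}
}
```

The same lower-casing is applied in the daemon set, replication controller, and replica set hunks below.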
@@ -606,7 +606,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
 		if err != nil {
 			return nil, err
 		} else if len(newLabels) != len(labels) {
-			return nil, fmt.Errorf("Could not set daemon set test labels as expected")
+			return nil, fmt.Errorf("could not set daemon set test labels as expected")
 		}
 
 		return newNode, nil
@@ -698,11 +698,11 @@ func checkRunningOnNoNodes(f *framework.Framework, ds *appsv1.DaemonSet) func()
 func checkDaemonStatus(f *framework.Framework, dsName string) error {
 	ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(context.TODO(), dsName, metav1.GetOptions{})
 	if err != nil {
-		return fmt.Errorf("Could not get daemon set from v1")
+		return fmt.Errorf("could not get daemon set from v1")
 	}
 	desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
 	if desired != scheduled && desired != ready {
-		return fmt.Errorf("Error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
+		return fmt.Errorf("error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
 	}
 	return nil
 }
@@ -378,7 +378,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
 	numPodCreation := 1
 	for {
 		select {
-		case event, _ := <-w.ResultChan():
+		case event := <-w.ResultChan():
 			if event.Type != watch.Added {
 				continue
 			}
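This change addresses staticcheck's S1005 check: a single-value channel receive already discards the second "ok" result, so the explicit blank identifier is redundant. A small self-contained sketch; the buffered events channel stands in for the watch result channel used by the test:

```go
package main

import "fmt"

func main() {
	// events is an illustrative channel; the test reads from a watch.Interface.
	events := make(chan string, 1)
	events <- "ADDED"

	select {
	// staticcheck S1005 flags the redundant blank identifier:
	//   case event, _ := <-events:
	// A single-value receive already discards the "ok" flag:
	case event := <-events:
		fmt.Println("got event:", event)
	}
}
```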
@@ -455,6 +455,7 @@ func testRolloverDeployment(f *framework.Framework) {
 	framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
 	// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
 	err = waitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
+	framework.ExpectNoError(err)
 	// Check if it's updated to revision 1 correctly
 	framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
 	err = checkDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
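The added framework.ExpectNoError(err) is one of the two ways this commit resolves staticcheck's unused-assignment finding (SA4006): the error from the wait helper is now checked before err is reassigned by the next call. A minimal sketch of the same idea outside the e2e framework; waitForUpdatedReplicas is a hypothetical stand-in, not a real helper:

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// waitForUpdatedReplicas is a hypothetical stand-in for the e2e wait helper.
func waitForUpdatedReplicas(want int) error {
	if want < 1 {
		return errors.New("expected at least one updated replica")
	}
	return nil
}

func main() {
	err := waitForUpdatedReplicas(1)
	// In the original test, err was reassigned by the next call before being
	// read, which staticcheck reports as an unused value. Checking it first
	// resolves the finding:
	if err != nil {
		log.Fatalf("deployment never scaled: %v", err)
	}

	err = waitForUpdatedReplicas(3)
	if err != nil {
		log.Fatalf("deployment never reached the new replica count: %v", err)
	}
	fmt.Println("deployment scaled as expected")
}
```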
@@ -626,6 +627,7 @@ func testIterativeDeployments(f *framework.Framework) {
 		deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
 			update.Spec.Paused = false
 		})
+		framework.ExpectNoError(err)
 	}
 
 	framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
@@ -799,7 +801,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
 	// Scale the deployment to 30 replicas.
 	newReplicas = int32(30)
 	framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
-	deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
+	_, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
 		update.Spec.Replicas = &newReplicas
 	})
 	framework.ExpectNoError(err)
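Here the returned deployment object was never read again, so it is bound to the blank identifier instead, the commit's other remedy for unused assignments. A small sketch with a hypothetical scaleDeployment helper standing in for e2edeployment.UpdateDeploymentWithRetries:

```go
package main

import "fmt"

// scaleDeployment is a hypothetical stand-in for
// e2edeployment.UpdateDeploymentWithRetries: it returns the updated object
// and an error, but this caller only needs the error.
func scaleDeployment(name string, replicas int32) (string, error) {
	return fmt.Sprintf("%s scaled to %d replicas", name, replicas), nil
}

func main() {
	// Binding the first result to a named variable that is never read is what
	// staticcheck flags; the blank identifier documents that it is
	// intentionally discarded.
	_, err := scaleDeployment("webserver-deployment", 30)
	if err != nil {
		fmt.Println("scale failed:", err)
		return
	}
	fmt.Println("scale requested")
}
```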
@@ -396,9 +396,11 @@ func patchPDBOrDie(cs kubernetes.Interface, dc dynamic.Interface, ns string, nam
 	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
 		old := getPDBStatusOrDie(dc, ns, name)
 		patchBytes, err := f(old)
 		framework.ExpectNoError(err)
-		updated, err = cs.PolicyV1beta1().PodDisruptionBudgets(ns).Patch(context.TODO(), old.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, subresources...)
+		if updated, err = cs.PolicyV1beta1().PodDisruptionBudgets(ns).Patch(context.TODO(), old.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, subresources...); err != nil {
+			return err
+		}
 		framework.ExpectNoError(err)
 		return nil
 	})
 
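In this hunk the Patch call is folded into an if statement so the returned error is handled where it is produced instead of being assigned and then ignored. A self-contained sketch of the same shape; applyPatch and patchOrFail are illustrative stand-ins, not the client-go API:

```go
package main

import (
	"errors"
	"fmt"
)

// applyPatch is a hypothetical stand-in for the client-go Patch call: it
// returns the updated object and an error.
func applyPatch(name string, patch []byte) (string, error) {
	if len(patch) == 0 {
		return "", errors.New("empty patch")
	}
	return name + " (patched)", nil
}

// patchOrFail mirrors the shape of the fixed retry callback: the assignment is
// wrapped in an if statement so the error is checked on the spot.
func patchOrFail(name string, patch []byte) (updated string, err error) {
	if updated, err = applyPatch(name, patch); err != nil {
		return "", err
	}
	return updated, nil
}

func main() {
	got, err := patchOrFail("demo-pdb", []byte(`{"spec":{"minAvailable":2}}`))
	if err != nil {
		fmt.Println("patch failed:", err)
		return
	}
	fmt.Println(got)
}
```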
@@ -123,7 +123,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 
 		// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
 		e2eskipper.SkipUnlessProviderIs("gke", "aws")
-		if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
+		if strings.Contains(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
 			framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
 		}
 	})
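This is staticcheck's S1003 simplification: strings.Index(s, sub) >= 0 is replaced by the equivalent strings.Contains(s, sub). A tiny sketch with an illustrative instance-group string in place of the test context value:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	nodeInstanceGroup := "group-a,group-b" // illustrative value

	// staticcheck suggests replacing the index comparison
	//   strings.Index(nodeInstanceGroup, ",") >= 0
	// with the equivalent, more readable call:
	if strings.Contains(nodeInstanceGroup, ",") {
		fmt.Println("more than one managed instance group configured")
	}
}
```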
@@ -251,6 +251,7 @@ var _ = SIGDescribe("ReplicationController", func() {
 			}
 			return true, nil
 		})
+		framework.ExpectNoError(err, "Failed to find updated ready replica count")
 		framework.ExpectEqual(eventFound, true, "Failed to find updated ready replica count")
 
 		ginkgo.By("fetching ReplicationController status")
@@ -445,9 +446,9 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
 	if err != nil {
 		updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 		if getErr == nil {
-			err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
+			err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
 		} else {
-			err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
+			err = fmt.Errorf("pod %q never run: %v", pod.Name, err)
 		}
 	}
 	framework.ExpectNoError(err)
@@ -148,9 +148,9 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
 	if err != nil {
 		updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 		if getErr == nil {
-			err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
+			err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
 		} else {
-			err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
+			err = fmt.Errorf("pod %q never run: %v", pod.Name, err)
 		}
 	}
 	framework.ExpectNoError(err)
@@ -843,7 +843,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 		ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 		e2estatefulset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
-		ss = waitForStatus(c, ss)
+		waitForStatus(c, ss)
 
 		ginkgo.By("getting scale subresource")
 		scale, err := c.AppsV1().StatefulSets(ns).GetScale(context.TODO(), ssName, metav1.GetOptions{})
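Here the refreshed StatefulSet returned by waitForStatus was assigned back to ss but never read, so the assignment is dropped and the helper is called only for its waiting side effect; the StatefulSet hunks below apply the same idea by receiving unused results into the blank identifier. A minimal sketch with a hypothetical waitForStatus stand-in:

```go
package main

import (
	"fmt"
	"time"
)

// waitForStatus is a hypothetical stand-in for the e2e helper: it blocks
// until a condition holds and also returns a refreshed value for callers
// that need it.
func waitForStatus(name string) string {
	time.Sleep(10 * time.Millisecond) // pretend to poll the API server
	return name + " (status observed)"
}

func main() {
	name := "ss-scale-test"

	// Before the fix the test did the equivalent of:
	//   name = waitForStatus(name)
	// and never read the reassigned value, which staticcheck reports as an
	// unused assignment. Calling the helper only for its blocking side effect
	// avoids that:
	waitForStatus(name)

	fmt.Println("scale subresource can be fetched for", name)
}
```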
@@ -1151,7 +1151,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 	e2estatefulset.SortStatefulPods(pods)
 	err = breakPodHTTPProbe(ss, &pods.Items[1])
 	framework.ExpectNoError(err)
-	ss, pods = waitForPodNotReady(c, ss, pods.Items[1].Name)
+	ss, _ = waitForPodNotReady(c, ss, pods.Items[1].Name)
 	newImage := NewWebserverImage
 	oldImage := ss.Spec.Template.Spec.Containers[0].Image
 
@@ -1172,7 +1172,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 	e2estatefulset.SortStatefulPods(pods)
 	err = restorePodHTTPProbe(ss, &pods.Items[1])
 	framework.ExpectNoError(err)
-	ss, pods = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
+	ss, _ = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
 	ss, pods = waitForRollingUpdate(c, ss)
 	framework.ExpectEqual(ss.Status.CurrentRevision, updateRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
 		ss.Namespace,
@@ -1195,9 +1195,8 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 	ginkgo.By("Rolling back to a previous revision")
 	err = breakPodHTTPProbe(ss, &pods.Items[1])
 	framework.ExpectNoError(err)
-	ss, pods = waitForPodNotReady(c, ss, pods.Items[1].Name)
+	ss, _ = waitForPodNotReady(c, ss, pods.Items[1].Name)
 	priorRevision := currentRevision
 	currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
 	ss, err = updateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
 		update.Spec.Template.Spec.Containers[0].Image = oldImage
 	})
@@ -1211,7 +1210,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
 	pods = e2estatefulset.GetPodList(c, ss)
 	e2estatefulset.SortStatefulPods(pods)
 	restorePodHTTPProbe(ss, &pods.Items[1])
-	ss, pods = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
+	ss, _ = e2estatefulset.WaitForPodReady(c, ss, pods.Items[1].Name)
 	ss, pods = waitForRollingUpdate(c, ss)
 	framework.ExpectEqual(ss.Status.CurrentRevision, priorRevision, fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
 		ss.Namespace,