From 0d73c0d0e59e94afdc3b00fea343b58404a770ac Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Sat, 10 Dec 2022 20:35:46 +0100
Subject: [PATCH] e2e: fix linter errors

Adding "ctx" as a parameter in the previous commit led to some linter
errors about code that overwrites "ctx" without using it.

This gets fixed by replacing context.Background or context.TODO in those
code lines with the new ctx parameter.

Two context.WithCancel calls can be removed completely because the
context automatically gets cancelled by Ginkgo when the test returns.
---
 test/e2e/apimachinery/resource_quota.go                   | 2 +-
 test/e2e/apps/daemon_set.go                               | 2 +-
 test/e2e/apps/deployment.go                               | 4 ++--
 test/e2e/apps/statefulset.go                              | 4 ++--
 test/e2e/common/node/init_container.go                    | 8 ++++----
 test/e2e/common/node/pods.go                              | 4 ++--
 test/e2e/dra/dra.go                                       | 1 -
 test/e2e/network/service.go                               | 4 ++--
 test/e2e/scheduling/limit_range.go                        | 4 ++--
 test/e2e/storage/csi_mock_volume.go                       | 4 ++--
 test/e2e/storage/vsphere/vsphere_volume_node_delete.go    | 3 ---
 test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go  | 2 --
 12 files changed, 18 insertions(+), 24 deletions(-)

diff --git a/test/e2e/apimachinery/resource_quota.go b/test/e2e/apimachinery/resource_quota.go
index 4dbd7d7a665..9a8d9acfe2f 100644
--- a/test/e2e/apimachinery/resource_quota.go
+++ b/test/e2e/apimachinery/resource_quota.go
@@ -1063,7 +1063,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		framework.ExpectNoError(err, "Failed to update resourceQuota")
 
 		ginkgo.By(fmt.Sprintf("Confirm /status for %q resourceQuota via watch", rqName))
-		ctx, cancel := context.WithTimeout(context.Background(), f.Timeouts.PodStartShort)
+		ctx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStartShort)
 		defer cancel()
 
 		_, err = watchtools.Until(ctx, rqList.ResourceVersion, w, func(event watch.Event) (bool, error) {
diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go
index 57e39ad34b6..2e7227bc79c 100644
--- a/test/e2e/apps/daemon_set.go
+++ b/test/e2e/apps/daemon_set.go
@@ -919,7 +919,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 		framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions)
 
 		ginkgo.By("watching for the daemon set status to be updated")
-		ctx, cancel := context.WithTimeout(context.Background(), dsRetryTimeout)
+		ctx, cancel := context.WithTimeout(ctx, dsRetryTimeout)
 		defer cancel()
 		_, err = watchtools.Until(ctx, dsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
 			if ds, ok := event.Object.(*appsv1.DaemonSet); ok {
diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go
index b5f04d38640..cb987e185a5 100644
--- a/test/e2e/apps/deployment.go
+++ b/test/e2e/apps/deployment.go
@@ -215,7 +215,7 @@ var _ = SIGDescribe("Deployment", func() {
 		framework.ExpectNoError(err, "failed to create Deployment %v in namespace %v", testDeploymentName, testNamespaceName)
 
 		ginkgo.By("waiting for Deployment to be created")
-		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+		ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
 		defer cancel()
 		_, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
 			switch event.Type {
@@ -542,7 +542,7 @@ var _ = SIGDescribe("Deployment", func() {
 		framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions)
 
 		ginkgo.By("watching for the Deployment status to be updated")
-		ctx, cancel := context.WithTimeout(context.Background(), dRetryTimeout)
+		ctx, cancel := context.WithTimeout(ctx, dRetryTimeout)
 		defer cancel()
 
 		_, err = watchtools.Until(ctx, dList.ResourceVersion, w, func(event watch.Event) (bool, error) {
diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go
index ba6f548a846..ed83fa6d11e 100644
--- a/test/e2e/apps/statefulset.go
+++ b/test/e2e/apps/statefulset.go
@@ -796,7 +796,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 				return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(context.TODO(), options)
 			},
 		}
-		ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), statefulPodTimeout)
+		ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, statefulPodTimeout)
 		defer cancel()
 		// we need to get UID from pod in any state and wait until stateful set controller will remove pod at least once
 		_, err = watchtools.Until(ctx, pl.ResourceVersion, lw, func(event watch.Event) (bool, error) {
@@ -1034,7 +1034,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 
 		ginkgo.By("watching for the statefulset status to be updated")
 
-		ctx, cancel := context.WithTimeout(context.Background(), statefulSetTimeout)
+		ctx, cancel := context.WithTimeout(ctx, statefulSetTimeout)
 		defer cancel()
 
 		_, err = watchtools.Until(ctx, ssList.ResourceVersion, w, func(event watch.Event) (bool, error) {
diff --git a/test/e2e/common/node/init_container.go b/test/e2e/common/node/init_container.go
index 401c752defd..548d25d2b25 100644
--- a/test/e2e/common/node/init_container.go
+++ b/test/e2e/common/node/init_container.go
@@ -220,7 +220,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 			},
 		}
 		var events []watch.Event
-		ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
+		ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, framework.PodStartTimeout)
 		defer cancel()
 		event, err := watchtools.Until(ctx, startedPod.ResourceVersion, w, recordEvents(events, conditions.PodCompleted),
@@ -301,7 +301,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 			},
 		}
 		var events []watch.Event
-		ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
+		ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, framework.PodStartTimeout)
 		defer cancel()
 		event, err := watchtools.Until(ctx, startedPod.ResourceVersion, w, recordEvents(events, conditions.PodRunning))
 		framework.ExpectNoError(err)
@@ -382,7 +382,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 		}
 
 		var events []watch.Event
-		ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
+		ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, framework.PodStartTimeout)
 		defer cancel()
 		event, err := watchtools.Until(
 			ctx,
@@ -507,7 +507,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 		}
 
 		var events []watch.Event
-		ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
+		ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, framework.PodStartTimeout)
 		defer cancel()
 		event, err := watchtools.Until(
 			ctx, startedPod.ResourceVersion, w,
diff --git a/test/e2e/common/node/pods.go b/test/e2e/common/node/pods.go
index 746892c0208..b017a2a80b6 100644
--- a/test/e2e/common/node/pods.go
+++ b/test/e2e/common/node/pods.go
@@ -266,7 +266,7 @@ var _ = SIGDescribe("Pods", func() {
 		_, informer, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.Pod{})
 		defer w.Stop()
-		ctx, cancelCtx := context.WithTimeout(context.TODO(), wait.ForeverTestTimeout)
+		ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout)
 		defer cancelCtx()
 		if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
 			framework.Failf("Timeout while waiting to Pod informer to sync")
 		}
@@ -932,7 +932,7 @@ var _ = SIGDescribe("Pods", func() {
 		framework.ExpectNoError(err, "failed to create Pod %v in namespace %v", testPod.ObjectMeta.Name, testNamespaceName)
 
 		ginkgo.By("watching for Pod to be ready")
-		ctx, cancel := context.WithTimeout(context.Background(), f.Timeouts.PodStart)
+		ctx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart)
 		defer cancel()
 		_, err = watchtools.Until(ctx, podsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
 			if pod, ok := event.Object.(*v1.Pod); ok {
diff --git a/test/e2e/dra/dra.go b/test/e2e/dra/dra.go
index 65da3298c67..cabebc24e9a 100644
--- a/test/e2e/dra/dra.go
+++ b/test/e2e/dra/dra.go
@@ -52,7 +52,6 @@ func networkResources() app.Resources {
 
 var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", func() {
 	f := framework.NewDefaultFramework("dra")
-	ctx := context.Background()
 
 	// The driver containers have to run with sufficient privileges to
 	// modify /var/lib/kubelet/plugins.
diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go
index 08566e6abcc..05bef333d0e 100644
--- a/test/e2e/network/service.go
+++ b/test/e2e/network/service.go
@@ -3275,7 +3275,7 @@ var _ = common.SIGDescribe("Services", func() {
 		_, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Create(context.TODO(), &testEndpoints, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "failed to create Endpoint")
 		ginkgo.By("waiting for available Endpoint")
-		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+		ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
 		defer cancel()
 		_, err = watchtools.Until(ctx, endpointsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
 			switch event.Type {
@@ -3465,7 +3465,7 @@ var _ = common.SIGDescribe("Services", func() {
 		framework.ExpectNoError(err, "failed to create Service")
 
 		ginkgo.By("watching for the Service to be added")
-		ctx, cancel := context.WithTimeout(context.Background(), svcReadyTimeout)
+		ctx, cancel := context.WithTimeout(ctx, svcReadyTimeout)
 		defer cancel()
 		_, err = watchtools.Until(ctx, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) {
 			if svc, ok := event.Object.(*v1.Service); ok {
diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go
index 669b7fdf7ab..807de255e0e 100644
--- a/test/e2e/scheduling/limit_range.go
+++ b/test/e2e/scheduling/limit_range.go
@@ -93,7 +93,7 @@ var _ = SIGDescribe("LimitRange", func() {
 		_, informer, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.LimitRange{})
 		defer w.Stop()
-		ctx, cancelCtx := context.WithTimeout(context.TODO(), wait.ForeverTestTimeout)
+		ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout)
 		defer cancelCtx()
 		if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
 			framework.Failf("Timeout while waiting for LimitRange informer to sync")
 		}
@@ -275,7 +275,7 @@ var _ = SIGDescribe("LimitRange", func() {
 		limitRange2 := &v1.LimitRange{}
 		*limitRange2 = *limitRange
 
-		ctx, cancelCtx := context.WithTimeout(context.Background(), wait.ForeverTestTimeout)
+		ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout)
 		defer cancelCtx()
 
 		ginkgo.By(fmt.Sprintf("Creating LimitRange %q in namespace %q", lrName, f.Namespace.Name))
diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go
index 390206c6732..799e1024a5b 100644
--- a/test/e2e/storage/csi_mock_volume.go
+++ b/test/e2e/storage/csi_mock_volume.go
@@ -961,7 +961,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 
 			ginkgo.By("Waiting for expected CSI calls")
 			// Watch for all calls up to deletePod = true
-			ctx, cancel := context.WithTimeout(context.Background(), csiPodRunningTimeout)
+			ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout)
 			defer cancel()
 			for {
 				if ctx.Err() != nil {
@@ -1513,7 +1513,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 				e2eskipper.Skipf("mock driver %s does not support snapshots -- skipping", m.driver.GetDriverInfo().Name)
 			}
 
-			ctx, cancel := context.WithTimeout(context.Background(), csiPodRunningTimeout)
+			ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout)
 			defer cancel()
 			defer cleanup()
 
diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go
index 0921ba0dc7f..b8c8eff664a 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go
@@ -69,9 +69,6 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
 		// They are required to register a node VM to VC
 		vmxFilePath := getVMXFilePath(vmObject)
 
-		ctx, cancel := context.WithCancel(context.Background())
-		defer cancel()
-
 		vmHost, err := vmObject.HostSystem(ctx)
 		framework.ExpectNoError(err)
 
diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
index 64860054ddb..bf5bf66d36e 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
@@ -123,8 +123,6 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
 		nodeInfo := TestContext.NodeMapper.GetNodeInfo(node1)
 		vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
 
-		ctx, cancel := context.WithCancel(context.Background())
-		defer cancel()
 		_, err = vm.PowerOff(ctx)
 		framework.ExpectNoError(err)
 		defer vm.PowerOn(ctx)
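
The change applied throughout the patch follows one small pattern; below is a minimal sketch of it under assumed names (the It block, someTimeout constant, and doSomething helper are illustrative stand-ins, not code from the files above).

package example

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
)

// someTimeout stands in for values such as f.Timeouts.PodStart or
// csiPodRunningTimeout in the real tests.
const someTimeout = 30 * time.Second

var _ = ginkgo.It("derives per-step contexts from the test context", func(ctx context.Context) {
	// Before: ctx, cancel := context.WithTimeout(context.Background(), someTimeout)
	// Deriving the timeout from the ctx that Ginkgo passes in means ctx is
	// used before being overwritten (which silences the linter) and the step
	// is bounded by the spec's lifetime as well as by the timeout.
	ctx, cancel := context.WithTimeout(ctx, someTimeout)
	defer cancel()

	// A bare context.WithCancel(context.Background()) wrapper is unnecessary:
	// Ginkgo cancels the spec context automatically when the test returns, so
	// ctx can be passed directly to calls that only need cancellation.
	doSomething(ctx)
})

// doSomething is a placeholder for the real test steps (watchtools.Until,
// vSphere API calls, etc.) that accept a context.
func doSomething(ctx context.Context) {}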