e2e: fix linter errors

Adding "ctx" as parameter in the previous commit led to some linter errors
about code that overwrites "ctx" without using it.

This is fixed by replacing context.Background or context.TODO on those lines
with the new ctx parameter.
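
For illustration, a minimal self-contained sketch of that pattern (not taken
from the e2e code; the watchUntilReady name and the timings are made up): the
timeout context is derived from the ctx the test already receives, so
cancelling the parent also stops the wait.

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // watchUntilReady stands in for the test bodies in the diffs below;
    // the real tests receive ctx from the test framework.
    func watchUntilReady(ctx context.Context, timeout time.Duration) error {
        // Derive from the inherited ctx instead of context.Background(),
        // so both the timeout and a cancelled parent stop the wait.
        ctx, cancel := context.WithTimeout(ctx, timeout)
        defer cancel()

        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(10 * time.Millisecond): // placeholder for the real watch
            return nil
        }
    }

    func main() {
        fmt.Println(watchUntilReady(context.Background(), time.Second))
    }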

Two context.WithCancel calls can be removed entirely because Ginkgo cancels
the context automatically when the test returns.
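
A minimal sketch of why those calls are redundant, assuming Ginkgo v2's spec
contexts (the spec name and the doSomething helper below are made up): when a
spec body takes a context parameter, Ginkgo cancels that context once the body
returns, so wrapping it in context.WithCancel plus defer cancel() adds nothing.

    package e2e_sketch_test

    import (
        "context"
        "testing"

        "github.com/onsi/ginkgo/v2"
        "github.com/onsi/gomega"
    )

    func TestSketch(t *testing.T) {
        gomega.RegisterFailHandler(ginkgo.Fail)
        ginkgo.RunSpecs(t, "spec context sketch")
    }

    var _ = ginkgo.Describe("spec context", func() {
        ginkgo.It("is cancelled when the spec returns", func(ctx context.Context) {
            // No ctx, cancel := context.WithCancel(ctx) / defer cancel() needed:
            // Ginkgo owns this ctx and cancels it after the body returns.
            gomega.Expect(doSomething(ctx)).To(gomega.Succeed())
        })
    })

    // doSomething is a stand-in for the calls made with ctx in the diffs below.
    func doSomething(ctx context.Context) error {
        return ctx.Err()
    }
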
Patrick Ohly 2022-12-10 20:35:46 +01:00
parent df5d84ae81
commit 0d73c0d0e5
12 changed files with 18 additions and 24 deletions

@@ -1063,7 +1063,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 framework.ExpectNoError(err, "Failed to update resourceQuota")
 ginkgo.By(fmt.Sprintf("Confirm /status for %q resourceQuota via watch", rqName))
-ctx, cancel := context.WithTimeout(context.Background(), f.Timeouts.PodStartShort)
+ctx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStartShort)
 defer cancel()
 _, err = watchtools.Until(ctx, rqList.ResourceVersion, w, func(event watch.Event) (bool, error) {

@@ -919,7 +919,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions)
 ginkgo.By("watching for the daemon set status to be updated")
-ctx, cancel := context.WithTimeout(context.Background(), dsRetryTimeout)
+ctx, cancel := context.WithTimeout(ctx, dsRetryTimeout)
 defer cancel()
 _, err = watchtools.Until(ctx, dsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
 if ds, ok := event.Object.(*appsv1.DaemonSet); ok {

@@ -215,7 +215,7 @@ var _ = SIGDescribe("Deployment", func() {
 framework.ExpectNoError(err, "failed to create Deployment %v in namespace %v", testDeploymentName, testNamespaceName)
 ginkgo.By("waiting for Deployment to be created")
-ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
 defer cancel()
 _, err = watchtools.Until(ctx, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
 switch event.Type {
@@ -542,7 +542,7 @@ var _ = SIGDescribe("Deployment", func() {
 framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions)
 ginkgo.By("watching for the Deployment status to be updated")
-ctx, cancel := context.WithTimeout(context.Background(), dRetryTimeout)
+ctx, cancel := context.WithTimeout(ctx, dRetryTimeout)
 defer cancel()
 _, err = watchtools.Until(ctx, dList.ResourceVersion, w, func(event watch.Event) (bool, error) {

@@ -796,7 +796,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(context.TODO(), options)
 },
 }
-ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), statefulPodTimeout)
+ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, statefulPodTimeout)
 defer cancel()
 // we need to get UID from pod in any state and wait until stateful set controller will remove pod at least once
 _, err = watchtools.Until(ctx, pl.ResourceVersion, lw, func(event watch.Event) (bool, error) {
@@ -1034,7 +1034,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 ginkgo.By("watching for the statefulset status to be updated")
-ctx, cancel := context.WithTimeout(context.Background(), statefulSetTimeout)
+ctx, cancel := context.WithTimeout(ctx, statefulSetTimeout)
 defer cancel()
 _, err = watchtools.Until(ctx, ssList.ResourceVersion, w, func(event watch.Event) (bool, error) {

@@ -220,7 +220,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 },
 }
 var events []watch.Event
-ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
+ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, framework.PodStartTimeout)
 defer cancel()
 event, err := watchtools.Until(ctx, startedPod.ResourceVersion, w,
 recordEvents(events, conditions.PodCompleted),
@@ -301,7 +301,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 },
 }
 var events []watch.Event
-ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
+ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, framework.PodStartTimeout)
 defer cancel()
 event, err := watchtools.Until(ctx, startedPod.ResourceVersion, w, recordEvents(events, conditions.PodRunning))
 framework.ExpectNoError(err)
@@ -382,7 +382,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 }
 var events []watch.Event
-ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
+ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, framework.PodStartTimeout)
 defer cancel()
 event, err := watchtools.Until(
 ctx,
@@ -507,7 +507,7 @@ var _ = SIGDescribe("InitContainer [NodeConformance]", func() {
 }
 var events []watch.Event
-ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
+ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, framework.PodStartTimeout)
 defer cancel()
 event, err := watchtools.Until(
 ctx, startedPod.ResourceVersion, w,

@@ -266,7 +266,7 @@ var _ = SIGDescribe("Pods", func() {
 _, informer, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.Pod{})
 defer w.Stop()
-ctx, cancelCtx := context.WithTimeout(context.TODO(), wait.ForeverTestTimeout)
+ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout)
 defer cancelCtx()
 if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
 framework.Failf("Timeout while waiting to Pod informer to sync")
@@ -932,7 +932,7 @@ var _ = SIGDescribe("Pods", func() {
 framework.ExpectNoError(err, "failed to create Pod %v in namespace %v", testPod.ObjectMeta.Name, testNamespaceName)
 ginkgo.By("watching for Pod to be ready")
-ctx, cancel := context.WithTimeout(context.Background(), f.Timeouts.PodStart)
+ctx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart)
 defer cancel()
 _, err = watchtools.Until(ctx, podsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
 if pod, ok := event.Object.(*v1.Pod); ok {

@@ -52,7 +52,6 @@ func networkResources() app.Resources {
 var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", func() {
 f := framework.NewDefaultFramework("dra")
-ctx := context.Background()
 // The driver containers have to run with sufficient privileges to
 // modify /var/lib/kubelet/plugins.

@@ -3275,7 +3275,7 @@ var _ = common.SIGDescribe("Services", func() {
 _, err = f.ClientSet.CoreV1().Endpoints(testNamespaceName).Create(context.TODO(), &testEndpoints, metav1.CreateOptions{})
 framework.ExpectNoError(err, "failed to create Endpoint")
 ginkgo.By("waiting for available Endpoint")
-ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
 defer cancel()
 _, err = watchtools.Until(ctx, endpointsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
 switch event.Type {
@@ -3465,7 +3465,7 @@ var _ = common.SIGDescribe("Services", func() {
 framework.ExpectNoError(err, "failed to create Service")
 ginkgo.By("watching for the Service to be added")
-ctx, cancel := context.WithTimeout(context.Background(), svcReadyTimeout)
+ctx, cancel := context.WithTimeout(ctx, svcReadyTimeout)
 defer cancel()
 _, err = watchtools.Until(ctx, svcList.ResourceVersion, w, func(event watch.Event) (bool, error) {
 if svc, ok := event.Object.(*v1.Service); ok {

@@ -93,7 +93,7 @@ var _ = SIGDescribe("LimitRange", func() {
 _, informer, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.LimitRange{})
 defer w.Stop()
-ctx, cancelCtx := context.WithTimeout(context.TODO(), wait.ForeverTestTimeout)
+ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout)
 defer cancelCtx()
 if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
 framework.Failf("Timeout while waiting for LimitRange informer to sync")
@@ -275,7 +275,7 @@ var _ = SIGDescribe("LimitRange", func() {
 limitRange2 := &v1.LimitRange{}
 *limitRange2 = *limitRange
-ctx, cancelCtx := context.WithTimeout(context.Background(), wait.ForeverTestTimeout)
+ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout)
 defer cancelCtx()
 ginkgo.By(fmt.Sprintf("Creating LimitRange %q in namespace %q", lrName, f.Namespace.Name))

@@ -961,7 +961,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 ginkgo.By("Waiting for expected CSI calls")
 // Watch for all calls up to deletePod = true
-ctx, cancel := context.WithTimeout(context.Background(), csiPodRunningTimeout)
+ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout)
 defer cancel()
 for {
 if ctx.Err() != nil {
@@ -1513,7 +1513,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 e2eskipper.Skipf("mock driver %s does not support snapshots -- skipping", m.driver.GetDriverInfo().Name)
 }
-ctx, cancel := context.WithTimeout(context.Background(), csiPodRunningTimeout)
+ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout)
 defer cancel()
 defer cleanup()

@@ -69,9 +69,6 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
 // They are required to register a node VM to VC
 vmxFilePath := getVMXFilePath(vmObject)
-ctx, cancel := context.WithCancel(context.Background())
-defer cancel()
 vmHost, err := vmObject.HostSystem(ctx)
 framework.ExpectNoError(err)

@@ -123,8 +123,6 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
 nodeInfo := TestContext.NodeMapper.GetNodeInfo(node1)
 vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
-ctx, cancel := context.WithCancel(context.Background())
-defer cancel()
 _, err = vm.PowerOff(ctx)
 framework.ExpectNoError(err)
 defer vm.PowerOn(ctx)