e2e: simplify test cleanup

ginkgo.DeferCleanup has several advantages (see the sketch after this list):
- The cleanup operation gets registered only if and when it is actually
  needed, e.g. only after setup has succeeded.
- There is no need to return a cleanup function that the caller must invoke.
- Ginkgo automatically determines whether the cleanup callback needs a
  context, which will simplify the introduction of context parameters.
- Ginkgo's timeline shows when the cleanup operation executes.
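To make the pattern concrete, here is a minimal before/after sketch. The resource type and helper functions are hypothetical stand-ins, not taken from the test code below (the real tests use helpers such as setupSRIOVConfigOrFail/teardownSRIOVConfigOrFail); only ginkgo.DeferCleanup and its calling conventions are real.

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

type testResource struct{ name string }

// Hypothetical setup/teardown helpers, assumed to be called from inside a
// Ginkgo node (It, BeforeEach, ...).
func setupTestResource() *testResource     { return &testResource{name: "res"} }
func teardownTestResource(r *testResource) { /* release r */ }

// Old pattern: the test body has to remember to defer the teardown, and the
// deferred call cannot receive a context.
func oldStyle() {
	res := setupTestResource()
	defer teardownTestResource(res)
	// ... rest of the test ...
}

// New pattern: cleanup is registered right after setup succeeded, extra
// arguments are passed through, and Ginkgo records the call in the timeline.
func newStyle() {
	res := setupTestResource()
	ginkgo.DeferCleanup(teardownTestResource, res)
	// ... rest of the test ...
}

// If the cleanup needs a context, declare the parameter and Ginkgo supplies
// one automatically when the cleanup runs.
func newStyleWithContext() {
	res := setupTestResource()
	ginkgo.DeferCleanup(func(ctx context.Context) {
		_ = ctx // e.g. pass to API calls that accept a context
		teardownTestResource(res)
	})
}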
Author: Patrick Ohly
Date: 2022-12-11 18:51:37 +01:00
parent 5c09ca57ff
commit d4729008ef
101 changed files with 716 additions and 992 deletions

@@ -246,7 +246,7 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur
serviceAccount: sd.serviceAccount,
}
sd2.pod = createSRIOVPodOrFail(f)
-defer teardownSRIOVConfigOrFail(f, sd2)
+ginkgo.DeferCleanup(teardownSRIOVConfigOrFail, f, sd2)
waitForSRIOVResources(f, sd2)
compareSRIOVResources(sd, sd2)

@@ -183,8 +183,8 @@ func runTest(f *framework.Framework) error {
// Create a cgroup manager object for manipulating cgroups.
cgroupManager := cm.NewCgroupManager(subsystems, oldCfg.CgroupDriver)
-defer destroyTemporaryCgroupsForReservation(cgroupManager)
-defer func() {
+ginkgo.DeferCleanup(destroyTemporaryCgroupsForReservation, cgroupManager)
+ginkgo.DeferCleanup(func(ctx context.Context) {
if oldCfg != nil {
// Update the Kubelet configuration.
ginkgo.By("Stopping the kubelet")
@@ -205,7 +205,7 @@ func runTest(f *framework.Framework) error {
return kubeletHealthCheck(kubeletHealthCheckURL)
}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
}
-}()
+})
if err := createTemporaryCgroupsForReservation(cgroupManager); err != nil {
return err
}

@@ -173,7 +173,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
wl = workloads.NodePerfWorkloads[0]
})
ginkgo.It("NAS parallel benchmark (NPB) suite - Integer Sort (IS) workload", func(ctx context.Context) {
-defer cleanup()
+ginkgo.DeferCleanup(cleanup)
runWorkload()
})
})
@@ -182,7 +182,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
wl = workloads.NodePerfWorkloads[1]
})
ginkgo.It("NAS parallel benchmark (NPB) suite - Embarrassingly Parallel (EP) workload", func(ctx context.Context) {
-defer cleanup()
+ginkgo.DeferCleanup(cleanup)
runWorkload()
})
})
@@ -191,7 +191,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() {
wl = workloads.NodePerfWorkloads[2]
})
ginkgo.It("TensorFlow workload", func(ctx context.Context) {
-defer cleanup()
+ginkgo.DeferCleanup(cleanup)
runWorkload()
})
})

@@ -174,7 +174,7 @@ var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() {
// Create Pod
launchedPod := e2epod.NewPodClient(f).Create(pod)
// Ensure we delete pod
-defer e2epod.NewPodClient(f).DeleteSync(launchedPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, launchedPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
// Pod should remain in the pending state generating events with reason FailedCreatePodSandBox
// Expected Message Error Event

@@ -594,7 +594,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile)
sd := setupSRIOVConfigOrFail(f, configMap)
-defer teardownSRIOVConfigOrFail(f, sd)
+ginkgo.DeferCleanup(teardownSRIOVConfigOrFail, f, sd)
waitForSRIOVResources(f, sd)
@@ -623,7 +623,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile)
sd := setupSRIOVConfigOrFail(f, configMap)
-defer teardownSRIOVConfigOrFail(f, sd)
+ginkgo.DeferCleanup(teardownSRIOVConfigOrFail, f, sd)
waitForSRIOVResources(f, sd)
@@ -762,7 +762,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
ginkgo.It("should return proper podresources the same as before the restart of kubelet", func(ctx context.Context) {
dpPod := setupKubeVirtDevicePluginOrFail(f)
-defer teardownKubeVirtDevicePluginOrFail(f, dpPod)
+ginkgo.DeferCleanup(teardownKubeVirtDevicePluginOrFail, f, dpPod)
waitForKubeVirtResources(f, dpPod)

@@ -153,8 +153,8 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
rc.Start()
// Explicitly delete pods to prevent namespace controller cleaning up timeout
-defer deletePodsSync(f, append(pods, getCadvisorPod()))
-defer rc.Stop()
+ginkgo.DeferCleanup(deletePodsSync, f, append(pods, getCadvisorPod()))
+ginkgo.DeferCleanup(rc.Stop)
ginkgo.By("Creating a batch of Pods")
e2epod.NewPodClient(f).CreateBatch(pods)

@@ -92,7 +92,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
pods := newTestPods(podCount, false, imageutils.GetPauseImageName(), "restart-container-runtime-test")
ginkgo.By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
createBatchPodWithRateControl(f, pods, podCreationInterval)
-defer deletePodsSync(f, pods)
+ginkgo.DeferCleanup(deletePodsSync, f, pods)
// Give the node some time to stabilize, assume pods that enter RunningReady within
// startTimeout fit on the node and the node is now saturated.
@@ -157,7 +157,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
ginkgo.By(fmt.Sprintf("creating %d RestartAlways pods on node", preRestartPodCount))
restartAlwaysPods := newTestPods(preRestartPodCount, false, imageutils.GetPauseImageName(), "restart-dbus-test")
createBatchPodWithRateControl(f, restartAlwaysPods, podCreationInterval)
-defer deletePodsSync(f, restartAlwaysPods)
+ginkgo.DeferCleanup(deletePodsSync, f, restartAlwaysPods)
allPods := waitForPodsCondition(f, preRestartPodCount, startTimeout, testutils.PodRunningReadyOrSucceeded)
if len(allPods) < preRestartPodCount {
@@ -188,7 +188,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
postRestartPodCount := 2
postRestartPods := newTestPods(postRestartPodCount, false, imageutils.GetPauseImageName(), "restart-dbus-test")
createBatchPodWithRateControl(f, postRestartPods, podCreationInterval)
-defer deletePodsSync(f, postRestartPods)
+ginkgo.DeferCleanup(deletePodsSync, f, postRestartPods)
allPods = waitForPodsCondition(f, preRestartPodCount+postRestartPodCount, startTimeout, testutils.PodRunningReadyOrSucceeded)
if len(allPods) < preRestartPodCount+postRestartPodCount {
@@ -224,7 +224,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
}
}
createBatchPodWithRateControl(f, restartNeverPods, podCreationInterval)
-defer deletePodsSync(f, restartNeverPods)
+ginkgo.DeferCleanup(deletePodsSync, f, restartNeverPods)
completedPods := waitForPodsCondition(f, podCountRestartNever, startTimeout, testutils.PodSucceeded)
if len(completedPods) < podCountRestartNever {
@@ -240,7 +240,7 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
}
}
createBatchPodWithRateControl(f, restartAlwaysPods, podCreationInterval)
-defer deletePodsSync(f, restartAlwaysPods)
+ginkgo.DeferCleanup(deletePodsSync, f, restartAlwaysPods)
numAllPods := podCountRestartNever + podCountRestartAlways
allPods := waitForPodsCondition(f, numAllPods, startTimeout, testutils.PodRunningReadyOrSucceeded)

@@ -910,7 +910,7 @@ func runTopologyManagerTests(f *framework.Framework) {
framework.ExpectNoError(err)
sd := setupSRIOVConfigOrFail(f, configMap)
-defer teardownSRIOVConfigOrFail(f, sd)
+ginkgo.DeferCleanup(teardownSRIOVConfigOrFail, f, sd)
scope := containerScopeTopology
for _, policy := range policies {