diff --git a/test/e2e/daemon_restart.go b/test/e2e/daemon_restart.go
index 498f74929a8..2c986fd8cbf 100644
--- a/test/e2e/daemon_restart.go
+++ b/test/e2e/daemon_restart.go
@@ -246,9 +246,9 @@ var _ = Describe("DaemonRestart", func() {
 	})

 	AfterEach(func() {
+		defer framework.afterEach()
 		close(stopCh)
 		expectNoError(DeleteRC(framework.Client, ns, rcName))
-		framework.afterEach()
 	})

 	It("Controller Manager should not create/delete replicas across restart", func() {
diff --git a/test/e2e/daemon_set.go b/test/e2e/daemon_set.go
index 78455f7c426..a63ee252659 100644
--- a/test/e2e/daemon_set.go
+++ b/test/e2e/daemon_set.go
@@ -65,9 +65,9 @@ var _ = Describe("Daemon set", func() {
 	})

 	AfterEach(func() {
+		defer f.afterEach()
 		err := clearDaemonSetNodeLabels(f.Client)
 		Expect(err).NotTo(HaveOccurred())
-		f.afterEach()
 	})

 	It("should run and stop simple daemon", func() {
diff --git a/test/e2e/latency.go b/test/e2e/latency.go
index 0275dd49752..ab29d90f78e 100644
--- a/test/e2e/latency.go
+++ b/test/e2e/latency.go
@@ -80,6 +80,7 @@ var _ = Describe("[Performance Suite] Latency", func() {
 	})

 	AfterEach(func() {
+		defer framework.afterEach()
 		By("Removing additional pods if any")
 		for i := 1; i <= nodeCount; i++ {
 			name := additionalPodsPrefix + "-" + strconv.Itoa(i)
diff --git a/test/e2e/scheduler_predicates.go b/test/e2e/scheduler_predicates.go
index 0c49b08b694..bbe9eb5e83e 100644
--- a/test/e2e/scheduler_predicates.go
+++ b/test/e2e/scheduler_predicates.go
@@ -176,13 +176,13 @@ var _ = Describe("SchedulerPredicates", func() {
 	})

 	AfterEach(func() {
+		defer framework.afterEach()
 		rc, err := c.ReplicationControllers(ns).Get(RCName)
 		if err == nil && rc.Spec.Replicas != 0 {
 			By("Cleaning up the replication controller")
 			err := DeleteRC(c, ns, RCName)
 			expectNoError(err)
 		}
-		framework.afterEach()
 	})

 	// This test verifies that max-pods flag works as advertised. It assumes that cluster add-on pods stay stable
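
For context on the pattern these hunks share: Ginkgo assertion helpers such as `Expect(...).NotTo(HaveOccurred())` and `expectNoError` abort the surrounding function with a panic on failure, so a `framework.afterEach()` call placed after them is silently skipped when an earlier cleanup step fails. Deferring `afterEach()` at the top of each `AfterEach` guarantees the framework teardown runs regardless. A minimal, self-contained Go sketch of that behavior follows; the `framework` type and function names here are illustrative stand-ins, not the real e2e framework API:

```go
package main

import "fmt"

// framework is a stand-in for the e2e test framework.
type framework struct{}

// afterEach stands in for the framework's per-test teardown
// (namespace deletion, log dumping, and so on).
func (f *framework) afterEach() {
	fmt.Println("framework teardown ran")
}

// cleanup mimics an AfterEach body whose early steps can panic,
// the way a failed Ginkgo assertion aborts the function.
func cleanup(f *framework, failEarly bool) {
	// Deferred first, so teardown runs even if a later step panics.
	defer f.afterEach()

	if failEarly {
		panic("assertion failed") // e.g. Expect(err).NotTo(HaveOccurred())
	}
	fmt.Println("test-specific cleanup ran")
}

func main() {
	defer func() { recover() }() // swallow the demo panic
	// Teardown still prints despite the simulated assertion failure;
	// with a plain trailing call instead of defer, it would be skipped.
	cleanup(&framework{}, true)
}
```

With the original ordering (teardown as the last statement), any failing step above it leaks the test namespace; `defer` makes the teardown unconditional without reordering the cleanup logic itself.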