e2e: Call AfterEach handlers in case of an abort

I was tired of aborted tests leaving debris in my cluster.
Tim Hockin 2016-02-05 21:12:33 -08:00
parent 24b33964c9
commit 677d4d6dbc
3 changed files with 62 additions and 5 deletions


@@ -22,6 +22,7 @@ import (
	"os"
	"path"
	"strings"
	"sync"
	"testing"
	"time"
@@ -189,12 +190,54 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
})
type CleanupActionHandle *int
var cleanupActionsLock sync.Mutex
var cleanupActions = map[CleanupActionHandle]func(){}
// AddCleanupAction installs a function that will be called in the event of the
// whole test being terminated. This allows arbitrary pieces of the overall
// test to hook into SynchronizedAfterSuite().
func AddCleanupAction(fn func()) CleanupActionHandle {
	p := CleanupActionHandle(new(int))
	cleanupActionsLock.Lock()
	defer cleanupActionsLock.Unlock()
	cleanupActions[p] = fn
	return p
}
// RemoveCleanupAction removes a function that was installed by
// AddCleanupAction.
func RemoveCleanupAction(p CleanupActionHandle) {
	cleanupActionsLock.Lock()
	defer cleanupActionsLock.Unlock()
	delete(cleanupActions, p)
}
// RunCleanupActions runs all functions installed by AddCleanupAction. It does
// not remove them (see RemoveCleanupAction) but it does run unlocked, so they
// may remove themselves.
func RunCleanupActions() {
	list := []func(){}
	func() {
		cleanupActionsLock.Lock()
		defer cleanupActionsLock.Unlock()
		for _, fn := range cleanupActions {
			list = append(list, fn)
		}
	}()
	// Run unlocked.
	for _, fn := range list {
		fn()
	}
}
// Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs).
// Here, the order of functions is reversed; first, the function which runs everywhere,
// and then the function that only runs on the first Ginkgo node.
var _ = ginkgo.SynchronizedAfterSuite(func() {
	// Run on all Ginkgo nodes
	RunCleanupActions()
}, func() {
	// Run only on Ginkgo node 1
	if testContext.ReportDir != "" {

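As a usage sketch of the registry above (assuming the helpers are in scope and "fmt" is imported; the function name and printed strings are illustrative, not part of this commit): an action deregistered via RemoveCleanupAction never fires, and a surviving action may safely remove itself, because RunCleanupActions invokes its snapshot without holding cleanupActionsLock.

func demoCleanupActions() {
	var h CleanupActionHandle
	h = AddCleanupAction(func() {
		// Self-removal is safe here: RunCleanupActions does not hold
		// cleanupActionsLock while invoking actions.
		RemoveCleanupAction(h)
		fmt.Println("deleting leftover namespace")
	})

	// The normal path: the per-test teardown already ran and
	// deregistered itself, so nothing fires at suite teardown.
	done := AddCleanupAction(func() { fmt.Println("never printed") })
	RemoveCleanupAction(done)

	RunCleanupActions() // prints "deleting leftover namespace" exactly once
}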

@@ -57,6 +57,11 @@ type Framework struct {
	logsSizeWaitGroup    sync.WaitGroup
	logsSizeCloseChannel chan bool
	logsSizeVerifier     *LogsSizeVerifier
	// To make sure that this framework cleans up after itself, no matter
	// what, we install a cleanup action before each test and clear it
	// after. If the test run aborts, the AfterSuite hook runs all
	// remaining cleanup actions.
	cleanupHandle CleanupActionHandle
}
type TestDataSummary interface {
@@ -80,6 +85,10 @@ func NewFramework(baseName string) *Framework {
// beforeEach gets a client and makes a namespace.
func (f *Framework) beforeEach() {
	// The fact that we need this feels like a bug in ginkgo.
	// https://github.com/onsi/ginkgo/issues/222
	f.cleanupHandle = AddCleanupAction(f.afterEach)

	By("Creating a kubernetes client")
	c, err := loadClient()
	Expect(err).NotTo(HaveOccurred())
@@ -121,6 +130,8 @@ func (f *Framework) beforeEach() {
// afterEach deletes the namespace, after reading its events.
func (f *Framework) afterEach() {
	RemoveCleanupAction(f.cleanupHandle)

	// Print events if the test failed.
	if CurrentGinkgoTestDescription().Failed {
		By(fmt.Sprintf("Collecting events from namespace %q.", f.Namespace.Name))

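The net effect of the two hunks above is a run-once guarantee for afterEach: on a normal test it runs as Ginkgo's AfterEach and immediately deregisters its handle; on an abort, the SynchronizedAfterSuite hook runs it through the registry instead. A schematic sketch of that pairing (a stripped-down stand-in, not the real Framework type):

type miniFramework struct {
	cleanupHandle CleanupActionHandle
}

func (f *miniFramework) beforeEach() {
	// Install the cleanup action first, so a failure later in setup
	// still leaves afterEach registered for suite teardown.
	f.cleanupHandle = AddCleanupAction(f.afterEach)
	// ... create client and namespace ...
}

func (f *miniFramework) afterEach() {
	// Deregister first: when we get here via the normal AfterEach,
	// the abort path must not run us a second time.
	RemoveCleanupAction(f.cleanupHandle)
	// ... collect events and delete the namespace ...
}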

@@ -54,6 +54,9 @@ const loadBalancerLagTimeout = 2 * time.Minute
//TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
const loadBalancerCreateTimeout = 20 * time.Minute
// How long to wait for a namespace to be deleted.
const namespaceDeleteTimeout = 5 * time.Minute
// This should match whatever the default/configured range is
var ServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768}
@@ -73,7 +76,7 @@ var _ = Describe("Services", func() {
		if testContext.DeleteNamespace {
			for _, ns := range extraNamespaces {
				By(fmt.Sprintf("Destroying namespace %v", ns))
-				if err := deleteNS(c, ns, 5*time.Minute /* namespace deletion timeout */); err != nil {
+				if err := deleteNS(c, ns, namespaceDeleteTimeout); err != nil {
					Failf("Couldn't delete namespace %s: %s", ns, err)
				}
			}