From 9a2e0fcb18bc9feb327122edc95a968a8d423af7 Mon Sep 17 00:00:00 2001
From: Daniel Smith
Date: Mon, 28 Sep 2020 10:50:47 -0700
Subject: [PATCH] fix goroutine that lives too long

---
 .../src/k8s.io/apiserver/pkg/storage/tests/cacher_test.go | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/staging/src/k8s.io/apiserver/pkg/storage/tests/cacher_test.go b/staging/src/k8s.io/apiserver/pkg/storage/tests/cacher_test.go
index 0c4275e6af4..d412f295649 100644
--- a/staging/src/k8s.io/apiserver/pkg/storage/tests/cacher_test.go
+++ b/staging/src/k8s.io/apiserver/pkg/storage/tests/cacher_test.go
@@ -22,6 +22,7 @@ import (
 	"reflect"
 	goruntime "runtime"
 	"strconv"
+	"sync"
 	"testing"
 	"time"
 
@@ -941,8 +942,12 @@ func TestWatchBookmarksWithCorrectResourceVersion(t *testing.T) {
 	defer watcher.Stop()
 
 	done := make(chan struct{})
-	defer close(done)
+	var wg sync.WaitGroup
+	wg.Add(1)
+	defer wg.Wait() // We must wait for the waitgroup to exit before we terminate the cache or the server in prior defers
+	defer close(done) // call close first, so the goroutine knows to exit
 	go func() {
+		defer wg.Done()
 	for i := 0; i < 100; i++ {
 			select {
 			case <-done:
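
For readers who want the pattern in isolation: below is a minimal, self-contained sketch of the defer ordering this patch relies on. It is not the cacher test itself; startTestServer and doWork are hypothetical stand-ins for the server/cache setup and the per-iteration work in TestWatchBookmarksWithCorrectResourceVersion.

// Minimal sketch of the WaitGroup pattern in the patch above.
// startTestServer and doWork are hypothetical stand-ins, not part
// of the real test.
package main

import (
	"fmt"
	"sync"
	"time"
)

// startTestServer stands in for the cache/server setup that the test
// tears down in earlier defers.
func startTestServer() (shutdown func()) {
	fmt.Println("server up")
	return func() { fmt.Println("server down") }
}

// doWork stands in for one iteration of test work.
func doWork(i int) { time.Sleep(time.Millisecond) }

func main() {
	shutdown := startTestServer()
	defer shutdown() // registered first, so it runs LAST

	done := make(chan struct{})

	var wg sync.WaitGroup
	wg.Add(1)
	// Defers run in LIFO order: close(done) fires first (telling the
	// goroutine to exit), then wg.Wait() blocks until it actually has
	// exited, and only then does shutdown() tear the server down.
	defer wg.Wait()
	defer close(done)

	go func() {
		defer wg.Done() // mark this goroutine finished for wg.Wait()
		for i := 0; i < 100; i++ {
			select {
			case <-done:
				return // stop promptly once done is closed
			default:
				doWork(i)
			}
		}
	}()
}

The key design point: with only "defer close(done)", as in the old code, closing the channel merely signals the goroutine, which may still be mid-iteration when the earlier defers tear down the cache and server, so it can touch already-shut-down state. Registering "defer wg.Wait()" before "defer close(done)" turns the signal into a synchronization point, guaranteeing the goroutine has returned before any prior defer runs.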