diff --git a/test/integration/apiserver/apply/apply_crd_test.go b/test/integration/apiserver/apply/apply_crd_test.go
index 23dda4baf24..71a5e2fad46 100644
--- a/test/integration/apiserver/apply/apply_crd_test.go
+++ b/test/integration/apiserver/apply/apply_crd_test.go
@@ -435,6 +435,7 @@ func TestApplyCRDUnhandledSchema(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer etcdclient.Close()
 
 	server, err := apiservertesting.StartTestServer(t, apiservertesting.NewDefaultTestServerOptions(), nil, storageConfig)
 	if err != nil {
diff --git a/test/integration/framework/etcd.go b/test/integration/framework/etcd.go
index 805fbeb76a4..966f6538394 100644
--- a/test/integration/framework/etcd.go
+++ b/test/integration/framework/etcd.go
@@ -194,11 +194,13 @@ func EtcdMain(tests func() int) {
 	stop() // Don't defer this. See os.Exit documentation.
 
 	checkNumberOfGoroutines := func() (bool, error) {
-		// Leave some room for goroutines we can not get rid of
-		// like k8s.io/klog/v2.(*loggingT).flushDaemon()
-		// TODO(#108483): Figure out if we can reduce this
-		// further (ideally down to zero).
-		if dg := runtime.NumGoroutine() - before; dg <= 4 {
+		// We leave some room for leaked goroutines as there are
+		// still some leaks, mostly:
+		// - leak from the lumberjack package we're vendoring
+		// - leak from apiserver healthz
+		// - leak from the opencensus library
+		// Once fixed, we should be able to bring it down to zero.
+		if dg := runtime.NumGoroutine() - before; dg <= 3 {
 			return true, nil
 		}
 		// Allow goroutines to schedule and die off.
@@ -210,7 +212,9 @@ func EtcdMain(tests func() int) {
 	// But we keep the limit higher to account for cpu-starved environments.
 	if err := wait.Poll(100*time.Millisecond, 5*time.Second, checkNumberOfGoroutines); err != nil {
 		after := runtime.NumGoroutine()
-		klog.Fatalf("unexpected number of goroutines: before: %d after %d", before, after)
+		stacktraces := make([]byte, 1<<20)
+		runtime.Stack(stacktraces, true)
+		klog.Fatalf("unexpected number of goroutines: before: %d after %d\n%s", before, after, string(stacktraces))
 	}
 	os.Exit(result)
 }
diff --git a/test/integration/namespace/ns_conditions_test.go b/test/integration/namespace/ns_conditions_test.go
index 56ed8d0516e..049ed7face7 100644
--- a/test/integration/namespace/ns_conditions_test.go
+++ b/test/integration/namespace/ns_conditions_test.go
@@ -119,8 +119,15 @@ func TestNamespaceCondition(t *testing.T) {
 
 // TestNamespaceLabels tests for default labels added in https://github.com/kubernetes/kubernetes/pull/96968
 func TestNamespaceLabels(t *testing.T) {
-	closeFn, _, _, kubeClient, _ := namespaceLifecycleSetup(t)
+	closeFn, nsController, _, kubeClient, _ := namespaceLifecycleSetup(t)
 	defer closeFn()
+
+	// Even though nsController isn't used in this test, its creation already
+	// spawns some goroutines. So we need to run it to ensure they won't leak.
+	stopCh := make(chan struct{})
+	close(stopCh)
+	go nsController.Run(5, stopCh)
+
 	nsName := "test-namespace-labels-generated"
 	// Create a new namespace w/ no name
 	ns, err := kubeClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{
diff --git a/test/integration/scheduler/queue_test.go b/test/integration/scheduler/queue_test.go
index 908b757664d..338b61b3f9e 100644
--- a/test/integration/scheduler/queue_test.go
+++ b/test/integration/scheduler/queue_test.go
@@ -63,6 +63,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
 	testutils.SyncInformerFactory(testCtx)
 	defer testutils.CleanupTest(t, testCtx)
+	defer testCtx.Scheduler.SchedulingQueue.Close()
 
 	cs, ns, ctx := testCtx.ClientSet, testCtx.NS.Name, testCtx.Ctx
 	// Create one Node with a taint.
diff --git a/test/integration/util/util.go b/test/integration/util/util.go
index 13335b3b448..42a51976f93 100644
--- a/test/integration/util/util.go
+++ b/test/integration/util/util.go
@@ -899,7 +899,7 @@ func timeout(ctx context.Context, d time.Duration, f func()) error {
 	done := make(chan struct{})
 	go func() {
 		f()
-		done <- struct{}{}
+		close(done)
 	}()
 
 	select {
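Note on the util.go hunk: the switch from a send to close(done) is the standard fix for a goroutine leak in a timeout helper. If the caller's select returns because the deadline fires first, nothing ever receives from the unbuffered done channel, so a plain send blocks forever and leaks the goroutine running f; close never blocks. A minimal sketch of such a helper, where the signature and goroutine body come from the hunk above but the select branches are an assumption, not copied from the original file:

package util

import (
	"context"
	"time"
)

// timeout runs f and returns ctx.Err() if f does not complete within d.
func timeout(ctx context.Context, d time.Duration, f func()) error {
	ctx, cancel := context.WithTimeout(ctx, d)
	defer cancel()

	done := make(chan struct{})
	go func() {
		f()
		// close never blocks, so this goroutine exits even if the select
		// below has already returned on ctx.Done(). A send on the unbuffered
		// channel (done <- struct{}{}) would block forever in that case.
		close(done)
	}()

	select {
	case <-done:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}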
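The etcd.go hunks show the leak-detection pattern itself: record runtime.NumGoroutine() before the tests, poll until the count settles back near that baseline, and dump every goroutine's stack via runtime.Stack(buf, true) when it does not, so the report is actionable rather than just a count. A standalone sketch of that pattern, with illustrative thresholds and timings rather than the framework's exact values:

package framework

import (
	"fmt"
	"runtime"
	"time"
)

// checkGoroutineLeaks polls until the goroutine count is within allowed of
// before, or the timeout expires. On failure it returns all goroutine stacks
// so the leaked goroutines can be identified.
func checkGoroutineLeaks(before, allowed int, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if runtime.NumGoroutine()-before <= allowed {
			return nil
		}
		// Give lingering goroutines a chance to finish and be reclaimed.
		time.Sleep(100 * time.Millisecond)
	}
	buf := make([]byte, 1<<20)
	n := runtime.Stack(buf, true) // all=true captures every goroutine's stack
	return fmt.Errorf("unexpected number of goroutines: before: %d after: %d\n%s",
		before, runtime.NumGoroutine(), buf[:n])
}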