Merge pull request #113669 from wojtek-t/clean_shutdown_final

Clean shutdown final
Authored by Kubernetes Prow Robot on 2022-11-07 16:01:45 -08:00, committed by GitHub
commit 0e9a2e6bcf
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 21 additions and 8 deletions

@@ -435,6 +435,7 @@ func TestApplyCRDUnhandledSchema(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer etcdclient.Close()
 	server, err := apiservertesting.StartTestServer(t, apiservertesting.NewDefaultTestServerOptions(), nil, storageConfig)
 	if err != nil {
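
The added `defer etcdclient.Close()` is the recurring pattern in this PR: shut down background clients before the test returns so their goroutines are not counted as leaks. A minimal sketch of that pattern, assuming the go.etcd.io/etcd/client/v3 package and a hypothetical endpoint:

package example

import (
	"testing"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func TestWithEtcdClient(t *testing.T) {
	// Hypothetical endpoint; the integration framework normally provides it.
	c, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"http://127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		t.Fatal(err)
	}
	// Closing the client shuts down its background goroutines (connections,
	// lease keepalive, watches), so they are gone before the leak check runs.
	defer c.Close()

	// ... test body using c ...
}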

@@ -194,11 +194,13 @@ func EtcdMain(tests func() int) {
 	stop() // Don't defer this. See os.Exit documentation.
 	checkNumberOfGoroutines := func() (bool, error) {
-		// Leave some room for goroutines we can not get rid of
-		// like k8s.io/klog/v2.(*loggingT).flushDaemon()
-		// TODO(#108483): Figure out if we can reduce this
-		// further (ideally down to zero).
-		if dg := runtime.NumGoroutine() - before; dg <= 4 {
+		// We leave some room for leaked goroutines as there are
+		// still some leaks, mostly:
+		// - leak from lumberjack package we're vendoring
+		// - leak from apiserver healthz
+		// - leak from opencensus library
+		// Once fixed, we should be able to bring it down to zero.
+		if dg := runtime.NumGoroutine() - before; dg <= 3 {
 			return true, nil
 		}
 		// Allow goroutines to schedule and die off.
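
The hunk above tightens the leak budget in EtcdMain's post-test check: count goroutines before the tests, allow a small number of known stragglers afterwards, and retry for a while so goroutines that are already shutting down get a chance to exit. A standalone sketch of the same idea, using only the standard library instead of the framework's wait.Poll helper (names here are illustrative):

package example

import (
	"runtime"
	"testing"
	"time"
)

// checkGoroutineLeaks fails the test if more than budget extra goroutines
// remain after the work under test has finished. It polls because
// goroutines that are mid-shutdown need a moment to actually exit.
func checkGoroutineLeaks(t *testing.T, before, budget int) {
	deadline := time.Now().Add(5 * time.Second)
	for {
		if dg := runtime.NumGoroutine() - before; dg <= budget {
			return
		}
		if time.Now().After(deadline) {
			t.Fatalf("goroutine leak: before=%d after=%d (budget %d)",
				before, runtime.NumGoroutine(), budget)
		}
		time.Sleep(100 * time.Millisecond)
	}
}
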
@@ -210,7 +212,9 @@ func EtcdMain(tests func() int) {
 	// But we keep the limit higher to account for cpu-starved environments.
 	if err := wait.Poll(100*time.Millisecond, 5*time.Second, checkNumberOfGoroutines); err != nil {
 		after := runtime.NumGoroutine()
-		klog.Fatalf("unexpected number of goroutines: before: %d after %d", before, after)
+		stacktraces := make([]byte, 1<<20)
+		runtime.Stack(stacktraces, true)
+		klog.Fatalf("unexpected number of goroutines: before: %d after %d\n%s", before, after, string(stacktraces))
 	}
 	os.Exit(result)
 }
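
When the budget is exceeded, the new code dumps the stacks of every live goroutine before failing, which is what makes the leak actually debuggable from the test log. A small sketch of that mechanism (the helper name and the plain fmt output are illustrative; the framework calls klog.Fatalf directly):

package example

import (
	"fmt"
	"runtime"
)

// dumpAllStacks captures the stack trace of every goroutine, via the same
// runtime.Stack(buf, true) call the diff above adds before klog.Fatalf.
func dumpAllStacks() string {
	buf := make([]byte, 1<<20) // 1 MiB; runtime.Stack truncates if the buffer is too small
	n := runtime.Stack(buf, true)
	return string(buf[:n])
}

func reportLeak(before, after int) {
	fmt.Printf("unexpected number of goroutines: before: %d after %d\n%s",
		before, after, dumpAllStacks())
}

Slicing the buffer to the returned length keeps the unused tail of the buffer out of the log, which a plain string conversion of the whole buffer does not.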

@@ -119,8 +119,15 @@ func TestNamespaceCondition(t *testing.T) {
 // TestNamespaceLabels tests for default labels added in https://github.com/kubernetes/kubernetes/pull/96968
 func TestNamespaceLabels(t *testing.T) {
-	closeFn, _, _, kubeClient, _ := namespaceLifecycleSetup(t)
+	closeFn, nsController, _, kubeClient, _ := namespaceLifecycleSetup(t)
 	defer closeFn()
+	// Even though nsController isn't used in this test, its creation is already
+	// spawning some goroutines. So we need to run it to ensure they won't leak.
+	stopCh := make(chan struct{})
+	close(stopCh)
+	go nsController.Run(5, stopCh)
 	nsName := "test-namespace-labels-generated"
 	// Create a new namespace w/ no name
 	ns, err := kubeClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{
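
The interesting detail here is the already-closed stop channel: the controller spawns goroutines as soon as it is constructed, and running it against a closed channel lets Run start, observe the shutdown signal immediately, and tear those goroutines down instead of leaking them. A generic sketch of that trick with a hypothetical controller (not the real namespace controller API):

package example

import "sync"

// fakeController stands in for a controller that starts background workers
// in Run and stops them once the stop channel is closed.
type fakeController struct {
	wg sync.WaitGroup
}

func (c *fakeController) Run(workers int, stopCh <-chan struct{}) {
	for i := 0; i < workers; i++ {
		c.wg.Add(1)
		go func() {
			defer c.wg.Done()
			<-stopCh // returns as soon as stopCh is closed
		}()
	}
	<-stopCh
	c.wg.Wait() // all workers are gone by the time Run returns
}

func runAndStopImmediately(c *fakeController) {
	stopCh := make(chan struct{})
	close(stopCh) // closed before Run even starts
	go c.Run(5, stopCh)
}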

@@ -63,6 +63,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
 	testutils.SyncInformerFactory(testCtx)
 	defer testutils.CleanupTest(t, testCtx)
+	defer testCtx.Scheduler.SchedulingQueue.Close()
 	cs, ns, ctx := testCtx.ClientSet, testCtx.NS.Name, testCtx.Ctx
 	// Create one Node with a taint.

@@ -899,7 +899,7 @@ func timeout(ctx context.Context, d time.Duration, f func()) error {
 	done := make(chan struct{})
 	go func() {
 		f()
-		done <- struct{}{}
+		close(done)
 	}()
 	select {
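
The last change swaps a blocking send for close(done). If the select below has already returned, because ctx was cancelled or the timer fired, nothing will ever receive from the unbuffered channel, so `done <- struct{}{}` would block and pin the goroutine forever; close(done) never blocks. A sketch of the full helper as it plausibly looks after the change (the select arms beyond the lines shown above are an assumption):

package example

import (
	"context"
	"fmt"
	"time"
)

// timeoutSketch runs f, but stops waiting after d or when ctx is done.
func timeoutSketch(ctx context.Context, d time.Duration, f func()) error {
	done := make(chan struct{})
	go func() {
		f()
		close(done) // never blocks, unlike a send on an unbuffered channel
	}()
	select {
	case <-done:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(d):
		return fmt.Errorf("timed out after %v", d)
	}
}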