Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 19:01:49 +00:00
kubelet: avoid manipulating global logger during unit test
The code as it stands now works, but it is still complicated, and previous versions had race conditions (https://github.com/kubernetes/kubernetes/issues/108040). Now the test works without modifying global state. The individual test cases could run in parallel; this just isn't done because they already complete quickly (about 2 seconds).
Parent: 65385fec20
Commit: 7f55a0bae0
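The pattern this commit switches the test to, reduced to a minimal self-contained sketch: instead of redirecting the global klog output into a shared buffer, each test gets its own ktesting logger and inspects its buffer through the ktesting.Underlier interface. The package name, doWork, and the log message below are placeholders; the ktesting calls (NewTestContext, Underlier, GetBuffer) are the ones the diff that follows relies on.

package example

import (
	"strings"
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
	_ "k8s.io/klog/v2/ktesting/init" // activate ktesting command line flags
)

// doWork stands in for the code under test; it logs through an injected
// logger instead of calling the global klog functions.
func doWork(logger klog.Logger) {
	logger.Info("Shutdown manager pod killing time out")
}

func TestDoWork(t *testing.T) {
	// Each test gets its own logger; no global state is modified,
	// so tests cannot race on klog's output settings.
	logger, _ := ktesting.NewTestContext(t)

	doWork(logger)

	// The ktesting sink keeps all output in a per-test buffer that can be
	// inspected directly instead of hijacking klog.SetOutput.
	underlier, ok := logger.GetSink().(ktesting.Underlier)
	if !ok {
		t.Fatalf("should have had a ktesting LogSink, got %T", logger.GetSink())
	}
	if log := underlier.GetBuffer().String(); !strings.Contains(log, "killing time out") {
		t.Errorf("expected log output containing %q, got:\n%s", "killing time out", log)
	}
}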
@@ -20,7 +20,6 @@ limitations under the License.
 package nodeshutdown
 
 import (
-	"bytes"
 	"fmt"
 	"os"
 	"strings"
@@ -35,7 +34,6 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/record"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
-	"k8s.io/klog/v2"
 	"k8s.io/klog/v2/ktesting"
 	_ "k8s.io/klog/v2/ktesting/init" // activate ktesting command line flags
 	"k8s.io/kubernetes/pkg/apis/scheduling"
@@ -626,23 +624,6 @@ func Test_groupByPriority(t *testing.T) {
 	}
 }
 
-type buffer struct {
-	b  bytes.Buffer
-	rw sync.RWMutex
-}
-
-func (b *buffer) String() string {
-	b.rw.RLock()
-	defer b.rw.RUnlock()
-	return b.b.String()
-}
-
-func (b *buffer) Write(p []byte) (n int, err error) {
-	b.rw.Lock()
-	defer b.rw.Unlock()
-	return b.b.Write(p)
-}
-
 func Test_managerImpl_processShutdownEvent(t *testing.T) {
 	var (
 		probeManager = probetest.FakeManager{}
@@ -669,7 +650,7 @@ func Test_managerImpl_processShutdownEvent(t *testing.T) {
 		name                   string
 		fields                 fields
 		wantErr                bool
-		exceptOutputContains   string
+		expectedOutputContains string
 	}{
 		{
 			name: "kill pod func take too long",
@@ -702,20 +683,16 @@ func Test_managerImpl_processShutdownEvent(t *testing.T) {
 				dbusCon:          &fakeDbus{},
 			},
 			wantErr:                false,
-			exceptOutputContains:   "Shutdown manager pod killing time out",
+			expectedOutputContains: "Shutdown manager pod killing time out",
 		},
 	}
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			l := klog.Level(1)
-			l.Set("1")
-			// hijack the klog output
-			tmpWriteBuffer := new(buffer)
-			klog.SetOutput(tmpWriteBuffer)
-			klog.LogToStderr(false)
+			logger, _ := ktesting.NewTestContext(t)
+
 			m := &managerImpl{
-				logger:       klog.TODO(), // This test will be updated in a separate commit.
+				logger:       logger,
 				recorder:     tt.fields.recorder,
 				nodeRef:      tt.fields.nodeRef,
 				probeManager: tt.fields.probeManager,
@@ -732,11 +709,17 @@ func Test_managerImpl_processShutdownEvent(t *testing.T) {
 			if err := m.processShutdownEvent(); (err != nil) != tt.wantErr {
 				t.Errorf("managerImpl.processShutdownEvent() error = %v, wantErr %v", err, tt.wantErr)
 			}
-			klog.Flush()
 
-			log := tmpWriteBuffer.String()
-			if !strings.Contains(log, tt.exceptOutputContains) {
-				t.Errorf("managerImpl.processShutdownEvent() should log %s, got %s", tt.exceptOutputContains, log)
+			underlier, ok := logger.GetSink().(ktesting.Underlier)
+			if !ok {
+				t.Fatalf("Should have had a ktesting LogSink, got %T", logger.GetSink())
+			}
+
+			log := underlier.GetBuffer().String()
+			if !strings.Contains(log, tt.expectedOutputContains) {
+				// Log will be shown on failure. To see it
+				// during a successful run use "go test -v".
+				t.Errorf("managerImpl.processShutdownEvent() should have logged %s, see actual output above.", tt.expectedOutputContains)
 			}
 		})
 	}
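As the commit message notes, the subtests no longer share any global logger state and could run in parallel; the commit keeps them sequential because the whole test already finishes in about 2 seconds. A minimal sketch of what opting in could look like, with hypothetical test and case names (not part of this commit):

package example

import (
	"testing"

	"k8s.io/klog/v2/ktesting"
)

func TestParallelSubtests(t *testing.T) {
	tests := []struct{ name string }{
		{name: "case A"},
		{name: "case B"},
	}
	for _, tt := range tests {
		tt := tt // capture the range variable for the parallel closure
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel() // safe: the logger below is per-subtest, no global klog state is touched

			logger, _ := ktesting.NewTestContext(t)
			logger.Info("running", "case", tt.name)
		})
	}
}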