Merge pull request #99859 from krzysiekg/replace_klog_in_pkg_kubelet_tests

Replace klog with testing.T logging in pkg/kubelet tests
Authored by Kubernetes Prow Robot on 2021-03-08 19:24:02 -08:00; committed by GitHub
commit b48e7f1e7b
5 changed files with 38 additions and 39 deletions
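The change follows a single pattern across all five files: test helpers that previously wrote to the global klog logger now take a *testing.T and log through t.Logf, so output is captured per test case, interleaved with its failures, and hidden for passing tests unless -v is set. A minimal self-contained sketch of the pattern (the helper name and the maps are hypothetical, for illustration only):

package example

import "testing"

// areAssignmentsEqual is a hypothetical comparison helper showing the pattern:
// it takes *testing.T so diagnostics go through t.Logf instead of klog, and it
// returns false so the caller decides between t.Errorf and t.Fatalf.
func areAssignmentsEqual(t *testing.T, want, got map[string]string) bool {
	if len(want) != len(got) {
		t.Logf("length mismatch: want %d, got %d", len(want), len(got))
		return false
	}
	for k, v := range want {
		if got[k] != v {
			t.Logf("key %q: want %q, got %q", k, v, got[k])
			return false
		}
	}
	return true
}

func TestAssignments(t *testing.T) {
	want := map[string]string{"pod1": "container1"}
	if !areAssignmentsEqual(t, want, want) {
		t.Fatalf("expected assignments to be equal")
	}
}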

View File

@@ -892,7 +892,7 @@ func TestRemoveStaleState(t *testing.T) {
 			mgr.removeStaleState()
-			if !areContainerMemoryAssignmentsEqual(mgr.state.GetMemoryAssignments(), testCase.expectedAssignments) {
+			if !areContainerMemoryAssignmentsEqual(t, mgr.state.GetMemoryAssignments(), testCase.expectedAssignments) {
 				t.Errorf("Memory Manager removeStaleState() error, expected assignments %v, but got: %v",
 					testCase.expectedAssignments, mgr.state.GetMemoryAssignments())
 			}
@@ -2012,7 +2012,7 @@ func TestRemoveContainer(t *testing.T) {
 					testCase.description, testCase.expectedError, err)
 			}
-			if !areContainerMemoryAssignmentsEqual(mgr.state.GetMemoryAssignments(), testCase.expectedAssignments) {
+			if !areContainerMemoryAssignmentsEqual(t, mgr.state.GetMemoryAssignments(), testCase.expectedAssignments) {
 				t.Fatalf("Memory Manager RemoveContainer() inconsistent assignment, expected: %+v, but got: %+v, start %+v",
 					testCase.expectedAssignments, mgr.state.GetMemoryAssignments(), testCase.expectedAssignments)
 			}

View File

@@ -25,7 +25,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
@@ -68,39 +67,41 @@ var (
 )
 type fakeTopologyManagerWithHint struct {
+	t    *testing.T
 	hint *topologymanager.TopologyHint
 }
 // NewFakeTopologyManagerWithHint returns an instance of fake topology manager with specified topology hints
-func NewFakeTopologyManagerWithHint(hint *topologymanager.TopologyHint) topologymanager.Manager {
+func NewFakeTopologyManagerWithHint(t *testing.T, hint *topologymanager.TopologyHint) topologymanager.Manager {
 	return &fakeTopologyManagerWithHint{
+		t:    t,
 		hint: hint,
 	}
 }
 func (m *fakeTopologyManagerWithHint) AddHintProvider(h topologymanager.HintProvider) {
-	klog.Infof("[fake topologymanager] AddHintProvider HintProvider: %v", h)
+	m.t.Logf("[fake topologymanager] AddHintProvider HintProvider: %v", h)
 }
 func (m *fakeTopologyManagerWithHint) AddContainer(pod *v1.Pod, containerID string) error {
-	klog.Infof("[fake topologymanager] AddContainer pod: %v container id: %v", format.Pod(pod), containerID)
+	m.t.Logf("[fake topologymanager] AddContainer pod: %v container id: %v", format.Pod(pod), containerID)
 	return nil
 }
 func (m *fakeTopologyManagerWithHint) RemoveContainer(containerID string) error {
-	klog.Infof("[fake topologymanager] RemoveContainer container id: %v", containerID)
+	m.t.Logf("[fake topologymanager] RemoveContainer container id: %v", containerID)
 	return nil
 }
 func (m *fakeTopologyManagerWithHint) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
-	klog.Infof("[fake topologymanager] Topology Admit Handler")
+	m.t.Logf("[fake topologymanager] Topology Admit Handler")
 	return lifecycle.PodAdmitResult{
 		Admit: true,
 	}
 }
 func (m *fakeTopologyManagerWithHint) GetAffinity(podUID string, containerName string) topologymanager.TopologyHint {
-	klog.Infof("[fake topologymanager] GetAffinity podUID: %v container name: %v", podUID, containerName)
+	m.t.Logf("[fake topologymanager] GetAffinity podUID: %v container name: %v", podUID, containerName)
 	return *m.hint
 }
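As a usage note, a test in this package would now construct the fake roughly as below; this is a sketch assuming the TopologyHint fields and the bitmask.NewBitMask signature from the topologymanager packages imported above:

// Build a hint pinned to NUMA node 0. NewBitMask returns an error for
// invalid bit positions, so it must be checked.
affinity, err := bitmask.NewBitMask(0)
if err != nil {
	t.Fatalf("failed to create NUMA affinity mask: %v", err)
}
hint := &topologymanager.TopologyHint{NUMANodeAffinity: affinity, Preferred: true}

// The constructor now needs the *testing.T so the fake's methods can call t.Logf.
mgr := NewFakeTopologyManagerWithHint(t, hint)
_ = mgr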
@@ -128,25 +129,25 @@ func areMemoryBlocksEqual(mb1, mb2 []state.Block) bool {
 	return len(copyMemoryBlocks) == 0
 }
-func areContainerMemoryAssignmentsEqual(cma1, cma2 state.ContainerMemoryAssignments) bool {
+func areContainerMemoryAssignmentsEqual(t *testing.T, cma1, cma2 state.ContainerMemoryAssignments) bool {
 	if len(cma1) != len(cma2) {
 		return false
 	}
 	for podUID, container := range cma1 {
 		if _, ok := cma2[podUID]; !ok {
-			klog.Errorf("[memorymanager_tests] the assignment does not have pod UID %s", podUID)
+			t.Logf("[memorymanager_tests] the assignment does not have pod UID %s", podUID)
 			return false
 		}
 		for containerName, memoryBlocks := range container {
 			if _, ok := cma2[podUID][containerName]; !ok {
-				klog.Errorf("[memorymanager_tests] the assignment does not have container name %s", containerName)
+				t.Logf("[memorymanager_tests] the assignment does not have container name %s", containerName)
 				return false
 			}
 			if !areMemoryBlocksEqual(memoryBlocks, cma2[podUID][containerName]) {
-				klog.Errorf("[memorymanager_tests] assignments memory blocks are different: %v != %v", memoryBlocks, cma2[podUID][containerName])
+				t.Logf("[memorymanager_tests] assignments memory blocks are different: %v != %v", memoryBlocks, cma2[podUID][containerName])
 				return false
 			}
 		}
@@ -168,10 +169,10 @@ type testStaticPolicy struct {
 	expectedTopologyHints map[string][]topologymanager.TopologyHint
 }
-func initTests(testCase *testStaticPolicy, hint *topologymanager.TopologyHint) (Policy, state.State, error) {
+func initTests(t *testing.T, testCase *testStaticPolicy, hint *topologymanager.TopologyHint) (Policy, state.State, error) {
 	manager := topologymanager.NewFakeManager()
 	if hint != nil {
-		manager = NewFakeTopologyManagerWithHint(hint)
+		manager = NewFakeTopologyManagerWithHint(t, hint)
 	}
 	p, err := NewPolicyStatic(testCase.machineInfo, testCase.systemReserved, manager)
@@ -211,7 +212,7 @@ func TestStaticPolicyNew(t *testing.T) {
 	for _, testCase := range testCases {
 		t.Run(testCase.description, func(t *testing.T) {
-			_, _, err := initTests(&testCase, nil)
+			_, _, err := initTests(t, &testCase, nil)
 			if !reflect.DeepEqual(err, testCase.expectedError) {
 				t.Fatalf("The actual error: %v is different from the expected one: %v", err, testCase.expectedError)
 			}
@@ -232,7 +233,7 @@ func TestStaticPolicyName(t *testing.T) {
 	}
 	for _, testCase := range testCases {
 		t.Run(testCase.description, func(t *testing.T) {
-			p, _, err := initTests(&testCase, nil)
+			p, _, err := initTests(t, &testCase, nil)
 			if err != nil {
 				t.Fatalf("Unexpected error: %v", err)
 			}
@@ -1059,8 +1060,8 @@ func TestStaticPolicyStart(t *testing.T) {
 	for _, testCase := range testCases {
 		t.Run(testCase.description, func(t *testing.T) {
-			klog.Infof("[Start] %s", testCase.description)
-			p, s, err := initTests(&testCase, nil)
+			t.Logf("[Start] %s", testCase.description)
+			p, s, err := initTests(t, &testCase, nil)
 			if err != nil {
 				t.Fatalf("Unexpected error: %v", err)
 			}
@@ -1075,7 +1076,7 @@ func TestStaticPolicyStart(t *testing.T) {
 			}
 			assignments := s.GetMemoryAssignments()
-			if !areContainerMemoryAssignmentsEqual(assignments, testCase.expectedAssignments) {
+			if !areContainerMemoryAssignmentsEqual(t, assignments, testCase.expectedAssignments) {
 				t.Fatalf("Actual assignments: %v is different from the expected one: %v", assignments, testCase.expectedAssignments)
 			}
@@ -1800,8 +1801,8 @@ func TestStaticPolicyAllocate(t *testing.T) {
 	for _, testCase := range testCases {
 		t.Run(testCase.description, func(t *testing.T) {
-			klog.Infof("TestStaticPolicyAllocate %s", testCase.description)
-			p, s, err := initTests(&testCase, testCase.topologyHint)
+			t.Logf("TestStaticPolicyAllocate %s", testCase.description)
+			p, s, err := initTests(t, &testCase, testCase.topologyHint)
 			if err != nil {
 				t.Fatalf("Unexpected error: %v", err)
 			}
@@ -1816,7 +1817,7 @@ func TestStaticPolicyAllocate(t *testing.T) {
 			}
 			assignments := s.GetMemoryAssignments()
-			if !areContainerMemoryAssignmentsEqual(assignments, testCase.expectedAssignments) {
+			if !areContainerMemoryAssignmentsEqual(t, assignments, testCase.expectedAssignments) {
 				t.Fatalf("Actual assignments %v are different from the expected %v", assignments, testCase.expectedAssignments)
 			}
@@ -2066,7 +2067,7 @@ func TestStaticPolicyRemoveContainer(t *testing.T) {
 	for _, testCase := range testCases {
 		t.Run(testCase.description, func(t *testing.T) {
-			p, s, err := initTests(&testCase, nil)
+			p, s, err := initTests(t, &testCase, nil)
 			if err != nil {
 				t.Fatalf("Unexpected error: %v", err)
 			}
@@ -2081,7 +2082,7 @@ func TestStaticPolicyRemoveContainer(t *testing.T) {
 			}
 			assignments := s.GetMemoryAssignments()
-			if !areContainerMemoryAssignmentsEqual(assignments, testCase.expectedAssignments) {
+			if !areContainerMemoryAssignmentsEqual(t, assignments, testCase.expectedAssignments) {
 				t.Fatalf("Actual assignments %v are different from the expected %v", assignments, testCase.expectedAssignments)
 			}
@@ -2367,7 +2368,7 @@ func TestStaticPolicyGetTopologyHints(t *testing.T) {
 	for _, testCase := range testCases {
 		t.Run(testCase.description, func(t *testing.T) {
-			p, s, err := initTests(&testCase, nil)
+			p, s, err := initTests(t, &testCase, nil)
 			if err != nil {
 				t.Fatalf("Unexpected error: %v", err)
 			}

View File

@@ -28,7 +28,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/klog/v2"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/prober/results"
 	"k8s.io/kubernetes/pkg/probe"
@@ -119,7 +118,7 @@ func TestAddRemovePods(t *testing.T) {
 	// Removing probed pod.
 	m.RemovePod(&probePod)
-	if err := waitForWorkerExit(m, probePaths); err != nil {
+	if err := waitForWorkerExit(t, m, probePaths); err != nil {
 		t.Fatal(err)
 	}
 	if err := expectProbes(m, nil); err != nil {
@@ -187,7 +186,7 @@ func TestCleanupPods(t *testing.T) {
 		{"pod_keep", "prober2", liveness},
 		{"pod_keep", "prober3", startup},
 	}
-	if err := waitForWorkerExit(m, removedProbes); err != nil {
+	if err := waitForWorkerExit(t, m, removedProbes); err != nil {
 		t.Fatal(err)
 	}
 	if err := expectProbes(m, expectedProbes); err != nil {
@@ -354,7 +353,7 @@ func TestUpdateReadiness(t *testing.T) {
 	}
 	// Wait for ready status.
-	if err := waitForReadyStatus(m, true); err != nil {
+	if err := waitForReadyStatus(t, m, true); err != nil {
 		t.Error(err)
 	}
@@ -362,7 +361,7 @@ func TestUpdateReadiness(t *testing.T) {
 	exec.set(probe.Failure, nil)
 	// Wait for failed status.
-	if err := waitForReadyStatus(m, false); err != nil {
+	if err := waitForReadyStatus(t, m, false); err != nil {
 		t.Error(err)
 	}
 }
@@ -396,7 +395,7 @@ outer:
 const interval = 1 * time.Second
 // Wait for the given workers to exit & clean up.
-func waitForWorkerExit(m *manager, workerPaths []probeKey) error {
+func waitForWorkerExit(t *testing.T, m *manager, workerPaths []probeKey) error {
 	for _, w := range workerPaths {
 		condition := func() (bool, error) {
 			_, exists := m.getWorker(w.podUID, w.containerName, w.probeType)
@@ -405,7 +404,7 @@ func waitForWorkerExit(m *manager, workerPaths []probeKey) error {
 		if exited, _ := condition(); exited {
 			continue // Already exited, no need to poll.
 		}
-		klog.Infof("Polling %v", w)
+		t.Logf("Polling %v", w)
 		if err := wait.Poll(interval, wait.ForeverTestTimeout, condition); err != nil {
 			return err
 		}
@@ -415,7 +414,7 @@ func waitForWorkerExit(m *manager, workerPaths []probeKey) error {
 }
 // Wait for the pod's ready status to match the expected value.
-func waitForReadyStatus(m *manager, ready bool) error {
+func waitForReadyStatus(t *testing.T, m *manager, ready bool) error {
 	condition := func() (bool, error) {
 		status, ok := m.statusManager.GetPodStatus(testPodUID)
 		if !ok {
@@ -430,7 +429,7 @@ func waitForReadyStatus(m *manager, ready bool) error {
 		}
 		return status.ContainerStatuses[0].Ready == ready, nil
 	}
-	klog.Infof("Polling for ready state %v", ready)
+	t.Logf("Polling for ready state %v", ready)
 	if err := wait.Poll(interval, wait.ForeverTestTimeout, condition); err != nil {
 		return err
 	}
@@ -445,7 +444,7 @@ func cleanup(t *testing.T, m *manager) {
 	condition := func() (bool, error) {
 		workerCount := m.workerCount()
 		if workerCount > 0 {
-			klog.Infof("Waiting for %d workers to exit...", workerCount)
+			t.Logf("Waiting for %d workers to exit...", workerCount)
 		}
 		return workerCount == 0, nil
 	}
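The helpers above lean on wait.Poll from k8s.io/apimachinery/pkg/util/wait, which repeatedly invokes a condition function until it returns true, returns an error, or the timeout expires. A self-contained sketch of the same shape, with a hypothetical condition, showing that the threaded *testing.T is used only for progress logging while errors still flow back to the caller:

package example

import (
	"testing"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitFor polls until done() reports true; a hypothetical stand-in for
// waitForWorkerExit above.
func waitFor(t *testing.T, done func() (bool, error)) error {
	if ok, _ := done(); ok {
		return nil // already satisfied, no need to poll
	}
	t.Logf("polling for condition")
	return wait.Poll(time.Second, wait.ForeverTestTimeout, done)
}

func TestWaitFor(t *testing.T) {
	n := 0
	if err := waitFor(t, func() (bool, error) { n++; return n > 2, nil }); err != nil {
		t.Fatal(err)
	}
}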

View File

@@ -283,7 +283,7 @@ func TestCleanUp(t *testing.T) {
 		for i := 0; i < 10; i++ {
 			w.stop() // Stop should be callable multiple times without consequence.
 		}
-		if err := waitForWorkerExit(m, []probeKey{key}); err != nil {
+		if err := waitForWorkerExit(t, m, []probeKey{key}); err != nil {
 			t.Fatalf("[%s] error waiting for worker exit: %v", probeType, err)
 		}

View File

@@ -23,7 +23,6 @@ import (
 	"time"
 	"github.com/stretchr/testify/assert"
-	"k8s.io/klog/v2"
 	"k8s.io/mount-utils"
 	v1 "k8s.io/api/core/v1"
@@ -1831,7 +1830,7 @@ func Test_Run_Positive_VolumeMountControllerAttachEnabledRace(t *testing.T) {
 	fakePlugin.UnmountDeviceHook = func(mountPath string) error {
 		// Act:
 		// 3. While a volume is being unmounted, add it back to the desired state of world
-		klog.Infof("UnmountDevice called")
+		t.Logf("UnmountDevice called")
 		generatedVolumeName, err = dsw.AddPodToVolume(
 			podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
 		dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName})
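One caveat worth noting for this last hunk: the UnmountDeviceHook closure runs on the reconciler's goroutine, not the test's. t.Logf is safe to call from other goroutines, but it panics if called after the test function has returned, so tests that wire t into asynchronous hooks must stop or drain the background work before returning. A minimal illustration of that constraint:

package example

import "testing"

func TestLogFromGoroutine(t *testing.T) {
	done := make(chan struct{})
	go func() {
		defer close(done)
		t.Logf("background work") // fine: t.Logf may be called from other goroutines
	}()
	<-done // wait before returning: logging after the test ends panics
}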