Migrate to k8s.io/utils/clock in pkg/kubelet

wojtekt 2021-09-09 17:07:44 +02:00
parent 392292ba81
commit 53ce79a18a
34 changed files with 88 additions and 83 deletions
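Every hunk below applies the same substitution: the k8s.io/apimachinery/pkg/util/clock import is removed, non-test code imports k8s.io/utils/clock instead (with struct fields such as the kubelet's and the eviction manager's clock changing from clock.Clock to clock.WithTicker), and tests build their fake clocks from k8s.io/utils/clock/testing. The following is a minimal sketch of the resulting pattern, not code from this commit; the tracker type and its field are hypothetical stand-ins for the kubelet structs touched here.

// Sketch only (not part of this commit): the replacement pattern used throughout pkg/kubelet.
package main

import (
	"fmt"
	"time"

	"k8s.io/utils/clock"                      // replaces k8s.io/apimachinery/pkg/util/clock
	testingclock "k8s.io/utils/clock/testing" // fake clocks now live in a dedicated testing package
)

// tracker is a hypothetical consumer; kubelet structs now declare the field
// as clock.WithTicker (or clock.Clock) in the same way.
type tracker struct {
	clock clock.WithTicker
}

func (t *tracker) age(since time.Time) time.Duration {
	return t.clock.Since(since)
}

func main() {
	// Production wiring: a real clock satisfies clock.WithTicker.
	prod := &tracker{clock: clock.RealClock{}}
	_ = prod.age(time.Now())

	// Test wiring: the fake clock moved from clock.NewFakeClock (apimachinery)
	// to testingclock.NewFakeClock; *testingclock.FakeClock also satisfies WithTicker.
	fakeClock := testingclock.NewFakeClock(time.Now())
	tst := &tracker{clock: fakeClock}
	start := fakeClock.Now()
	fakeClock.Step(2 * time.Second) // advance the fake clock deterministically
	fmt.Println(tst.age(start))     // prints 2s
}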

View File

@ -21,10 +21,10 @@ import (
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/utils/clock"
)
const (

View File

@ -23,8 +23,8 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/tools/record"
testingclock "k8s.io/utils/clock/testing"
)
// mockPodStatusProvider returns the status on the specified pod
@ -45,7 +45,7 @@ func (m *mockPodStatusProvider) GetPodStatus(uid types.UID) (v1.PodStatus, bool)
// TestActiveDeadlineHandler verifies the active deadline handler functions as expected.
func TestActiveDeadlineHandler(t *testing.T) {
pods := newTestPods(4)
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
podStatusProvider := &mockPodStatusProvider{pods: pods}
fakeRecorder := &record.FakeRecorder{}
handler, err := newActiveDeadlineHandler(podStatusProvider, fakeRecorder, fakeClock)

View File

@ -29,9 +29,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/utils/clock"
)
// Manager interface provides methods for Kubelet to manage ConfigMap.

View File

@ -27,11 +27,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/kubelet/util/manager"
"k8s.io/utils/clock"
)
func checkObject(t *testing.T, store manager.Store, ns, name string, shouldExist bool) {

View File

@ -25,7 +25,7 @@ import (
"sync"
"time"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/utils/clock"
)
var (

View File

@ -26,7 +26,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/clock"
testingclock "k8s.io/utils/clock/testing"
)
func TestInsert(t *testing.T) {
@ -200,9 +200,9 @@ func TestGC(t *testing.T) {
assertCacheSize(t, c, 1)
}
- func newTestCache() (*requestCache, *clock.FakeClock) {
+ func newTestCache() (*requestCache, *testingclock.FakeClock) {
c := newRequestCache()
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
c.clock = fakeClock
return c, fakeClock
}

View File

@ -30,7 +30,6 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/clock"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
@ -38,6 +37,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
nettest "k8s.io/kubernetes/pkg/kubelet/dockershim/network/testing"
"k8s.io/kubernetes/pkg/kubelet/util/cache"
testingclock "k8s.io/utils/clock/testing"
)
// newTestNetworkPlugin returns a mock plugin that implements network.NetworkPlugin
@ -80,8 +80,8 @@ func newMockCheckpointManager() checkpointmanager.CheckpointManager {
return &mockCheckpointManager{checkpoint: make(map[string]*PodSandboxCheckpoint)}
}
- func newTestDockerService() (*dockerService, *libdocker.FakeDockerClient, *clock.FakeClock) {
- fakeClock := clock.NewFakeClock(time.Time{})
+ func newTestDockerService() (*dockerService, *libdocker.FakeDockerClient, *testingclock.FakeClock) {
+ fakeClock := testingclock.NewFakeClock(time.Time{})
c := libdocker.NewFakeDockerClient().WithClock(fakeClock).WithVersion("1.11.2", "1.23").WithRandSource(rand.NewSource(0))
pm := network.NewPluginManager(&network.NoopNetworkPlugin{})
ckm := newMockCheckpointManager()
@ -94,7 +94,7 @@ func newTestDockerService() (*dockerService, *libdocker.FakeDockerClient, *clock
}, c, fakeClock
}
- func newTestDockerServiceWithVersionCache() (*dockerService, *libdocker.FakeDockerClient, *clock.FakeClock) {
+ func newTestDockerServiceWithVersionCache() (*dockerService, *libdocker.FakeDockerClient, *testingclock.FakeClock) {
ds, c, fakeClock := newTestDockerService()
ds.versionCache = cache.NewObjectCache(
func() (interface{}, error) {

View File

@ -36,7 +36,7 @@ import (
dockerimagetypes "github.com/docker/docker/api/types/image"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/utils/clock"
)
type CalledDetail struct {

View File

@ -26,7 +26,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/clock"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
v1helper "k8s.io/component-helpers/scheduling/corev1"
@ -40,6 +39,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/server/stats"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/utils/clock"
)
const (
@ -59,7 +59,7 @@ const (
// managerImpl implements Manager
type managerImpl struct {
// used to track time
- clock clock.Clock
+ clock clock.WithTicker
// config is how the manager is configured
config Config
// the function to invoke to kill a pod
@ -113,7 +113,7 @@ func NewManager(
containerGC ContainerGC,
recorder record.EventRecorder,
nodeRef *v1.ObjectReference,
- clock clock.Clock,
+ clock clock.WithTicker,
) (Manager, lifecycle.PodAdmitHandler) {
manager := &managerImpl{
clock: clock,

View File

@ -24,7 +24,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
featuregatetesting "k8s.io/component-base/featuregate/testing"
@ -35,6 +34,7 @@ import (
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
testingclock "k8s.io/utils/clock/testing"
)
const (
@ -206,7 +206,7 @@ func TestMemoryPressure(t *testing.T) {
return pods
}
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
diskGC := &mockDiskGC{err: nil}
@ -471,7 +471,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
return pods
}
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
diskGC := &mockDiskGC{err: nil}
@ -668,7 +668,7 @@ func TestMinReclaim(t *testing.T) {
return pods
}
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
diskGC := &mockDiskGC{err: nil}
@ -808,7 +808,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
return pods
}
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
@ -1052,7 +1052,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
return pods
}
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
diskGC := &mockDiskGC{err: nil}
@ -1259,7 +1259,7 @@ func TestStaticCriticalPodsAreNotEvicted(t *testing.T) {
return mirrorPod, true
}
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
diskGC := &mockDiskGC{err: nil}
@ -1385,7 +1385,7 @@ func TestAllocatableMemoryPressure(t *testing.T) {
return pods
}
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
diskGC := &mockDiskGC{err: nil}
@ -1529,7 +1529,7 @@ func TestUpdateMemcgThreshold(t *testing.T) {
return []*v1.Pod{}
}
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
diskGC := &mockDiskGC{err: nil}

View File

@ -24,12 +24,12 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/tools/record"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
statstest "k8s.io/kubernetes/pkg/kubelet/server/stats/testing"
testingclock "k8s.io/utils/clock/testing"
)
var zero time.Time
@ -468,7 +468,7 @@ func TestGarbageCollectImageNotOldEnough(t *testing.T) {
}},
}
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
t.Log(fakeClock.Now())
_, err := manager.detectImages(fakeClock.Now())
require.NoError(t, err)

View File

@ -24,11 +24,11 @@ import (
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
. "k8s.io/kubernetes/pkg/kubelet/container"
ctest "k8s.io/kubernetes/pkg/kubelet/container/testing"
testingclock "k8s.io/utils/clock/testing"
)
type pullerExpects struct {
@ -158,7 +158,7 @@ func pullerTestCases() []pullerTestCase {
}
}
- func pullerTestEnv(c pullerTestCase, serialized bool) (puller ImageManager, fakeClock *clock.FakeClock, fakeRuntime *ctest.FakeRuntime, container *v1.Container) {
+ func pullerTestEnv(c pullerTestCase, serialized bool) (puller ImageManager, fakeClock *testingclock.FakeClock, fakeRuntime *ctest.FakeRuntime, container *v1.Container) {
container = &v1.Container{
Name: "container_name",
Image: c.containerImage,
@ -166,7 +166,7 @@ func pullerTestEnv(c pullerTestCase, serialized bool) (puller ImageManager, fake
}
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
- fakeClock = clock.NewFakeClock(time.Now())
+ fakeClock = testingclock.NewFakeClock(time.Now())
backOff.Clock = fakeClock
fakeRuntime = &ctest.FakeRuntime{}

View File

@ -45,7 +45,6 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
@ -121,6 +120,7 @@ import (
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
"k8s.io/utils/clock"
)
const (
@ -1139,7 +1139,7 @@ type Kubelet struct {
// clock is an interface that provides time related functionality in a way that makes it
// easy to test the code.
- clock clock.Clock
+ clock clock.WithTicker
// handlers called during the tryUpdateNodeStatus cycle
setNodeStatusFuncs []func(*v1.Node) error

View File

@ -37,7 +37,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
@ -81,6 +80,8 @@ import (
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/utils/clock"
testingclock "k8s.io/utils/clock/testing"
)
func init() {
@ -114,7 +115,7 @@ type TestKubelet struct {
fakeContainerManager *cm.FakeContainerManager
fakeKubeClient *fake.Clientset
fakeMirrorClient *podtest.FakeMirrorClient
- fakeClock *clock.FakeClock
+ fakeClock *testingclock.FakeClock
mounter mount.Interface
volumePlugin *volumetest.FakeVolumePlugin
}
@ -291,7 +292,7 @@ func newTestKubeletWithImageList(
assert.NoError(t, err)
kubelet.containerGC = containerGC
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
kubelet.backOff = flowcontrol.NewBackOff(time.Second, time.Minute)
kubelet.backOff.Clock = fakeClock
kubelet.resyncInterval = 10 * time.Second

View File

@ -30,11 +30,11 @@ import (
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/utils/clock"
)
const (

View File

@ -30,9 +30,9 @@ import (
"github.com/stretchr/testify/require"
"k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/apimachinery/pkg/util/clock"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
critest "k8s.io/cri-api/pkg/apis/testing"
testingclock "k8s.io/utils/clock/testing"
)
func TestGetAllLogs(t *testing.T) {
@ -92,7 +92,7 @@ func TestRotateLogs(t *testing.T) {
MaxFiles: testMaxFiles,
},
osInterface: container.RealOS{},
- clock: clock.NewFakeClock(now),
+ clock: testingclock.NewFakeClock(now),
}
testLogs := []string{
"test-log-1",
@ -179,7 +179,7 @@ func TestClean(t *testing.T) {
MaxFiles: testMaxFiles,
},
osInterface: container.RealOS{},
- clock: clock.NewFakeClock(now),
+ clock: testingclock.NewFakeClock(now),
}
testLogs := []string{
"test-log-1",
@ -382,7 +382,7 @@ func TestRotateLatestLog(t *testing.T) {
runtimeService: f,
policy: LogRotatePolicy{MaxFiles: test.maxFiles},
osInterface: container.RealOS{},
- clock: clock.NewFakeClock(now),
+ clock: testingclock.NewFakeClock(now),
}
if test.runtimeError != nil {
f.InjectError("ReopenContainerLog", test.runtimeError)

View File

@ -26,7 +26,6 @@ import (
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/clock"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
@ -36,6 +35,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/nodeshutdown/systemd"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/utils/clock"
)
const (

View File

@ -30,13 +30,13 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/apis/scheduling"
pkgfeatures "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/nodeshutdown/systemd"
testingclock "k8s.io/utils/clock/testing"
)
type fakeDbus struct {
@ -235,7 +235,7 @@ func TestManager(t *testing.T) {
nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
manager, _ := NewManager(fakeRecorder, nodeRef, activePodsFunc, killPodsFunc, func() {}, tc.shutdownGracePeriodRequested, tc.shutdownGracePeriodCriticalPods)
- manager.clock = clock.NewFakeClock(time.Now())
+ manager.clock = testingclock.NewFakeClock(time.Now())
err := manager.Start()
if tc.expectedError != nil {
@ -312,7 +312,7 @@ func TestFeatureEnabled(t *testing.T) {
nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
manager, _ := NewManager(fakeRecorder, nodeRef, activePodsFunc, killPodsFunc, func() {}, tc.shutdownGracePeriodRequested, 0 /*shutdownGracePeriodCriticalPods*/)
- manager.clock = clock.NewFakeClock(time.Now())
+ manager.clock = testingclock.NewFakeClock(time.Now())
assert.Equal(t, tc.expectEnabled, manager.isFeatureEnabled())
})

View File

@ -22,13 +22,13 @@ import (
"time"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"k8s.io/klog/v2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/utils/clock"
)
// GenericPLEG is an extremely simple generic PLEG that relies solely on

View File

@ -28,12 +28,13 @@ import (
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/component-base/metrics/testutil"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/utils/clock"
testingclock "k8s.io/utils/clock/testing"
)
const (
@ -45,7 +46,7 @@ const (
type TestGenericPLEG struct {
pleg *GenericPLEG
runtime *containertest.FakeRuntime
- clock *clock.FakeClock
+ clock *testingclock.FakeClock
}
func newTestGenericPLEG() *TestGenericPLEG {
@ -54,7 +55,7 @@ func newTestGenericPLEG() *TestGenericPLEG {
func newTestGenericPLEGWithChannelSize(eventChannelCap int) *TestGenericPLEG {
fakeRuntime := &containertest.FakeRuntime{}
- clock := clock.NewFakeClock(time.Time{})
+ clock := testingclock.NewFakeClock(time.Time{})
// The channel capacity should be large enough to hold all events in a
// single test.
pleg := &GenericPLEG{

View File

@ -27,13 +27,13 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/queue"
"k8s.io/utils/clock"
)
// fakePodWorkers runs sync pod function in serial, so we can have

View File

@ -22,7 +22,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
"k8s.io/component-base/metrics"
@ -30,6 +29,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/utils/clock"
)
// ProberResults stores the cumulative number of a probe by result as prometheus metrics.

View File

@ -28,7 +28,6 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
utiltesting "k8s.io/client-go/util/testing"
@ -48,6 +47,7 @@ import (
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/utils/clock"
)
func TestRunOnce(t *testing.T) {

View File

@ -29,9 +29,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/utils/clock"
)
// Manager manages Kubernetes secrets. This includes retrieving

View File

@ -27,11 +27,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/kubelet/util/manager"
"k8s.io/utils/clock"
)
func checkObject(t *testing.T, store manager.Store, ns, name string, shouldExist bool) {

View File

@ -30,10 +30,10 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"k8s.io/utils/clock"
)
const (

View File

@ -24,12 +24,12 @@ import (
authenticationv1 "k8s.io/api/authentication/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
testingclock "k8s.io/utils/clock/testing"
)
func TestTokenCachingAndExpiration(t *testing.T) {
type suite struct {
clock *clock.FakeClock
clock *testingclock.FakeClock
tg *fakeTokenGetter
mgr *Manager
}
@ -87,7 +87,7 @@ func TestTokenCachingAndExpiration(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
- clock := clock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
+ clock := testingclock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
expSecs := int64(c.exp.Seconds())
s := &suite{
clock: clock,
@ -165,7 +165,7 @@ func TestRequiresRefresh(t *testing.T) {
for i, c := range cases {
t.Run(fmt.Sprint(i), func(t *testing.T) {
- clock := clock.NewFakeClock(c.now)
+ clock := testingclock.NewFakeClock(c.now)
secs := int64(c.exp.Sub(start).Seconds())
tr := &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
@ -335,7 +335,7 @@ func TestDeleteServiceAccountToken(t *testing.T) {
},
}
testMgr := NewManager(nil)
- testMgr.clock = clock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
+ testMgr.clock = testingclock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
successGetToken := func(_, _ string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
tr.Status = authenticationv1.TokenRequestStatus{
@ -404,7 +404,7 @@ func TestCleanup(t *testing.T) {
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
- clock := clock.NewFakeClock(time.Time{}.Add(24 * time.Hour))
+ clock := testingclock.NewFakeClock(time.Time{}.Add(24 * time.Hour))
mgr := NewManager(nil)
mgr.clock = clock
@ -568,7 +568,7 @@ func TestKeyFunc(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
mgr := NewManager(nil)
- mgr.clock = clock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
+ mgr.clock = testingclock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
for _, tru := range c.trus {
mgr.set(getKeyFunc(tru), &authenticationv1.TokenRequest{
Status: authenticationv1.TokenRequestStatus{

View File

@ -29,8 +29,8 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/clock"
)
// GetObjectTTLFunc defines a function to get value of TTL.

View File

@ -30,13 +30,14 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/utils/clock"
testingclock "k8s.io/utils/clock/testing"
"github.com/stretchr/testify/assert"
)
@ -149,7 +150,7 @@ func TestSecretStoreDeletingSecret(t *testing.T) {
func TestSecretStoreGetAlwaysRefresh(t *testing.T) {
fakeClient := &fake.Clientset{}
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretStore(fakeClient, fakeClock, noObjectTTL, 0)
for i := 0; i < 10; i++ {
@ -176,7 +177,7 @@ func TestSecretStoreGetAlwaysRefresh(t *testing.T) {
func TestSecretStoreGetNeverRefresh(t *testing.T) {
fakeClient := &fake.Clientset{}
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretStore(fakeClient, fakeClock, noObjectTTL, time.Minute)
for i := 0; i < 10; i++ {
@ -206,7 +207,7 @@ func TestCustomTTL(t *testing.T) {
}
fakeClient := &fake.Clientset{}
- fakeClock := clock.NewFakeClock(time.Time{})
+ fakeClock := testingclock.NewFakeClock(time.Time{})
store := newSecretStore(fakeClient, fakeClock, customTTL, time.Minute)
store.AddReference("ns", "name")
@ -377,7 +378,7 @@ func podWithSecrets(ns, podName string, toAttach secretsToAttach) *v1.Pod {
func TestCacheInvalidation(t *testing.T) {
fakeClient := &fake.Clientset{}
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretStore(fakeClient, fakeClock, noObjectTTL, time.Minute)
manager := newCacheBasedSecretManager(store)
@ -432,7 +433,7 @@ func TestCacheInvalidation(t *testing.T) {
func TestRegisterIdempotence(t *testing.T) {
fakeClient := &fake.Clientset{}
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretStore(fakeClient, fakeClock, noObjectTTL, time.Minute)
manager := newCacheBasedSecretManager(store)
@ -467,7 +468,7 @@ func TestRegisterIdempotence(t *testing.T) {
func TestCacheRefcounts(t *testing.T) {
fakeClient := &fake.Clientset{}
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretStore(fakeClient, fakeClock, noObjectTTL, time.Minute)
manager := newCacheBasedSecretManager(store)

View File

@ -31,10 +31,10 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/utils/clock"
)
type listObjectFunc func(string, metav1.ListOptions) (runtime.Object, error)

View File

@ -28,7 +28,6 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
@ -38,6 +37,9 @@ import (
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
"k8s.io/utils/clock"
testingclock "k8s.io/utils/clock/testing"
"github.com/stretchr/testify/assert"
)
@ -88,7 +90,7 @@ func TestSecretCache(t *testing.T) {
fakeWatch := watch.NewFake()
fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil))
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretCache(fakeClient, fakeClock, time.Minute)
store.AddReference("ns", "name")
@ -158,7 +160,7 @@ func TestSecretCacheMultipleRegistrations(t *testing.T) {
fakeWatch := watch.NewFake()
fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil))
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretCache(fakeClient, fakeClock, time.Minute)
store.AddReference("ns", "name")
@ -264,7 +266,7 @@ func TestImmutableSecretStopsTheReflector(t *testing.T) {
fakeWatch := watch.NewFake()
fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil))
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretCache(fakeClient, fakeClock, time.Minute)
key := objectKey{namespace: "ns", name: "name"}
@ -351,7 +353,7 @@ func TestMaxIdleTimeStopsTheReflector(t *testing.T) {
fakeClient.AddReactor("list", "secrets", listReactor)
fakeWatch := watch.NewFake()
fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil))
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretCache(fakeClient, fakeClock, time.Minute)
key := objectKey{namespace: "ns", name: "name"}
@ -415,7 +417,7 @@ func TestReflectorNotStopedOnSlowInitialization(t *testing.T) {
},
}
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
fakeClient := &fake.Clientset{}
listReactor := func(a core.Action) (bool, runtime.Object, error) {

View File

@ -21,7 +21,7 @@ import (
"time"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/utils/clock"
)
// WorkQueue allows queuing items with a timestamp. An item is

View File

@ -21,12 +21,12 @@ import (
"time"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
testingclock "k8s.io/utils/clock/testing"
)
func newTestBasicWorkQueue() (*basicWorkQueue, *clock.FakeClock) {
fakeClock := clock.NewFakeClock(time.Now())
func newTestBasicWorkQueue() (*basicWorkQueue, *testingclock.FakeClock) {
fakeClock := testingclock.NewFakeClock(time.Now())
wq := &basicWorkQueue{
clock: fakeClock,
queue: make(map[types.UID]time.Time),

View File

@ -27,13 +27,13 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
"k8s.io/kubernetes/pkg/kubelet/util/manager"
"k8s.io/kubernetes/test/integration/framework"
testingclock "k8s.io/utils/clock/testing"
)
func TestWatchBasedManager(t *testing.T) {
@ -62,7 +62,7 @@ func TestWatchBasedManager(t *testing.T) {
// We want all watches to be up and running to stress test it.
// So don't treat any secret as immutable here.
isImmutable := func(_ runtime.Object) bool { return false }
- fakeClock := clock.NewFakeClock(time.Now())
+ fakeClock := testingclock.NewFakeClock(time.Now())
store := manager.NewObjectCache(listObj, watchObj, newObj, isImmutable, schema.GroupResource{Group: "v1", Resource: "secrets"}, fakeClock, time.Minute)
// create 1000 secrets in parallel