Merge pull request #104874 from wojtek-t/migrate_clock_1

Unify towards k8s.io/utils/clock - part 1
Kubernetes Prow Robot 2021-09-13 19:09:20 -07:00 committed by GitHub
commit 047a6b9f86
58 changed files with 154 additions and 145 deletions
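Every file below follows the same mechanical pattern: the deprecated `k8s.io/apimachinery/pkg/util/clock` import is dropped in favour of `k8s.io/utils/clock` in production code and `k8s.io/utils/clock/testing` (conventionally aliased `testingclock`) in tests, with call sites such as `clock.NewFakeClock` renamed to match. A minimal sketch of the pattern, assuming only the public API of the two packages; the `elapsedSince` helper and its test are illustrative and not taken from any file in this PR:

```go
package example

import (
	"testing"
	"time"

	// Before this PR a single import, "k8s.io/apimachinery/pkg/util/clock",
	// served both of these roles.
	"k8s.io/utils/clock"                      // real clocks and clock interfaces
	testingclock "k8s.io/utils/clock/testing" // fake clocks for tests
)

// elapsedSince depends only on the interface, so production code can inject
// clock.RealClock{} while tests inject a *testingclock.FakeClock.
func elapsedSince(c clock.PassiveClock, start time.Time) time.Duration {
	return c.Since(start)
}

func TestElapsedSince(t *testing.T) {
	start := time.Unix(100, 0)
	fakeClock := testingclock.NewFakeClock(start)
	fakeClock.Step(10 * time.Second) // advance time deterministically, as the DaemonSet tests below do
	if got := elapsedSince(fakeClock, start); got != 10*time.Second {
		t.Fatalf("expected 10s, got %v", got)
	}
}
```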

View File

@@ -30,7 +30,6 @@ import (
"github.com/google/go-cmp/cmp"
capi "k8s.io/api/certificates/v1"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/diff"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes/fake"
@@ -41,10 +40,11 @@ import (
capihelper "k8s.io/kubernetes/pkg/apis/certificates/v1"
"k8s.io/kubernetes/pkg/controller/certificates"
"k8s.io/kubernetes/pkg/features"
+testingclock "k8s.io/utils/clock/testing"
)
func TestSigner(t *testing.T) {
-fakeClock := clock.FakeClock{}
+fakeClock := testingclock.FakeClock{}
s, err := newSigner("kubernetes.io/legacy-unknown", "./testdata/ca.crt", "./testdata/ca.key", nil, 1*time.Hour)
if err != nil {

View File

@@ -35,7 +35,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
@@ -52,6 +51,7 @@ import (
"k8s.io/kubernetes/pkg/features"
hashutil "k8s.io/kubernetes/pkg/util/hash"
taintutils "k8s.io/kubernetes/pkg/util/taints"
+"k8s.io/utils/clock"
"k8s.io/utils/integer"
"k8s.io/klog/v2"

View File

@@ -31,7 +31,6 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
@@ -55,6 +54,7 @@ import (
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/securitycontext"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
+testingclock "k8s.io/utils/clock/testing"
)
var (
@@ -309,7 +309,7 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController,
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().Nodes(),
clientset,
-flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, clock.NewFakeClock(time.Now())),
+flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, testingclock.NewFakeClock(time.Now())),
)
if err != nil {
return nil, nil, nil, err
@@ -473,7 +473,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
f.Core().V1().Pods(),
f.Core().V1().Nodes(),
client,
-flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, clock.NewFakeClock(time.Now())),
+flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, testingclock.NewFakeClock(time.Now())),
)
if err != nil {
t.Fatal(err)
@@ -3411,7 +3411,7 @@ func TestSurgePreservesOldReadyWithUnsatisfiedMinReady(t *testing.T) {
addNodes(manager.nodeStore, 0, 5, nil)
// the clock will be set 10s after the newest pod on node-1 went ready, which is not long enough to be available
-manager.DaemonSetsController.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(50+10, 0))
+manager.DaemonSetsController.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(50+10, 0))
// will be preserved because it has the newest hash
pod := newPod("node-1-", "node-1", simpleDaemonSetLabel, ds)
@@ -3456,7 +3456,7 @@ func TestSurgeDeletesOldReadyWithUnsatisfiedMinReady(t *testing.T) {
addNodes(manager.nodeStore, 0, 5, nil)
// the clock will be set 20s after the newest pod on node-1 went ready, which is not long enough to be available
-manager.DaemonSetsController.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(50+20, 0))
+manager.DaemonSetsController.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(50+20, 0))
// will be preserved because it has the newest hash
pod := newPod("node-1-", "node-1", simpleDaemonSetLabel, ds)

View File

@@ -24,7 +24,6 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/intstr"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -32,6 +31,7 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/daemon/util"
"k8s.io/kubernetes/pkg/features"
+testingclock "k8s.io/utils/clock/testing"
)
func TestDaemonSetUpdatesPods(t *testing.T) {
@@ -205,12 +205,12 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
manager.dsStore.Update(ds)
// all old pods are unavailable so should be surged
-manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(100, 0))
+manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(100, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
// waiting for pods to go ready, old pods are deleted
-manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(200, 0))
+manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(200, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 5, 0)
@@ -219,7 +219,7 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
ds.Spec.Template.Spec.Containers[0].Image = "foo3/bar3"
manager.dsStore.Update(ds)
-manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(300, 0))
+manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(300, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 3, 0, 0)
@@ -243,12 +243,12 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
// the new pods should still be considered waiting to hit min readiness, so one pod should be created to replace
// the deleted old pod
-manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(310, 0))
+manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(310, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
// the new pods are now considered available, so delete the old pods
-manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(320, 0))
+manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(320, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 1, 3, 0)
@@ -259,12 +259,12 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
})
// the new pods are now considered available, so delete the old pods
-manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(340, 0))
+manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(340, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 2, 0)
// controller has completed upgrade
-manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(350, 0))
+manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(350, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}

View File

@@ -21,9 +21,9 @@ import (
"time"
"k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/klog/v2"
+// TODO: Switch to k8s.io/utils/clock once it supports AfterFunc()
+"k8s.io/apimachinery/pkg/util/clock"
)
// WorkArgs keeps arguments that will be passed to the function executed by the worker.
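This is the one deliberate exception in the PR: the timed-workers file keeps the apimachinery clock because, per the TODO added above, `k8s.io/utils/clock` did not yet offer `AfterFunc`, so only the import placement and comment change here. A rough sketch of the kind of usage that blocks the migration; `scheduleAt` is illustrative, not this file's actual code:

```go
package example

import (
	"time"

	// Kept on purpose here: k8s.io/utils/clock lacked AfterFunc at the time of this PR.
	"k8s.io/apimachinery/pkg/util/clock"
)

// scheduleAt runs fn once the injected clock reaches fireAt; the returned
// Timer lets the caller cancel work that has not fired yet.
func scheduleAt(c clock.Clock, fireAt time.Time, fn func()) clock.Timer {
	delay := fireAt.Sub(c.Now())
	if delay <= 0 {
		fn()
		return nil
	}
	return c.AfterFunc(delay, fn)
}
```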

View File

@@ -25,7 +25,6 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
@@ -35,6 +34,7 @@ import (
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/testutil"
+testingclock "k8s.io/utils/clock/testing"
)
func alwaysReady() bool { return true }
@@ -322,7 +322,7 @@ func TestGCOrphaned(t *testing.T) {
podInformer.Informer().GetStore().Add(pod)
}
// Overwrite queue
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
gcc.nodeQueue.ShutDown()
gcc.nodeQueue = workqueue.NewDelayingQueueWithCustomClock(fakeClock, "podgc_test_queue")

View File

@@ -26,7 +26,6 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime/schema"
-"k8s.io/apimachinery/pkg/util/clock"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
@@ -37,6 +36,7 @@ import (
"k8s.io/controller-manager/pkg/informerfactory"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/quota/v1/evaluator/core"
+"k8s.io/utils/clock"
)
type eventType int

View File

@@ -32,7 +32,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/watch"
@@ -45,6 +44,8 @@ import (
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/api/legacyscheme"
api "k8s.io/kubernetes/pkg/apis/core"
+"k8s.io/utils/clock"
+testingclock "k8s.io/utils/clock/testing"
jsonpatch "github.com/evanphx/json-patch"
)
@@ -455,7 +456,7 @@ func NewFakeRecorder() *FakeRecorder {
return &FakeRecorder{
source: v1.EventSource{Component: "nodeControllerTest"},
Events: []*v1.Event{},
-clock: clock.NewFakeClock(time.Now()),
+clock: testingclock.NewFakeClock(time.Now()),
}
}

View File

@@ -27,7 +27,6 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
batchinformers "k8s.io/client-go/informers/batch/v1"
@@ -42,6 +41,7 @@ import (
"k8s.io/kubernetes/pkg/controller"
jobutil "k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/pkg/controller/ttlafterfinished/metrics"
+"k8s.io/utils/clock"
)
// Controller watches for changes of Jobs API objects. Triggered by Job creation

View File

@@ -55,7 +55,6 @@ import (
storageapiv1alpha1 "k8s.io/api/storage/v1alpha1"
storageapiv1beta1 "k8s.io/api/storage/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-"k8s.io/apimachinery/pkg/util/clock"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
@@ -84,6 +83,7 @@ import (
"k8s.io/kubernetes/pkg/routes"
"k8s.io/kubernetes/pkg/serviceaccount"
nodeutil "k8s.io/kubernetes/pkg/util/node"
+"k8s.io/utils/clock"
// RESTStorage installers
admissionregistrationrest "k8s.io/kubernetes/pkg/registry/admissionregistration/rest"

View File

@@ -21,10 +21,10 @@ import (
"time"
"k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/status"
+"k8s.io/utils/clock"
)
const (

View File

@@ -23,8 +23,8 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/tools/record"
+testingclock "k8s.io/utils/clock/testing"
)
// mockPodStatusProvider returns the status on the specified pod
@@ -45,7 +45,7 @@ func (m *mockPodStatusProvider) GetPodStatus(uid types.UID) (v1.PodStatus, bool)
// TestActiveDeadlineHandler verifies the active deadline handler functions as expected.
func TestActiveDeadlineHandler(t *testing.T) {
pods := newTestPods(4)
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
podStatusProvider := &mockPodStatusProvider{pods: pods}
fakeRecorder := &record.FakeRecorder{}
handler, err := newActiveDeadlineHandler(podStatusProvider, fakeRecorder, fakeClock)

View File

@@ -29,9 +29,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
+"k8s.io/utils/clock"
)
// Manager interface provides methods for Kubelet to manage ConfigMap.

View File

@@ -27,11 +27,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
-"k8s.io/apimachinery/pkg/util/clock"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/kubelet/util/manager"
+"k8s.io/utils/clock"
)
func checkObject(t *testing.T, store manager.Store, ns, name string, shouldExist bool) {

View File

@@ -25,7 +25,7 @@ import (
"sync"
"time"
-"k8s.io/apimachinery/pkg/util/clock"
+"k8s.io/utils/clock"
)
var (

View File

@@ -26,7 +26,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-"k8s.io/apimachinery/pkg/util/clock"
+testingclock "k8s.io/utils/clock/testing"
)
func TestInsert(t *testing.T) {
@@ -200,9 +200,9 @@ func TestGC(t *testing.T) {
assertCacheSize(t, c, 1)
}
-func newTestCache() (*requestCache, *clock.FakeClock) {
+func newTestCache() (*requestCache, *testingclock.FakeClock) {
c := newRequestCache()
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
c.clock = fakeClock
return c, fakeClock
}

View File

@@ -30,7 +30,6 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-"k8s.io/apimachinery/pkg/util/clock"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
@@ -38,6 +37,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
nettest "k8s.io/kubernetes/pkg/kubelet/dockershim/network/testing"
"k8s.io/kubernetes/pkg/kubelet/util/cache"
+testingclock "k8s.io/utils/clock/testing"
)
type mockCheckpointManager struct {
@@ -74,8 +74,8 @@ func newMockCheckpointManager() checkpointmanager.CheckpointManager {
return &mockCheckpointManager{checkpoint: make(map[string]*PodSandboxCheckpoint)}
}
-func newTestDockerService() (*dockerService, *libdocker.FakeDockerClient, *clock.FakeClock) {
+func newTestDockerService() (*dockerService, *libdocker.FakeDockerClient, *testingclock.FakeClock) {
-fakeClock := clock.NewFakeClock(time.Time{})
+fakeClock := testingclock.NewFakeClock(time.Time{})
c := libdocker.NewFakeDockerClient().WithClock(fakeClock).WithVersion("1.11.2", "1.23").WithRandSource(rand.NewSource(0))
pm := network.NewPluginManager(&network.NoopNetworkPlugin{})
ckm := newMockCheckpointManager()
@@ -88,7 +88,7 @@ func newTestDockerService() (*dockerService, *libdocker.FakeDockerClient, *clock
}, c, fakeClock
}
-func newTestDockerServiceWithVersionCache() (*dockerService, *libdocker.FakeDockerClient, *clock.FakeClock) {
+func newTestDockerServiceWithVersionCache() (*dockerService, *libdocker.FakeDockerClient, *testingclock.FakeClock) {
ds, c, fakeClock := newTestDockerService()
ds.versionCache = cache.NewObjectCache(
func() (interface{}, error) {

View File

@@ -36,7 +36,7 @@ import (
dockerimagetypes "github.com/docker/docker/api/types/image"
"k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/util/clock"
+"k8s.io/utils/clock"
)
type CalledDetail struct {

View File

@@ -26,7 +26,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
-"k8s.io/apimachinery/pkg/util/clock"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
v1helper "k8s.io/component-helpers/scheduling/corev1"
@@ -40,6 +39,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/server/stats"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
+"k8s.io/utils/clock"
)
const (
@@ -59,7 +59,7 @@ const (
// managerImpl implements Manager
type managerImpl struct {
// used to track time
-clock clock.Clock
+clock clock.WithTicker
// config is how the manager is configured
config Config
// the function to invoke to kill a pod
@@ -113,7 +113,7 @@ func NewManager(
containerGC ContainerGC,
recorder record.EventRecorder,
nodeRef *v1.ObjectReference,
-clock clock.Clock,
+clock clock.WithTicker,
) (Manager, lifecycle.PodAdmitHandler) {
manager := &managerImpl{
clock: clock,
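Besides the import swap, this file also tightens the injected clock's type: the eviction manager (and the `Kubelet` struct later in this diff) now asks for `clock.WithTicker`, the `k8s.io/utils/clock` interface that layers `NewTicker` on top of `Clock`. A hedged sketch of why a caller would want that and how both the real and the fake clock satisfy it; the `monitor` type and `run` loop are illustrative, not the manager's actual code:

```go
package example

import (
	"time"

	"k8s.io/utils/clock"
	testingclock "k8s.io/utils/clock/testing"
)

// monitor stands in for a component like the eviction manager: it needs
// Now/Sleep plus a periodic ticker, so it asks for clock.WithTicker.
type monitor struct {
	clock clock.WithTicker
}

// run samples observe on every tick until stopCh is closed.
func (m *monitor) run(stopCh <-chan struct{}, interval time.Duration, observe func(time.Time)) {
	ticker := m.clock.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case now := <-ticker.C():
			observe(now)
		case <-stopCh:
			return
		}
	}
}

// Both the real clock and the test fake satisfy the interface.
var (
	_ = monitor{clock: clock.RealClock{}}
	_ = monitor{clock: testingclock.NewFakeClock(time.Now())}
)
```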

View File

@@ -24,7 +24,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -35,6 +34,7 @@ import (
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
+testingclock "k8s.io/utils/clock/testing"
)
const (
@@ -206,7 +206,7 @@ func TestMemoryPressure(t *testing.T) {
return pods
}
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
diskGC := &mockDiskGC{err: nil}
@@ -471,7 +471,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
return pods
}
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
diskGC := &mockDiskGC{err: nil}
@@ -668,7 +668,7 @@ func TestMinReclaim(t *testing.T) {
return pods
}
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
diskGC := &mockDiskGC{err: nil}
@@ -808,7 +808,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
return pods
}
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
@@ -1052,7 +1052,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
return pods
}
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
diskGC := &mockDiskGC{err: nil}
@@ -1259,7 +1259,7 @@ func TestStaticCriticalPodsAreNotEvicted(t *testing.T) {
return mirrorPod, true
}
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
diskGC := &mockDiskGC{err: nil}
@@ -1385,7 +1385,7 @@ func TestAllocatableMemoryPressure(t *testing.T) {
return pods
}
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
diskGC := &mockDiskGC{err: nil}
@@ -1529,7 +1529,7 @@ func TestUpdateMemcgThreshold(t *testing.T) {
return []*v1.Pod{}
}
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
diskGC := &mockDiskGC{err: nil}

View File

@@ -24,12 +24,12 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/tools/record"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
statstest "k8s.io/kubernetes/pkg/kubelet/server/stats/testing"
+testingclock "k8s.io/utils/clock/testing"
)
var zero time.Time
@@ -468,7 +468,7 @@ func TestGarbageCollectImageNotOldEnough(t *testing.T) {
}},
}
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
t.Log(fakeClock.Now())
_, err := manager.detectImages(fakeClock.Now())
require.NoError(t, err)

View File

@@ -24,11 +24,11 @@ import (
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
. "k8s.io/kubernetes/pkg/kubelet/container"
ctest "k8s.io/kubernetes/pkg/kubelet/container/testing"
+testingclock "k8s.io/utils/clock/testing"
)
type pullerExpects struct {
@@ -158,7 +158,7 @@ func pullerTestCases() []pullerTestCase {
}
}
-func pullerTestEnv(c pullerTestCase, serialized bool) (puller ImageManager, fakeClock *clock.FakeClock, fakeRuntime *ctest.FakeRuntime, container *v1.Container) {
+func pullerTestEnv(c pullerTestCase, serialized bool) (puller ImageManager, fakeClock *testingclock.FakeClock, fakeRuntime *ctest.FakeRuntime, container *v1.Container) {
container = &v1.Container{
Name: "container_name",
Image: c.containerImage,
@@ -166,7 +166,7 @@ func pullerTestEnv(c pullerTestCase, serialized bool) (puller ImageManager, fake
}
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
-fakeClock = clock.NewFakeClock(time.Now())
+fakeClock = testingclock.NewFakeClock(time.Now())
backOff.Clock = fakeClock
fakeRuntime = &ctest.FakeRuntime{}

View File

@@ -45,7 +45,6 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
@@ -121,6 +120,7 @@ import (
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
+"k8s.io/utils/clock"
)
const (
@@ -1138,7 +1138,7 @@ type Kubelet struct {
// clock is an interface that provides time related functionality in a way that makes it
// easy to test the code.
-clock clock.Clock
+clock clock.WithTicker
// handlers called during the tryUpdateNodeStatus cycle
setNodeStatusFuncs []func(*v1.Node) error

View File

@@ -37,7 +37,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
@@ -81,6 +80,8 @@ import (
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
+"k8s.io/utils/clock"
+testingclock "k8s.io/utils/clock/testing"
)
func init() {
@@ -114,7 +115,7 @@ type TestKubelet struct {
fakeContainerManager *cm.FakeContainerManager
fakeKubeClient *fake.Clientset
fakeMirrorClient *podtest.FakeMirrorClient
-fakeClock *clock.FakeClock
+fakeClock *testingclock.FakeClock
mounter mount.Interface
volumePlugin *volumetest.FakeVolumePlugin
}
@@ -291,7 +292,7 @@ func newTestKubeletWithImageList(
assert.NoError(t, err)
kubelet.containerGC = containerGC
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
kubelet.backOff = flowcontrol.NewBackOff(time.Second, time.Minute)
kubelet.backOff.Clock = fakeClock
kubelet.resyncInterval = 10 * time.Second

View File

@@ -30,11 +30,11 @@ import (
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/api/resource"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+"k8s.io/utils/clock"
)
const (

View File

@@ -30,9 +30,9 @@ import (
"github.com/stretchr/testify/require"
"k8s.io/kubernetes/pkg/kubelet/container"
-"k8s.io/apimachinery/pkg/util/clock"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
critest "k8s.io/cri-api/pkg/apis/testing"
+testingclock "k8s.io/utils/clock/testing"
)
func TestGetAllLogs(t *testing.T) {
@@ -92,7 +92,7 @@ func TestRotateLogs(t *testing.T) {
MaxFiles: testMaxFiles,
},
osInterface: container.RealOS{},
-clock: clock.NewFakeClock(now),
+clock: testingclock.NewFakeClock(now),
}
testLogs := []string{
"test-log-1",
@@ -179,7 +179,7 @@ func TestClean(t *testing.T) {
MaxFiles: testMaxFiles,
},
osInterface: container.RealOS{},
-clock: clock.NewFakeClock(now),
+clock: testingclock.NewFakeClock(now),
}
testLogs := []string{
"test-log-1",
@@ -382,7 +382,7 @@ func TestRotateLatestLog(t *testing.T) {
runtimeService: f,
policy: LogRotatePolicy{MaxFiles: test.maxFiles},
osInterface: container.RealOS{},
-clock: clock.NewFakeClock(now),
+clock: testingclock.NewFakeClock(now),
}
if test.runtimeError != nil {
f.InjectError("ReopenContainerLog", test.runtimeError)

View File

@@ -26,7 +26,6 @@ import (
"time"
v1 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/util/clock"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
@@ -36,6 +35,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/nodeshutdown/systemd"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
+"k8s.io/utils/clock"
)
const (

View File

@@ -30,13 +30,13 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/apis/scheduling"
pkgfeatures "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/nodeshutdown/systemd"
+testingclock "k8s.io/utils/clock/testing"
)
type fakeDbus struct {
@@ -235,7 +235,7 @@ func TestManager(t *testing.T) {
nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
manager, _ := NewManager(fakeRecorder, nodeRef, activePodsFunc, killPodsFunc, func() {}, tc.shutdownGracePeriodRequested, tc.shutdownGracePeriodCriticalPods)
-manager.clock = clock.NewFakeClock(time.Now())
+manager.clock = testingclock.NewFakeClock(time.Now())
err := manager.Start()
if tc.expectedError != nil {
@@ -312,7 +312,7 @@ func TestFeatureEnabled(t *testing.T) {
nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
manager, _ := NewManager(fakeRecorder, nodeRef, activePodsFunc, killPodsFunc, func() {}, tc.shutdownGracePeriodRequested, 0 /*shutdownGracePeriodCriticalPods*/)
-manager.clock = clock.NewFakeClock(time.Now())
+manager.clock = testingclock.NewFakeClock(time.Now())
assert.Equal(t, tc.expectEnabled, manager.isFeatureEnabled())
})

View File

@@ -22,13 +22,13 @@ import (
"time"
"k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"k8s.io/klog/v2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/metrics"
+"k8s.io/utils/clock"
)
// GenericPLEG is an extremely simple generic PLEG that relies solely on

View File

@@ -28,12 +28,13 @@ import (
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/component-base/metrics/testutil"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/metrics"
+"k8s.io/utils/clock"
+testingclock "k8s.io/utils/clock/testing"
)
const (
@@ -45,7 +46,7 @@ const (
type TestGenericPLEG struct {
pleg *GenericPLEG
runtime *containertest.FakeRuntime
-clock *clock.FakeClock
+clock *testingclock.FakeClock
}
func newTestGenericPLEG() *TestGenericPLEG {
@@ -54,7 +55,7 @@ func newTestGenericPLEG() *TestGenericPLEG {
func newTestGenericPLEGWithChannelSize(eventChannelCap int) *TestGenericPLEG {
fakeRuntime := &containertest.FakeRuntime{}
-clock := clock.NewFakeClock(time.Time{})
+clock := testingclock.NewFakeClock(time.Time{})
// The channel capacity should be large enough to hold all events in a
// single test.
pleg := &GenericPLEG{

View File

@@ -27,13 +27,13 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/queue"
+"k8s.io/utils/clock"
)
// fakePodWorkers runs sync pod function in serial, so we can have

View File

@@ -22,7 +22,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
"k8s.io/component-base/metrics"
@@ -30,6 +29,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/status"
+"k8s.io/utils/clock"
)
// ProberResults stores the cumulative number of a probe by result as prometheus metrics.

View File

@@ -28,7 +28,6 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
utiltesting "k8s.io/client-go/util/testing"
@@ -48,6 +47,7 @@ import (
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
+"k8s.io/utils/clock"
)
func TestRunOnce(t *testing.T) {

View File

@@ -29,9 +29,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
+"k8s.io/utils/clock"
)
// Manager manages Kubernetes secrets. This includes retrieving

View File

@@ -27,11 +27,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
-"k8s.io/apimachinery/pkg/util/clock"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/kubelet/util/manager"
+"k8s.io/utils/clock"
)
func checkObject(t *testing.T, store manager.Store, ns, name string, shouldExist bool) {

View File

@@ -30,10 +30,10 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
+"k8s.io/utils/clock"
)
const (

View File

@@ -24,12 +24,12 @@ import (
authenticationv1 "k8s.io/api/authentication/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
+testingclock "k8s.io/utils/clock/testing"
)
func TestTokenCachingAndExpiration(t *testing.T) {
type suite struct {
-clock *clock.FakeClock
+clock *testingclock.FakeClock
tg *fakeTokenGetter
mgr *Manager
}
@@ -87,7 +87,7 @@ func TestTokenCachingAndExpiration(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
-clock := clock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
+clock := testingclock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
expSecs := int64(c.exp.Seconds())
s := &suite{
clock: clock,
@@ -165,7 +165,7 @@ func TestRequiresRefresh(t *testing.T) {
for i, c := range cases {
t.Run(fmt.Sprint(i), func(t *testing.T) {
-clock := clock.NewFakeClock(c.now)
+clock := testingclock.NewFakeClock(c.now)
secs := int64(c.exp.Sub(start).Seconds())
tr := &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
@@ -335,7 +335,7 @@ func TestDeleteServiceAccountToken(t *testing.T) {
},
}
testMgr := NewManager(nil)
-testMgr.clock = clock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
+testMgr.clock = testingclock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
successGetToken := func(_, _ string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
tr.Status = authenticationv1.TokenRequestStatus{
@@ -404,7 +404,7 @@ func TestCleanup(t *testing.T) {
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
-clock := clock.NewFakeClock(time.Time{}.Add(24 * time.Hour))
+clock := testingclock.NewFakeClock(time.Time{}.Add(24 * time.Hour))
mgr := NewManager(nil)
mgr.clock = clock
@@ -568,7 +568,7 @@ func TestKeyFunc(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
mgr := NewManager(nil)
-mgr.clock = clock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
+mgr.clock = testingclock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
for _, tru := range c.trus {
mgr.set(getKeyFunc(tru), &authenticationv1.TokenRequest{
Status: authenticationv1.TokenRequestStatus{

View File

@@ -29,8 +29,8 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
+"k8s.io/utils/clock"
)
// GetObjectTTLFunc defines a function to get value of TTL.

View File

@@ -30,13 +30,14 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+"k8s.io/utils/clock"
+testingclock "k8s.io/utils/clock/testing"
"github.com/stretchr/testify/assert"
)
@@ -149,7 +150,7 @@ func TestSecretStoreDeletingSecret(t *testing.T) {
func TestSecretStoreGetAlwaysRefresh(t *testing.T) {
fakeClient := &fake.Clientset{}
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretStore(fakeClient, fakeClock, noObjectTTL, 0)
for i := 0; i < 10; i++ {
@@ -176,7 +177,7 @@ func TestSecretStoreGetAlwaysRefresh(t *testing.T) {
func TestSecretStoreGetNeverRefresh(t *testing.T) {
fakeClient := &fake.Clientset{}
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretStore(fakeClient, fakeClock, noObjectTTL, time.Minute)
for i := 0; i < 10; i++ {
@@ -206,7 +207,7 @@ func TestCustomTTL(t *testing.T) {
}
fakeClient := &fake.Clientset{}
-fakeClock := clock.NewFakeClock(time.Time{})
+fakeClock := testingclock.NewFakeClock(time.Time{})
store := newSecretStore(fakeClient, fakeClock, customTTL, time.Minute)
store.AddReference("ns", "name")
@@ -377,7 +378,7 @@ func podWithSecrets(ns, podName string, toAttach secretsToAttach) *v1.Pod {
func TestCacheInvalidation(t *testing.T) {
fakeClient := &fake.Clientset{}
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretStore(fakeClient, fakeClock, noObjectTTL, time.Minute)
manager := newCacheBasedSecretManager(store)
@@ -432,7 +433,7 @@ func TestCacheInvalidation(t *testing.T) {
func TestRegisterIdempotence(t *testing.T) {
fakeClient := &fake.Clientset{}
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretStore(fakeClient, fakeClock, noObjectTTL, time.Minute)
manager := newCacheBasedSecretManager(store)
@@ -467,7 +468,7 @@ func TestRegisterIdempotence(t *testing.T) {
func TestCacheRefcounts(t *testing.T) {
fakeClient := &fake.Clientset{}
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretStore(fakeClient, fakeClock, noObjectTTL, time.Minute)
manager := newCacheBasedSecretManager(store)

View File

@@ -31,10 +31,10 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
-"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
+"k8s.io/utils/clock"
)
type listObjectFunc func(string, metav1.ListOptions) (runtime.Object, error)

View File

@ -28,7 +28,6 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch" "k8s.io/apimachinery/pkg/watch"
@ -38,6 +37,9 @@ import (
corev1 "k8s.io/kubernetes/pkg/apis/core/v1" corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
"k8s.io/utils/clock"
testingclock "k8s.io/utils/clock/testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@ -88,7 +90,7 @@ func TestSecretCache(t *testing.T) {
fakeWatch := watch.NewFake() fakeWatch := watch.NewFake()
fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil)) fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil))
fakeClock := clock.NewFakeClock(time.Now()) fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretCache(fakeClient, fakeClock, time.Minute) store := newSecretCache(fakeClient, fakeClock, time.Minute)
store.AddReference("ns", "name") store.AddReference("ns", "name")
@ -158,7 +160,7 @@ func TestSecretCacheMultipleRegistrations(t *testing.T) {
fakeWatch := watch.NewFake() fakeWatch := watch.NewFake()
fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil)) fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil))
fakeClock := clock.NewFakeClock(time.Now()) fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretCache(fakeClient, fakeClock, time.Minute) store := newSecretCache(fakeClient, fakeClock, time.Minute)
store.AddReference("ns", "name") store.AddReference("ns", "name")
@ -264,7 +266,7 @@ func TestImmutableSecretStopsTheReflector(t *testing.T) {
fakeWatch := watch.NewFake() fakeWatch := watch.NewFake()
fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil)) fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil))
fakeClock := clock.NewFakeClock(time.Now()) fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretCache(fakeClient, fakeClock, time.Minute) store := newSecretCache(fakeClient, fakeClock, time.Minute)
key := objectKey{namespace: "ns", name: "name"} key := objectKey{namespace: "ns", name: "name"}
@ -351,7 +353,7 @@ func TestMaxIdleTimeStopsTheReflector(t *testing.T) {
fakeClient.AddReactor("list", "secrets", listReactor) fakeClient.AddReactor("list", "secrets", listReactor)
fakeWatch := watch.NewFake() fakeWatch := watch.NewFake()
fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil)) fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil))
fakeClock := clock.NewFakeClock(time.Now()) fakeClock := testingclock.NewFakeClock(time.Now())
store := newSecretCache(fakeClient, fakeClock, time.Minute) store := newSecretCache(fakeClient, fakeClock, time.Minute)
key := objectKey{namespace: "ns", name: "name"} key := objectKey{namespace: "ns", name: "name"}
@ -415,7 +417,7 @@ func TestReflectorNotStopedOnSlowInitialization(t *testing.T) {
}, },
} }
fakeClock := clock.NewFakeClock(time.Now()) fakeClock := testingclock.NewFakeClock(time.Now())
fakeClient := &fake.Clientset{} fakeClient := &fake.Clientset{}
listReactor := func(a core.Action) (bool, runtime.Object, error) { listReactor := func(a core.Action) (bool, runtime.Object, error) {

View File

@ -21,7 +21,7 @@ import (
"time" "time"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock" "k8s.io/utils/clock"
) )
// WorkQueue allows queuing items with a timestamp. An item is // WorkQueue allows queuing items with a timestamp. An item is

View File

@ -21,12 +21,12 @@ import (
"time" "time"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
testingclock "k8s.io/utils/clock/testing"
) )
func newTestBasicWorkQueue() (*basicWorkQueue, *clock.FakeClock) { func newTestBasicWorkQueue() (*basicWorkQueue, *testingclock.FakeClock) {
fakeClock := clock.NewFakeClock(time.Now()) fakeClock := testingclock.NewFakeClock(time.Now())
wq := &basicWorkQueue{ wq := &basicWorkQueue{
clock: fakeClock, clock: fakeClock,
queue: make(map[types.UID]time.Time), queue: make(map[types.UID]time.Time),

View File

@ -26,7 +26,6 @@ import (
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/admission"
quota "k8s.io/apiserver/pkg/quota/v1" quota "k8s.io/apiserver/pkg/quota/v1"
@ -37,6 +36,7 @@ import (
"k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
"k8s.io/utils/clock"
) )
// the name used for object count quota // the name used for object count quota

View File

@ -26,7 +26,6 @@ import (
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/clock"
quota "k8s.io/apiserver/pkg/quota/v1" quota "k8s.io/apiserver/pkg/quota/v1"
"k8s.io/apiserver/pkg/quota/v1/generic" "k8s.io/apiserver/pkg/quota/v1/generic"
"k8s.io/apiserver/pkg/util/feature" "k8s.io/apiserver/pkg/util/feature"
@ -34,6 +33,8 @@ import (
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/util/node"
"k8s.io/utils/clock"
testingclock "k8s.io/utils/clock/testing"
) )
func TestPodConstraintsFunc(t *testing.T) { func TestPodConstraintsFunc(t *testing.T) {
@ -84,7 +85,7 @@ func TestPodConstraintsFunc(t *testing.T) {
} }
func TestPodEvaluatorUsage(t *testing.T) { func TestPodEvaluatorUsage(t *testing.T) {
fakeClock := clock.NewFakeClock(time.Now()) fakeClock := testingclock.NewFakeClock(time.Now())
evaluator := NewPodEvaluator(nil, fakeClock) evaluator := NewPodEvaluator(nil, fakeClock)
// fields use to simulate a pod undergoing termination // fields use to simulate a pod undergoing termination
@ -513,7 +514,7 @@ func TestPodEvaluatorUsage(t *testing.T) {
} }
func TestPodEvaluatorMatchingScopes(t *testing.T) { func TestPodEvaluatorMatchingScopes(t *testing.T) {
fakeClock := clock.NewFakeClock(time.Now()) fakeClock := testingclock.NewFakeClock(time.Now())
evaluator := NewPodEvaluator(nil, fakeClock) evaluator := NewPodEvaluator(nil, fakeClock)
activeDeadlineSeconds := int64(30) activeDeadlineSeconds := int64(30)
testCases := map[string]struct { testCases := map[string]struct {

View File

@ -19,9 +19,9 @@ package core
import ( import (
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/clock"
quota "k8s.io/apiserver/pkg/quota/v1" quota "k8s.io/apiserver/pkg/quota/v1"
"k8s.io/apiserver/pkg/quota/v1/generic" "k8s.io/apiserver/pkg/quota/v1/generic"
"k8s.io/utils/clock"
) )
// legacyObjectCountAliases are what we used to do simple object counting quota with mapped to alias // legacyObjectCountAliases are what we used to do simple object counting quota with mapped to alias

View File

@ -54,6 +54,7 @@ import (
restclientwatch "k8s.io/client-go/rest/watch" restclientwatch "k8s.io/client-go/rest/watch"
"k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/flowcontrol"
utiltesting "k8s.io/client-go/util/testing" utiltesting "k8s.io/client-go/util/testing"
testingclock "k8s.io/utils/clock/testing"
) )
func TestNewRequestSetsAccept(t *testing.T) { func TestNewRequestSetsAccept(t *testing.T) {
@ -1563,7 +1564,7 @@ func TestBackoffLifecycle(t *testing.T) {
// which are used in the server implementation returning StatusOK above. // which are used in the server implementation returning StatusOK above.
seconds := []int{0, 1, 2, 4, 8, 0, 1, 2, 4, 0} seconds := []int{0, 1, 2, 4, 8, 0, 1, 2, 4, 0}
request := c.Verb("POST").Prefix("backofftest").Suffix("abc") request := c.Verb("POST").Prefix("backofftest").Suffix("abc")
clock := clock.FakeClock{} clock := testingclock.FakeClock{}
request.backoff = &URLBackoff{ request.backoff = &URLBackoff{
// Use a fake backoff here to avoid flakes and speed the test up. // Use a fake backoff here to avoid flakes and speed the test up.
Backoff: flowcontrol.NewFakeBackOff( Backoff: flowcontrol.NewFakeBackOff(

View File

@ -20,7 +20,8 @@ import (
"sync" "sync"
"time" "time"
"k8s.io/apimachinery/pkg/util/clock" "k8s.io/utils/clock"
testingclock "k8s.io/utils/clock/testing"
"k8s.io/utils/integer" "k8s.io/utils/integer"
) )
@ -37,7 +38,7 @@ type Backoff struct {
perItemBackoff map[string]*backoffEntry perItemBackoff map[string]*backoffEntry
} }
func NewFakeBackOff(initial, max time.Duration, tc *clock.FakeClock) *Backoff { func NewFakeBackOff(initial, max time.Duration, tc *testingclock.FakeClock) *Backoff {
return &Backoff{ return &Backoff{
perItemBackoff: map[string]*backoffEntry{}, perItemBackoff: map[string]*backoffEntry{},
Clock: tc, Clock: tc,

View File

@ -20,12 +20,12 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/apimachinery/pkg/util/clock" testingclock "k8s.io/utils/clock/testing"
) )
func TestSlowBackoff(t *testing.T) { func TestSlowBackoff(t *testing.T) {
id := "_idSlow" id := "_idSlow"
tc := clock.NewFakeClock(time.Now()) tc := testingclock.NewFakeClock(time.Now())
step := time.Second step := time.Second
maxDuration := 50 * step maxDuration := 50 * step
@ -51,7 +51,7 @@ func TestSlowBackoff(t *testing.T) {
func TestBackoffReset(t *testing.T) { func TestBackoffReset(t *testing.T) {
id := "_idReset" id := "_idReset"
tc := clock.NewFakeClock(time.Now()) tc := testingclock.NewFakeClock(time.Now())
step := time.Second step := time.Second
maxDuration := step * 5 maxDuration := step * 5
b := NewFakeBackOff(step, maxDuration, tc) b := NewFakeBackOff(step, maxDuration, tc)
@ -77,7 +77,7 @@ func TestBackoffReset(t *testing.T) {
func TestBackoffHighWaterMark(t *testing.T) { func TestBackoffHighWaterMark(t *testing.T) {
id := "_idHiWaterMark" id := "_idHiWaterMark"
tc := clock.NewFakeClock(time.Now()) tc := testingclock.NewFakeClock(time.Now())
step := time.Second step := time.Second
maxDuration := 5 * step maxDuration := 5 * step
b := NewFakeBackOff(step, maxDuration, tc) b := NewFakeBackOff(step, maxDuration, tc)
@ -99,7 +99,7 @@ func TestBackoffHighWaterMark(t *testing.T) {
func TestBackoffGC(t *testing.T) { func TestBackoffGC(t *testing.T) {
id := "_idGC" id := "_idGC"
tc := clock.NewFakeClock(time.Now()) tc := testingclock.NewFakeClock(time.Now())
step := time.Second step := time.Second
maxDuration := 5 * step maxDuration := 5 * step
@ -127,7 +127,7 @@ func TestBackoffGC(t *testing.T) {
func TestIsInBackOffSinceUpdate(t *testing.T) { func TestIsInBackOffSinceUpdate(t *testing.T) {
id := "_idIsInBackOffSinceUpdate" id := "_idIsInBackOffSinceUpdate"
tc := clock.NewFakeClock(time.Now()) tc := testingclock.NewFakeClock(time.Now())
step := time.Second step := time.Second
maxDuration := 10 * step maxDuration := 10 * step
b := NewFakeBackOff(step, maxDuration, tc) b := NewFakeBackOff(step, maxDuration, tc)
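These backoff hunks change only the fake clock's type, but the way the fake backoff is exercised is worth spelling out. The following sketch assumes the post-migration signature shown above (NewFakeBackOff taking a *testingclock.FakeClock); the item id and the durations are illustrative.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
	testingclock "k8s.io/utils/clock/testing"
)

func main() {
	tc := testingclock.NewFakeClock(time.Now())
	step := time.Second
	// The fake backoff is driven entirely by the injected fake clock.
	b := flowcontrol.NewFakeBackOff(step, 8*step, tc)

	id := "example-item"
	for i := 0; i < 3; i++ {
		tc.Step(step)          // advance fake time
		b.Next(id, tc.Now())   // record a failure; the per-item delay doubles
		fmt.Println(b.Get(id)) // 1s, 2s, 4s
	}
}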

View File

@ -21,8 +21,8 @@ import (
"sync" "sync"
"time" "time"
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/utils/clock"
) )
// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to // DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
@ -51,11 +51,11 @@ func NewNamedDelayingQueue(name string) DelayingInterface {
// NewDelayingQueueWithCustomClock constructs a new named workqueue // NewDelayingQueueWithCustomClock constructs a new named workqueue
// with ability to inject real or fake clock for testing purposes // with ability to inject real or fake clock for testing purposes
func NewDelayingQueueWithCustomClock(clock clock.Clock, name string) DelayingInterface { func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) DelayingInterface {
return newDelayingQueue(clock, NewNamed(name), name) return newDelayingQueue(clock, NewNamed(name), name)
} }
func newDelayingQueue(clock clock.Clock, q Interface, name string) *delayingType { func newDelayingQueue(clock clock.WithTicker, q Interface, name string) *delayingType {
ret := &delayingType{ ret := &delayingType{
Interface: q, Interface: q,
clock: clock, clock: clock,
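The signature change above tightens the constructor's requirement from clock.Clock to clock.WithTicker, i.e. the injected clock must also provide NewTicker. A minimal usage sketch, assuming the post-migration constructor (the queue name "example" and the delays are arbitrary): a *testingclock.FakeClock satisfies clock.WithTicker, so a test can inject it and make a delayed item ready by stepping fake time instead of sleeping.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
	testingclock "k8s.io/utils/clock/testing"
)

func main() {
	fakeClock := testingclock.NewFakeClock(time.Now())
	// *testingclock.FakeClock implements clock.WithTicker, so it can be injected here.
	q := workqueue.NewDelayingQueueWithCustomClock(fakeClock, "example")
	defer q.ShutDown()

	q.AddAfter("foo", 50*time.Millisecond) // not ready yet
	fakeClock.Step(time.Second)            // advance fake time past the delay

	item, _ := q.Get() // blocks until the delayed item becomes ready
	fmt.Println(item)  // foo
	q.Done(item)
}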

View File

@ -23,12 +23,12 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
testingclock "k8s.io/utils/clock/testing"
) )
func TestSimpleQueue(t *testing.T) { func TestSimpleQueue(t *testing.T) {
fakeClock := clock.NewFakeClock(time.Now()) fakeClock := testingclock.NewFakeClock(time.Now())
q := NewDelayingQueueWithCustomClock(fakeClock, "") q := NewDelayingQueueWithCustomClock(fakeClock, "")
first := "foo" first := "foo"
@ -70,7 +70,7 @@ func TestSimpleQueue(t *testing.T) {
} }
func TestDeduping(t *testing.T) { func TestDeduping(t *testing.T) {
fakeClock := clock.NewFakeClock(time.Now()) fakeClock := testingclock.NewFakeClock(time.Now())
q := NewDelayingQueueWithCustomClock(fakeClock, "") q := NewDelayingQueueWithCustomClock(fakeClock, "")
first := "foo" first := "foo"
@ -129,7 +129,7 @@ func TestDeduping(t *testing.T) {
} }
func TestAddTwoFireEarly(t *testing.T) { func TestAddTwoFireEarly(t *testing.T) {
fakeClock := clock.NewFakeClock(time.Now()) fakeClock := testingclock.NewFakeClock(time.Now())
q := NewDelayingQueueWithCustomClock(fakeClock, "") q := NewDelayingQueueWithCustomClock(fakeClock, "")
first := "foo" first := "foo"
@ -178,7 +178,7 @@ func TestAddTwoFireEarly(t *testing.T) {
} }
func TestCopyShifting(t *testing.T) { func TestCopyShifting(t *testing.T) {
fakeClock := clock.NewFakeClock(time.Now()) fakeClock := testingclock.NewFakeClock(time.Now())
q := NewDelayingQueueWithCustomClock(fakeClock, "") q := NewDelayingQueueWithCustomClock(fakeClock, "")
first := "foo" first := "foo"
@ -216,7 +216,7 @@ func TestCopyShifting(t *testing.T) {
} }
func BenchmarkDelayingQueue_AddAfter(b *testing.B) { func BenchmarkDelayingQueue_AddAfter(b *testing.B) {
fakeClock := clock.NewFakeClock(time.Now()) fakeClock := testingclock.NewFakeClock(time.Now())
q := NewDelayingQueueWithCustomClock(fakeClock, "") q := NewDelayingQueueWithCustomClock(fakeClock, "")
// Add items // Add items

View File

@ -20,7 +20,7 @@ import (
"sync" "sync"
"time" "time"
"k8s.io/apimachinery/pkg/util/clock" "k8s.io/utils/clock"
) )
// This file provides abstractions for setting the provider (e.g., prometheus) // This file provides abstractions for setting the provider (e.g., prometheus)

View File

@ -21,7 +21,7 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/apimachinery/pkg/util/clock" testingclock "k8s.io/utils/clock/testing"
) )
type testMetrics struct { type testMetrics struct {
@ -40,7 +40,7 @@ func TestMetricShutdown(t *testing.T) {
m := &testMetrics{ m := &testMetrics{
updateCalled: ch, updateCalled: ch,
} }
c := clock.NewFakeClock(time.Now()) c := testingclock.NewFakeClock(time.Now())
q := newQueue(c, m, time.Millisecond) q := newQueue(c, m, time.Millisecond)
for !c.HasWaiters() { for !c.HasWaiters() {
// Wait for the go routine to call NewTicker() // Wait for the go routine to call NewTicker()
@ -170,7 +170,7 @@ func (m *testMetricsProvider) NewRetriesMetric(name string) CounterMetric {
func TestMetrics(t *testing.T) { func TestMetrics(t *testing.T) {
mp := testMetricsProvider{} mp := testMetricsProvider{}
t0 := time.Unix(0, 0) t0 := time.Unix(0, 0)
c := clock.NewFakeClock(t0) c := testingclock.NewFakeClock(t0)
mf := queueMetricsFactory{metricsProvider: &mp} mf := queueMetricsFactory{metricsProvider: &mp}
m := mf.newQueueMetrics("test", c) m := mf.newQueueMetrics("test", c)
q := newQueue(c, m, time.Millisecond) q := newQueue(c, m, time.Millisecond)
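The loop above ("Wait for the go routine to call NewTicker()") is a synchronization idiom the fake clock makes possible: FakeClock.HasWaiters reports whether anything has registered a timer or ticker, so the test only steps time once the background goroutine is actually waiting. A self-contained sketch of the same idiom (the goroutine and durations are illustrative, not taken from this PR):

package main

import (
	"fmt"
	"time"

	testingclock "k8s.io/utils/clock/testing"
)

func main() {
	c := testingclock.NewFakeClock(time.Now())
	done := make(chan struct{})

	go func() {
		t := c.NewTicker(time.Second) // registers a waiter on the fake clock
		defer t.Stop()
		<-t.C()
		close(done)
	}()

	for !c.HasWaiters() {
		// Stepping before the goroutine calls NewTicker would fire nothing,
		// so spin until the waiter is registered.
		time.Sleep(time.Millisecond)
	}
	c.Step(2 * time.Second) // past the 1s tick interval: the fake ticker fires
	<-done
	fmt.Println("ticker fired at fake time", c.Now())
}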

View File

@ -20,7 +20,7 @@ import (
"sync" "sync"
"time" "time"
"k8s.io/apimachinery/pkg/util/clock" "k8s.io/utils/clock"
) )
type Interface interface { type Interface interface {
@ -47,7 +47,7 @@ func NewNamed(name string) *Type {
) )
} }
func newQueue(c clock.Clock, metrics queueMetrics, updatePeriod time.Duration) *Type { func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Duration) *Type {
t := &Type{ t := &Type{
clock: c, clock: c,
dirty: set{}, dirty: set{},
@ -92,7 +92,7 @@ type Type struct {
metrics queueMetrics metrics queueMetrics
unfinishedWorkUpdatePeriod time.Duration unfinishedWorkUpdatePeriod time.Duration
clock clock.Clock clock clock.WithTicker
} }
type empty struct{} type empty struct{}
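The queue's field and newQueue now require clock.WithTicker instead of clock.Clock, i.e. the injected clock must also provide NewTicker. A tiny compile-time sketch (not from this PR) of why that is safe for both the default path and the tests: the real clock and the fake clock both provide NewTicker.

package main

import (
	"k8s.io/utils/clock"
	testingclock "k8s.io/utils/clock/testing"
)

// Both implementations satisfy the narrower interface, so nothing that
// constructs the queue with either clock needs to change.
var (
	_ clock.WithTicker = clock.RealClock{}
	_ clock.WithTicker = &testingclock.FakeClock{}
)

func main() {}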

View File

@ -20,13 +20,13 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/apimachinery/pkg/util/clock" testingclock "k8s.io/utils/clock/testing"
) )
func TestRateLimitingQueue(t *testing.T) { func TestRateLimitingQueue(t *testing.T) {
limiter := NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second) limiter := NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second)
queue := NewRateLimitingQueue(limiter).(*rateLimitingType) queue := NewRateLimitingQueue(limiter).(*rateLimitingType)
fakeClock := clock.NewFakeClock(time.Now()) fakeClock := testingclock.NewFakeClock(time.Now())
delayingQueue := &delayingType{ delayingQueue := &delayingType{
Interface: New(), Interface: New(),
clock: fakeClock, clock: fakeClock,

View File

@ -24,10 +24,10 @@ import (
coordinationv1 "k8s.io/api/coordination/v1" coordinationv1 "k8s.io/api/coordination/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
coordclientset "k8s.io/client-go/kubernetes/typed/coordination/v1" coordclientset "k8s.io/client-go/kubernetes/typed/coordination/v1"
"k8s.io/utils/clock"
"k8s.io/utils/pointer" "k8s.io/utils/pointer"
"k8s.io/klog/v2" "k8s.io/klog/v2"

View File

@ -31,18 +31,18 @@ import (
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/diff" "k8s.io/apimachinery/pkg/util/diff"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
clienttesting "k8s.io/client-go/testing" clienttesting "k8s.io/client-go/testing"
testingclock "k8s.io/utils/clock/testing"
"k8s.io/utils/pointer" "k8s.io/utils/pointer"
"k8s.io/klog/v2" "k8s.io/klog/v2"
) )
func TestNewNodeLease(t *testing.T) { func TestNewNodeLease(t *testing.T) {
fakeClock := clock.NewFakeClock(time.Now()) fakeClock := testingclock.NewFakeClock(time.Now())
node := &corev1.Node{ node := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "foo", Name: "foo",
@ -276,7 +276,7 @@ func TestRetryUpdateNodeLease(t *testing.T) {
cl.PrependReactor("get", "leases", tc.getReactor) cl.PrependReactor("get", "leases", tc.getReactor)
} }
c := &controller{ c := &controller{
clock: clock.NewFakeClock(time.Now()), clock: testingclock.NewFakeClock(time.Now()),
client: cl, client: cl,
leaseClient: cl.CoordinationV1().Leases(corev1.NamespaceNodeLease), leaseClient: cl.CoordinationV1().Leases(corev1.NamespaceNodeLease),
holderIdentity: node.Name, holderIdentity: node.Name,
@ -414,7 +414,7 @@ func TestUpdateUsingLatestLease(t *testing.T) {
cl.PrependReactor("create", "leases", tc.createReactor) cl.PrependReactor("create", "leases", tc.createReactor)
} }
c := &controller{ c := &controller{
clock: clock.NewFakeClock(time.Now()), clock: testingclock.NewFakeClock(time.Now()),
client: cl, client: cl,
leaseClient: cl.CoordinationV1().Leases(corev1.NamespaceNodeLease), leaseClient: cl.CoordinationV1().Leases(corev1.NamespaceNodeLease),
holderIdentity: node.Name, holderIdentity: node.Name,

View File

@ -27,13 +27,13 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch" "k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
"k8s.io/kubernetes/pkg/kubelet/util/manager" "k8s.io/kubernetes/pkg/kubelet/util/manager"
"k8s.io/kubernetes/test/integration/framework" "k8s.io/kubernetes/test/integration/framework"
testingclock "k8s.io/utils/clock/testing"
) )
func TestWatchBasedManager(t *testing.T) { func TestWatchBasedManager(t *testing.T) {
@ -62,7 +62,7 @@ func TestWatchBasedManager(t *testing.T) {
// We want all watches to be up and running to stress test it. // We want all watches to be up and running to stress test it.
// So don't treat any secret as immutable here. // So don't treat any secret as immutable here.
isImmutable := func(_ runtime.Object) bool { return false } isImmutable := func(_ runtime.Object) bool { return false }
fakeClock := clock.NewFakeClock(time.Now()) fakeClock := testingclock.NewFakeClock(time.Now())
store := manager.NewObjectCache(listObj, watchObj, newObj, isImmutable, schema.GroupResource{Group: "v1", Resource: "secrets"}, fakeClock, time.Minute) store := manager.NewObjectCache(listObj, watchObj, newObj, isImmutable, schema.GroupResource{Group: "v1", Resource: "secrets"}, fakeClock, time.Minute)
// create 1000 secrets in parallel // create 1000 secrets in parallel