From d9b08c611d3deba9f6d1ceb84ae313ee2d490571 Mon Sep 17 00:00:00 2001
From: wojtekt
Date: Fri, 17 Sep 2021 11:48:22 +0200
Subject: [PATCH] Migrate to k8s.io/utils/clock

---
 pkg/controller/controller_utils_test.go      |  6 ++--
 pkg/credentialprovider/plugin/plugin.go      |  2 +-
 pkg/credentialprovider/plugin/plugin_test.go |  5 +--
 pkg/kubelet/util/cache/object_cache_test.go  |  7 ++--
 pkg/proxy/healthcheck/healthcheck_test.go    |  4 +--
 pkg/proxy/healthcheck/proxier_health.go      |  2 +-
 pkg/scheduler/factory_test.go                |  8 ++---
 .../internal/queue/scheduling_queue_test.go  | 36 +++++++++----------
 pkg/volume/csi/csi_attacher.go               |  2 +-
 .../eventratelimit/admission_test.go         |  4 +--
 .../clientbuilder/client_builder_dynamic.go  |  2 +-
 11 files changed, 40 insertions(+), 38 deletions(-)

diff --git a/pkg/controller/controller_utils_test.go b/pkg/controller/controller_utils_test.go
index 91afda38d86..e80668e46a4 100644
--- a/pkg/controller/controller_utils_test.go
+++ b/pkg/controller/controller_utils_test.go
@@ -35,7 +35,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -52,14 +51,15 @@ import (
 	"k8s.io/kubernetes/pkg/controller/testutil"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/securitycontext"
+	testingclock "k8s.io/utils/clock/testing"
 
 	"github.com/stretchr/testify/assert"
 )
 
 // NewFakeControllerExpectationsLookup creates a fake store for PodExpectations.
-func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectations, *clock.FakeClock) {
+func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectations, *testingclock.FakeClock) {
 	fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
-	fakeClock := clock.NewFakeClock(fakeTime)
+	fakeClock := testingclock.NewFakeClock(fakeTime)
 	ttlPolicy := &cache.TTLPolicy{TTL: ttl, Clock: fakeClock}
 	ttlStore := cache.NewFakeExpirationStore(
 		ExpKeyFunc, nil, ttlPolicy, fakeClock)
diff --git a/pkg/credentialprovider/plugin/plugin.go b/pkg/credentialprovider/plugin/plugin.go
index 57cb52590c7..69d0f189dcb 100644
--- a/pkg/credentialprovider/plugin/plugin.go
+++ b/pkg/credentialprovider/plugin/plugin.go
@@ -34,7 +34,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/apimachinery/pkg/runtime/serializer/json"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/klog/v2"
 	credentialproviderapi "k8s.io/kubelet/pkg/apis/credentialprovider"
@@ -43,6 +42,7 @@ import (
 	"k8s.io/kubernetes/pkg/credentialprovider"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1alpha1"
+	"k8s.io/utils/clock"
 )
 
 const (
diff --git a/pkg/credentialprovider/plugin/plugin_test.go b/pkg/credentialprovider/plugin/plugin_test.go
index 0990bd51273..db177c39a4b 100644
--- a/pkg/credentialprovider/plugin/plugin_test.go
+++ b/pkg/credentialprovider/plugin/plugin_test.go
@@ -28,13 +28,14 @@ import (
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/client-go/tools/cache"
 	credentialproviderapi "k8s.io/kubelet/pkg/apis/credentialprovider"
 	credentialproviderv1alpha1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1alpha1"
 	"k8s.io/kubernetes/pkg/credentialprovider"
+	"k8s.io/utils/clock"
+	testingclock "k8s.io/utils/clock/testing"
 )
 
 type fakeExecPlugin struct {
@@ -305,7 +306,7 @@ func Test_ProvideParallel(t *testing.T) {
 }
 
 func Test_getCachedCredentials(t *testing.T) {
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	p := &pluginProvider{
 		clock:          fakeClock,
 		lastCachePurge: fakeClock.Now(),
diff --git a/pkg/kubelet/util/cache/object_cache_test.go b/pkg/kubelet/util/cache/object_cache_test.go
index d117b2c5585..0db8d32fb6c 100644
--- a/pkg/kubelet/util/cache/object_cache_test.go
+++ b/pkg/kubelet/util/cache/object_cache_test.go
@@ -21,8 +21,9 @@ import (
 	"testing"
 	"time"
 
-	"k8s.io/apimachinery/pkg/util/clock"
 	expirationcache "k8s.io/client-go/tools/cache"
+	"k8s.io/utils/clock"
+	testingclock "k8s.io/utils/clock/testing"
 )
 
 type testObject struct {
@@ -47,7 +48,7 @@ func TestAddAndGet(t *testing.T) {
 	}
 	objectCache := NewFakeObjectCache(func() (interface{}, error) {
 		return nil, fmt.Errorf("Unexpected Error: updater should never be called in this test")
-	}, 1*time.Hour, clock.NewFakeClock(time.Now()))
+	}, 1*time.Hour, testingclock.NewFakeClock(time.Now()))
 
 	err := objectCache.Add(testObj.key, testObj.val)
 	if err != nil {
@@ -72,7 +73,7 @@ func TestExpirationBasic(t *testing.T) {
 		val: unexpectedVal,
 	}
 
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 
 	objectCache := NewFakeObjectCache(func() (interface{}, error) {
 		return expectedVal, nil
diff --git a/pkg/proxy/healthcheck/healthcheck_test.go b/pkg/proxy/healthcheck/healthcheck_test.go
index 7609c2e2c3d..7f52c0c3eff 100644
--- a/pkg/proxy/healthcheck/healthcheck_test.go
+++ b/pkg/proxy/healthcheck/healthcheck_test.go
@@ -25,8 +25,8 @@ import (
 	"time"
 
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
+	testingclock "k8s.io/utils/clock/testing"
 
 	"github.com/davecgh/go-spew/spew"
 )
@@ -379,7 +379,7 @@ func testHandler(hcs *server, nsn types.NamespacedName, status int, endpoints in
 func TestHealthzServer(t *testing.T) {
 	listener := newFakeListener()
 	httpFactory := newFakeHTTPServerFactory()
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 
 	hs := newProxierHealthServer(listener, httpFactory, fakeClock, "127.0.0.1:10256", 10*time.Second, nil, nil)
 	server := hs.httpFactory.New(hs.addr, healthzHandler{hs: hs})
diff --git a/pkg/proxy/healthcheck/proxier_health.go b/pkg/proxy/healthcheck/proxier_health.go
index 187a117f01b..d48ad517e2b 100644
--- a/pkg/proxy/healthcheck/proxier_health.go
+++ b/pkg/proxy/healthcheck/proxier_health.go
@@ -23,10 +23,10 @@ import (
 	"time"
 
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/client-go/tools/events"
 	"k8s.io/klog/v2"
 	api "k8s.io/kubernetes/pkg/apis/core"
+	"k8s.io/utils/clock"
 )
 
 // ProxierHealthUpdater allows callers to update healthz timestamp only.
diff --git a/pkg/scheduler/factory_test.go b/pkg/scheduler/factory_test.go
index 71be9fb33b6..38760eae9ab 100644
--- a/pkg/scheduler/factory_test.go
+++ b/pkg/scheduler/factory_test.go
@@ -28,7 +28,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
@@ -51,6 +50,7 @@ import (
 	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 	"k8s.io/kubernetes/pkg/scheduler/profile"
+	testingclock "k8s.io/utils/clock/testing"
 )
 
 const (
@@ -492,7 +492,7 @@ func TestDefaultErrorFunc(t *testing.T) {
 
 			// Need to add/update/delete testPod to the store.
 			podInformer.Informer().GetStore().Add(testPod)
-			queue := internalqueue.NewPriorityQueue(nil, informerFactory, internalqueue.WithClock(clock.NewFakeClock(time.Now())))
+			queue := internalqueue.NewPriorityQueue(nil, informerFactory, internalqueue.WithClock(testingclock.NewFakeClock(time.Now())))
 			schedulerCache := internalcache.New(30*time.Second, stopCh)
 
 			queue.Add(testPod)
@@ -566,7 +566,7 @@ func TestDefaultErrorFunc_NodeNotFound(t *testing.T) {
 
 			// Need to add testPod to the store.
 			podInformer.Informer().GetStore().Add(testPod)
-			queue := internalqueue.NewPriorityQueue(nil, informerFactory, internalqueue.WithClock(clock.NewFakeClock(time.Now())))
+			queue := internalqueue.NewPriorityQueue(nil, informerFactory, internalqueue.WithClock(testingclock.NewFakeClock(time.Now())))
 			schedulerCache := internalcache.New(30*time.Second, stopCh)
 
 			for i := range tt.nodes {
@@ -607,7 +607,7 @@ func TestDefaultErrorFunc_PodAlreadyBound(t *testing.T) {
 
 	// Need to add testPod to the store.
 	podInformer.Informer().GetStore().Add(testPod)
-	queue := internalqueue.NewPriorityQueue(nil, informerFactory, internalqueue.WithClock(clock.NewFakeClock(time.Now())))
+	queue := internalqueue.NewPriorityQueue(nil, informerFactory, internalqueue.WithClock(testingclock.NewFakeClock(time.Now())))
 	schedulerCache := internalcache.New(30*time.Second, stopCh)
 
 	// Add node to schedulerCache no matter it's deleted in API server or not.
diff --git a/pkg/scheduler/internal/queue/scheduling_queue_test.go b/pkg/scheduler/internal/queue/scheduling_queue_test.go
index 79e1656fa0c..a6c61c3e5be 100644
--- a/pkg/scheduler/internal/queue/scheduling_queue_test.go
+++ b/pkg/scheduler/internal/queue/scheduling_queue_test.go
@@ -32,7 +32,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
@@ -42,6 +41,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
 	"k8s.io/kubernetes/pkg/scheduler/metrics"
 	"k8s.io/kubernetes/pkg/scheduler/util"
+	testingclock "k8s.io/utils/clock/testing"
 	"k8s.io/utils/pointer"
 )
 
@@ -223,7 +223,7 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) {
 // Pods in and before current scheduling cycle will be put back to activeQueue
 // if we were trying to schedule them when we received move request.
 func TestPriorityQueue_AddUnschedulableIfNotPresent_Backoff(t *testing.T) {
-	q := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(clock.NewFakeClock(time.Now())))
+	q := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(testingclock.NewFakeClock(time.Now())))
 	totalNum := 10
 	expectedPods := make([]v1.Pod, 0, totalNum)
 	for i := 0; i < totalNum; i++ {
@@ -307,7 +307,7 @@ func TestPriorityQueue_Pop(t *testing.T) {
 
 func TestPriorityQueue_Update(t *testing.T) {
 	objs := []runtime.Object{highPriorityPodInfo.Pod, unschedulablePodInfo.Pod, medPriorityPodInfo.Pod}
-	c := clock.NewFakeClock(time.Now())
+	c := testingclock.NewFakeClock(time.Now())
 	q := NewTestQueueWithObjects(context.Background(), newDefaultQueueSort(), objs, WithClock(c))
 	q.Update(nil, highPriorityPodInfo.Pod)
 	if _, exists, _ := q.activeQ.Get(newQueuedPodInfoForLookup(highPriorityPodInfo.Pod)); !exists {
@@ -468,7 +468,7 @@ func BenchmarkMoveAllToActiveOrBackoffQueue(b *testing.B) {
 		b.Run(fmt.Sprintf("%v-%v", tt.name, podsInUnschedulableQ), func(b *testing.B) {
 			for i := 0; i < b.N; i++ {
 				b.StopTimer()
-				c := clock.NewFakeClock(time.Now())
+				c := testingclock.NewFakeClock(time.Now())
 
 				m := make(map[framework.ClusterEvent]sets.String)
 				// - All plugins registered for events[0], which is NodeAdd.
@@ -526,7 +526,7 @@ func BenchmarkMoveAllToActiveOrBackoffQueue(b *testing.B) {
 }
 
 func TestPriorityQueue_MoveAllToActiveOrBackoffQueue(t *testing.T) {
-	c := clock.NewFakeClock(time.Now())
+	c := testingclock.NewFakeClock(time.Now())
 	m := map[framework.ClusterEvent]sets.String{
 		{Resource: framework.Node, ActionType: framework.Add}: sets.NewString("fooPlugin"),
 	}
@@ -615,7 +615,7 @@ func TestPriorityQueue_AssignedPodAdded(t *testing.T) {
 		Spec: v1.PodSpec{NodeName: "machine1"},
 	}
 
-	c := clock.NewFakeClock(time.Now())
+	c := testingclock.NewFakeClock(time.Now())
 	m := map[framework.ClusterEvent]sets.String{AssignedPodAdd: sets.NewString("fakePlugin")}
 	q := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(c), WithClusterEventMap(m))
 	q.Add(medPriorityPodInfo.Pod)
@@ -999,7 +999,7 @@ func TestSchedulingQueue_Close(t *testing.T) {
 // ensures that an unschedulable pod does not block head of the queue when there
 // are frequent events that move pods to the active queue.
 func TestRecentlyTriedPodsGoBack(t *testing.T) {
-	c := clock.NewFakeClock(time.Now())
+	c := testingclock.NewFakeClock(time.Now())
 	q := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(c))
 	// Add a few pods to priority queue.
 	for i := 0; i < 5; i++ {
@@ -1056,7 +1056,7 @@ func TestRecentlyTriedPodsGoBack(t *testing.T) {
 // This behavior ensures that an unschedulable pod does not block head of the queue when there
 // are frequent events that move pods to the active queue.
 func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) {
-	c := clock.NewFakeClock(time.Now())
+	c := testingclock.NewFakeClock(time.Now())
 	q := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(c))
 
 	// Add an unschedulable pod to a priority queue.
@@ -1211,7 +1211,7 @@ func TestHighPriorityBackoff(t *testing.T) {
 // TestHighPriorityFlushUnschedulableQLeftover tests that pods will be moved to
 // activeQ after one minutes if it is in unschedulableQ
 func TestHighPriorityFlushUnschedulableQLeftover(t *testing.T) {
-	c := clock.NewFakeClock(time.Now())
+	c := testingclock.NewFakeClock(time.Now())
 	m := map[framework.ClusterEvent]sets.String{
 		NodeAdd: sets.NewString("fakePlugin"),
 	}
@@ -1307,11 +1307,11 @@ var (
 		queue.MoveAllToActiveOrBackoffQueue(UnschedulableTimeout, nil)
 	}
 	flushBackoffQ = func(queue *PriorityQueue, _ *framework.QueuedPodInfo) {
-		queue.clock.(*clock.FakeClock).Step(2 * time.Second)
+		queue.clock.(*testingclock.FakeClock).Step(2 * time.Second)
 		queue.flushBackoffQCompleted()
 	}
 	moveClockForward = func(queue *PriorityQueue, _ *framework.QueuedPodInfo) {
-		queue.clock.(*clock.FakeClock).Step(2 * time.Second)
+		queue.clock.(*testingclock.FakeClock).Step(2 * time.Second)
 	}
 )
 
@@ -1399,7 +1399,7 @@ func TestPodTimestamp(t *testing.T) {
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			queue := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(clock.NewFakeClock(timestamp)))
+			queue := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(testingclock.NewFakeClock(timestamp)))
 			var podInfoList []*framework.QueuedPodInfo
 
 			for i, op := range test.operations {
@@ -1556,7 +1556,7 @@ scheduler_pending_pods{queue="unschedulable"} 0
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			resetMetrics()
-			queue := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(clock.NewFakeClock(timestamp)))
+			queue := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(testingclock.NewFakeClock(timestamp)))
 			for i, op := range test.operations {
 				for _, pInfo := range test.operands[i] {
 					op(queue, pInfo)
@@ -1584,7 +1584,7 @@ func TestPerPodSchedulingMetrics(t *testing.T) {
 
 	// Case 1: A pod is created and scheduled after 1 attempt. The queue operations are
 	// Add -> Pop.
-	c := clock.NewFakeClock(timestamp)
+	c := testingclock.NewFakeClock(timestamp)
 	queue := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(c))
 	queue.Add(pod)
 	pInfo, err := queue.Pop()
@@ -1595,7 +1595,7 @@ func TestPerPodSchedulingMetrics(t *testing.T) {
 
 	// Case 2: A pod is created and scheduled after 2 attempts. The queue operations are
 	// Add -> Pop -> AddUnschedulableIfNotPresent -> flushUnschedulableQLeftover -> Pop.
-	c = clock.NewFakeClock(timestamp)
+	c = testingclock.NewFakeClock(timestamp)
 	queue = NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(c))
 	queue.Add(pod)
 	pInfo, err = queue.Pop()
@@ -1615,7 +1615,7 @@ func TestPerPodSchedulingMetrics(t *testing.T) {
 
 	// Case 3: Similar to case 2, but before the second pop, call update, the queue operations are
 	// Add -> Pop -> AddUnschedulableIfNotPresent -> flushUnschedulableQLeftover -> Update -> Pop.
-	c = clock.NewFakeClock(timestamp)
+	c = testingclock.NewFakeClock(timestamp)
 	queue = NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(c))
 	queue.Add(pod)
 	pInfo, err = queue.Pop()
@@ -1714,7 +1714,7 @@ func TestIncomingPodsMetrics(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			metrics.SchedulerQueueIncomingPods.Reset()
-			queue := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(clock.NewFakeClock(timestamp)))
+			queue := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(testingclock.NewFakeClock(timestamp)))
 			for _, op := range test.operations {
 				for _, pInfo := range pInfos {
 					op(queue, pInfo)
@@ -1739,7 +1739,7 @@ func checkPerPodSchedulingMetrics(name string, t *testing.T, pInfo *framework.Qu
 }
 
 func TestBackOffFlow(t *testing.T) {
-	cl := clock.NewFakeClock(time.Now())
+	cl := testingclock.NewFakeClock(time.Now())
 	q := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(cl))
 	steps := []struct {
 		wantBackoff time.Duration
diff --git a/pkg/volume/csi/csi_attacher.go b/pkg/volume/csi/csi_attacher.go
index 233c23077e0..494e46d8eb0 100644
--- a/pkg/volume/csi/csi_attacher.go
+++ b/pkg/volume/csi/csi_attacher.go
@@ -26,7 +26,6 @@ import (
 	"strings"
 	"time"
 
-	"k8s.io/apimachinery/pkg/util/clock"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/klog/v2"
 
@@ -42,6 +41,7 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
+	"k8s.io/utils/clock"
 )
 
 const (
diff --git a/plugin/pkg/admission/eventratelimit/admission_test.go b/plugin/pkg/admission/eventratelimit/admission_test.go
index 4f8c014ff8d..20a7f594465 100644
--- a/plugin/pkg/admission/eventratelimit/admission_test.go
+++ b/plugin/pkg/admission/eventratelimit/admission_test.go
@@ -25,11 +25,11 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apiserver/pkg/admission"
 	"k8s.io/apiserver/pkg/authentication/user"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	eventratelimitapi "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit"
+	testingclock "k8s.io/utils/clock/testing"
 )
 
 const (
@@ -461,7 +461,7 @@ func TestEventRateLimiting(t *testing.T) {
 
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
-			clock := clock.NewFakeClock(time.Now())
+			clock := testingclock.NewFakeClock(time.Now())
 			config := &eventratelimitapi.Configuration{}
 			if tc.serverBurst > 0 {
 				serverLimit := eventratelimitapi.Limit{
diff --git a/staging/src/k8s.io/controller-manager/pkg/clientbuilder/client_builder_dynamic.go b/staging/src/k8s.io/controller-manager/pkg/clientbuilder/client_builder_dynamic.go
index c8ceaefecea..514bb1b8375 100644
--- a/staging/src/k8s.io/controller-manager/pkg/clientbuilder/client_builder_dynamic.go
+++ b/staging/src/k8s.io/controller-manager/pkg/clientbuilder/client_builder_dynamic.go
@@ -28,7 +28,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/wait"
 	apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
 	"k8s.io/client-go/discovery"
@@ -37,6 +36,7 @@ import (
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/transport"
 	"k8s.io/klog/v2"
+	"k8s.io/utils/clock"
 	utilpointer "k8s.io/utils/pointer"
 )