Merge pull request #105095 from wojtek-t/migrate_clock_3

Unify towards k8s.io/utils/clock - part 3
Authored by Kubernetes Prow Robot on 2021-09-20 12:46:45 -07:00; committed by GitHub
commit 353f0a5eab
21 changed files with 61 additions and 56 deletions
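
The migration pattern repeats mechanically across all 21 files: non-test code swaps the import "k8s.io/apimachinery/pkg/util/clock" for "k8s.io/utils/clock" (the package name stays `clock`, so call sites that go through the Clock interface are untouched), while test code builds fake clocks from "k8s.io/utils/clock/testing", conventionally aliased as `testingclock`. A minimal sketch of the resulting shape, with a hypothetical ttlEntry type standing in for the real call sites in this PR:

package clockmigration // hypothetical package, for illustration only

import (
	"time"

	"k8s.io/utils/clock"                      // replaces k8s.io/apimachinery/pkg/util/clock
	testingclock "k8s.io/utils/clock/testing" // fake clocks for tests
)

// ttlEntry is a hypothetical consumer: it depends only on the clock.Clock
// interface, so production code passes clock.RealClock{} and tests pass a fake.
type ttlEntry struct {
	created time.Time
	ttl     time.Duration
	clock   clock.Clock
}

func (e ttlEntry) expired() bool {
	return e.clock.Since(e.created) > e.ttl
}

// newFakeEntry shows the test-side pattern used throughout this PR:
// testingclock.NewFakeClock replaces the old clock.NewFakeClock.
func newFakeEntry(ttl time.Duration) (ttlEntry, *testingclock.FakeClock) {
	fc := testingclock.NewFakeClock(time.Now())
	return ttlEntry{created: fc.Now(), ttl: ttl, clock: fc}, fc
}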

View File

@@ -35,7 +35,6 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/runtime/schema"
-"k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/uuid"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -52,14 +51,15 @@ import (
 "k8s.io/kubernetes/pkg/controller/testutil"
 "k8s.io/kubernetes/pkg/features"
 "k8s.io/kubernetes/pkg/securitycontext"
+testingclock "k8s.io/utils/clock/testing"
 "github.com/stretchr/testify/assert"
 )
 // NewFakeControllerExpectationsLookup creates a fake store for PodExpectations.
-func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectations, *clock.FakeClock) {
+func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectations, *testingclock.FakeClock) {
 fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
-fakeClock := clock.NewFakeClock(fakeTime)
+fakeClock := testingclock.NewFakeClock(fakeTime)
 ttlPolicy := &cache.TTLPolicy{TTL: ttl, Clock: fakeClock}
 ttlStore := cache.NewFakeExpirationStore(
 ExpKeyFunc, nil, ttlPolicy, fakeClock)
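
For context on the test-side hunks that follow: *testingclock.FakeClock keeps the API the tests already relied on (Now, Since, Step, SetTime), so only constructors and type assertions change. A hedged sketch of stepping a fake clock the way the scheduler-queue tests below do (the TTL and names here are illustrative, not from this PR):

package clockmigration

import (
	"testing"
	"time"

	testingclock "k8s.io/utils/clock/testing"
)

func TestFakeClockStep(t *testing.T) {
	fc := testingclock.NewFakeClock(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC))
	created := fc.Now()
	ttl := 30 * time.Second

	// The fake clock never advances on its own, so nothing has expired yet.
	if fc.Since(created) > ttl {
		t.Fatal("entry expired before the clock was advanced")
	}

	// Step moves the fake clock forward deterministically; the priority-queue
	// tests later in this commit use the same call to step past the backoff window.
	fc.Step(time.Minute)
	if fc.Since(created) <= ttl {
		t.Fatal("entry should have expired after stepping one minute")
	}
}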

View File

@@ -34,7 +34,6 @@ import (
 "k8s.io/apimachinery/pkg/runtime/schema"
 "k8s.io/apimachinery/pkg/runtime/serializer"
 "k8s.io/apimachinery/pkg/runtime/serializer/json"
-"k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/client-go/tools/cache"
 "k8s.io/klog/v2"
 credentialproviderapi "k8s.io/kubelet/pkg/apis/credentialprovider"
@@ -43,6 +42,7 @@ import (
 "k8s.io/kubernetes/pkg/credentialprovider"
 kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1alpha1"
+"k8s.io/utils/clock"
 )
 const (

View File

@@ -28,13 +28,14 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
-"k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/apimachinery/pkg/util/rand"
 "k8s.io/client-go/tools/cache"
 credentialproviderapi "k8s.io/kubelet/pkg/apis/credentialprovider"
 credentialproviderv1alpha1 "k8s.io/kubelet/pkg/apis/credentialprovider/v1alpha1"
 "k8s.io/kubernetes/pkg/credentialprovider"
+"k8s.io/utils/clock"
+testingclock "k8s.io/utils/clock/testing"
 )
 type fakeExecPlugin struct {
@@ -305,7 +306,7 @@ func Test_ProvideParallel(t *testing.T) {
 }
 func Test_getCachedCredentials(t *testing.T) {
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
 p := &pluginProvider{
 clock: fakeClock,
 lastCachePurge: fakeClock.Now(),

View File

@@ -21,8 +21,9 @@ import (
 "testing"
 "time"
-"k8s.io/apimachinery/pkg/util/clock"
 expirationcache "k8s.io/client-go/tools/cache"
+"k8s.io/utils/clock"
+testingclock "k8s.io/utils/clock/testing"
 )
 type testObject struct {
@@ -47,7 +48,7 @@ func TestAddAndGet(t *testing.T) {
 }
 objectCache := NewFakeObjectCache(func() (interface{}, error) {
 return nil, fmt.Errorf("Unexpected Error: updater should never be called in this test")
-}, 1*time.Hour, clock.NewFakeClock(time.Now()))
+}, 1*time.Hour, testingclock.NewFakeClock(time.Now()))
 err := objectCache.Add(testObj.key, testObj.val)
 if err != nil {
@@ -72,7 +73,7 @@ func TestExpirationBasic(t *testing.T) {
 val: unexpectedVal,
 }
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
 objectCache := NewFakeObjectCache(func() (interface{}, error) {
 return expectedVal, nil

View File

@@ -25,8 +25,8 @@ import (
 "time"
 "k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/apimachinery/pkg/util/sets"
+testingclock "k8s.io/utils/clock/testing"
 "github.com/davecgh/go-spew/spew"
 )
@@ -379,7 +379,7 @@ func testHandler(hcs *server, nsn types.NamespacedName, status int, endpoints in
 func TestHealthzServer(t *testing.T) {
 listener := newFakeListener()
 httpFactory := newFakeHTTPServerFactory()
-fakeClock := clock.NewFakeClock(time.Now())
+fakeClock := testingclock.NewFakeClock(time.Now())
 hs := newProxierHealthServer(listener, httpFactory, fakeClock, "127.0.0.1:10256", 10*time.Second, nil, nil)
 server := hs.httpFactory.New(hs.addr, healthzHandler{hs: hs})

View File

@@ -23,10 +23,10 @@ import (
 "time"
 v1 "k8s.io/api/core/v1"
-"k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/client-go/tools/events"
 "k8s.io/klog/v2"
 api "k8s.io/kubernetes/pkg/apis/core"
+"k8s.io/utils/clock"
 )
 // ProxierHealthUpdater allows callers to update healthz timestamp only.

View File

@@ -28,7 +28,6 @@ import (
 v1 "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-"k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/client-go/informers"
 clientset "k8s.io/client-go/kubernetes"
@@ -51,6 +50,7 @@ import (
 internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 "k8s.io/kubernetes/pkg/scheduler/profile"
+testingclock "k8s.io/utils/clock/testing"
 )
 const (
@@ -492,7 +492,7 @@ func TestDefaultErrorFunc(t *testing.T) {
 // Need to add/update/delete testPod to the store.
 podInformer.Informer().GetStore().Add(testPod)
-queue := internalqueue.NewPriorityQueue(nil, informerFactory, internalqueue.WithClock(clock.NewFakeClock(time.Now())))
+queue := internalqueue.NewPriorityQueue(nil, informerFactory, internalqueue.WithClock(testingclock.NewFakeClock(time.Now())))
 schedulerCache := internalcache.New(30*time.Second, stopCh)
 queue.Add(testPod)
@@ -566,7 +566,7 @@ func TestDefaultErrorFunc_NodeNotFound(t *testing.T) {
 // Need to add testPod to the store.
 podInformer.Informer().GetStore().Add(testPod)
-queue := internalqueue.NewPriorityQueue(nil, informerFactory, internalqueue.WithClock(clock.NewFakeClock(time.Now())))
+queue := internalqueue.NewPriorityQueue(nil, informerFactory, internalqueue.WithClock(testingclock.NewFakeClock(time.Now())))
 schedulerCache := internalcache.New(30*time.Second, stopCh)
 for i := range tt.nodes {
@@ -607,7 +607,7 @@ func TestDefaultErrorFunc_PodAlreadyBound(t *testing.T) {
 // Need to add testPod to the store.
 podInformer.Informer().GetStore().Add(testPod)
-queue := internalqueue.NewPriorityQueue(nil, informerFactory, internalqueue.WithClock(clock.NewFakeClock(time.Now())))
+queue := internalqueue.NewPriorityQueue(nil, informerFactory, internalqueue.WithClock(testingclock.NewFakeClock(time.Now())))
 schedulerCache := internalcache.New(30*time.Second, stopCh)
 // Add node to schedulerCache no matter it's deleted in API server or not.

View File

@@ -32,7 +32,6 @@ import (
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/client-go/informers"
 "k8s.io/client-go/kubernetes/fake"
@@ -42,6 +41,7 @@ import (
 "k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
 "k8s.io/kubernetes/pkg/scheduler/metrics"
 "k8s.io/kubernetes/pkg/scheduler/util"
+testingclock "k8s.io/utils/clock/testing"
 "k8s.io/utils/pointer"
 )
@@ -223,7 +223,7 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) {
 // Pods in and before current scheduling cycle will be put back to activeQueue
 // if we were trying to schedule them when we received move request.
 func TestPriorityQueue_AddUnschedulableIfNotPresent_Backoff(t *testing.T) {
-q := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(clock.NewFakeClock(time.Now())))
+q := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(testingclock.NewFakeClock(time.Now())))
 totalNum := 10
 expectedPods := make([]v1.Pod, 0, totalNum)
 for i := 0; i < totalNum; i++ {
@@ -307,7 +307,7 @@ func TestPriorityQueue_Pop(t *testing.T) {
 func TestPriorityQueue_Update(t *testing.T) {
 objs := []runtime.Object{highPriorityPodInfo.Pod, unschedulablePodInfo.Pod, medPriorityPodInfo.Pod}
-c := clock.NewFakeClock(time.Now())
+c := testingclock.NewFakeClock(time.Now())
 q := NewTestQueueWithObjects(context.Background(), newDefaultQueueSort(), objs, WithClock(c))
 q.Update(nil, highPriorityPodInfo.Pod)
 if _, exists, _ := q.activeQ.Get(newQueuedPodInfoForLookup(highPriorityPodInfo.Pod)); !exists {
@@ -468,7 +468,7 @@ func BenchmarkMoveAllToActiveOrBackoffQueue(b *testing.B) {
 b.Run(fmt.Sprintf("%v-%v", tt.name, podsInUnschedulableQ), func(b *testing.B) {
 for i := 0; i < b.N; i++ {
 b.StopTimer()
-c := clock.NewFakeClock(time.Now())
+c := testingclock.NewFakeClock(time.Now())
 m := make(map[framework.ClusterEvent]sets.String)
 // - All plugins registered for events[0], which is NodeAdd.
@@ -526,7 +526,7 @@ func BenchmarkMoveAllToActiveOrBackoffQueue(b *testing.B) {
 }
 func TestPriorityQueue_MoveAllToActiveOrBackoffQueue(t *testing.T) {
-c := clock.NewFakeClock(time.Now())
+c := testingclock.NewFakeClock(time.Now())
 m := map[framework.ClusterEvent]sets.String{
 {Resource: framework.Node, ActionType: framework.Add}: sets.NewString("fooPlugin"),
 }
@@ -615,7 +615,7 @@ func TestPriorityQueue_AssignedPodAdded(t *testing.T) {
 Spec: v1.PodSpec{NodeName: "machine1"},
 }
-c := clock.NewFakeClock(time.Now())
+c := testingclock.NewFakeClock(time.Now())
 m := map[framework.ClusterEvent]sets.String{AssignedPodAdd: sets.NewString("fakePlugin")}
 q := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(c), WithClusterEventMap(m))
 q.Add(medPriorityPodInfo.Pod)
@@ -999,7 +999,7 @@ func TestSchedulingQueue_Close(t *testing.T) {
 // ensures that an unschedulable pod does not block head of the queue when there
 // are frequent events that move pods to the active queue.
 func TestRecentlyTriedPodsGoBack(t *testing.T) {
-c := clock.NewFakeClock(time.Now())
+c := testingclock.NewFakeClock(time.Now())
 q := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(c))
 // Add a few pods to priority queue.
 for i := 0; i < 5; i++ {
@@ -1056,7 +1056,7 @@ func TestRecentlyTriedPodsGoBack(t *testing.T) {
 // This behavior ensures that an unschedulable pod does not block head of the queue when there
 // are frequent events that move pods to the active queue.
 func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) {
-c := clock.NewFakeClock(time.Now())
+c := testingclock.NewFakeClock(time.Now())
 q := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(c))
 // Add an unschedulable pod to a priority queue.
@@ -1211,7 +1211,7 @@ func TestHighPriorityBackoff(t *testing.T) {
 // TestHighPriorityFlushUnschedulableQLeftover tests that pods will be moved to
 // activeQ after one minutes if it is in unschedulableQ
 func TestHighPriorityFlushUnschedulableQLeftover(t *testing.T) {
-c := clock.NewFakeClock(time.Now())
+c := testingclock.NewFakeClock(time.Now())
 m := map[framework.ClusterEvent]sets.String{
 NodeAdd: sets.NewString("fakePlugin"),
 }
@@ -1307,11 +1307,11 @@ var (
 queue.MoveAllToActiveOrBackoffQueue(UnschedulableTimeout, nil)
 }
 flushBackoffQ = func(queue *PriorityQueue, _ *framework.QueuedPodInfo) {
-queue.clock.(*clock.FakeClock).Step(2 * time.Second)
+queue.clock.(*testingclock.FakeClock).Step(2 * time.Second)
 queue.flushBackoffQCompleted()
 }
 moveClockForward = func(queue *PriorityQueue, _ *framework.QueuedPodInfo) {
-queue.clock.(*clock.FakeClock).Step(2 * time.Second)
+queue.clock.(*testingclock.FakeClock).Step(2 * time.Second)
 }
 )
@@ -1399,7 +1399,7 @@ func TestPodTimestamp(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-queue := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(clock.NewFakeClock(timestamp)))
+queue := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(testingclock.NewFakeClock(timestamp)))
 var podInfoList []*framework.QueuedPodInfo
 for i, op := range test.operations {
@@ -1556,7 +1556,7 @@ scheduler_pending_pods{queue="unschedulable"} 0
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 resetMetrics()
-queue := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(clock.NewFakeClock(timestamp)))
+queue := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(testingclock.NewFakeClock(timestamp)))
 for i, op := range test.operations {
 for _, pInfo := range test.operands[i] {
 op(queue, pInfo)
@@ -1584,7 +1584,7 @@ func TestPerPodSchedulingMetrics(t *testing.T) {
 // Case 1: A pod is created and scheduled after 1 attempt. The queue operations are
 // Add -> Pop.
-c := clock.NewFakeClock(timestamp)
+c := testingclock.NewFakeClock(timestamp)
 queue := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(c))
 queue.Add(pod)
 pInfo, err := queue.Pop()
@@ -1595,7 +1595,7 @@ func TestPerPodSchedulingMetrics(t *testing.T) {
 // Case 2: A pod is created and scheduled after 2 attempts. The queue operations are
 // Add -> Pop -> AddUnschedulableIfNotPresent -> flushUnschedulableQLeftover -> Pop.
-c = clock.NewFakeClock(timestamp)
+c = testingclock.NewFakeClock(timestamp)
 queue = NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(c))
 queue.Add(pod)
 pInfo, err = queue.Pop()
@@ -1615,7 +1615,7 @@ func TestPerPodSchedulingMetrics(t *testing.T) {
 // Case 3: Similar to case 2, but before the second pop, call update, the queue operations are
 // Add -> Pop -> AddUnschedulableIfNotPresent -> flushUnschedulableQLeftover -> Update -> Pop.
-c = clock.NewFakeClock(timestamp)
+c = testingclock.NewFakeClock(timestamp)
 queue = NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(c))
 queue.Add(pod)
 pInfo, err = queue.Pop()
@@ -1714,7 +1714,7 @@ func TestIncomingPodsMetrics(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 metrics.SchedulerQueueIncomingPods.Reset()
-queue := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(clock.NewFakeClock(timestamp)))
+queue := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(testingclock.NewFakeClock(timestamp)))
 for _, op := range test.operations {
 for _, pInfo := range pInfos {
 op(queue, pInfo)
@@ -1739,7 +1739,7 @@ func checkPerPodSchedulingMetrics(name string, t *testing.T, pInfo *framework.Qu
 }
 func TestBackOffFlow(t *testing.T) {
-cl := clock.NewFakeClock(time.Now())
+cl := testingclock.NewFakeClock(time.Now())
 q := NewTestQueue(context.Background(), newDefaultQueueSort(), WithClock(cl))
 steps := []struct {
 wantBackoff time.Duration

View File

@@ -26,7 +26,6 @@ import (
 "strings"
 "time"
-"k8s.io/apimachinery/pkg/util/clock"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
 "k8s.io/klog/v2"
@@ -42,6 +41,7 @@ import (
 "k8s.io/kubernetes/pkg/features"
 "k8s.io/kubernetes/pkg/volume"
 volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
+"k8s.io/utils/clock"
 )
 const (

View File

@@ -25,11 +25,11 @@ import (
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
-"k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/apiserver/pkg/admission"
 "k8s.io/apiserver/pkg/authentication/user"
 api "k8s.io/kubernetes/pkg/apis/core"
 eventratelimitapi "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit"
+testingclock "k8s.io/utils/clock/testing"
 )
 const (
@@ -461,7 +461,7 @@ func TestEventRateLimiting(t *testing.T) {
 for _, tc := range cases {
 t.Run(tc.name, func(t *testing.T) {
-clock := clock.NewFakeClock(time.Now())
+clock := testingclock.NewFakeClock(time.Now())
 config := &eventratelimitapi.Configuration{}
 if tc.serverBurst > 0 {
 serverLimit := eventratelimitapi.Limit{

View File

@@ -24,8 +24,8 @@ import (
 "sync"
 "time"
-"k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/apimachinery/pkg/util/runtime"
+"k8s.io/utils/clock"
 )
 // For any test of the style:

View File

@@ -26,8 +26,9 @@ import (
 "testing"
 "time"
-"k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/apimachinery/pkg/util/runtime"
+"k8s.io/utils/clock"
+testingclock "k8s.io/utils/clock/testing"
 )
 func TestUntil(t *testing.T) {
@@ -713,7 +714,7 @@ func TestContextForChannel(t *testing.T) {
 }
 func TestExponentialBackoffManagerGetNextBackoff(t *testing.T) {
-fc := clock.NewFakeClock(time.Now())
+fc := testingclock.NewFakeClock(time.Now())
 backoff := NewExponentialBackoffManager(1, 10, 10, 2.0, 0.0, fc)
 durations := []time.Duration{1, 2, 4, 8, 10, 10, 10}
 for i := 0; i < len(durations); i++ {
@@ -732,7 +733,7 @@ func TestExponentialBackoffManagerGetNextBackoff(t *testing.T) {
 func TestJitteredBackoffManagerGetNextBackoff(t *testing.T) {
 // positive jitter
-backoffMgr := NewJitteredBackoffManager(1, 1, clock.NewFakeClock(time.Now()))
+backoffMgr := NewJitteredBackoffManager(1, 1, testingclock.NewFakeClock(time.Now()))
 for i := 0; i < 5; i++ {
 backoff := backoffMgr.(*jitteredBackoffManagerImpl).getNextBackoff()
 if backoff < 1 || backoff > 2 {
@@ -741,7 +742,7 @@ func TestJitteredBackoffManagerGetNextBackoff(t *testing.T) {
 }
 // negative jitter, shall be a fixed backoff
-backoffMgr = NewJitteredBackoffManager(1, -1, clock.NewFakeClock(time.Now()))
+backoffMgr = NewJitteredBackoffManager(1, -1, testingclock.NewFakeClock(time.Now()))
 backoff := backoffMgr.(*jitteredBackoffManagerImpl).getNextBackoff()
 if backoff != 1 {
 t.Errorf("backoff should be 1, but got %d", backoff)
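
The wait-package backoff managers take the clock as an explicit constructor argument, which is what lets the tests above inject a fake; production callers pass a real clock from the new package. A hedged sketch of typical caller-side usage under that assumption (the function and variable names here are illustrative, not from this PR):

package clockmigration

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/utils/clock"
)

// retryWithBackoff runs work under an exponential backoff: initial 1s,
// capped at 30s, resetting after 2 minutes of quiet, factor 2.0, jitter 1.0.
func retryWithBackoff(work func(), stopCh <-chan struct{}) {
	mgr := wait.NewExponentialBackoffManager(
		time.Second, 30*time.Second, 2*time.Minute, 2.0, 1.0, clock.RealClock{})
	// BackoffUntil keeps invoking work, sleeping per the manager between runs,
	// until stopCh is closed.
	wait.BackoffUntil(work, mgr, true, stopCh)
}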

View File

@@ -21,9 +21,9 @@ import (
 "time"
 "k8s.io/apimachinery/pkg/runtime"
-"k8s.io/apimachinery/pkg/util/clock"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/apimachinery/pkg/util/wait"
+"k8s.io/utils/clock"
 )
 // This file implements a low-level controller that is used in

View File

@@ -20,8 +20,8 @@ import (
 "sync"
 "time"
-"k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/klog/v2"
+"k8s.io/utils/clock"
 )
 // ExpirationCache implements the store interface

View File

@@ -17,8 +17,8 @@ limitations under the License.
 package cache
 import (
-"k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/apimachinery/pkg/util/sets"
+"k8s.io/utils/clock"
 )
 type fakeThreadSafeMap struct {

View File

@@ -21,9 +21,10 @@ import (
 "testing"
 "time"
-"k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/wait"
+"k8s.io/utils/clock"
+testingclock "k8s.io/utils/clock/testing"
 )
 func TestTTLExpirationBasic(t *testing.T) {
@@ -167,7 +168,7 @@ func TestTTLPolicy(t *testing.T) {
 exactlyOnTTL := fakeTime.Add(-ttl)
 expiredTime := fakeTime.Add(-(ttl + 1))
-policy := TTLPolicy{ttl, clock.NewFakeClock(fakeTime)}
+policy := TTLPolicy{ttl, testingclock.NewFakeClock(fakeTime)}
 item := testStoreObject{id: "foo", val: "bar"}
 itemkey, _ := testStoreKeyFunc(item)
 fakeTimestampedEntry := &TimestampedEntry{Obj: item, Timestamp: exactlyOnTTL, key: itemkey}

View File

@@ -32,7 +32,6 @@ import (
 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/runtime/schema"
-"k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/apimachinery/pkg/util/naming"
 utilnet "k8s.io/apimachinery/pkg/util/net"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -40,6 +39,7 @@ import (
 "k8s.io/apimachinery/pkg/watch"
 "k8s.io/client-go/tools/pager"
 "k8s.io/klog/v2"
+"k8s.io/utils/clock"
 "k8s.io/utils/trace"
 )

View File

@@ -32,9 +32,10 @@ import (
 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/runtime/schema"
-"k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/apimachinery/pkg/watch"
+"k8s.io/utils/clock"
+testingclock "k8s.io/utils/clock/testing"
 )
 var nevererrc chan error
@@ -376,7 +377,7 @@ func TestReflectorListAndWatchInitConnBackoff(t *testing.T) {
 func(t *testing.T) {
 stopCh := make(chan struct{})
 connFails := test.numConnFails
-fakeClock := clock.NewFakeClock(time.Unix(0, 0))
+fakeClock := testingclock.NewFakeClock(time.Unix(0, 0))
 bm := wait.NewExponentialBackoffManager(time.Millisecond, maxBackoff, 100*time.Millisecond, 2.0, 1.0, fakeClock)
 done := make(chan struct{})
 defer close(done)

View File

@@ -23,10 +23,10 @@ import (
 "k8s.io/apimachinery/pkg/api/meta"
 "k8s.io/apimachinery/pkg/runtime"
-"k8s.io/apimachinery/pkg/util/clock"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/utils/buffer"
+"k8s.io/utils/clock"
 "k8s.io/klog/v2"
 )

View File

@@ -26,10 +26,10 @@ import (
 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/meta"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-"k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/wait"
 fcache "k8s.io/client-go/tools/cache/testing"
+testingclock "k8s.io/utils/clock/testing"
 )
 type testListener struct {
@@ -105,7 +105,7 @@ func TestListenerResyncPeriods(t *testing.T) {
 // create the shared informer and resync every 1s
 informer := NewSharedInformer(source, &v1.Pod{}, 1*time.Second).(*sharedIndexInformer)
-clock := clock.NewFakeClock(time.Now())
+clock := testingclock.NewFakeClock(time.Now())
 informer.clock = clock
 informer.processor.clock = clock
@@ -190,7 +190,7 @@ func TestResyncCheckPeriod(t *testing.T) {
 // create the shared informer and resync every 12 hours
 informer := NewSharedInformer(source, &v1.Pod{}, 12*time.Hour).(*sharedIndexInformer)
-clock := clock.NewFakeClock(time.Now())
+clock := testingclock.NewFakeClock(time.Now())
 informer.clock = clock
 informer.processor.clock = clock
@@ -278,7 +278,7 @@ func TestSharedInformerWatchDisruption(t *testing.T) {
 // create the shared informer and resync every 1s
 informer := NewSharedInformer(source, &v1.Pod{}, 1*time.Second).(*sharedIndexInformer)
-clock := clock.NewFakeClock(time.Now())
+clock := testingclock.NewFakeClock(time.Now())
 informer.clock = clock
 informer.processor.clock = clock

View File

@@ -28,7 +28,6 @@ import (
 v1 "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-"k8s.io/apimachinery/pkg/util/clock"
 "k8s.io/apimachinery/pkg/util/wait"
 apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
 "k8s.io/client-go/discovery"
@@ -37,6 +36,7 @@ import (
 restclient "k8s.io/client-go/rest"
 "k8s.io/client-go/transport"
 "k8s.io/klog/v2"
+"k8s.io/utils/clock"
 utilpointer "k8s.io/utils/pointer"
 )