From cb14b35bdeb6ce66bb7d835e30eb298cf77233b3 Mon Sep 17 00:00:00 2001 From: Harry Zhang Date: Wed, 25 May 2016 23:08:56 -0400 Subject: [PATCH] Refactor util clock into its own pkg --- pkg/client/cache/expiration_cache.go | 10 +++++----- pkg/client/cache/expiration_cache_fakes.go | 4 ++-- pkg/client/cache/expiration_cache_test.go | 10 +++++----- pkg/client/record/event.go | 8 ++++---- pkg/client/record/event_test.go | 14 +++++++------- pkg/client/record/events_cache.go | 12 ++++++------ pkg/client/record/events_cache_test.go | 4 ++-- pkg/client/restclient/request_test.go | 4 ++-- pkg/controller/controller_utils.go | 6 +++--- pkg/controller/controller_utils_test.go | 5 +++-- pkg/genericapiserver/tunneler.go | 5 +++-- pkg/genericapiserver/tunneler_test.go | 16 ++++++++-------- pkg/kubelet/active_deadline.go | 6 +++--- pkg/kubelet/active_deadline_test.go | 4 ++-- pkg/kubelet/dockertools/docker_manager_test.go | 4 ++-- pkg/kubelet/eviction/eviction_manager.go | 6 +++--- pkg/kubelet/eviction/eviction_manager_test.go | 4 ++-- pkg/kubelet/image_manager_test.go | 4 ++-- pkg/kubelet/images/parallel_image_puller_test.go | 4 ++-- .../images/serialized_image_puller_test.go | 4 ++-- pkg/kubelet/kubelet.go | 8 ++++---- pkg/kubelet/kubelet_test.go | 8 ++++---- pkg/kubelet/pleg/generic.go | 6 +++--- pkg/kubelet/pleg/generic_test.go | 8 ++++---- pkg/kubelet/pod_workers_test.go | 6 +++--- pkg/kubelet/runonce_test.go | 4 ++-- pkg/kubelet/util/cache/object_cache_test.go | 8 ++++---- pkg/kubelet/util/queue/work_queue.go | 6 +++--- pkg/kubelet/util/queue/work_queue_test.go | 6 +++--- pkg/storage/watch_cache.go | 6 +++--- pkg/storage/watch_cache_test.go | 6 +++--- pkg/util/{ => clock}/clock.go | 2 +- pkg/util/{ => clock}/clock_test.go | 2 +- pkg/util/flowcontrol/backoff.go | 8 ++++---- pkg/util/flowcontrol/backoff_test.go | 12 ++++++------ pkg/util/workqueue/delaying_queue.go | 8 ++++---- pkg/util/workqueue/delaying_queue_test.go | 10 +++++----- 
pkg/util/workqueue/rate_limitting_queue_test.go | 4 ++-- 38 files changed, 127 insertions(+), 125 deletions(-) rename pkg/util/{ => clock}/clock.go (99%) rename pkg/util/{ => clock}/clock_test.go (99%) diff --git a/pkg/client/cache/expiration_cache.go b/pkg/client/cache/expiration_cache.go index d9f0559f9b7..88ef89b3ee9 100644 --- a/pkg/client/cache/expiration_cache.go +++ b/pkg/client/cache/expiration_cache.go @@ -21,7 +21,7 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" ) // ExpirationCache implements the store interface @@ -38,7 +38,7 @@ import ( type ExpirationCache struct { cacheStorage ThreadSafeStore keyFunc KeyFunc - clock util.Clock + clock clock.Clock expirationPolicy ExpirationPolicy // expirationLock is a write lock used to guarantee that we don't clobber // newly inserted objects because of a stale expiration timestamp comparison @@ -58,7 +58,7 @@ type TTLPolicy struct { Ttl time.Duration // Clock used to calculate ttl expiration - Clock util.Clock + Clock clock.Clock } // IsExpired returns true if the given object is older than the ttl, or it can't @@ -202,7 +202,7 @@ func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store { return &ExpirationCache{ cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}), keyFunc: keyFunc, - clock: util.RealClock{}, - expirationPolicy: &TTLPolicy{ttl, util.RealClock{}}, + clock: clock.RealClock{}, + expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}}, } } diff --git a/pkg/client/cache/expiration_cache_fakes.go b/pkg/client/cache/expiration_cache_fakes.go index eb1d5353afa..da180b77f1c 100644 --- a/pkg/client/cache/expiration_cache_fakes.go +++ b/pkg/client/cache/expiration_cache_fakes.go @@ -17,7 +17,7 @@ limitations under the License. 
package cache import ( - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/sets" ) @@ -43,7 +43,7 @@ func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool { return !p.NeverExpire.Has(key) } -func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock util.Clock) Store { +func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store { cacheStorage := NewThreadSafeStore(Indexers{}, Indices{}) return &ExpirationCache{ cacheStorage: &fakeThreadSafeMap{cacheStorage, deletedKeys}, diff --git a/pkg/client/cache/expiration_cache_test.go b/pkg/client/cache/expiration_cache_test.go index 4667d8777ac..6e73f2969c3 100644 --- a/pkg/client/cache/expiration_cache_test.go +++ b/pkg/client/cache/expiration_cache_test.go @@ -21,7 +21,7 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" ) @@ -37,7 +37,7 @@ func TestTTLExpirationBasic(t *testing.T) { return obj.(*timestampedEntry).obj.(testStoreObject).id, nil }, }, - util.RealClock{}, + clock.RealClock{}, ) err := ttlStore.Add(testObj) if err != nil { @@ -71,7 +71,7 @@ func TestReAddExpiredItem(t *testing.T) { }, } ttlStore := NewFakeExpirationStore( - testStoreKeyFunc, deleteChan, exp, util.RealClock{}) + testStoreKeyFunc, deleteChan, exp, clock.RealClock{}) testKey := "foo" testObj := testStoreObject{id: testKey, val: "bar"} err := ttlStore.Add(testObj) @@ -133,7 +133,7 @@ func TestTTLList(t *testing.T) { return obj.(*timestampedEntry).obj.(testStoreObject).id, nil }, }, - util.RealClock{}, + clock.RealClock{}, ) for _, obj := range testObjs { err := ttlStore.Add(obj) @@ -167,7 +167,7 @@ func TestTTLPolicy(t *testing.T) { exactlyOnTTL := fakeTime.Add(-ttl) expiredTime := fakeTime.Add(-(ttl + 1)) - policy := TTLPolicy{ttl, 
util.NewFakeClock(fakeTime)} + policy := TTLPolicy{ttl, clock.NewFakeClock(fakeTime)} fakeTimestampedEntry := &timestampedEntry{obj: struct{}{}, timestamp: exactlyOnTTL} if policy.IsExpired(fakeTimestampedEntry) { t.Errorf("TTL cache should not expire entries exactly on ttl") diff --git a/pkg/client/record/event.go b/pkg/client/record/event.go index 903da6352f8..9378f784b10 100644 --- a/pkg/client/record/event.go +++ b/pkg/client/record/event.go @@ -26,7 +26,7 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" utilruntime "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/watch" @@ -113,7 +113,7 @@ func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSin // The default math/rand package functions aren't thread safe, so create a // new Rand object for each StartRecording call. randGen := rand.New(rand.NewSource(time.Now().UnixNano())) - eventCorrelator := NewEventCorrelator(util.RealClock{}) + eventCorrelator := NewEventCorrelator(clock.RealClock{}) return eventBroadcaster.StartEventWatcher( func(event *api.Event) { recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration) @@ -242,13 +242,13 @@ func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler fun // NewRecorder returns an EventRecorder that records events with the given event source. 
func (eventBroadcaster *eventBroadcasterImpl) NewRecorder(source api.EventSource) EventRecorder { - return &recorderImpl{source, eventBroadcaster.Broadcaster, util.RealClock{}} + return &recorderImpl{source, eventBroadcaster.Broadcaster, clock.RealClock{}} } type recorderImpl struct { source api.EventSource *watch.Broadcaster - clock util.Clock + clock clock.Clock } func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp unversioned.Time, eventtype, reason, message string) { diff --git a/pkg/client/record/event_test.go b/pkg/client/record/event_test.go index 23d7cc3289f..2216847b160 100644 --- a/pkg/client/record/event_test.go +++ b/pkg/client/record/event_test.go @@ -31,7 +31,7 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/client/restclient" k8sruntime "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/strategicpatch" ) @@ -346,7 +346,7 @@ func TestEventf(t *testing.T) { eventBroadcaster := NewBroadcasterForTests(0) sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents) - clock := util.NewFakeClock(time.Now()) + clock := clock.NewFakeClock(time.Now()) recorder := recorderWithFakeClock(api.EventSource{Component: "eventTest"}, eventBroadcaster, clock) for index, item := range table { clock.Step(1 * time.Second) @@ -375,7 +375,7 @@ func TestEventf(t *testing.T) { sinkWatcher.Stop() } -func recorderWithFakeClock(eventSource api.EventSource, eventBroadcaster EventBroadcaster, clock util.Clock) EventRecorder { +func recorderWithFakeClock(eventSource api.EventSource, eventBroadcaster EventBroadcaster, clock clock.Clock) EventRecorder { return &recorderImpl{eventSource, eventBroadcaster.(*eventBroadcasterImpl).Broadcaster, clock} } @@ -413,7 +413,7 @@ func TestWriteEventError(t *testing.T) { }, } - eventCorrelator := NewEventCorrelator(util.RealClock{}) + eventCorrelator := NewEventCorrelator(clock.RealClock{}) randGen := 
rand.New(rand.NewSource(time.Now().UnixNano())) for caseName, ent := range table { @@ -436,7 +436,7 @@ func TestWriteEventError(t *testing.T) { } func TestUpdateExpiredEvent(t *testing.T) { - eventCorrelator := NewEventCorrelator(util.RealClock{}) + eventCorrelator := NewEventCorrelator(clock.RealClock{}) randGen := rand.New(rand.NewSource(time.Now().UnixNano())) var createdEvent *api.Event @@ -592,7 +592,7 @@ func TestEventfNoNamespace(t *testing.T) { eventBroadcaster := NewBroadcasterForTests(0) sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents) - clock := util.NewFakeClock(time.Now()) + clock := clock.NewFakeClock(time.Now()) recorder := recorderWithFakeClock(api.EventSource{Component: "eventTest"}, eventBroadcaster, clock) for index, item := range table { @@ -879,7 +879,7 @@ func TestMultiSinkCache(t *testing.T) { } eventBroadcaster := NewBroadcasterForTests(0) - clock := util.NewFakeClock(time.Now()) + clock := clock.NewFakeClock(time.Now()) recorder := recorderWithFakeClock(api.EventSource{Component: "eventTest"}, eventBroadcaster, clock) sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents) diff --git a/pkg/client/record/events_cache.go b/pkg/client/record/events_cache.go index 3b08655a80e..8ff65776cb1 100644 --- a/pkg/client/record/events_cache.go +++ b/pkg/client/record/events_cache.go @@ -27,7 +27,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/strategicpatch" ) @@ -116,12 +116,12 @@ type EventAggregator struct { maxIntervalInSeconds int // clock is used to allow for testing over a time interval - clock util.Clock + clock clock.Clock } // NewEventAggregator returns a new instance of an EventAggregator func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messageFunc EventAggregatorMessageFunc, - maxEvents int, maxIntervalInSeconds int, clock 
util.Clock) *EventAggregator { + maxEvents int, maxIntervalInSeconds int, clock clock.Clock) *EventAggregator { return &EventAggregator{ cache: lru.New(lruCacheSize), keyFunc: keyFunc, @@ -207,11 +207,11 @@ type eventLog struct { type eventLogger struct { sync.RWMutex cache *lru.Cache - clock util.Clock + clock clock.Clock } // newEventLogger observes events and counts their frequencies -func newEventLogger(lruCacheEntries int, clock util.Clock) *eventLogger { +func newEventLogger(lruCacheEntries int, clock clock.Clock) *eventLogger { return &eventLogger{cache: lru.New(lruCacheEntries), clock: clock} } @@ -326,7 +326,7 @@ type EventCorrelateResult struct { // the same reason. // * Events are incrementally counted if the exact same event is encountered multiple // times. -func NewEventCorrelator(clock util.Clock) *EventCorrelator { +func NewEventCorrelator(clock clock.Clock) *EventCorrelator { cacheSize := maxLruCacheEntries return &EventCorrelator{ filterFunc: DefaultEventFilterFunc, diff --git a/pkg/client/record/events_cache_test.go b/pkg/client/record/events_cache_test.go index afe4acb13a9..0a9d15265ba 100644 --- a/pkg/client/record/events_cache_test.go +++ b/pkg/client/record/events_cache_test.go @@ -24,7 +24,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/diff" ) @@ -223,7 +223,7 @@ func TestEventCorrelator(t *testing.T) { for testScenario, testInput := range scenario { eventInterval := time.Duration(testInput.intervalSeconds) * time.Second - clock := util.IntervalClock{Time: time.Now(), Duration: eventInterval} + clock := clock.IntervalClock{Time: time.Now(), Duration: eventInterval} correlator := NewEventCorrelator(&clock) for i := range testInput.previousEvents { event := testInput.previousEvents[i] diff --git a/pkg/client/restclient/request_test.go b/pkg/client/restclient/request_test.go index 1288bb50a07..29cd90884cf 100755 --- 
a/pkg/client/restclient/request_test.go +++ b/pkg/client/restclient/request_test.go @@ -39,7 +39,7 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime/serializer/streaming" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/util/httpstream" "k8s.io/kubernetes/pkg/util/intstr" @@ -972,7 +972,7 @@ func TestBackoffLifecycle(t *testing.T) { // which are used in the server implementation returning StatusOK above. seconds := []int{0, 1, 2, 4, 8, 0, 1, 2, 4, 0} request := c.Verb("POST").Prefix("backofftest").Suffix("abc") - clock := util.FakeClock{} + clock := clock.FakeClock{} request.backoffMgr = &URLBackoff{ // Use a fake backoff here to avoid flakes and speed the test up. Backoff: flowcontrol.NewFakeBackOff( diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index 85224f8918a..8c2eb5edf9d 100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -34,7 +34,7 @@ import ( "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/integer" "k8s.io/kubernetes/pkg/util/sets" ) @@ -167,12 +167,12 @@ func (r *ControllerExpectations) SatisfiedExpectations(controllerKey string) boo // TODO: Make this possible to disable in tests. // TODO: Support injection of clock. func (exp *ControlleeExpectations) isExpired() bool { - return util.RealClock{}.Since(exp.timestamp) > ExpectationsTimeout + return clock.RealClock{}.Since(exp.timestamp) > ExpectationsTimeout } // SetExpectations registers new expectations for the given controller. Forgets existing expectations. 
func (r *ControllerExpectations) SetExpectations(controllerKey string, add, del int) error { - exp := &ControlleeExpectations{add: int64(add), del: int64(del), key: controllerKey, timestamp: util.RealClock{}.Now()} + exp := &ControlleeExpectations{add: int64(add), del: int64(del), key: controllerKey, timestamp: clock.RealClock{}.Now()} glog.V(4).Infof("Setting expectations %#v", exp) return r.Add(exp) } diff --git a/pkg/controller/controller_utils_test.go b/pkg/controller/controller_utils_test.go index 65dba496f69..29dd072593e 100644 --- a/pkg/controller/controller_utils_test.go +++ b/pkg/controller/controller_utils_test.go @@ -36,14 +36,15 @@ import ( "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/securitycontext" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/sets" utiltesting "k8s.io/kubernetes/pkg/util/testing" ) // NewFakeControllerExpectationsLookup creates a fake store for PodExpectations. -func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectations, *util.FakeClock) { +func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectations, *clock.FakeClock) { fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) - fakeClock := util.NewFakeClock(fakeTime) + fakeClock := clock.NewFakeClock(fakeTime) ttlPolicy := &cache.TTLPolicy{Ttl: ttl, Clock: fakeClock} ttlStore := cache.NewFakeExpirationStore( ExpKeyFunc, nil, ttlPolicy, fakeClock) diff --git a/pkg/genericapiserver/tunneler.go b/pkg/genericapiserver/tunneler.go index aea78233fda..a239bc90138 100644 --- a/pkg/genericapiserver/tunneler.go +++ b/pkg/genericapiserver/tunneler.go @@ -26,6 +26,7 @@ import ( "k8s.io/kubernetes/pkg/ssh" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/wait" "github.com/golang/glog" @@ -54,7 +55,7 @@ type SSHTunneler struct { lastSync int64 // Seconds since Epoch lastSSHKeySync int64 // Seconds since Epoch lastSyncMetric 
prometheus.GaugeFunc - clock util.Clock + clock clock.Clock getAddresses AddressFunc stopChan chan struct{} @@ -66,7 +67,7 @@ func NewSSHTunneler(sshUser, sshKeyfile string, healthCheckURL *url.URL, install SSHKeyfile: sshKeyfile, InstallSSHKey: installSSHKey, HealthCheckURL: healthCheckURL, - clock: util.RealClock{}, + clock: clock.RealClock{}, } } diff --git a/pkg/genericapiserver/tunneler_test.go b/pkg/genericapiserver/tunneler_test.go index 43d9e3bd774..911ab426b41 100644 --- a/pkg/genericapiserver/tunneler_test.go +++ b/pkg/genericapiserver/tunneler_test.go @@ -23,7 +23,7 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "github.com/stretchr/testify/assert" ) @@ -37,32 +37,32 @@ func TestSecondsSinceSync(t *testing.T) { tunneler.lastSync = time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix() // Nano Second. No difference. - tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 2, time.UTC)) + tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 2, time.UTC)) assert.Equal(int64(0), tunneler.SecondsSinceSync()) // Second - tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 2, 1, time.UTC)) + tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 2, 1, time.UTC)) assert.Equal(int64(1), tunneler.SecondsSinceSync()) // Minute - tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 1, 1, 2, 1, 1, time.UTC)) + tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 2, 1, 1, time.UTC)) assert.Equal(int64(60), tunneler.SecondsSinceSync()) // Hour - tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 1, 2, 1, 1, 1, time.UTC)) + tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 2, 1, 1, 1, time.UTC)) assert.Equal(int64(3600), tunneler.SecondsSinceSync()) // Day - tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 2, 1, 1, 1, 1, time.UTC)) + tunneler.clock 
= clock.NewFakeClock(time.Date(2015, time.January, 2, 1, 1, 1, 1, time.UTC)) assert.Equal(int64(86400), tunneler.SecondsSinceSync()) // Month - tunneler.clock = util.NewFakeClock(time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC)) + tunneler.clock = clock.NewFakeClock(time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC)) assert.Equal(int64(2678400), tunneler.SecondsSinceSync()) // Future Month. Should be -Month. tunneler.lastSync = time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC).Unix() - tunneler.clock = util.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC)) + tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC)) assert.Equal(int64(-2678400), tunneler.SecondsSinceSync()) } diff --git a/pkg/kubelet/active_deadline.go b/pkg/kubelet/active_deadline.go index 4a613ef6aad..154bbd3bad1 100644 --- a/pkg/kubelet/active_deadline.go +++ b/pkg/kubelet/active_deadline.go @@ -24,7 +24,7 @@ import ( "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/status" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" ) const ( @@ -35,7 +35,7 @@ const ( // activeDeadlineHandler knows how to enforce active deadlines on pods. 
type activeDeadlineHandler struct { // the clock to use for deadline enforcement - clock util.Clock + clock clock.Clock // the provider of pod status podStatusProvider status.PodStatusProvider // the recorder to dispatch events when we identify a pod has exceeded active deadline @@ -46,7 +46,7 @@ type activeDeadlineHandler struct { func newActiveDeadlineHandler( podStatusProvider status.PodStatusProvider, recorder record.EventRecorder, - clock util.Clock, + clock clock.Clock, ) (*activeDeadlineHandler, error) { // check for all required fields diff --git a/pkg/kubelet/active_deadline_test.go b/pkg/kubelet/active_deadline_test.go index e343ab283d5..52ebeafdcb5 100644 --- a/pkg/kubelet/active_deadline_test.go +++ b/pkg/kubelet/active_deadline_test.go @@ -24,7 +24,7 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" ) // mockPodStatusProvider returns the status on the specified pod @@ -45,7 +45,7 @@ func (m *mockPodStatusProvider) GetPodStatus(uid types.UID) (api.PodStatus, bool // TestActiveDeadlineHandler verifies the active deadline handler functions as expected. 
func TestActiveDeadlineHandler(t *testing.T) { pods := newTestPods(4) - fakeClock := util.NewFakeClock(time.Now()) + fakeClock := clock.NewFakeClock(time.Now()) podStatusProvider := &mockPodStatusProvider{pods: pods} fakeRecorder := &record.FakeRecorder{} handler, err := newActiveDeadlineHandler(podStatusProvider, fakeRecorder, fakeClock) diff --git a/pkg/kubelet/dockertools/docker_manager_test.go b/pkg/kubelet/dockertools/docker_manager_test.go index d1869344159..a14edbd54c9 100644 --- a/pkg/kubelet/dockertools/docker_manager_test.go +++ b/pkg/kubelet/dockertools/docker_manager_test.go @@ -52,7 +52,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/runtime" kubetypes "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" uexec "k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/util/intstr" @@ -998,7 +998,7 @@ func TestSyncPodWithRestartPolicy(t *testing.T) { } func TestSyncPodBackoff(t *testing.T) { - var fakeClock = util.NewFakeClock(time.Now()) + var fakeClock = clock.NewFakeClock(time.Now()) startTime := fakeClock.Now() dm, fakeDocker := newTestDockerManager() diff --git a/pkg/kubelet/eviction/eviction_manager.go b/pkg/kubelet/eviction/eviction_manager.go index 9cee8dd37b3..3aed3cea36c 100644 --- a/pkg/kubelet/eviction/eviction_manager.go +++ b/pkg/kubelet/eviction/eviction_manager.go @@ -28,14 +28,14 @@ import ( "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/kubelet/server/stats" "k8s.io/kubernetes/pkg/kubelet/util/format" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/wait" ) // managerImpl implements NodeStabilityManager type managerImpl struct { // used to track time - clock util.Clock + clock clock.Clock // config is how the manager is configured config Config // the function to invoke to kill a pod @@ -66,7 +66,7 @@ func NewManager( killPodFunc KillPodFunc, recorder 
record.EventRecorder, nodeRef *api.ObjectReference, - clock util.Clock) (Manager, lifecycle.PodAdmitHandler, error) { + clock clock.Clock) (Manager, lifecycle.PodAdmitHandler, error) { manager := &managerImpl{ clock: clock, killPodFunc: killPodFunc, diff --git a/pkg/kubelet/eviction/eviction_manager_test.go b/pkg/kubelet/eviction/eviction_manager_test.go index f4fa542a6e1..6da0ed1d107 100644 --- a/pkg/kubelet/eviction/eviction_manager_test.go +++ b/pkg/kubelet/eviction/eviction_manager_test.go @@ -26,7 +26,7 @@ import ( statsapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" ) // mockPodKiller is used to testing which pod is killed @@ -93,7 +93,7 @@ func TestMemoryPressure(t *testing.T) { return pods } - fakeClock := util.NewFakeClock(time.Now()) + fakeClock := clock.NewFakeClock(time.Now()) podKiller := &mockPodKiller{} nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} diff --git a/pkg/kubelet/image_manager_test.go b/pkg/kubelet/image_manager_test.go index 40276db8069..899e6a491ac 100644 --- a/pkg/kubelet/image_manager_test.go +++ b/pkg/kubelet/image_manager_test.go @@ -28,7 +28,7 @@ import ( cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing" "k8s.io/kubernetes/pkg/kubelet/container" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" ) var zero time.Time @@ -445,7 +445,7 @@ func TestGarbageCollectImageNotOldEnough(t *testing.T) { }}, } - fakeClock := util.NewFakeClock(time.Now()) + fakeClock := clock.NewFakeClock(time.Now()) t.Log(fakeClock.Now()) require.NoError(t, manager.detectImages(fakeClock.Now())) require.Equal(t, manager.imageRecordsLen(), 2) diff --git a/pkg/kubelet/images/parallel_image_puller_test.go b/pkg/kubelet/images/parallel_image_puller_test.go index 
c8957cc3261..f7e2efd8e6b 100644 --- a/pkg/kubelet/images/parallel_image_puller_test.go +++ b/pkg/kubelet/images/parallel_image_puller_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/kubernetes/pkg/client/record" . "k8s.io/kubernetes/pkg/kubelet/container" ctest "k8s.io/kubernetes/pkg/kubelet/container/testing" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/flowcontrol" ) @@ -101,7 +101,7 @@ func TestPuller(t *testing.T) { } backOff := flowcontrol.NewBackOff(time.Second, time.Minute) - fakeClock := util.NewFakeClock(time.Now()) + fakeClock := clock.NewFakeClock(time.Now()) backOff.Clock = fakeClock fakeRuntime := &ctest.FakeRuntime{} diff --git a/pkg/kubelet/images/serialized_image_puller_test.go b/pkg/kubelet/images/serialized_image_puller_test.go index 182e1270592..731850873c9 100644 --- a/pkg/kubelet/images/serialized_image_puller_test.go +++ b/pkg/kubelet/images/serialized_image_puller_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/kubernetes/pkg/client/record" . 
"k8s.io/kubernetes/pkg/kubelet/container" ctest "k8s.io/kubernetes/pkg/kubelet/container/testing" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/flowcontrol" ) @@ -101,7 +101,7 @@ func TestSerializedPuller(t *testing.T) { } backOff := flowcontrol.NewBackOff(time.Second, time.Minute) - fakeClock := util.NewFakeClock(time.Now()) + fakeClock := clock.NewFakeClock(time.Now()) backOff.Clock = fakeClock fakeRuntime := &ctest.FakeRuntime{} diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 5f7e6449173..3d2896202fd 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -73,8 +73,8 @@ import ( "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/securitycontext" "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/bandwidth" + "k8s.io/kubernetes/pkg/util/clock" utilerrors "k8s.io/kubernetes/pkg/util/errors" utilexec "k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/flowcontrol" @@ -354,7 +354,7 @@ func NewMainKubelet( flannelExperimentalOverlay: flannelExperimentalOverlay, flannelHelper: nil, nodeIP: nodeIP, - clock: util.RealClock{}, + clock: clock.RealClock{}, outOfDiskTransitionFrequency: outOfDiskTransitionFrequency, reservation: reservation, enableCustomMetrics: enableCustomMetrics, @@ -466,7 +466,7 @@ func NewMainKubelet( // TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency klet.resourceAnalyzer = stats.NewResourceAnalyzer(klet, volumeStatsAggPeriod, klet.containerRuntime) - klet.pleg = pleg.NewGenericPLEG(klet.containerRuntime, plegChannelCapacity, plegRelistPeriod, klet.podCache, util.RealClock{}) + klet.pleg = pleg.NewGenericPLEG(klet.containerRuntime, plegChannelCapacity, plegRelistPeriod, klet.podCache, clock.RealClock{}) klet.runtimeState = newRuntimeState(maxWaitForContainerRuntime) klet.updatePodCIDR(podCIDR) @@ -779,7 +779,7 @@ type Kubelet struct { // clock is an interface that provides time related 
functionality in a way that makes it // easy to test the code. - clock util.Clock + clock clock.Clock // outOfDiskTransitionFrequency specifies the amount of time the kubelet has to be actually // not out of disk before it can transition the node condition status from out-of-disk to diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 1e9f15d7a67..28e74bba7c2 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -61,7 +61,7 @@ import ( kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/diff" "k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/util/mount" @@ -99,7 +99,7 @@ type TestKubelet struct { fakeCadvisor *cadvisortest.Mock fakeKubeClient *fake.Clientset fakeMirrorClient *podtest.FakeMirrorClient - fakeClock *util.FakeClock + fakeClock *clock.FakeClock mounter mount.Interface volumePlugin *volumetest.FakeVolumePlugin } @@ -198,7 +198,7 @@ func newTestKubeletWithImageList( LowThresholdPercent: 80, } kubelet.imageManager, err = newImageManager(fakeRuntime, mockCadvisor, fakeRecorder, fakeNodeRef, fakeImageGCPolicy) - fakeClock := util.NewFakeClock(time.Now()) + fakeClock := clock.NewFakeClock(time.Now()) kubelet.backOff = flowcontrol.NewBackOff(time.Second, time.Minute) kubelet.backOff.Clock = fakeClock kubelet.podKillingCh = make(chan *kubecontainer.PodPair, 20) @@ -211,7 +211,7 @@ func newTestKubeletWithImageList( } kubelet.workQueue = queue.NewBasicWorkQueue(fakeClock) // Relist period does not affect the tests. 
- kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour, nil, util.RealClock{}) + kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour, nil, clock.RealClock{}) kubelet.clock = fakeClock kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs() diff --git a/pkg/kubelet/pleg/generic.go b/pkg/kubelet/pleg/generic.go index c359273472b..1babc45bc7f 100644 --- a/pkg/kubelet/pleg/generic.go +++ b/pkg/kubelet/pleg/generic.go @@ -25,7 +25,7 @@ import ( kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/metrics" "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" ) @@ -59,7 +59,7 @@ type GenericPLEG struct { // Cache for storing the runtime states required for syncing pods. cache kubecontainer.Cache // For testability. - clock util.Clock + clock clock.Clock // Pods that failed to have their status retrieved during a relist. These pods will be // retried during the next relisting. 
podsToReinspect map[types.UID]*kubecontainer.Pod @@ -98,7 +98,7 @@ type podRecord struct { type podRecords map[types.UID]*podRecord func NewGenericPLEG(runtime kubecontainer.Runtime, channelCapacity int, - relistPeriod time.Duration, cache kubecontainer.Cache, clock util.Clock) PodLifecycleEventGenerator { + relistPeriod time.Duration, cache kubecontainer.Cache, clock clock.Clock) PodLifecycleEventGenerator { return &GenericPLEG{ relistPeriod: relistPeriod, runtime: runtime, diff --git a/pkg/kubelet/pleg/generic_test.go b/pkg/kubelet/pleg/generic_test.go index 70792ab7ce9..6b5cce5110c 100644 --- a/pkg/kubelet/pleg/generic_test.go +++ b/pkg/kubelet/pleg/generic_test.go @@ -28,7 +28,7 @@ import ( kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/diff" ) @@ -39,12 +39,12 @@ const ( type TestGenericPLEG struct { pleg *GenericPLEG runtime *containertest.FakeRuntime - clock *util.FakeClock + clock *clock.FakeClock } func newTestGenericPLEG() *TestGenericPLEG { fakeRuntime := &containertest.FakeRuntime{} - clock := util.NewFakeClock(time.Time{}) + clock := clock.NewFakeClock(time.Time{}) // The channel capacity should be large enough to hold all events in a // single test. 
pleg := &GenericPLEG{ @@ -246,7 +246,7 @@ func newTestGenericPLEGWithRuntimeMock() (*GenericPLEG, *containertest.Mock) { eventChannel: make(chan *PodLifecycleEvent, 100), podRecords: make(podRecords), cache: kubecontainer.NewCache(), - clock: util.RealClock{}, + clock: clock.RealClock{}, } return pleg, runtimeMock } diff --git a/pkg/kubelet/pod_workers_test.go b/pkg/kubelet/pod_workers_test.go index 8839923656d..04ca76a1e61 100644 --- a/pkg/kubelet/pod_workers_test.go +++ b/pkg/kubelet/pod_workers_test.go @@ -29,7 +29,7 @@ import ( kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/queue" "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" ) // fakePodWorkers runs sync pod function in serial, so we can have @@ -99,7 +99,7 @@ func createPodWorkers() (*podWorkers, map[types.UID][]syncPodRecord) { return nil }, fakeRecorder, - queue.NewBasicWorkQueue(&util.RealClock{}), + queue.NewBasicWorkQueue(&clock.RealClock{}), time.Second, time.Second, fakeCache, @@ -279,7 +279,7 @@ func TestFakePodWorkers(t *testing.T) { kubeletForRealWorkers := &simpleFakeKubelet{} kubeletForFakeWorkers := &simpleFakeKubelet{} - realPodWorkers := newPodWorkers(kubeletForRealWorkers.syncPodWithWaitGroup, fakeRecorder, queue.NewBasicWorkQueue(&util.RealClock{}), time.Second, time.Second, fakeCache) + realPodWorkers := newPodWorkers(kubeletForRealWorkers.syncPodWithWaitGroup, fakeRecorder, queue.NewBasicWorkQueue(&clock.RealClock{}), time.Second, time.Second, fakeCache) fakePodWorkers := &fakePodWorkers{kubeletForFakeWorkers.syncPod, fakeCache, t} tests := []struct { diff --git a/pkg/kubelet/runonce_test.go b/pkg/kubelet/runonce_test.go index 6abd9d3847b..bb434fb7197 100644 --- a/pkg/kubelet/runonce_test.go +++ b/pkg/kubelet/runonce_test.go @@ -40,7 +40,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/status" "k8s.io/kubernetes/pkg/kubelet/volumemanager" "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util" + 
"k8s.io/kubernetes/pkg/util/clock" utiltesting "k8s.io/kubernetes/pkg/util/testing" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" @@ -79,7 +79,7 @@ func TestRunOnce(t *testing.T) { diskSpaceManager: diskSpaceManager, containerRuntime: fakeRuntime, reasonCache: NewReasonCache(), - clock: util.RealClock{}, + clock: clock.RealClock{}, kubeClient: &fake.Clientset{}, hostname: testKubeletHostname, nodeName: testKubeletHostname, diff --git a/pkg/kubelet/util/cache/object_cache_test.go b/pkg/kubelet/util/cache/object_cache_test.go index 7b08bf6b113..b059dedf799 100644 --- a/pkg/kubelet/util/cache/object_cache_test.go +++ b/pkg/kubelet/util/cache/object_cache_test.go @@ -22,7 +22,7 @@ import ( "time" expirationCache "k8s.io/kubernetes/pkg/client/cache" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" ) type testObject struct { @@ -31,7 +31,7 @@ type testObject struct { } // A fake objectCache for unit test. -func NewFakeObjectCache(f func() (interface{}, error), ttl time.Duration, clock util.Clock) *ObjectCache { +func NewFakeObjectCache(f func() (interface{}, error), ttl time.Duration, clock clock.Clock) *ObjectCache { ttlPolicy := &expirationCache.TTLPolicy{Ttl: ttl, Clock: clock} deleteChan := make(chan string, 1) return &ObjectCache{ @@ -47,7 +47,7 @@ func TestAddAndGet(t *testing.T) { } objectCache := NewFakeObjectCache(func() (interface{}, error) { return nil, fmt.Errorf("Unexpected Error: updater should never be called in this test!") - }, 1*time.Hour, util.NewFakeClock(time.Now())) + }, 1*time.Hour, clock.NewFakeClock(time.Now())) err := objectCache.Add(testObj.key, testObj.val) if err != nil { @@ -72,7 +72,7 @@ func TestExpirationBasic(t *testing.T) { val: unexpectedVal, } - fakeClock := util.NewFakeClock(time.Now()) + fakeClock := clock.NewFakeClock(time.Now()) objectCache := NewFakeObjectCache(func() (interface{}, error) { return expectedVal, nil diff --git a/pkg/kubelet/util/queue/work_queue.go 
b/pkg/kubelet/util/queue/work_queue.go index 33722b42d08..5ac45c3f2a3 100644 --- a/pkg/kubelet/util/queue/work_queue.go +++ b/pkg/kubelet/util/queue/work_queue.go @@ -21,7 +21,7 @@ import ( "time" "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" ) // WorkQueue allows queuing items with a timestamp. An item is @@ -34,14 +34,14 @@ type WorkQueue interface { } type basicWorkQueue struct { - clock util.Clock + clock clock.Clock lock sync.Mutex queue map[types.UID]time.Time } var _ WorkQueue = &basicWorkQueue{} -func NewBasicWorkQueue(clock util.Clock) WorkQueue { +func NewBasicWorkQueue(clock clock.Clock) WorkQueue { queue := make(map[types.UID]time.Time) return &basicWorkQueue{queue: queue, clock: clock} } diff --git a/pkg/kubelet/util/queue/work_queue_test.go b/pkg/kubelet/util/queue/work_queue_test.go index 0859e4bb7e0..e0893409dda 100644 --- a/pkg/kubelet/util/queue/work_queue_test.go +++ b/pkg/kubelet/util/queue/work_queue_test.go @@ -21,12 +21,12 @@ import ( "time" "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/sets" ) -func newTestBasicWorkQueue() (*basicWorkQueue, *util.FakeClock) { - fakeClock := util.NewFakeClock(time.Now()) +func newTestBasicWorkQueue() (*basicWorkQueue, *clock.FakeClock) { + fakeClock := clock.NewFakeClock(time.Now()) wq := &basicWorkQueue{ clock: fakeClock, queue: make(map[types.UID]time.Time), diff --git a/pkg/storage/watch_cache.go b/pkg/storage/watch_cache.go index 1fd43fc00b0..b3f5e0371ca 100644 --- a/pkg/storage/watch_cache.go +++ b/pkg/storage/watch_cache.go @@ -27,7 +27,7 @@ import ( "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/watch" ) @@ -96,7 +96,7 @@ type watchCache struct { onEvent func(watchCacheEvent) // for testing timeouts. 
- clock util.Clock + clock clock.Clock } func newWatchCache(capacity int) *watchCache { @@ -107,7 +107,7 @@ func newWatchCache(capacity int) *watchCache { endIndex: 0, store: cache.NewStore(cache.MetaNamespaceKeyFunc), resourceVersion: 0, - clock: util.RealClock{}, + clock: clock.RealClock{}, } wc.cond = sync.NewCond(wc.RLocker()) return wc diff --git a/pkg/storage/watch_cache_test.go b/pkg/storage/watch_cache_test.go index bdee7a58d3c..b5b3c893e18 100644 --- a/pkg/storage/watch_cache_test.go +++ b/pkg/storage/watch_cache_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/watch" @@ -45,7 +45,7 @@ func makeTestPod(name string, resourceVersion uint64) *api.Pod { // newTestWatchCache just adds a fake clock. func newTestWatchCache(capacity int) *watchCache { wc := newWatchCache(capacity) - wc.clock = util.NewFakeClock(time.Now()) + wc.clock = clock.NewFakeClock(time.Now()) return wc } @@ -262,7 +262,7 @@ func TestWaitUntilFreshAndList(t *testing.T) { func TestWaitUntilFreshAndListTimeout(t *testing.T) { store := newTestWatchCache(3) - fc := store.clock.(*util.FakeClock) + fc := store.clock.(*clock.FakeClock) // In background, step clock after the below call starts the timer. go func() { diff --git a/pkg/util/clock.go b/pkg/util/clock/clock.go similarity index 99% rename from pkg/util/clock.go rename to pkg/util/clock/clock.go index 71aca9eda45..2ea1b53c1a3 100644 --- a/pkg/util/clock.go +++ b/pkg/util/clock/clock.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package util +package clock import ( "sync" diff --git a/pkg/util/clock_test.go b/pkg/util/clock/clock_test.go similarity index 99% rename from pkg/util/clock_test.go rename to pkg/util/clock/clock_test.go index 61092dee000..27d34605f50 100644 --- a/pkg/util/clock_test.go +++ b/pkg/util/clock/clock_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package clock import ( "testing" diff --git a/pkg/util/flowcontrol/backoff.go b/pkg/util/flowcontrol/backoff.go index 59b9976f1a8..2d91cc5e021 100644 --- a/pkg/util/flowcontrol/backoff.go +++ b/pkg/util/flowcontrol/backoff.go @@ -20,7 +20,7 @@ import ( "sync" "time" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/integer" ) @@ -31,13 +31,13 @@ type backoffEntry struct { type Backoff struct { sync.Mutex - Clock util.Clock + Clock clock.Clock defaultDuration time.Duration maxDuration time.Duration perItemBackoff map[string]*backoffEntry } -func NewFakeBackOff(initial, max time.Duration, tc *util.FakeClock) *Backoff { +func NewFakeBackOff(initial, max time.Duration, tc *clock.FakeClock) *Backoff { return &Backoff{ perItemBackoff: map[string]*backoffEntry{}, Clock: tc, @@ -49,7 +49,7 @@ func NewFakeBackOff(initial, max time.Duration, tc *util.FakeClock) *Backoff { func NewBackOff(initial, max time.Duration) *Backoff { return &Backoff{ perItemBackoff: map[string]*backoffEntry{}, - Clock: util.RealClock{}, + Clock: clock.RealClock{}, defaultDuration: initial, maxDuration: max, } diff --git a/pkg/util/flowcontrol/backoff_test.go b/pkg/util/flowcontrol/backoff_test.go index 7bf2535bc2f..a112f5dd636 100644 --- a/pkg/util/flowcontrol/backoff_test.go +++ b/pkg/util/flowcontrol/backoff_test.go @@ -17,14 +17,14 @@ limitations under the License. 
package flowcontrol import ( - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "testing" "time" ) func TestSlowBackoff(t *testing.T) { id := "_idSlow" - tc := util.NewFakeClock(time.Now()) + tc := clock.NewFakeClock(time.Now()) step := time.Second maxDuration := 50 * step @@ -50,7 +50,7 @@ func TestSlowBackoff(t *testing.T) { func TestBackoffReset(t *testing.T) { id := "_idReset" - tc := util.NewFakeClock(time.Now()) + tc := clock.NewFakeClock(time.Now()) step := time.Second maxDuration := step * 5 b := NewFakeBackOff(step, maxDuration, tc) @@ -76,7 +76,7 @@ func TestBackoffReset(t *testing.T) { func TestBackoffHightWaterMark(t *testing.T) { id := "_idHiWaterMark" - tc := util.NewFakeClock(time.Now()) + tc := clock.NewFakeClock(time.Now()) step := time.Second maxDuration := 5 * step b := NewFakeBackOff(step, maxDuration, tc) @@ -98,7 +98,7 @@ func TestBackoffHightWaterMark(t *testing.T) { func TestBackoffGC(t *testing.T) { id := "_idGC" - tc := util.NewFakeClock(time.Now()) + tc := clock.NewFakeClock(time.Now()) step := time.Second maxDuration := 5 * step @@ -126,7 +126,7 @@ func TestBackoffGC(t *testing.T) { func TestIsInBackOffSinceUpdate(t *testing.T) { id := "_idIsInBackOffSinceUpdate" - tc := util.NewFakeClock(time.Now()) + tc := clock.NewFakeClock(time.Now()) step := time.Second maxDuration := 10 * step b := NewFakeBackOff(step, maxDuration, tc) diff --git a/pkg/util/workqueue/delaying_queue.go b/pkg/util/workqueue/delaying_queue.go index f24c8815592..a9aae96aa59 100644 --- a/pkg/util/workqueue/delaying_queue.go +++ b/pkg/util/workqueue/delaying_queue.go @@ -20,7 +20,7 @@ import ( "sort" "time" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" utilruntime "k8s.io/kubernetes/pkg/util/runtime" ) @@ -34,10 +34,10 @@ type DelayingInterface interface { // NewDelayingQueue constructs a new workqueue with delayed queuing ability func NewDelayingQueue() DelayingInterface { - return newDelayingQueue(util.RealClock{}) + return 
newDelayingQueue(clock.RealClock{}) } -func newDelayingQueue(clock util.Clock) DelayingInterface { +func newDelayingQueue(clock clock.Clock) DelayingInterface { ret := &delayingType{ Interface: New(), clock: clock, @@ -57,7 +57,7 @@ type delayingType struct { Interface // clock tracks time for delayed firing - clock util.Clock + clock clock.Clock // stopCh lets us signal a shutdown to the waiting loop stopCh chan struct{} diff --git a/pkg/util/workqueue/delaying_queue_test.go b/pkg/util/workqueue/delaying_queue_test.go index e2a21dadf3b..aa1cc336f9e 100644 --- a/pkg/util/workqueue/delaying_queue_test.go +++ b/pkg/util/workqueue/delaying_queue_test.go @@ -22,12 +22,12 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/wait" ) func TestSimpleQueue(t *testing.T) { - fakeClock := util.NewFakeClock(time.Now()) + fakeClock := clock.NewFakeClock(time.Now()) q := newDelayingQueue(fakeClock) first := "foo" @@ -69,7 +69,7 @@ func TestSimpleQueue(t *testing.T) { } func TestDeduping(t *testing.T) { - fakeClock := util.NewFakeClock(time.Now()) + fakeClock := clock.NewFakeClock(time.Now()) q := newDelayingQueue(fakeClock) first := "foo" @@ -128,7 +128,7 @@ func TestDeduping(t *testing.T) { } func TestAddTwoFireEarly(t *testing.T) { - fakeClock := util.NewFakeClock(time.Now()) + fakeClock := clock.NewFakeClock(time.Now()) q := newDelayingQueue(fakeClock) first := "foo" @@ -178,7 +178,7 @@ func TestAddTwoFireEarly(t *testing.T) { } func TestCopyShifting(t *testing.T) { - fakeClock := util.NewFakeClock(time.Now()) + fakeClock := clock.NewFakeClock(time.Now()) q := newDelayingQueue(fakeClock) first := "foo" diff --git a/pkg/util/workqueue/rate_limitting_queue_test.go b/pkg/util/workqueue/rate_limitting_queue_test.go index c6a2ef930b9..0be1dc2e9ad 100644 --- a/pkg/util/workqueue/rate_limitting_queue_test.go +++ b/pkg/util/workqueue/rate_limitting_queue_test.go @@ -20,13 +20,13 @@ import ( "testing" "time" - 
"k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/clock" ) func TestRateLimitingQueue(t *testing.T) { limiter := NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second) queue := NewRateLimitingQueue(limiter).(*rateLimitingType) - fakeClock := util.NewFakeClock(time.Now()) + fakeClock := clock.NewFakeClock(time.Now()) delayingQueue := &delayingType{ Interface: New(), clock: fakeClock,