Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 19:01:49 +00:00
Merge pull request #104874 from wojtek-t/migrate_clock_1

Unify towards k8s.io/utils/clock - part 1

Commit 047a6b9f86
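The migration itself is mechanical: each file stops importing `k8s.io/apimachinery/pkg/util/clock` and instead pulls the interfaces from `k8s.io/utils/clock` and the fakes from `k8s.io/utils/clock/testing`, conventionally aliased `testingclock`. A minimal sketch of the pattern the hunks below repeat, using an invented `expiringValue` type purely for illustration:

package main

import (
	"fmt"
	"time"

	"k8s.io/utils/clock"                      // interfaces plus RealClock
	testingclock "k8s.io/utils/clock/testing" // FakeClock and friends
)

// expiringValue is an invented example type; it takes the clock as an
// interface so tests can substitute a fake.
type expiringValue struct {
	deadline time.Time
	clock    clock.PassiveClock // satisfied by RealClock and FakeClock alike
}

func (e *expiringValue) expired() bool {
	return e.clock.Now().After(e.deadline)
}

func main() {
	// Production code injects the real clock.
	realClock := clock.RealClock{}
	v := expiringValue{deadline: realClock.Now().Add(time.Hour), clock: realClock}
	fmt.Println(v.expired()) // false

	// Tests inject a fake and advance time deterministically.
	fake := testingclock.NewFakeClock(time.Now())
	v.clock = fake
	v.deadline = fake.Now().Add(time.Hour)
	fake.Step(2 * time.Hour) // move the fake clock forward, no sleeping
	fmt.Println(v.expired()) // true
}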
@@ -30,7 +30,6 @@ import (
 	"github.com/google/go-cmp/cmp"

 	capi "k8s.io/api/certificates/v1"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/diff"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/kubernetes/fake"
@@ -41,10 +40,11 @@ import (
 	capihelper "k8s.io/kubernetes/pkg/apis/certificates/v1"
 	"k8s.io/kubernetes/pkg/controller/certificates"
 	"k8s.io/kubernetes/pkg/features"
+	testingclock "k8s.io/utils/clock/testing"
 )

 func TestSigner(t *testing.T) {
-	fakeClock := clock.FakeClock{}
+	fakeClock := testingclock.FakeClock{}

 	s, err := newSigner("kubernetes.io/legacy-unknown", "./testdata/ca.crt", "./testdata/ca.key", nil, 1*time.Hour)
 	if err != nil {
@@ -35,7 +35,6 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
@@ -52,6 +51,7 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	hashutil "k8s.io/kubernetes/pkg/util/hash"
 	taintutils "k8s.io/kubernetes/pkg/util/taints"
+	"k8s.io/utils/clock"
 	"k8s.io/utils/integer"

 	"k8s.io/klog/v2"

@@ -31,7 +31,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
@@ -55,6 +54,7 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/securitycontext"
 	labelsutil "k8s.io/kubernetes/pkg/util/labels"
+	testingclock "k8s.io/utils/clock/testing"
 )

 var (
@@ -309,7 +309,7 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController,
 		informerFactory.Core().V1().Pods(),
 		informerFactory.Core().V1().Nodes(),
 		clientset,
-		flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, clock.NewFakeClock(time.Now())),
+		flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, testingclock.NewFakeClock(time.Now())),
 	)
 	if err != nil {
 		return nil, nil, nil, err
@@ -473,7 +473,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 		f.Core().V1().Pods(),
 		f.Core().V1().Nodes(),
 		client,
-		flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, clock.NewFakeClock(time.Now())),
+		flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, testingclock.NewFakeClock(time.Now())),
 	)
 	if err != nil {
 		t.Fatal(err)
@@ -3411,7 +3411,7 @@ func TestSurgePreservesOldReadyWithUnsatisfiedMinReady(t *testing.T) {
 	addNodes(manager.nodeStore, 0, 5, nil)

 	// the clock will be set 10s after the newest pod on node-1 went ready, which is not long enough to be available
-	manager.DaemonSetsController.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(50+10, 0))
+	manager.DaemonSetsController.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(50+10, 0))

 	// will be preserved because it has the newest hash
 	pod := newPod("node-1-", "node-1", simpleDaemonSetLabel, ds)
@@ -3456,7 +3456,7 @@ func TestSurgeDeletesOldReadyWithUnsatisfiedMinReady(t *testing.T) {
 	addNodes(manager.nodeStore, 0, 5, nil)

 	// the clock will be set 20s after the newest pod on node-1 went ready, which is not long enough to be available
-	manager.DaemonSetsController.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(50+20, 0))
+	manager.DaemonSetsController.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(50+20, 0))

 	// will be preserved because it has the newest hash
 	pod := newPod("node-1-", "node-1", simpleDaemonSetLabel, ds)

@@ -24,7 +24,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -32,6 +31,7 @@ import (
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/controller/daemon/util"
 	"k8s.io/kubernetes/pkg/features"
+	testingclock "k8s.io/utils/clock/testing"
 )

 func TestDaemonSetUpdatesPods(t *testing.T) {
@@ -205,12 +205,12 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
 	manager.dsStore.Update(ds)

 	// all old pods are unavailable so should be surged
-	manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(100, 0))
+	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(100, 0))
 	clearExpectations(t, manager, ds, podControl)
 	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)

 	// waiting for pods to go ready, old pods are deleted
-	manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(200, 0))
+	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(200, 0))
 	clearExpectations(t, manager, ds, podControl)
 	expectSyncDaemonSets(t, manager, ds, podControl, 0, 5, 0)

@@ -219,7 +219,7 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
 	ds.Spec.Template.Spec.Containers[0].Image = "foo3/bar3"
 	manager.dsStore.Update(ds)

-	manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(300, 0))
+	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(300, 0))
 	clearExpectations(t, manager, ds, podControl)
 	expectSyncDaemonSets(t, manager, ds, podControl, 3, 0, 0)

@@ -243,12 +243,12 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {

 	// the new pods should still be considered waiting to hit min readiness, so one pod should be created to replace
 	// the deleted old pod
-	manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(310, 0))
+	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(310, 0))
 	clearExpectations(t, manager, ds, podControl)
 	expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)

 	// the new pods are now considered available, so delete the old pods
-	manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(320, 0))
+	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(320, 0))
 	clearExpectations(t, manager, ds, podControl)
 	expectSyncDaemonSets(t, manager, ds, podControl, 1, 3, 0)

@@ -259,12 +259,12 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
 	})

 	// the new pods are now considered available, so delete the old pods
-	manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(340, 0))
+	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(340, 0))
 	clearExpectations(t, manager, ds, podControl)
 	expectSyncDaemonSets(t, manager, ds, podControl, 0, 2, 0)

 	// controller has completed upgrade
-	manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(350, 0))
+	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(350, 0))
 	clearExpectations(t, manager, ds, podControl)
 	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
 }

@@ -21,9 +21,9 @@ import (
 	"time"

 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"

 	"k8s.io/klog/v2"
+	// TODO: Switch to k8s.io/utils/clock once it supports AfterFunc()
+	"k8s.io/apimachinery/pkg/util/clock"
 )

 // WorkArgs keeps arguments that will be passed to the function executed by the worker.
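The TODO in the hunk above marks the one deliberate holdout in this batch: that worker schedules callbacks through the clock's AfterFunc, which the apimachinery package provides but k8s.io/utils/clock did not yet, so the old import stays until the new package catches up. A rough sketch of the dependency, under the assumption that apimachinery's clock.Clock exposes AfterFunc(time.Duration, func()) Timer; the worker type here is invented for illustration, not the real one:

package main

import (
	"fmt"
	"time"

	// Retained until k8s.io/utils/clock grows AfterFunc().
	"k8s.io/apimachinery/pkg/util/clock"
)

// timedWorker is an invented stand-in for the real worker type.
type timedWorker struct {
	timer clock.Timer
}

// startWorker fires f at fireAt, going through the injected clock so tests
// can substitute a fake; AfterFunc is the method the new package still lacked.
func startWorker(c clock.Clock, fireAt time.Time, f func()) *timedWorker {
	delay := fireAt.Sub(c.Now())
	if delay <= 0 {
		go f()
		return nil
	}
	return &timedWorker{timer: c.AfterFunc(delay, f)}
}

func main() {
	w := startWorker(clock.RealClock{}, time.Now().Add(10*time.Millisecond), func() { fmt.Println("fired") })
	defer w.timer.Stop()
	time.Sleep(50 * time.Millisecond) // give the callback time to fire
}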
@@ -25,7 +25,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/informers"
@@ -35,6 +34,7 @@ import (
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/testutil"
+	testingclock "k8s.io/utils/clock/testing"
 )

 func alwaysReady() bool { return true }
@@ -322,7 +322,7 @@ func TestGCOrphaned(t *testing.T) {
 		podInformer.Informer().GetStore().Add(pod)
 	}
 	// Overwrite queue
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	gcc.nodeQueue.ShutDown()
 	gcc.nodeQueue = workqueue.NewDelayingQueueWithCustomClock(fakeClock, "podgc_test_queue")

@@ -26,7 +26,6 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/util/clock"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -37,6 +36,7 @@ import (
 	"k8s.io/controller-manager/pkg/informerfactory"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/quota/v1/evaluator/core"
+	"k8s.io/utils/clock"
 )

 type eventType int

@@ -32,7 +32,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/apimachinery/pkg/watch"
@@ -45,6 +44,8 @@ import (
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	api "k8s.io/kubernetes/pkg/apis/core"
+	"k8s.io/utils/clock"
+	testingclock "k8s.io/utils/clock/testing"

 	jsonpatch "github.com/evanphx/json-patch"
 )
@@ -455,7 +456,7 @@ func NewFakeRecorder() *FakeRecorder {
 	return &FakeRecorder{
 		source: v1.EventSource{Component: "nodeControllerTest"},
 		Events: []*v1.Event{},
-		clock:  clock.NewFakeClock(time.Now()),
+		clock:  testingclock.NewFakeClock(time.Now()),
 	}
 }

@@ -27,7 +27,6 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/clock"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	batchinformers "k8s.io/client-go/informers/batch/v1"
@@ -42,6 +41,7 @@ import (
 	"k8s.io/kubernetes/pkg/controller"
 	jobutil "k8s.io/kubernetes/pkg/controller/job"
 	"k8s.io/kubernetes/pkg/controller/ttlafterfinished/metrics"
+	"k8s.io/utils/clock"
 )

 // Controller watches for changes of Jobs API objects. Triggered by Job creation

@@ -55,7 +55,6 @@ import (
 	storageapiv1alpha1 "k8s.io/api/storage/v1alpha1"
 	storageapiv1beta1 "k8s.io/api/storage/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/clock"
 	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -84,6 +83,7 @@ import (
 	"k8s.io/kubernetes/pkg/routes"
 	"k8s.io/kubernetes/pkg/serviceaccount"
 	nodeutil "k8s.io/kubernetes/pkg/util/node"
+	"k8s.io/utils/clock"

 	// RESTStorage installers
 	admissionregistrationrest "k8s.io/kubernetes/pkg/registry/admissionregistration/rest"

@@ -21,10 +21,10 @@ import (
 	"time"

 	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/status"
+	"k8s.io/utils/clock"
 )

 const (

@@ -23,8 +23,8 @@ import (
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/client-go/tools/record"
+	testingclock "k8s.io/utils/clock/testing"
 )

 // mockPodStatusProvider returns the status on the specified pod
@@ -45,7 +45,7 @@ func (m *mockPodStatusProvider) GetPodStatus(uid types.UID) (v1.PodStatus, bool)
 // TestActiveDeadlineHandler verifies the active deadline handler functions as expected.
 func TestActiveDeadlineHandler(t *testing.T) {
 	pods := newTestPods(4)
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	podStatusProvider := &mockPodStatusProvider{pods: pods}
 	fakeRecorder := &record.FakeRecorder{}
 	handler, err := newActiveDeadlineHandler(podStatusProvider, fakeRecorder, fakeClock)

@@ -29,9 +29,9 @@ import (

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/utils/clock"
 )

 // Manager interface provides methods for Kubelet to manage ConfigMap.

@@ -27,11 +27,11 @@ import (

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/clock"

 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/kubernetes/pkg/kubelet/util/manager"
+	"k8s.io/utils/clock"
 )

 func checkObject(t *testing.T, store manager.Store, ns, name string, shouldExist bool) {

@@ -25,7 +25,7 @@ import (
 	"sync"
 	"time"

-	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/utils/clock"
 )

 var (

@@ -26,7 +26,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

-	"k8s.io/apimachinery/pkg/util/clock"
+	testingclock "k8s.io/utils/clock/testing"
 )

 func TestInsert(t *testing.T) {
@@ -200,9 +200,9 @@ func TestGC(t *testing.T) {
 	assertCacheSize(t, c, 1)
 }

-func newTestCache() (*requestCache, *clock.FakeClock) {
+func newTestCache() (*requestCache, *testingclock.FakeClock) {
 	c := newRequestCache()
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	c.clock = fakeClock
 	return c, fakeClock
 }

@@ -30,7 +30,6 @@ import (
 	"github.com/golang/mock/gomock"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"k8s.io/apimachinery/pkg/util/clock"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
 	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
 	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
@@ -38,6 +37,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
 	nettest "k8s.io/kubernetes/pkg/kubelet/dockershim/network/testing"
 	"k8s.io/kubernetes/pkg/kubelet/util/cache"
+	testingclock "k8s.io/utils/clock/testing"
 )

 type mockCheckpointManager struct {
@@ -74,8 +74,8 @@ func newMockCheckpointManager() checkpointmanager.CheckpointManager {
 	return &mockCheckpointManager{checkpoint: make(map[string]*PodSandboxCheckpoint)}
 }

-func newTestDockerService() (*dockerService, *libdocker.FakeDockerClient, *clock.FakeClock) {
-	fakeClock := clock.NewFakeClock(time.Time{})
+func newTestDockerService() (*dockerService, *libdocker.FakeDockerClient, *testingclock.FakeClock) {
+	fakeClock := testingclock.NewFakeClock(time.Time{})
 	c := libdocker.NewFakeDockerClient().WithClock(fakeClock).WithVersion("1.11.2", "1.23").WithRandSource(rand.NewSource(0))
 	pm := network.NewPluginManager(&network.NoopNetworkPlugin{})
 	ckm := newMockCheckpointManager()
@@ -88,7 +88,7 @@ func newTestDockerService() (*dockerService, *libdocker.FakeDockerClient, *clock
 	}, c, fakeClock
 }

-func newTestDockerServiceWithVersionCache() (*dockerService, *libdocker.FakeDockerClient, *clock.FakeClock) {
+func newTestDockerServiceWithVersionCache() (*dockerService, *libdocker.FakeDockerClient, *testingclock.FakeClock) {
 	ds, c, fakeClock := newTestDockerService()
 	ds.versionCache = cache.NewObjectCache(
 		func() (interface{}, error) {

@@ -36,7 +36,7 @@ import (
 	dockerimagetypes "github.com/docker/docker/api/types/image"

 	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/utils/clock"
 )

 type CalledDetail struct {

@@ -26,7 +26,6 @@ import (

 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	"k8s.io/apimachinery/pkg/util/clock"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/record"
 	v1helper "k8s.io/component-helpers/scheduling/corev1"
@@ -40,6 +39,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/server/stats"
 	kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
+	"k8s.io/utils/clock"
 )

 const (
@@ -59,7 +59,7 @@ const (
 // managerImpl implements Manager
 type managerImpl struct {
 	// used to track time
-	clock clock.Clock
+	clock clock.WithTicker
 	// config is how the manager is configured
 	config Config
 	// the function to invoke to kill a pod
@@ -113,7 +113,7 @@ func NewManager(
 	containerGC ContainerGC,
 	recorder record.EventRecorder,
 	nodeRef *v1.ObjectReference,
-	clock clock.Clock,
+	clock clock.WithTicker,
 ) (Manager, lifecycle.PodAdmitHandler) {
 	manager := &managerImpl{
 		clock: clock,
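Not every hunk is a pure import swap: in the eviction manager above, the clock field and the NewManager parameter change from clock.Clock to clock.WithTicker. In k8s.io/utils/clock the ticker-creating method is split out of the base Clock interface, so components that call NewTicker — the eviction manager's monitoring loop, and the workqueue further down — must ask for the wider interface explicitly. A paraphrase of the relevant interface shapes (reduced for illustration, not a verbatim copy of the package):

package clocksketch

import "time"

// Paraphrased from k8s.io/utils/clock: the base Clock no longer carries
// NewTicker; WithTicker adds it back for callers that need tickers.
type PassiveClock interface {
	Now() time.Time
	Since(time.Time) time.Duration
}

type Clock interface {
	PassiveClock
	After(d time.Duration) <-chan time.Time
	NewTimer(d time.Duration) Timer
	Sleep(d time.Duration)
}

type WithTicker interface {
	Clock
	NewTicker(d time.Duration) Ticker
}

// Timer and Ticker are reduced here to the methods the sketch needs.
type Timer interface {
	C() <-chan time.Time
	Stop() bool
}

type Ticker interface {
	C() <-chan time.Time
	Stop()
}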
@@ -24,7 +24,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/record"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -35,6 +34,7 @@ import (
 	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
+	testingclock "k8s.io/utils/clock/testing"
 )

 const (
@@ -206,7 +206,7 @@ func TestMemoryPressure(t *testing.T) {
 		return pods
 	}

-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	podKiller := &mockPodKiller{}
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 	diskGC := &mockDiskGC{err: nil}
@@ -471,7 +471,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
 		return pods
 	}

-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	podKiller := &mockPodKiller{}
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 	diskGC := &mockDiskGC{err: nil}
@@ -668,7 +668,7 @@ func TestMinReclaim(t *testing.T) {
 		return pods
 	}

-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	podKiller := &mockPodKiller{}
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 	diskGC := &mockDiskGC{err: nil}
@@ -808,7 +808,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 		return pods
 	}

-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	podKiller := &mockPodKiller{}
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 	nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
@@ -1052,7 +1052,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
 		return pods
 	}

-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	podKiller := &mockPodKiller{}
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 	diskGC := &mockDiskGC{err: nil}
@@ -1259,7 +1259,7 @@ func TestStaticCriticalPodsAreNotEvicted(t *testing.T) {
 		return mirrorPod, true
 	}

-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	podKiller := &mockPodKiller{}
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 	diskGC := &mockDiskGC{err: nil}
@@ -1385,7 +1385,7 @@ func TestAllocatableMemoryPressure(t *testing.T) {
 		return pods
 	}

-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	podKiller := &mockPodKiller{}
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 	diskGC := &mockDiskGC{err: nil}
@@ -1529,7 +1529,7 @@ func TestUpdateMemcgThreshold(t *testing.T) {
 		return []*v1.Pod{}
 	}

-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	podKiller := &mockPodKiller{}
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 	diskGC := &mockDiskGC{err: nil}

@@ -24,12 +24,12 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/client-go/tools/record"
 	statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
 	"k8s.io/kubernetes/pkg/kubelet/container"
 	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
 	statstest "k8s.io/kubernetes/pkg/kubelet/server/stats/testing"
+	testingclock "k8s.io/utils/clock/testing"
 )

 var zero time.Time
@@ -468,7 +468,7 @@ func TestGarbageCollectImageNotOldEnough(t *testing.T) {
 		}},
 	}

-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	t.Log(fakeClock.Now())
 	_, err := manager.detectImages(fakeClock.Now())
 	require.NoError(t, err)

@@ -24,11 +24,11 @@ import (
 	"github.com/stretchr/testify/assert"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/flowcontrol"
 	. "k8s.io/kubernetes/pkg/kubelet/container"
 	ctest "k8s.io/kubernetes/pkg/kubelet/container/testing"
+	testingclock "k8s.io/utils/clock/testing"
 )

 type pullerExpects struct {
@@ -158,7 +158,7 @@ func pullerTestCases() []pullerTestCase {
 	}
 }

-func pullerTestEnv(c pullerTestCase, serialized bool) (puller ImageManager, fakeClock *clock.FakeClock, fakeRuntime *ctest.FakeRuntime, container *v1.Container) {
+func pullerTestEnv(c pullerTestCase, serialized bool) (puller ImageManager, fakeClock *testingclock.FakeClock, fakeRuntime *ctest.FakeRuntime, container *v1.Container) {
 	container = &v1.Container{
 		Name:  "container_name",
 		Image: c.containerImage,
@@ -166,7 +166,7 @@ func pullerTestEnv(c pullerTestCase, serialized bool) (puller ImageManager, fake
 	}

 	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
-	fakeClock = clock.NewFakeClock(time.Now())
+	fakeClock = testingclock.NewFakeClock(time.Now())
 	backOff.Clock = fakeClock

 	fakeRuntime = &ctest.FakeRuntime{}

@@ -45,7 +45,6 @@ import (
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -121,6 +120,7 @@ import (
 	"k8s.io/kubernetes/pkg/volume/util/hostutil"
 	"k8s.io/kubernetes/pkg/volume/util/subpath"
 	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
+	"k8s.io/utils/clock"
 )

 const (
@@ -1138,7 +1138,7 @@ type Kubelet struct {

 	// clock is an interface that provides time related functionality in a way that makes it
 	// easy to test the code.
-	clock clock.Clock
+	clock clock.WithTicker

 	// handlers called during the tryUpdateNodeStatus cycle
 	setNodeStatusFuncs []func(*v1.Node) error

@@ -37,7 +37,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -81,6 +80,8 @@ import (
 	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/hostutil"
 	"k8s.io/kubernetes/pkg/volume/util/subpath"
+	"k8s.io/utils/clock"
+	testingclock "k8s.io/utils/clock/testing"
 )

 func init() {
@@ -114,7 +115,7 @@ type TestKubelet struct {
 	fakeContainerManager *cm.FakeContainerManager
 	fakeKubeClient       *fake.Clientset
 	fakeMirrorClient     *podtest.FakeMirrorClient
-	fakeClock            *clock.FakeClock
+	fakeClock            *testingclock.FakeClock
 	mounter              mount.Interface
 	volumePlugin         *volumetest.FakeVolumePlugin
 }
@@ -291,7 +292,7 @@ func newTestKubeletWithImageList(
 	assert.NoError(t, err)
 	kubelet.containerGC = containerGC

-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	kubelet.backOff = flowcontrol.NewBackOff(time.Second, time.Minute)
 	kubelet.backOff.Clock = fakeClock
 	kubelet.resyncInterval = 10 * time.Second

@@ -30,11 +30,11 @@ import (
 	"k8s.io/klog/v2"

 	"k8s.io/apimachinery/pkg/api/resource"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/wait"
 	internalapi "k8s.io/cri-api/pkg/apis"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	"k8s.io/utils/clock"
 )

 const (

@@ -30,9 +30,9 @@ import (
 	"github.com/stretchr/testify/require"
 	"k8s.io/kubernetes/pkg/kubelet/container"

-	"k8s.io/apimachinery/pkg/util/clock"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
 	critest "k8s.io/cri-api/pkg/apis/testing"
+	testingclock "k8s.io/utils/clock/testing"
 )

 func TestGetAllLogs(t *testing.T) {
@@ -92,7 +92,7 @@ func TestRotateLogs(t *testing.T) {
 			MaxFiles: testMaxFiles,
 		},
 		osInterface: container.RealOS{},
-		clock:       clock.NewFakeClock(now),
+		clock:       testingclock.NewFakeClock(now),
 	}
 	testLogs := []string{
 		"test-log-1",
@@ -179,7 +179,7 @@ func TestClean(t *testing.T) {
 			MaxFiles: testMaxFiles,
 		},
 		osInterface: container.RealOS{},
-		clock:       clock.NewFakeClock(now),
+		clock:       testingclock.NewFakeClock(now),
 	}
 	testLogs := []string{
 		"test-log-1",
@@ -382,7 +382,7 @@ func TestRotateLatestLog(t *testing.T) {
 		runtimeService: f,
 		policy:         LogRotatePolicy{MaxFiles: test.maxFiles},
 		osInterface:    container.RealOS{},
-		clock:          clock.NewFakeClock(now),
+		clock:          testingclock.NewFakeClock(now),
 	}
 	if test.runtimeError != nil {
 		f.InjectError("ReopenContainerLog", test.runtimeError)

@@ -26,7 +26,6 @@ import (
 	"time"

 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/util/clock"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/klog/v2"
@@ -36,6 +35,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/nodeshutdown/systemd"
 	kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
+	"k8s.io/utils/clock"
 )

 const (

@@ -30,13 +30,13 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/record"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/apis/scheduling"
 	pkgfeatures "k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/nodeshutdown/systemd"
+	testingclock "k8s.io/utils/clock/testing"
 )

 type fakeDbus struct {
@@ -235,7 +235,7 @@ func TestManager(t *testing.T) {
 			nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}

 			manager, _ := NewManager(fakeRecorder, nodeRef, activePodsFunc, killPodsFunc, func() {}, tc.shutdownGracePeriodRequested, tc.shutdownGracePeriodCriticalPods)
-			manager.clock = clock.NewFakeClock(time.Now())
+			manager.clock = testingclock.NewFakeClock(time.Now())

 			err := manager.Start()
 			if tc.expectedError != nil {
@@ -312,7 +312,7 @@ func TestFeatureEnabled(t *testing.T) {
 			nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}

 			manager, _ := NewManager(fakeRecorder, nodeRef, activePodsFunc, killPodsFunc, func() {}, tc.shutdownGracePeriodRequested, 0 /*shutdownGracePeriodCriticalPods*/)
-			manager.clock = clock.NewFakeClock(time.Now())
+			manager.clock = testingclock.NewFakeClock(time.Now())

 			assert.Equal(t, tc.expectEnabled, manager.isFeatureEnabled())
 		})

@@ -22,13 +22,13 @@ import (
 	"time"

 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
 	"k8s.io/klog/v2"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
+	"k8s.io/utils/clock"
 )

 // GenericPLEG is an extremely simple generic PLEG that relies solely on

@@ -28,12 +28,13 @@ import (
 	"github.com/stretchr/testify/assert"

 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/diff"
 	"k8s.io/component-base/metrics/testutil"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
+	"k8s.io/utils/clock"
+	testingclock "k8s.io/utils/clock/testing"
 )

 const (
@@ -45,7 +46,7 @@ const (
 type TestGenericPLEG struct {
 	pleg    *GenericPLEG
 	runtime *containertest.FakeRuntime
	clock   *testingclock.FakeClock
 }

 func newTestGenericPLEG() *TestGenericPLEG {
@@ -54,7 +55,7 @@ func newTestGenericPLEG() *TestGenericPLEG {

 func newTestGenericPLEGWithChannelSize(eventChannelCap int) *TestGenericPLEG {
 	fakeRuntime := &containertest.FakeRuntime{}
-	clock := clock.NewFakeClock(time.Time{})
+	clock := testingclock.NewFakeClock(time.Time{})
 	// The channel capacity should be large enough to hold all events in a
 	// single test.
 	pleg := &GenericPLEG{

@@ -27,13 +27,13 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/tools/record"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/kubelet/util/queue"
+	"k8s.io/utils/clock"
 )

 // fakePodWorkers runs sync pod function in serial, so we can have

@@ -22,7 +22,6 @@ import (

 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/component-base/metrics"
@@ -30,6 +29,7 @@ import (
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/prober/results"
 	"k8s.io/kubernetes/pkg/kubelet/status"
+	"k8s.io/utils/clock"
 )

 // ProberResults stores the cumulative number of a probe by result as prometheus metrics.

@@ -28,7 +28,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/tools/record"
 	utiltesting "k8s.io/client-go/util/testing"
@@ -48,6 +47,7 @@ import (
 	"k8s.io/kubernetes/pkg/volume"
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
 	"k8s.io/kubernetes/pkg/volume/util/hostutil"
+	"k8s.io/utils/clock"
 )

 func TestRunOnce(t *testing.T) {

@@ -29,9 +29,9 @@ import (

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/utils/clock"
 )

 // Manager manages Kubernetes secrets. This includes retrieving

@@ -27,11 +27,11 @@ import (

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/clock"

 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/kubernetes/pkg/kubelet/util/manager"
+	"k8s.io/utils/clock"
 )

 func checkObject(t *testing.T, store manager.Store, ns, name string, shouldExist bool) {

@@ -30,10 +30,10 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
+	"k8s.io/utils/clock"
 )

 const (

@@ -24,12 +24,12 @@ import (
 	authenticationv1 "k8s.io/api/authentication/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
+	testingclock "k8s.io/utils/clock/testing"
 )

 func TestTokenCachingAndExpiration(t *testing.T) {
 	type suite struct {
-		clock *clock.FakeClock
+		clock *testingclock.FakeClock
 		tg    *fakeTokenGetter
 		mgr   *Manager
 	}
@@ -87,7 +87,7 @@ func TestTokenCachingAndExpiration(t *testing.T) {

 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
-			clock := clock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
+			clock := testingclock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
 			expSecs := int64(c.exp.Seconds())
 			s := &suite{
 				clock: clock,
@@ -165,7 +165,7 @@ func TestRequiresRefresh(t *testing.T) {

 	for i, c := range cases {
 		t.Run(fmt.Sprint(i), func(t *testing.T) {
-			clock := clock.NewFakeClock(c.now)
+			clock := testingclock.NewFakeClock(c.now)
 			secs := int64(c.exp.Sub(start).Seconds())
 			tr := &authenticationv1.TokenRequest{
 				Spec: authenticationv1.TokenRequestSpec{
@@ -335,7 +335,7 @@ func TestDeleteServiceAccountToken(t *testing.T) {
 		},
 	}
 	testMgr := NewManager(nil)
-	testMgr.clock = clock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
+	testMgr.clock = testingclock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))

 	successGetToken := func(_, _ string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
 		tr.Status = authenticationv1.TokenRequestStatus{
@@ -404,7 +404,7 @@ func TestCleanup(t *testing.T) {
 	}
 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
-			clock := clock.NewFakeClock(time.Time{}.Add(24 * time.Hour))
+			clock := testingclock.NewFakeClock(time.Time{}.Add(24 * time.Hour))
 			mgr := NewManager(nil)
 			mgr.clock = clock

@@ -568,7 +568,7 @@ func TestKeyFunc(t *testing.T) {
 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
 			mgr := NewManager(nil)
-			mgr.clock = clock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
+			mgr.clock = testingclock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
 			for _, tru := range c.trus {
 				mgr.set(getKeyFunc(tru), &authenticationv1.TokenRequest{
 					Status: authenticationv1.TokenRequestStatus{
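The test rewrites above, from the eviction manager through the token manager, all follow one recipe: construct the unit under test with a testingclock.FakeClock, then move time with Step or SetTime instead of sleeping. A compact, self-contained sketch of that recipe; the ttlCache type is invented for illustration and is not part of this PR:

package ttlcache

import (
	"testing"
	"time"

	"k8s.io/utils/clock"
	testingclock "k8s.io/utils/clock/testing"
)

// ttlCache is a hypothetical type used only to demonstrate the pattern.
type ttlCache struct {
	clock   clock.PassiveClock
	ttl     time.Duration
	setAt   map[string]time.Time
	entries map[string]string
}

func newTTLCache(c clock.PassiveClock, ttl time.Duration) *ttlCache {
	return &ttlCache{clock: c, ttl: ttl, setAt: map[string]time.Time{}, entries: map[string]string{}}
}

func (c *ttlCache) Set(k, v string) { c.entries[k], c.setAt[k] = v, c.clock.Now() }

func (c *ttlCache) Get(k string) (string, bool) {
	v, ok := c.entries[k]
	if !ok || c.clock.Now().Sub(c.setAt[k]) > c.ttl {
		return "", false
	}
	return v, true
}

func TestTTLExpiry(t *testing.T) {
	fakeClock := testingclock.NewFakeClock(time.Now())
	cache := newTTLCache(fakeClock, time.Minute)

	cache.Set("token", "abc")
	if _, ok := cache.Get("token"); !ok {
		t.Fatal("expected fresh entry to be served")
	}

	fakeClock.Step(2 * time.Minute) // advance time; no real sleeping needed
	if _, ok := cache.Get("token"); ok {
		t.Fatal("expected entry to expire after TTL")
	}
}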
@@ -29,8 +29,8 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/utils/clock"
 )

 // GetObjectTTLFunc defines a function to get value of TTL.

@@ -30,13 +30,14 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"

 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+	"k8s.io/utils/clock"
+	testingclock "k8s.io/utils/clock/testing"

 	"github.com/stretchr/testify/assert"
 )
@@ -149,7 +150,7 @@ func TestSecretStoreDeletingSecret(t *testing.T) {

 func TestSecretStoreGetAlwaysRefresh(t *testing.T) {
 	fakeClient := &fake.Clientset{}
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	store := newSecretStore(fakeClient, fakeClock, noObjectTTL, 0)

 	for i := 0; i < 10; i++ {
@@ -176,7 +177,7 @@ func TestSecretStoreGetAlwaysRefresh(t *testing.T) {

 func TestSecretStoreGetNeverRefresh(t *testing.T) {
 	fakeClient := &fake.Clientset{}
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	store := newSecretStore(fakeClient, fakeClock, noObjectTTL, time.Minute)

 	for i := 0; i < 10; i++ {
@@ -206,7 +207,7 @@ func TestCustomTTL(t *testing.T) {
 	}

 	fakeClient := &fake.Clientset{}
-	fakeClock := clock.NewFakeClock(time.Time{})
+	fakeClock := testingclock.NewFakeClock(time.Time{})
 	store := newSecretStore(fakeClient, fakeClock, customTTL, time.Minute)

 	store.AddReference("ns", "name")
@@ -377,7 +378,7 @@ func podWithSecrets(ns, podName string, toAttach secretsToAttach) *v1.Pod {

 func TestCacheInvalidation(t *testing.T) {
 	fakeClient := &fake.Clientset{}
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	store := newSecretStore(fakeClient, fakeClock, noObjectTTL, time.Minute)
 	manager := newCacheBasedSecretManager(store)

@@ -432,7 +433,7 @@ func TestCacheInvalidation(t *testing.T) {

 func TestRegisterIdempotence(t *testing.T) {
 	fakeClient := &fake.Clientset{}
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	store := newSecretStore(fakeClient, fakeClock, noObjectTTL, time.Minute)
 	manager := newCacheBasedSecretManager(store)

@@ -467,7 +468,7 @@ func TestRegisterIdempotence(t *testing.T) {

 func TestCacheRefcounts(t *testing.T) {
 	fakeClient := &fake.Clientset{}
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	store := newSecretStore(fakeClient, fakeClock, noObjectTTL, time.Minute)
 	manager := newCacheBasedSecretManager(store)

@@ -31,10 +31,10 @@ import (
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/utils/clock"
 )

 type listObjectFunc func(string, metav1.ListOptions) (runtime.Object, error)

@@ -28,7 +28,6 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"

@@ -38,6 +37,9 @@ import (

 	corev1 "k8s.io/kubernetes/pkg/apis/core/v1"

+	"k8s.io/utils/clock"
+	testingclock "k8s.io/utils/clock/testing"
+
 	"github.com/stretchr/testify/assert"
 )

@@ -88,7 +90,7 @@ func TestSecretCache(t *testing.T) {
 	fakeWatch := watch.NewFake()
 	fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil))

-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	store := newSecretCache(fakeClient, fakeClock, time.Minute)

 	store.AddReference("ns", "name")
@@ -158,7 +160,7 @@ func TestSecretCacheMultipleRegistrations(t *testing.T) {
 	fakeWatch := watch.NewFake()
 	fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil))

-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	store := newSecretCache(fakeClient, fakeClock, time.Minute)

 	store.AddReference("ns", "name")
@@ -264,7 +266,7 @@ func TestImmutableSecretStopsTheReflector(t *testing.T) {
 	fakeWatch := watch.NewFake()
 	fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil))

-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	store := newSecretCache(fakeClient, fakeClock, time.Minute)

 	key := objectKey{namespace: "ns", name: "name"}
@@ -351,7 +353,7 @@ func TestMaxIdleTimeStopsTheReflector(t *testing.T) {
 	fakeClient.AddReactor("list", "secrets", listReactor)
 	fakeWatch := watch.NewFake()
 	fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil))
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	store := newSecretCache(fakeClient, fakeClock, time.Minute)

 	key := objectKey{namespace: "ns", name: "name"}
@@ -415,7 +417,7 @@ func TestReflectorNotStopedOnSlowInitialization(t *testing.T) {
 		},
 	}

-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())

 	fakeClient := &fake.Clientset{}
 	listReactor := func(a core.Action) (bool, runtime.Object, error) {

@@ -21,7 +21,7 @@ import (
 	"time"

 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/utils/clock"
 )

 // WorkQueue allows queuing items with a timestamp. An item is

@@ -21,12 +21,12 @@ import (
 	"time"

 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
+	testingclock "k8s.io/utils/clock/testing"
 )

-func newTestBasicWorkQueue() (*basicWorkQueue, *clock.FakeClock) {
-	fakeClock := clock.NewFakeClock(time.Now())
+func newTestBasicWorkQueue() (*basicWorkQueue, *testingclock.FakeClock) {
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	wq := &basicWorkQueue{
 		clock: fakeClock,
 		queue: make(map[types.UID]time.Time),

@@ -26,7 +26,6 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apiserver/pkg/admission"
 	quota "k8s.io/apiserver/pkg/quota/v1"
@@ -37,6 +36,7 @@ import (
 	"k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
 	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/utils/clock"
 )

 // the name used for object count quota

@@ -26,7 +26,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/util/clock"
 	quota "k8s.io/apiserver/pkg/quota/v1"
 	"k8s.io/apiserver/pkg/quota/v1/generic"
 	"k8s.io/apiserver/pkg/util/feature"
@@ -34,6 +33,8 @@ import (
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/util/node"
+	"k8s.io/utils/clock"
+	testingclock "k8s.io/utils/clock/testing"
 )

 func TestPodConstraintsFunc(t *testing.T) {
@@ -84,7 +85,7 @@ func TestPodConstraintsFunc(t *testing.T) {
 }

 func TestPodEvaluatorUsage(t *testing.T) {
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	evaluator := NewPodEvaluator(nil, fakeClock)

 	// fields use to simulate a pod undergoing termination
@@ -513,7 +514,7 @@ func TestPodEvaluatorUsage(t *testing.T) {
 }

 func TestPodEvaluatorMatchingScopes(t *testing.T) {
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	evaluator := NewPodEvaluator(nil, fakeClock)
 	activeDeadlineSeconds := int64(30)
 	testCases := map[string]struct {

@@ -19,9 +19,9 @@ package core
 import (
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/util/clock"
 	quota "k8s.io/apiserver/pkg/quota/v1"
 	"k8s.io/apiserver/pkg/quota/v1/generic"
+	"k8s.io/utils/clock"
 )

 // legacyObjectCountAliases are what we used to do simple object counting quota with mapped to alias

@@ -54,6 +54,7 @@ import (
 	restclientwatch "k8s.io/client-go/rest/watch"
 	"k8s.io/client-go/util/flowcontrol"
 	utiltesting "k8s.io/client-go/util/testing"
+	testingclock "k8s.io/utils/clock/testing"
 )

 func TestNewRequestSetsAccept(t *testing.T) {
@@ -1563,7 +1564,7 @@ func TestBackoffLifecycle(t *testing.T) {
 	// which are used in the server implementation returning StatusOK above.
 	seconds := []int{0, 1, 2, 4, 8, 0, 1, 2, 4, 0}
 	request := c.Verb("POST").Prefix("backofftest").Suffix("abc")
-	clock := clock.FakeClock{}
+	clock := testingclock.FakeClock{}
 	request.backoff = &URLBackoff{
 		// Use a fake backoff here to avoid flakes and speed the test up.
 		Backoff: flowcontrol.NewFakeBackOff(
@@ -20,7 +20,8 @@ import (
 	"sync"
 	"time"

-	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/utils/clock"
+	testingclock "k8s.io/utils/clock/testing"
 	"k8s.io/utils/integer"
 )

@@ -37,7 +38,7 @@ type Backoff struct {
 	perItemBackoff map[string]*backoffEntry
 }

-func NewFakeBackOff(initial, max time.Duration, tc *clock.FakeClock) *Backoff {
+func NewFakeBackOff(initial, max time.Duration, tc *testingclock.FakeClock) *Backoff {
 	return &Backoff{
 		perItemBackoff: map[string]*backoffEntry{},
 		Clock:          tc,

@@ -20,12 +20,12 @@ import (
 	"testing"
 	"time"

-	"k8s.io/apimachinery/pkg/util/clock"
+	testingclock "k8s.io/utils/clock/testing"
 )

 func TestSlowBackoff(t *testing.T) {
 	id := "_idSlow"
-	tc := clock.NewFakeClock(time.Now())
+	tc := testingclock.NewFakeClock(time.Now())
 	step := time.Second
 	maxDuration := 50 * step

@@ -51,7 +51,7 @@ func TestSlowBackoff(t *testing.T) {

 func TestBackoffReset(t *testing.T) {
 	id := "_idReset"
-	tc := clock.NewFakeClock(time.Now())
+	tc := testingclock.NewFakeClock(time.Now())
 	step := time.Second
 	maxDuration := step * 5
 	b := NewFakeBackOff(step, maxDuration, tc)
@@ -77,7 +77,7 @@ func TestBackoffReset(t *testing.T) {

 func TestBackoffHighWaterMark(t *testing.T) {
 	id := "_idHiWaterMark"
-	tc := clock.NewFakeClock(time.Now())
+	tc := testingclock.NewFakeClock(time.Now())
 	step := time.Second
 	maxDuration := 5 * step
 	b := NewFakeBackOff(step, maxDuration, tc)
@@ -99,7 +99,7 @@ func TestBackoffHighWaterMark(t *testing.T) {

 func TestBackoffGC(t *testing.T) {
 	id := "_idGC"
-	tc := clock.NewFakeClock(time.Now())
+	tc := testingclock.NewFakeClock(time.Now())
 	step := time.Second
 	maxDuration := 5 * step

@@ -127,7 +127,7 @@ func TestBackoffGC(t *testing.T) {

 func TestIsInBackOffSinceUpdate(t *testing.T) {
 	id := "_idIsInBackOffSinceUpdate"
-	tc := clock.NewFakeClock(time.Now())
+	tc := testingclock.NewFakeClock(time.Now())
 	step := time.Second
 	maxDuration := 10 * step
 	b := NewFakeBackOff(step, maxDuration, tc)
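With NewFakeBackOff now taking a *testingclock.FakeClock, exponential backoff can be exercised without real waiting. A small usage sketch in the spirit of the tests above (the "demo" id is arbitrary):

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
	testingclock "k8s.io/utils/clock/testing"
)

func main() {
	tc := testingclock.NewFakeClock(time.Now())
	b := flowcontrol.NewFakeBackOff(time.Second, 8*time.Second, tc)

	id := "demo"
	for i := 0; i < 4; i++ {
		b.Next(id, tc.Now())   // record a failure for this id
		fmt.Println(b.Get(id)) // 1s, 2s, 4s, 8s (doubling, capped at max)
		tc.Step(b.Get(id))     // advance the fake clock past the wait
	}
}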
@@ -21,8 +21,8 @@ import (
 	"sync"
 	"time"

-	"k8s.io/apimachinery/pkg/util/clock"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/utils/clock"
 )

 // DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
@@ -51,11 +51,11 @@ func NewNamedDelayingQueue(name string) DelayingInterface {

 // NewDelayingQueueWithCustomClock constructs a new named workqueue
 // with ability to inject real or fake clock for testing purposes
-func NewDelayingQueueWithCustomClock(clock clock.Clock, name string) DelayingInterface {
+func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) DelayingInterface {
 	return newDelayingQueue(clock, NewNamed(name), name)
 }

-func newDelayingQueue(clock clock.Clock, q Interface, name string) *delayingType {
+func newDelayingQueue(clock clock.WithTicker, q Interface, name string) *delayingType {
 	ret := &delayingType{
 		Interface: q,
 		clock:     clock,

@@ -23,12 +23,12 @@ import (
 	"testing"
 	"time"

-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/wait"
+	testingclock "k8s.io/utils/clock/testing"
 )

 func TestSimpleQueue(t *testing.T) {
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	q := NewDelayingQueueWithCustomClock(fakeClock, "")

 	first := "foo"
@@ -70,7 +70,7 @@ func TestSimpleQueue(t *testing.T) {
 }

 func TestDeduping(t *testing.T) {
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	q := NewDelayingQueueWithCustomClock(fakeClock, "")

 	first := "foo"
@@ -129,7 +129,7 @@ func TestDeduping(t *testing.T) {
 }

 func TestAddTwoFireEarly(t *testing.T) {
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	q := NewDelayingQueueWithCustomClock(fakeClock, "")

 	first := "foo"
@@ -178,7 +178,7 @@ func TestAddTwoFireEarly(t *testing.T) {
 }

 func TestCopyShifting(t *testing.T) {
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	q := NewDelayingQueueWithCustomClock(fakeClock, "")

 	first := "foo"
@@ -216,7 +216,7 @@ func TestCopyShifting(t *testing.T) {
 }

 func BenchmarkDelayingQueue_AddAfter(b *testing.B) {
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	q := NewDelayingQueueWithCustomClock(fakeClock, "")

 	// Add items
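The custom-clock constructor these tests exercise is also the natural seam for deterministic queue tests elsewhere. A sketch; note that delivery after Step is asynchronous (a background goroutine moves ready items over), which is why the real tests poll rather than assert immediate readiness:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
	testingclock "k8s.io/utils/clock/testing"
)

func main() {
	fakeClock := testingclock.NewFakeClock(time.Now())
	q := workqueue.NewDelayingQueueWithCustomClock(fakeClock, "demo")

	q.AddAfter("item", 50*time.Millisecond)
	fmt.Println("len before step:", q.Len()) // 0: still waiting on the fake clock

	fakeClock.Step(100 * time.Millisecond) // fire the pending timer

	item, shutdown := q.Get() // blocks until the waiting goroutine hands the item over
	fmt.Println(item, shutdown)
	q.Done(item)
	q.ShutDown()
}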
@@ -20,7 +20,7 @@ import (
 	"sync"
 	"time"

-	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/utils/clock"
 )

 // This file provides abstractions for setting the provider (e.g., prometheus)

@@ -21,7 +21,7 @@ import (
 	"testing"
 	"time"

-	"k8s.io/apimachinery/pkg/util/clock"
+	testingclock "k8s.io/utils/clock/testing"
 )

 type testMetrics struct {
@@ -40,7 +40,7 @@ func TestMetricShutdown(t *testing.T) {
 	m := &testMetrics{
 		updateCalled: ch,
 	}
-	c := clock.NewFakeClock(time.Now())
+	c := testingclock.NewFakeClock(time.Now())
 	q := newQueue(c, m, time.Millisecond)
 	for !c.HasWaiters() {
 		// Wait for the go routine to call NewTicker()
@@ -170,7 +170,7 @@ func (m *testMetricsProvider) NewRetriesMetric(name string) CounterMetric {
 func TestMetrics(t *testing.T) {
 	mp := testMetricsProvider{}
 	t0 := time.Unix(0, 0)
-	c := clock.NewFakeClock(t0)
+	c := testingclock.NewFakeClock(t0)
 	mf := queueMetricsFactory{metricsProvider: &mp}
 	m := mf.newQueueMetrics("test", c)
 	q := newQueue(c, m, time.Millisecond)

@@ -20,7 +20,7 @@ import (
 	"sync"
 	"time"

-	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/utils/clock"
 )

 type Interface interface {
@@ -47,7 +47,7 @@ func NewNamed(name string) *Type {
 	)
 }

-func newQueue(c clock.Clock, metrics queueMetrics, updatePeriod time.Duration) *Type {
+func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Duration) *Type {
 	t := &Type{
 		clock:   c,
 		dirty:   set{},
@@ -92,7 +92,7 @@ type Type struct {
 	metrics queueMetrics

 	unfinishedWorkUpdatePeriod time.Duration
-	clock                      clock.Clock
+	clock                      clock.WithTicker
 }

 type empty struct{}

@@ -20,13 +20,13 @@ import (
 	"testing"
 	"time"

-	"k8s.io/apimachinery/pkg/util/clock"
+	testingclock "k8s.io/utils/clock/testing"
 )

 func TestRateLimitingQueue(t *testing.T) {
 	limiter := NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second)
 	queue := NewRateLimitingQueue(limiter).(*rateLimitingType)
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	delayingQueue := &delayingType{
 		Interface: New(),
 		clock:     fakeClock,

@@ -24,10 +24,10 @@ import (
 	coordinationv1 "k8s.io/api/coordination/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	coordclientset "k8s.io/client-go/kubernetes/typed/coordination/v1"
+	"k8s.io/utils/clock"
 	"k8s.io/utils/pointer"

 	"k8s.io/klog/v2"

@@ -31,18 +31,18 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/diff"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
 	clienttesting "k8s.io/client-go/testing"
+	testingclock "k8s.io/utils/clock/testing"
 	"k8s.io/utils/pointer"

 	"k8s.io/klog/v2"
 )

 func TestNewNodeLease(t *testing.T) {
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	node := &corev1.Node{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -276,7 +276,7 @@ func TestRetryUpdateNodeLease(t *testing.T) {
 		cl.PrependReactor("get", "leases", tc.getReactor)
 	}
 	c := &controller{
-		clock:          clock.NewFakeClock(time.Now()),
+		clock:          testingclock.NewFakeClock(time.Now()),
 		client:         cl,
 		leaseClient:    cl.CoordinationV1().Leases(corev1.NamespaceNodeLease),
 		holderIdentity: node.Name,
@@ -414,7 +414,7 @@ func TestUpdateUsingLatestLease(t *testing.T) {
 		cl.PrependReactor("create", "leases", tc.createReactor)
 	}
 	c := &controller{
-		clock:          clock.NewFakeClock(time.Now()),
+		clock:          testingclock.NewFakeClock(time.Now()),
 		client:         cl,
 		leaseClient:    cl.CoordinationV1().Leases(corev1.NamespaceNodeLease),
 		holderIdentity: node.Name,

@@ -27,13 +27,13 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/kubernetes"
 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
 	"k8s.io/kubernetes/pkg/kubelet/util/manager"
 	"k8s.io/kubernetes/test/integration/framework"
+	testingclock "k8s.io/utils/clock/testing"
 )

 func TestWatchBasedManager(t *testing.T) {
@@ -62,7 +62,7 @@ func TestWatchBasedManager(t *testing.T) {
 	// We want all watches to be up and running to stress test it.
 	// So don't treat any secret as immutable here.
 	isImmutable := func(_ runtime.Object) bool { return false }
-	fakeClock := clock.NewFakeClock(time.Now())
+	fakeClock := testingclock.NewFakeClock(time.Now())
 	store := manager.NewObjectCache(listObj, watchObj, newObj, isImmutable, schema.GroupResource{Group: "v1", Resource: "secrets"}, fakeClock, time.Minute)

 	// create 1000 secrets in parallel