Migrate to k8s.io/utils/clock in pkg/controller
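This change swaps the deprecated fake clock from `k8s.io/apimachinery/pkg/util/clock` for the one in `k8s.io/utils/clock/testing` (imported as `testingclock`) in the DaemonSet controller tests. Only the import blocks and the `NewFakeClock` call sites change; the test logic is untouched. The sketch below shows the new construction pattern in isolation; it is not taken from the diff, and it assumes a client-go version (as after this migration) whose `flowcontrol.NewFakeBackOff` accepts the `k8s.io/utils` fake clock. A second sketch after the diff shows how the tests then move that clock forward.

```go
package main

import (
	"time"

	"k8s.io/client-go/util/flowcontrol"
	// Replaces the removed "k8s.io/apimachinery/pkg/util/clock" import.
	testingclock "k8s.io/utils/clock/testing"
)

func main() {
	// A fake clock pinned to the current wall time; the test controls it explicitly.
	fakeClock := testingclock.NewFakeClock(time.Now())

	// Same shape as the constructor argument in the diff: a short, deterministic
	// backoff (50ms initial, 500ms cap) driven by the fake clock.
	backoff := flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, fakeClock)
	_ = backoff
}
```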
@@ -31,7 +31,6 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
-    "k8s.io/apimachinery/pkg/util/clock"
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/uuid"
@@ -55,6 +54,7 @@ import (
     "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/securitycontext"
     labelsutil "k8s.io/kubernetes/pkg/util/labels"
+    testingclock "k8s.io/utils/clock/testing"
 )

 var (
@@ -309,7 +309,7 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController,
         informerFactory.Core().V1().Pods(),
         informerFactory.Core().V1().Nodes(),
         clientset,
-        flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, clock.NewFakeClock(time.Now())),
+        flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, testingclock.NewFakeClock(time.Now())),
     )
     if err != nil {
         return nil, nil, nil, err
@@ -473,7 +473,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
         f.Core().V1().Pods(),
         f.Core().V1().Nodes(),
         client,
-        flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, clock.NewFakeClock(time.Now())),
+        flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, testingclock.NewFakeClock(time.Now())),
     )
     if err != nil {
         t.Fatal(err)
@@ -3411,7 +3411,7 @@ func TestSurgePreservesOldReadyWithUnsatisfiedMinReady(t *testing.T) {
     addNodes(manager.nodeStore, 0, 5, nil)

     // the clock will be set 10s after the newest pod on node-1 went ready, which is not long enough to be available
-    manager.DaemonSetsController.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(50+10, 0))
+    manager.DaemonSetsController.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(50+10, 0))

     // will be preserved because it has the newest hash
     pod := newPod("node-1-", "node-1", simpleDaemonSetLabel, ds)
@@ -3456,7 +3456,7 @@ func TestSurgeDeletesOldReadyWithUnsatisfiedMinReady(t *testing.T) {
     addNodes(manager.nodeStore, 0, 5, nil)

     // the clock will be set 20s after the newest pod on node-1 went ready, which is not long enough to be available
-    manager.DaemonSetsController.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(50+20, 0))
+    manager.DaemonSetsController.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(50+20, 0))

     // will be preserved because it has the newest hash
     pod := newPod("node-1-", "node-1", simpleDaemonSetLabel, ds)

@@ -24,7 +24,6 @@ import (
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
-    "k8s.io/apimachinery/pkg/util/clock"
     "k8s.io/apimachinery/pkg/util/intstr"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -32,6 +31,7 @@ import (
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
     "k8s.io/kubernetes/pkg/controller/daemon/util"
     "k8s.io/kubernetes/pkg/features"
+    testingclock "k8s.io/utils/clock/testing"
 )

 func TestDaemonSetUpdatesPods(t *testing.T) {
@@ -205,12 +205,12 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
     manager.dsStore.Update(ds)

     // all old pods are unavailable so should be surged
-    manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(100, 0))
+    manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(100, 0))
     clearExpectations(t, manager, ds, podControl)
     expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)

     // waiting for pods to go ready, old pods are deleted
-    manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(200, 0))
+    manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(200, 0))
     clearExpectations(t, manager, ds, podControl)
     expectSyncDaemonSets(t, manager, ds, podControl, 0, 5, 0)

@@ -219,7 +219,7 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
     ds.Spec.Template.Spec.Containers[0].Image = "foo3/bar3"
     manager.dsStore.Update(ds)

-    manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(300, 0))
+    manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(300, 0))
     clearExpectations(t, manager, ds, podControl)
     expectSyncDaemonSets(t, manager, ds, podControl, 3, 0, 0)

@@ -243,12 +243,12 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {

     // the new pods should still be considered waiting to hit min readiness, so one pod should be created to replace
     // the deleted old pod
-    manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(310, 0))
+    manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(310, 0))
     clearExpectations(t, manager, ds, podControl)
     expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)

     // the new pods are now considered available, so delete the old pods
-    manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(320, 0))
+    manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(320, 0))
     clearExpectations(t, manager, ds, podControl)
     expectSyncDaemonSets(t, manager, ds, podControl, 1, 3, 0)

@@ -259,12 +259,12 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
     })

     // the new pods are now considered available, so delete the old pods
-    manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(340, 0))
+    manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(340, 0))
     clearExpectations(t, manager, ds, podControl)
     expectSyncDaemonSets(t, manager, ds, podControl, 0, 2, 0)

     // controller has completed upgrade
-    manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(350, 0))
+    manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(350, 0))
     clearExpectations(t, manager, ds, podControl)
     expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
 }
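The `failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(..., 0))` assignments above pin the backoff's notion of "now" to fixed instants so the availability checks become deterministic. Below is a minimal standalone sketch of that technique, assuming the client-go `flowcontrol.Backoff` API (`Next`, `IsInBackOffSinceUpdate`) and a hypothetical backoff key; the tests get the same effect by assigning a brand-new fake clock rather than calling `SetTime` on an existing one.

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
	testingclock "k8s.io/utils/clock/testing"
)

func main() {
	// Start the fake clock at t=100s, mirroring the time.Unix(100, 0) style used in the tests.
	fc := testingclock.NewFakeClock(time.Unix(100, 0))
	backoff := flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, fc)

	const key = "node-1" // hypothetical key; the controller keys its backoff per pod/node

	// Record a failure: the key now has an initial 50ms backoff window.
	backoff.Next(key, fc.Now())
	fmt.Println(backoff.IsInBackOffSinceUpdate(key, fc.Now())) // true: still inside the window

	// Jump the clock far past the window, as the tests do by swapping in a later fake clock.
	fc.SetTime(time.Unix(200, 0))
	fmt.Println(backoff.IsInBackOffSinceUpdate(key, fc.Now())) // false: the backoff has expired
}
```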