Merge pull request #127956 from carlory/KEP-3902-test
node-lifecycle-controller: improve processPod test-coverage
This commit is contained in: commit 1ac23e24a0
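Summary of the change, as visible in the diff below: the test-only wrapper around the node-lifecycle controller gains a pod informer and a syncPodStore helper so pods can be seeded into the informer cache directly; the tests migrate from klog's ktesting test context to the k8s.io/kubernetes/test/utils/ktesting context (tCtx); several previously ignored error returns (fakeNodeHandler.Update, doNoScheduleTaintingPass, reconcileNodeLabels, controllerutil.DeletePods) are now checked; and new tests are added: TestProcessPodMarkPodNotReady for the controller's processPod path, plus TestUniqueQueueGet and a SwapLimiter no-op check in the scheduler's queue tests.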
@@ -40,7 +40,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
 	testcore "k8s.io/client-go/testing"
-	"k8s.io/klog/v2/ktesting"
 	kubeletapis "k8s.io/kubelet/pkg/apis"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler"
@@ -48,6 +47,7 @@ import (
 	controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
 	"k8s.io/kubernetes/pkg/util/node"
 	taintutils "k8s.io/kubernetes/pkg/util/taints"
+	"k8s.io/kubernetes/test/utils/ktesting"
 	"k8s.io/utils/pointer"
 )
 
@@ -85,6 +85,7 @@ type nodeLifecycleController struct {
 	leaseInformer     coordinformers.LeaseInformer
 	nodeInformer      coreinformers.NodeInformer
 	daemonSetInformer appsinformers.DaemonSetInformer
+	podInformer       coreinformers.PodInformer
 }
 
 func createNodeLease(nodeName string, renewTime metav1.MicroTime) *coordv1.Lease {
@@ -121,6 +122,15 @@ func (nc *nodeLifecycleController) syncNodeStore(fakeNodeHandler *testutil.FakeN
 	return nc.nodeInformer.Informer().GetStore().Replace(newElems, "newRV")
 }
 
+func (nc *nodeLifecycleController) syncPodStore(pod *v1.Pod) error {
+	if pod == nil {
+		return nil
+	}
+	newElems := make([]interface{}, 0, 1)
+	newElems = append(newElems, pod)
+	return nc.podInformer.Informer().GetStore().Replace(newElems, "newRV")
+}
+
 func newNodeLifecycleControllerFromClient(
 	ctx context.Context,
 	kubeClient clientset.Interface,
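The syncPodStore helper added above mirrors the existing syncNodeStore: it replaces the pod informer's store contents so a test can exercise pod-handling code without running real informers. A minimal sketch of how it is meant to be driven, mirroring the new test further down (nodeController and tCtx are assumed to be set up as elsewhere in this file):

	// Sketch only: seed the pod store, then push the pod through processPod by hand.
	pod := testutil.NewPod("pod0", "node0")
	if err := nodeController.syncPodStore(pod); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	nodeController.podUpdated(nil, pod) // enqueue, as the informer event handler would
	nodeController.processPod(tCtx, podUpdateItem{name: pod.Name, namespace: pod.Namespace})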
@@ -138,11 +148,12 @@ func newNodeLifecycleControllerFromClient(
 	leaseInformer := factory.Coordination().V1().Leases()
 	nodeInformer := factory.Core().V1().Nodes()
 	daemonSetInformer := factory.Apps().V1().DaemonSets()
+	podInformer := factory.Core().V1().Pods()
 
 	nc, err := NewNodeLifecycleController(
 		ctx,
 		leaseInformer,
-		factory.Core().V1().Pods(),
+		podInformer,
 		nodeInformer,
 		daemonSetInformer,
 		kubeClient,
@@ -163,7 +174,7 @@ func newNodeLifecycleControllerFromClient(
 	nc.nodeInformerSynced = alwaysReady
 	nc.daemonSetInformerSynced = alwaysReady
 
-	return &nodeLifecycleController{nc, leaseInformer, nodeInformer, daemonSetInformer}, nil
+	return &nodeLifecycleController{nc, leaseInformer, nodeInformer, daemonSetInformer, podInformer}, nil
 }
 
 func TestMonitorNodeHealth(t *testing.T) {
@@ -785,13 +796,13 @@ func TestMonitorNodeHealth(t *testing.T) {
 
 	for testName, tt := range tests {
 		t.Run(testName, func(t *testing.T) {
-			_, ctx := ktesting.NewTestContext(t)
+			tCtx := ktesting.Init(t)
 			fakeNodeHandler := &testutil.FakeNodeHandler{
 				Existing:  tt.nodeList,
 				Clientset: fake.NewSimpleClientset(),
 			}
 			nodeController, _ := newNodeLifecycleControllerFromClient(
-				ctx,
+				tCtx,
 				fakeNodeHandler,
 				testRateLimiterQPS,
 				testRateLimiterQPS,
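This hunk and the ones below also switch the tests from `_, ctx := ktesting.NewTestContext(t)` (klog's k8s.io/klog/v2/ktesting) to `tCtx := ktesting.Init(t)` from k8s.io/kubernetes/test/utils/ktesting, matching the import swap at the top of the diff. The returned context satisfies context.Context and also carries the test logger, so one value replaces the old context-plus-logger pair. A rough sketch of the pattern, not part of the diff (doWork and tainter are hypothetical stand-ins):

	tCtx := ktesting.Init(t)                 // replaces _, ctx := ktesting.NewTestContext(t)
	if err := doWork(tCtx); err != nil {     // tCtx is passed wherever a context.Context is expected
		t.Errorf("unexpected error: %v", err)
	}
	tainter.Try(tCtx.Logger(), fn)           // and supplies the logger where a klog.Logger is needed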
@@ -812,7 +823,7 @@ func TestMonitorNodeHealth(t *testing.T) {
 			if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}
-			if err := nodeController.monitorNodeHealth(ctx); err != nil {
+			if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}
 			if diff := cmp.Diff(wanted, nodeController.zoneStates); diff != "" {
@@ -930,10 +941,10 @@ func TestPodStatusChange(t *testing.T) {
 		},
 	}
 
-	_, ctx := ktesting.NewTestContext(t)
+	tCtx := ktesting.Init(t)
 	for _, item := range table {
 		nodeController, _ := newNodeLifecycleControllerFromClient(
-			ctx,
+			tCtx,
 			item.fakeNodeHandler,
 			testRateLimiterQPS,
 			testRateLimiterQPS,
@@ -949,7 +960,7 @@ func TestPodStatusChange(t *testing.T) {
 		if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
-		if err := nodeController.monitorNodeHealth(ctx); err != nil {
+		if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
 		if item.timeToPass > 0 {
@@ -960,19 +971,21 @@ func TestPodStatusChange(t *testing.T) {
 			if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}
-			if err := nodeController.monitorNodeHealth(ctx); err != nil {
+			if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}
 		}
 		zones := testutil.GetZones(item.fakeNodeHandler)
-		logger, _ := ktesting.NewTestContext(t)
 		for _, zone := range zones {
-			nodeController.zoneNoExecuteTainter[zone].Try(logger, func(value scheduler.TimedValue) (bool, time.Duration) {
+			nodeController.zoneNoExecuteTainter[zone].Try(tCtx.Logger(), func(value scheduler.TimedValue) (bool, time.Duration) {
 				nodeUID, _ := value.UID.(string)
 				pods, err := nodeController.getPodsAssignedToNode(value.Value)
 				if err != nil {
 					t.Errorf("unexpected error: %v", err)
 				}
-				controllerutil.DeletePods(ctx, item.fakeNodeHandler, pods, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetStore)
+				_, err = controllerutil.DeletePods(tCtx, item.fakeNodeHandler, pods, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetStore)
+				if err != nil {
+					t.Errorf("unexpected error: %v", err)
+				}
 				return true, 0
 			})
 		}
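Two behavioral fixes ride along in this hunk besides the context switch: the per-zone Try callback now takes its logger from tCtx instead of allocating a second throwaway test context, and the result of controllerutil.DeletePods is no longer discarded; its error now fails the test.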
@@ -1213,10 +1226,10 @@ func TestMonitorNodeHealthUpdateStatus(t *testing.T) {
 			expectedPodStatusUpdate: false,
 		},
 	}
-	_, ctx := ktesting.NewTestContext(t)
+	tCtx := ktesting.Init(t)
 	for i, item := range table {
 		nodeController, _ := newNodeLifecycleControllerFromClient(
-			ctx,
+			tCtx,
 			item.fakeNodeHandler,
 			testRateLimiterQPS,
 			testRateLimiterQPS,
@@ -1232,7 +1245,7 @@ func TestMonitorNodeHealthUpdateStatus(t *testing.T) {
 		if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
-		if err := nodeController.monitorNodeHealth(ctx); err != nil {
+		if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
 		if item.timeToPass > 0 {
@@ -1241,7 +1254,7 @@ func TestMonitorNodeHealthUpdateStatus(t *testing.T) {
 			if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}
-			if err := nodeController.monitorNodeHealth(ctx); err != nil {
+			if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}
 		}
@@ -1758,9 +1771,9 @@ func TestMonitorNodeHealthUpdateNodeAndPodStatusWithLease(t *testing.T) {
 
 	for _, item := range testcases {
 		t.Run(item.description, func(t *testing.T) {
-			_, ctx := ktesting.NewTestContext(t)
+			tCtx := ktesting.Init(t)
 			nodeController, _ := newNodeLifecycleControllerFromClient(
-				ctx,
+				tCtx,
 				item.fakeNodeHandler,
 				testRateLimiterQPS,
 				testRateLimiterQPS,
@@ -1779,7 +1792,7 @@ func TestMonitorNodeHealthUpdateNodeAndPodStatusWithLease(t *testing.T) {
 			if err := nodeController.syncLeaseStore(item.lease); err != nil {
 				t.Fatalf("unexpected error: %v", err)
 			}
-			if err := nodeController.monitorNodeHealth(ctx); err != nil {
+			if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 				t.Fatalf("unexpected error: %v", err)
 			}
 			if item.timeToPass > 0 {
@@ -1791,7 +1804,7 @@ func TestMonitorNodeHealthUpdateNodeAndPodStatusWithLease(t *testing.T) {
 				if err := nodeController.syncLeaseStore(item.newLease); err != nil {
 					t.Fatalf("unexpected error: %v", err)
 				}
-				if err := nodeController.monitorNodeHealth(ctx); err != nil {
+				if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 					t.Fatalf("unexpected error: %v", err)
 				}
 			}
@@ -1922,10 +1935,10 @@ func TestMonitorNodeHealthMarkPodsNotReady(t *testing.T) {
 		},
 	}
 
-	_, ctx := ktesting.NewTestContext(t)
+	tCtx := ktesting.Init(t)
 	for i, item := range table {
 		nodeController, _ := newNodeLifecycleControllerFromClient(
-			ctx,
+			tCtx,
 			item.fakeNodeHandler,
 			testRateLimiterQPS,
 			testRateLimiterQPS,
@@ -1941,7 +1954,7 @@ func TestMonitorNodeHealthMarkPodsNotReady(t *testing.T) {
 		if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
-		if err := nodeController.monitorNodeHealth(ctx); err != nil {
+		if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 			t.Errorf("Case[%d] unexpected error: %v", i, err)
 		}
 		if item.timeToPass > 0 {
@@ -1950,7 +1963,7 @@ func TestMonitorNodeHealthMarkPodsNotReady(t *testing.T) {
 			if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}
-			if err := nodeController.monitorNodeHealth(ctx); err != nil {
+			if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 				t.Errorf("Case[%d] unexpected error: %v", i, err)
 			}
 		}
@@ -2017,7 +2030,7 @@ func TestMonitorNodeHealthMarkPodsNotReadyWithWorkerSize(t *testing.T) {
 		{workers: 1},
 	}
 
-	_, ctx := ktesting.NewTestContext(t)
+	tCtx := ktesting.Init(t)
 	for i, item := range table {
 		fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
 
@@ -2027,7 +2040,7 @@ func TestMonitorNodeHealthMarkPodsNotReadyWithWorkerSize(t *testing.T) {
 		}
 
 		nodeController, _ := newNodeLifecycleControllerFromClient(
-			ctx,
+			tCtx,
 			fakeNodeHandler,
 			testRateLimiterQPS,
 			testRateLimiterQPS,
@@ -2045,7 +2058,7 @@ func TestMonitorNodeHealthMarkPodsNotReadyWithWorkerSize(t *testing.T) {
 		if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
-		if err := nodeController.monitorNodeHealth(ctx); err != nil {
+		if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 			t.Errorf("Case[%d] unexpected error: %v", i, err)
 		}
 
@@ -2071,7 +2084,7 @@ func TestMonitorNodeHealthMarkPodsNotReadyWithWorkerSize(t *testing.T) {
 		if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
-		if err := nodeController.monitorNodeHealth(ctx); err != nil {
+		if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 			t.Errorf("Case[%d] unexpected error: %v", i, err)
 		}
 
@@ -2228,9 +2241,9 @@ func TestMonitorNodeHealthMarkPodsNotReadyRetry(t *testing.T) {
 
 	for _, item := range table {
 		t.Run(item.desc, func(t *testing.T) {
-			_, ctx := ktesting.NewTestContext(t)
+			tCtx := ktesting.Init(t)
 			nodeController, _ := newNodeLifecycleControllerFromClient(
-				ctx,
+				tCtx,
 				item.fakeNodeHandler,
 				testRateLimiterQPS,
 				testRateLimiterQPS,
@@ -2252,7 +2265,7 @@ func TestMonitorNodeHealthMarkPodsNotReadyRetry(t *testing.T) {
 			if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}
-			if err := nodeController.monitorNodeHealth(ctx); err != nil {
+			if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}
 			}
@@ -2407,9 +2420,9 @@ func TestApplyNoExecuteTaints(t *testing.T) {
 		},
 	}
 	originalTaint := UnreachableTaintTemplate
-	_, ctx := ktesting.NewTestContext(t)
+	tCtx := ktesting.Init(t)
 	nodeController, _ := newNodeLifecycleControllerFromClient(
-		ctx,
+		tCtx,
 		fakeNodeHandler,
 		testRateLimiterQPS,
 		testRateLimiterQPS,
@@ -2425,11 +2438,11 @@ func TestApplyNoExecuteTaints(t *testing.T) {
 	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	if err := nodeController.monitorNodeHealth(ctx); err != nil {
+	if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	nodeController.doNoExecuteTaintingPass(ctx)
-	node0, err := fakeNodeHandler.Get(ctx, "node0", metav1.GetOptions{})
+	nodeController.doNoExecuteTaintingPass(tCtx)
+	node0, err := fakeNodeHandler.Get(tCtx, "node0", metav1.GetOptions{})
 	if err != nil {
 		t.Errorf("Can't get current node0...")
 		return
@@ -2437,7 +2450,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
 	if !taintutils.TaintExists(node0.Spec.Taints, UnreachableTaintTemplate) {
 		t.Errorf("Can't find taint %v in %v", originalTaint, node0.Spec.Taints)
 	}
-	node2, err := fakeNodeHandler.Get(ctx, "node2", metav1.GetOptions{})
+	node2, err := fakeNodeHandler.Get(tCtx, "node2", metav1.GetOptions{})
 	if err != nil {
 		t.Errorf("Can't get current node2...")
 		return
@@ -2448,7 +2461,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
 
 	// Make node3 healthy again.
 	node2.Status = healthyNodeNewStatus
-	_, err = fakeNodeHandler.UpdateStatus(ctx, node2, metav1.UpdateOptions{})
+	_, err = fakeNodeHandler.UpdateStatus(tCtx, node2, metav1.UpdateOptions{})
 	if err != nil {
 		t.Error(err.Error())
 		return
@@ -2456,12 +2469,12 @@ func TestApplyNoExecuteTaints(t *testing.T) {
 	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	if err := nodeController.monitorNodeHealth(ctx); err != nil {
+	if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	nodeController.doNoExecuteTaintingPass(ctx)
+	nodeController.doNoExecuteTaintingPass(tCtx)
 
-	node2, err = fakeNodeHandler.Get(ctx, "node2", metav1.GetOptions{})
+	node2, err = fakeNodeHandler.Get(tCtx, "node2", metav1.GetOptions{})
 	if err != nil {
 		t.Errorf("Can't get current node2...")
 		return
@@ -2471,13 +2484,13 @@ func TestApplyNoExecuteTaints(t *testing.T) {
 		t.Errorf("Found taint %v in %v, which should not be present", NotReadyTaintTemplate, node2.Spec.Taints)
 	}
 
-	node3, err := fakeNodeHandler.Get(ctx, "node3", metav1.GetOptions{})
+	node3, err := fakeNodeHandler.Get(tCtx, "node3", metav1.GetOptions{})
 	if err != nil {
 		t.Errorf("Can't get current node3...")
 		return
 	}
 	node3.Status = unhealthyNodeNewStatus
-	_, err = fakeNodeHandler.UpdateStatus(ctx, node3, metav1.UpdateOptions{})
+	_, err = fakeNodeHandler.UpdateStatus(tCtx, node3, metav1.UpdateOptions{})
 	if err != nil {
 		t.Error(err.Error())
 		return
@@ -2485,12 +2498,12 @@ func TestApplyNoExecuteTaints(t *testing.T) {
 	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	if err := nodeController.monitorNodeHealth(ctx); err != nil {
+	if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
 	// Before taint manager work, the status has been replaced(maybe merge-patch replace).
 	node3.Status.Conditions = overrideNodeNewStatusConditions
-	_, err = fakeNodeHandler.UpdateStatus(ctx, node3, metav1.UpdateOptions{})
+	_, err = fakeNodeHandler.UpdateStatus(tCtx, node3, metav1.UpdateOptions{})
 	if err != nil {
 		t.Error(err.Error())
 		return
@@ -2498,8 +2511,8 @@ func TestApplyNoExecuteTaints(t *testing.T) {
 	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	nodeController.doNoExecuteTaintingPass(ctx)
-	node3, err = fakeNodeHandler.Get(ctx, "node3", metav1.GetOptions{})
+	nodeController.doNoExecuteTaintingPass(tCtx)
+	node3, err = fakeNodeHandler.Get(tCtx, "node3", metav1.GetOptions{})
 	if err != nil {
 		t.Errorf("Can't get current node3...")
 		return
@@ -2603,9 +2616,9 @@ func TestApplyNoExecuteTaintsToNodesEnqueueTwice(t *testing.T) {
 			},
 		},
 	}
-	_, ctx := ktesting.NewTestContext(t)
+	tCtx := ktesting.Init(t)
 	nodeController, _ := newNodeLifecycleControllerFromClient(
-		ctx,
+		tCtx,
 		fakeNodeHandler,
 		testRateLimiterQPS,
 		testRateLimiterQPS,
@@ -2622,21 +2635,21 @@ func TestApplyNoExecuteTaintsToNodesEnqueueTwice(t *testing.T) {
 		t.Errorf("unexpected error: %v", err)
 	}
 	// 1. monitor node health twice, add untainted node once
-	if err := nodeController.monitorNodeHealth(ctx); err != nil {
+	if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	if err := nodeController.monitorNodeHealth(ctx); err != nil {
+	if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
 
 	// 2. mark node0 healthy
-	node0, err := fakeNodeHandler.Get(ctx, "node0", metav1.GetOptions{})
+	node0, err := fakeNodeHandler.Get(tCtx, "node0", metav1.GetOptions{})
 	if err != nil {
 		t.Errorf("Can't get current node0...")
 		return
 	}
 	node0.Status = healthyNodeNewStatus
-	_, err = fakeNodeHandler.UpdateStatus(ctx, node0, metav1.UpdateOptions{})
+	_, err = fakeNodeHandler.UpdateStatus(tCtx, node0, metav1.UpdateOptions{})
 	if err != nil {
 		t.Error(err.Error())
 		return
@@ -2719,17 +2732,17 @@ func TestApplyNoExecuteTaintsToNodesEnqueueTwice(t *testing.T) {
 		t.Errorf("unexpected error: %v", err)
 	}
 	// 3. start monitor node health again, add untainted node twice, construct UniqueQueue with duplicated node cache
-	if err := nodeController.monitorNodeHealth(ctx); err != nil {
+	if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
 
 	// 4. do NoExecute taint pass
 	// when processing with node0, condition.Status is NodeReady, and return true with default case
 	// then remove the set value and queue value both, the taint job never stuck
-	nodeController.doNoExecuteTaintingPass(ctx)
+	nodeController.doNoExecuteTaintingPass(tCtx)
 
 	// 5. get node3 and node5, see if it has ready got NoExecute taint
-	node3, err := fakeNodeHandler.Get(ctx, "node3", metav1.GetOptions{})
+	node3, err := fakeNodeHandler.Get(tCtx, "node3", metav1.GetOptions{})
 	if err != nil {
 		t.Errorf("Can't get current node3...")
 		return
@@ -2737,7 +2750,7 @@ func TestApplyNoExecuteTaintsToNodesEnqueueTwice(t *testing.T) {
 	if !taintutils.TaintExists(node3.Spec.Taints, UnreachableTaintTemplate) || len(node3.Spec.Taints) == 0 {
 		t.Errorf("Not found taint %v in %v, which should be present in %s", UnreachableTaintTemplate, node3.Spec.Taints, node3.Name)
 	}
-	node5, err := fakeNodeHandler.Get(ctx, "node5", metav1.GetOptions{})
+	node5, err := fakeNodeHandler.Get(tCtx, "node5", metav1.GetOptions{})
 	if err != nil {
 		t.Errorf("Can't get current node5...")
 		return
@@ -2826,9 +2839,9 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
 	originalTaint := UnreachableTaintTemplate
 	updatedTaint := NotReadyTaintTemplate
 
-	_, ctx := ktesting.NewTestContext(t)
+	tCtx := ktesting.Init(t)
 	nodeController, _ := newNodeLifecycleControllerFromClient(
-		ctx,
+		tCtx,
 		fakeNodeHandler,
 		testRateLimiterQPS,
 		testRateLimiterQPS,
@@ -2844,17 +2857,17 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
 	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	if err := nodeController.monitorNodeHealth(ctx); err != nil {
+	if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	nodeController.doNoExecuteTaintingPass(ctx)
+	nodeController.doNoExecuteTaintingPass(tCtx)
 
-	node0, err := fakeNodeHandler.Get(ctx, "node0", metav1.GetOptions{})
+	node0, err := fakeNodeHandler.Get(tCtx, "node0", metav1.GetOptions{})
 	if err != nil {
 		t.Errorf("Can't get current node0...")
 		return
 	}
-	node1, err := fakeNodeHandler.Get(ctx, "node1", metav1.GetOptions{})
+	node1, err := fakeNodeHandler.Get(tCtx, "node1", metav1.GetOptions{})
 	if err != nil {
 		t.Errorf("Can't get current node1...")
 		return
@@ -2868,12 +2881,12 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
 
 	node0.Status = newNodeStatus
 	node1.Status = healthyNodeNewStatus
-	_, err = fakeNodeHandler.UpdateStatus(ctx, node0, metav1.UpdateOptions{})
+	_, err = fakeNodeHandler.UpdateStatus(tCtx, node0, metav1.UpdateOptions{})
 	if err != nil {
 		t.Error(err.Error())
 		return
 	}
-	_, err = fakeNodeHandler.UpdateStatus(ctx, node1, metav1.UpdateOptions{})
+	_, err = fakeNodeHandler.UpdateStatus(tCtx, node1, metav1.UpdateOptions{})
 	if err != nil {
 		t.Error(err.Error())
 		return
@@ -2882,12 +2895,12 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
 	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	if err := nodeController.monitorNodeHealth(ctx); err != nil {
+	if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	nodeController.doNoExecuteTaintingPass(ctx)
+	nodeController.doNoExecuteTaintingPass(tCtx)
 
-	node0, err = fakeNodeHandler.Get(ctx, "node0", metav1.GetOptions{})
+	node0, err = fakeNodeHandler.Get(tCtx, "node0", metav1.GetOptions{})
 	if err != nil {
 		t.Errorf("Can't get current node0...")
 		return
@@ -2930,9 +2943,9 @@ func TestTaintsNodeByCondition(t *testing.T) {
 		Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
 	}
 
-	_, ctx := ktesting.NewTestContext(t)
+	tCtx := ktesting.Init(t)
 	nodeController, _ := newNodeLifecycleControllerFromClient(
-		ctx,
+		tCtx,
 		fakeNodeHandler,
 		testRateLimiterQPS,
 		testRateLimiterQPS,
@@ -3083,11 +3096,15 @@ func TestTaintsNodeByCondition(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		fakeNodeHandler.Update(ctx, test.Node, metav1.UpdateOptions{})
+		if _, err := fakeNodeHandler.Update(tCtx, test.Node, metav1.UpdateOptions{}); err != nil {
+			t.Errorf("unexpected error: %v", err)
+		}
 		if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
-		nodeController.doNoScheduleTaintingPass(ctx, test.Node.Name)
+		if err := nodeController.doNoScheduleTaintingPass(tCtx, test.Node.Name); err != nil {
+			t.Errorf("unexpected error: %v", err)
+		}
 		if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
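Note the pattern here, repeated in TestReconcileNodeLabels below: calls whose errors used to be silently dropped (fakeNodeHandler.Update, doNoScheduleTaintingPass, reconcileNodeLabels) are now wrapped in `if err := ...; err != nil` checks, so a failed setup step surfaces immediately instead of corrupting the assertions that follow.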
@@ -3133,9 +3150,9 @@ func TestNodeEventGeneration(t *testing.T) {
 		Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
 	}
 
-	_, ctx := ktesting.NewTestContext(t)
+	tCtx := ktesting.Init(t)
 	nodeController, _ := newNodeLifecycleControllerFromClient(
-		ctx,
+		tCtx,
 		fakeNodeHandler,
 		testRateLimiterQPS,
 		testRateLimiterQPS,
@@ -3153,7 +3170,7 @@ func TestNodeEventGeneration(t *testing.T) {
 	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	if err := nodeController.monitorNodeHealth(ctx); err != nil {
+	if err := nodeController.monitorNodeHealth(tCtx); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
 	if len(fakeRecorder.Events) != 1 {
@@ -3206,9 +3223,9 @@ func TestReconcileNodeLabels(t *testing.T) {
 		Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
 	}
 
-	_, ctx := ktesting.NewTestContext(t)
+	tCtx := ktesting.Init(t)
 	nodeController, _ := newNodeLifecycleControllerFromClient(
-		ctx,
+		tCtx,
 		fakeNodeHandler,
 		testRateLimiterQPS,
 		testRateLimiterQPS,
@@ -3295,11 +3312,15 @@ func TestReconcileNodeLabels(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		fakeNodeHandler.Update(ctx, test.Node, metav1.UpdateOptions{})
+		if _, err := fakeNodeHandler.Update(tCtx, test.Node, metav1.UpdateOptions{}); err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
 		if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
-		nodeController.reconcileNodeLabels(ctx, test.Node.Name)
+		if err := nodeController.reconcileNodeLabels(tCtx, test.Node.Name); err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
 		if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
@@ -3349,9 +3370,9 @@ func TestTryUpdateNodeHealth(t *testing.T) {
 		Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
 	}
 
-	_, ctx := ktesting.NewTestContext(t)
+	tCtx := ktesting.Init(t)
 	nodeController, _ := newNodeLifecycleControllerFromClient(
-		ctx,
+		tCtx,
 		fakeNodeHandler,
 		testRateLimiterQPS,
 		testRateLimiterQPS,
@@ -3523,7 +3544,7 @@ func TestTryUpdateNodeHealth(t *testing.T) {
 			probeTimestamp:           test.node.CreationTimestamp,
 			readyTransitionTimestamp: test.node.CreationTimestamp,
 		})
-		_, _, currentReadyCondition, err := nodeController.tryUpdateNodeHealth(ctx, test.node)
+		_, _, currentReadyCondition, err := nodeController.tryUpdateNodeHealth(tCtx, test.node)
 		if err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
@@ -3557,3 +3578,141 @@ func Test_isNodeExcludedFromDisruptionChecks(t *testing.T) {
 		})
 	}
 }
+
+func TestProcessPodMarkPodNotReady(t *testing.T) {
+	fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
+
+	table := []struct {
+		desc                    string
+		fakeNodeHandler         *testutil.FakeNodeHandler
+		pod                     *v1.Pod
+		expectedPodStatusUpdate bool
+		monitorNodeHealth       bool
+	}{
+		{
+			desc: "Do not mark pod as NotReady when the scheduled node's healthy is not gathered yet",
+			fakeNodeHandler: &testutil.FakeNodeHandler{
+				Existing: []*v1.Node{
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:              "node0",
+							CreationTimestamp: fakeNow,
+						},
+						Status: v1.NodeStatus{
+							Conditions: []v1.NodeCondition{
+								{
+									Type:               v1.NodeReady,
+									Status:             v1.ConditionFalse,
+									LastHeartbeatTime:  fakeNow,
+									LastTransitionTime: fakeNow,
+								},
+							},
+						},
+					},
+				},
+				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
+			},
+			pod:                     testutil.NewPod("pod0", "node0"),
+			monitorNodeHealth:       false,
+			expectedPodStatusUpdate: false,
+		},
+		{
+			desc: "Do not mark pod as NotReady when the scheduled node is ready",
+			fakeNodeHandler: &testutil.FakeNodeHandler{
+				Existing: []*v1.Node{
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:              "node0",
+							CreationTimestamp: fakeNow,
+						},
+						Status: v1.NodeStatus{
+							Conditions: []v1.NodeCondition{
+								{
+									Type:               v1.NodeReady,
+									Status:             v1.ConditionTrue,
+									LastHeartbeatTime:  fakeNow,
+									LastTransitionTime: fakeNow,
+								},
+							},
+						},
+					},
+				},
+				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
+			},
+			pod:                     testutil.NewPod("pod0", "node0"),
+			monitorNodeHealth:       true,
+			expectedPodStatusUpdate: false,
+		},
+		{
+			desc: "Pod marked as NotReady when the scheduled node is not ready",
+			fakeNodeHandler: &testutil.FakeNodeHandler{
+				Existing: []*v1.Node{
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:              "node0",
+							CreationTimestamp: fakeNow,
+						},
+						Status: v1.NodeStatus{
+							Conditions: []v1.NodeCondition{
+								{
+									Type:               v1.NodeReady,
+									Status:             v1.ConditionFalse,
+									LastHeartbeatTime:  fakeNow,
+									LastTransitionTime: fakeNow,
+								},
+							},
+						},
+					},
+				},
+				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
+			},
+			pod:                     testutil.NewPod("pod0", "node0"),
+			monitorNodeHealth:       true,
+			expectedPodStatusUpdate: true,
+		},
+	}
+
+	for _, item := range table {
+		t.Run(item.desc, func(t *testing.T) {
+			tCtx := ktesting.Init(t)
+			nodeController, _ := newNodeLifecycleControllerFromClient(
+				tCtx,
+				item.fakeNodeHandler,
+				testRateLimiterQPS,
+				testRateLimiterQPS,
+				testLargeClusterThreshold,
+				testUnhealthyThreshold,
+				testNodeMonitorGracePeriod,
+				testNodeStartupGracePeriod,
+				testNodeMonitorPeriod,
+			)
+			nodeController.now = func() metav1.Time { return fakeNow }
+			nodeController.recorder = testutil.NewFakeRecorder()
+			nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(item.fakeNodeHandler.Clientset)
+			if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			if item.monitorNodeHealth {
+				if err := nodeController.monitorNodeHealth(tCtx); err != nil {
+					t.Errorf("unexpected error: %v", err)
+				}
+			}
+
+			if err := nodeController.syncPodStore(item.pod); err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			nodeController.podUpdated(nil, item.pod)
+			nodeController.processPod(tCtx, podUpdateItem{name: item.pod.Name, namespace: item.pod.Namespace})
+
+			podStatusUpdated := false
+			for _, action := range item.fakeNodeHandler.Actions() {
+				if action.GetVerb() == "update" && action.GetResource().Resource == "pods" && action.GetSubresource() == "status" {
+					podStatusUpdated = true
+				}
+			}
+			if podStatusUpdated != item.expectedPodStatusUpdate {
+				t.Errorf("expect pod status updated to be %v, but got %v", item.expectedPodStatusUpdate, podStatusUpdated)
+			}
+		})
+	}
+}
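The new TestProcessPodMarkPodNotReady above is the core of the PR: it drives processPod directly through three cases, node health not yet observed (no status write), node Ready (no status write), and node NotReady (the pod's status is updated). Whether a write happened is detected by scanning the fake clientset's recorded actions for an update to the pods/status subresource. The remaining hunks below are against the eviction-queue tests in the controller's scheduler package.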
@@ -39,6 +39,56 @@ func CheckSetEq(lhs, rhs sets.String) bool {
 	return lhs.IsSuperset(rhs) && rhs.IsSuperset(lhs)
 }
 
+func TestUniqueQueueGet(t *testing.T) {
+	var tick int64
+	now = func() time.Time {
+		t := time.Unix(tick, 0)
+		tick++
+		return t
+	}
+
+	queue := UniqueQueue{
+		queue: TimedQueue{},
+		set:   sets.NewString(),
+	}
+	queue.Add(TimedValue{Value: "first", UID: "11111", AddedAt: now(), ProcessAt: now()})
+	queue.Add(TimedValue{Value: "second", UID: "22222", AddedAt: now(), ProcessAt: now()})
+	queue.Add(TimedValue{Value: "third", UID: "33333", AddedAt: now(), ProcessAt: now()})
+
+	queuePattern := []string{"first", "second", "third"}
+	if len(queue.queue) != len(queuePattern) {
+		t.Fatalf("Queue %v should have length %d", queue.queue, len(queuePattern))
+	}
+	if !CheckQueueEq(queuePattern, queue.queue) {
+		t.Errorf("Invalid queue. Got %v, expected %v", queue.queue, queuePattern)
+	}
+
+	setPattern := sets.NewString("first", "second", "third")
+	if len(queue.set) != len(setPattern) {
+		t.Fatalf("Map %v should have length %d", queue.set, len(setPattern))
+	}
+	if !CheckSetEq(setPattern, queue.set) {
+		t.Errorf("Invalid map. Got %v, expected %v", queue.set, setPattern)
+	}
+
+	queue.Get()
+	queuePattern = []string{"second", "third"}
+	if len(queue.queue) != len(queuePattern) {
+		t.Fatalf("Queue %v should have length %d", queue.queue, len(queuePattern))
+	}
+	if !CheckQueueEq(queuePattern, queue.queue) {
+		t.Errorf("Invalid queue. Got %v, expected %v", queue.queue, queuePattern)
+	}
+
+	setPattern = sets.NewString("second", "third")
+	if len(queue.set) != len(setPattern) {
+		t.Fatalf("Map %v should have length %d", queue.set, len(setPattern))
+	}
+	if !CheckSetEq(setPattern, queue.set) {
+		t.Errorf("Invalid map. Got %v, expected %v", queue.set, setPattern)
+	}
+}
+
 func TestAddNode(t *testing.T) {
 	evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
 	evictor.Add("first", "11111")
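TestUniqueQueueGet pins down the contract of UniqueQueue.Get: it pops the head of the timed queue and removes the value from the accompanying set, so both views shrink in lockstep from {first, second, third} to {second, third}. The stubbed now function hands out monotonically increasing timestamps so AddedAt/ProcessAt ordering is deterministic.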
@@ -306,6 +356,12 @@ func TestSwapLimiter(t *testing.T) {
 	if qps != createdQPS {
 		t.Fatalf("QPS does not match create one: %v instead of %v", qps, createdQPS)
 	}
+
+	prev := evictor.limiter
+	evictor.SwapLimiter(createdQPS)
+	if prev != evictor.limiter {
+		t.Fatalf("Limiter should not be swapped if the QPS is the same.")
+	}
 }
 
 func TestAddAfterTry(t *testing.T) {
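The TestSwapLimiter addition covers the no-op path: calling SwapLimiter with the QPS already in effect must keep the existing limiter, which the test checks by comparing the limiter field before and after the call.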