From 2df7e1ad5ce4807335e16f012765a74999d60bbb Mon Sep 17 00:00:00 2001
From: Michael Taufen
Date: Wed, 27 Jun 2018 11:32:17 -0700
Subject: [PATCH] port setNodeVolumesInUseStatus to Setter abstraction, add test

---
 pkg/kubelet/kubelet_node_status.go     | 11 +-----
 pkg/kubelet/nodestatus/setters.go      | 13 +++++++
 pkg/kubelet/nodestatus/setters_test.go | 51 ++++++++++++++++++++++++++
 3 files changed, 65 insertions(+), 10 deletions(-)

diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go
index 41e648b4ee2..22f0f06136c 100644
--- a/pkg/kubelet/kubelet_node_status.go
+++ b/pkg/kubelet/kubelet_node_status.go
@@ -659,15 +659,6 @@ func (kl *Kubelet) recordNodeSchedulableEvent(node *v1.Node) {
 	}
 }
 
-// Update VolumesInUse field in Node Status only after states are synced up at least once
-// in volume reconciler.
-func (kl *Kubelet) setNodeVolumesInUseStatus(node *v1.Node) {
-	// Make sure to only update node status after reconciler starts syncing up states
-	if kl.volumeManager.ReconcilerStatesHasBeenSynced() {
-		node.Status.VolumesInUse = kl.volumeManager.GetVolumesInUse()
-	}
-}
-
 // setNodeStatus fills in the Status fields of the given Node, overwriting
 // any fields that are currently set.
 // TODO(madhusudancs): Simplify the logic for setting node conditions and
@@ -719,7 +710,7 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
 		nodestatus.DiskPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderDiskPressure, kl.recordNodeStatusEvent),
 		nodestatus.PIDPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderPIDPressure, kl.recordNodeStatusEvent),
 		nodestatus.ReadyCondition(kl.clock.Now, kl.runtimeState.runtimeErrors, kl.runtimeState.networkErrors, validateHostFunc, kl.containerManager.Status, kl.recordNodeStatusEvent),
-		withoutError(kl.setNodeVolumesInUseStatus),
+		nodestatus.VolumesInUse(kl.volumeManager.ReconcilerStatesHasBeenSynced, kl.volumeManager.GetVolumesInUse),
 		withoutError(kl.recordNodeSchedulableEvent),
 	}
 }
diff --git a/pkg/kubelet/nodestatus/setters.go b/pkg/kubelet/nodestatus/setters.go
index 5e0a0ecb29c..0016918132e 100644
--- a/pkg/kubelet/nodestatus/setters.go
+++ b/pkg/kubelet/nodestatus/setters.go
@@ -457,3 +457,16 @@ func OutOfDiskCondition(nowFunc func() time.Time, // typically Kubelet.clock.Now
 		return nil
 	}
 }
+
+// VolumesInUse returns a Setter that updates the volumes in use on the node.
+func VolumesInUse(syncedFunc func() bool, // typically Kubelet.volumeManager.ReconcilerStatesHasBeenSynced
+	volumesInUseFunc func() []v1.UniqueVolumeName, // typically Kubelet.volumeManager.GetVolumesInUse
+) Setter {
+	return func(node *v1.Node) error {
+		// Make sure to only update node status after reconciler starts syncing up states
+		if syncedFunc() {
+			node.Status.VolumesInUse = volumesInUseFunc()
+		}
+		return nil
+	}
+}
diff --git a/pkg/kubelet/nodestatus/setters_test.go b/pkg/kubelet/nodestatus/setters_test.go
index ee17e2de02a..ddd5508f63a 100644
--- a/pkg/kubelet/nodestatus/setters_test.go
+++ b/pkg/kubelet/nodestatus/setters_test.go
@@ -722,6 +722,57 @@ func TestDiskPressureCondition(t *testing.T) {
 	}
 }
 
+func TestVolumesInUse(t *testing.T) {
+	withVolumesInUse := &v1.Node{
+		Status: v1.NodeStatus{
+			VolumesInUse: []v1.UniqueVolumeName{"foo"},
+		},
+	}
+
+	cases := []struct {
+		desc               string
+		node               *v1.Node
+		synced             bool
+		volumesInUse       []v1.UniqueVolumeName
+		expectVolumesInUse []v1.UniqueVolumeName
+	}{
+		{
+			desc:               "synced",
+			node:               withVolumesInUse.DeepCopy(),
+			synced:             true,
+			volumesInUse:       []v1.UniqueVolumeName{"bar"},
+			expectVolumesInUse: []v1.UniqueVolumeName{"bar"},
+		},
+		{
+			desc:               "not synced",
+			node:               withVolumesInUse.DeepCopy(),
+			synced:             false,
+			volumesInUse:       []v1.UniqueVolumeName{"bar"},
+			expectVolumesInUse: []v1.UniqueVolumeName{"foo"},
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.desc, func(t *testing.T) {
+			syncedFunc := func() bool {
+				return tc.synced
+			}
+			volumesInUseFunc := func() []v1.UniqueVolumeName {
+				return tc.volumesInUse
+			}
+			// construct setter
+			setter := VolumesInUse(syncedFunc, volumesInUseFunc)
+			// call setter on node
+			if err := setter(tc.node); err != nil {
+				t.Fatalf("unexpected error: %v", err)
+			}
+			// check expected volumes
+			assert.True(t, apiequality.Semantic.DeepEqual(tc.expectVolumesInUse, tc.node.Status.VolumesInUse),
+				"Diff: %s", diff.ObjectDiff(tc.expectVolumesInUse, tc.node.Status.VolumesInUse))
+		})
+	}
+}
+
 // Test Helpers:
 
 // sortableNodeAddress is a type for sorting []v1.NodeAddress
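
Note for reviewers (not part of the patch): the value of the Setter port is that the status update becomes a pure function of two injected closures, so it can be exercised without a Kubelet or a volume manager, as the new table-driven test shows. Below is a minimal, self-contained Go sketch of the same composition outside the kubelet tree; the local Setter alias, the stub functions, and the fake volume name are illustrative assumptions rather than kubelet code.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// Setter mirrors the nodestatus.Setter shape: a function that mutates one
// slice of the node's status and returns an error instead of panicking.
type Setter func(node *v1.Node) error

// volumesInUse mirrors the constructor added by this patch, with the two
// volume-manager methods replaced by injected closures.
func volumesInUse(synced func() bool, inUse func() []v1.UniqueVolumeName) Setter {
	return func(node *v1.Node) error {
		// Only overwrite VolumesInUse once the reconciler has synced at
		// least once, so the status isn't wiped with an empty list.
		if synced() {
			node.Status.VolumesInUse = inUse()
		}
		return nil
	}
}

func main() {
	node := &v1.Node{}

	// Stubs standing in for ReconcilerStatesHasBeenSynced and GetVolumesInUse.
	synced := func() bool { return true }
	inUse := func() []v1.UniqueVolumeName {
		return []v1.UniqueVolumeName{"fake-plugin/foo"}
	}

	// defaultNodeStatusFuncs-style list: setters run in order, and a caller
	// like setNodeStatus would log errors rather than abort the loop.
	setters := []Setter{volumesInUse(synced, inUse)}
	for _, set := range setters {
		if err := set(node); err != nil {
			fmt.Printf("setter error: %v\n", err)
		}
	}
	fmt.Println(node.Status.VolumesInUse) // [fake-plugin/foo]
}

Injecting the two closures instead of the whole volumeManager interface is what makes the table-driven TestVolumesInUse above possible: each case swaps the stubs without any mock framework.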