Mirror of https://github.com/k3s-io/kubernetes.git
port setNodeVolumesInUseStatus to Setter abstraction, add test
commit 2df7e1ad5c (parent 3e03e0611e)
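
For readers new to this refactor: the "Setter abstraction" in the commit title is the node-status mutator type used by pkg/kubelet/nodestatus. The sketch below illustrates the pattern with a simplified driver loop; runSetters and markVolume are hypothetical stand-ins for illustration, not code from this commit.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// Setter is the node-status mutator shape used by pkg/kubelet/nodestatus:
// mutate the node in place, return an error if the modification failed.
type Setter func(node *v1.Node) error

// runSetters is a hypothetical driver, standing in for the loop that
// Kubelet.setNodeStatus runs over the setters from defaultNodeStatusFuncs.
func runSetters(node *v1.Node, setters []Setter) error {
	for _, s := range setters {
		if err := s(node); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	node := &v1.Node{}
	// markVolume is a toy setter, just to show the shape.
	markVolume := Setter(func(n *v1.Node) error {
		n.Status.VolumesInUse = []v1.UniqueVolumeName{"example-vol"}
		return nil
	})
	if err := runSetters(node, []Setter{markVolume}); err != nil {
		fmt.Println("setter failed:", err)
		return
	}
	fmt.Println(node.Status.VolumesInUse) // [example-vol]
}

Porting a Kubelet method like setNodeVolumesInUseStatus therefore means re-expressing it as a standalone constructor that returns a Setter, which is what the hunks below do.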
@@ -659,15 +659,6 @@ func (kl *Kubelet) recordNodeSchedulableEvent(node *v1.Node) {
 	}
 }
 
-// Update VolumesInUse field in Node Status only after states are synced up at least once
-// in volume reconciler.
-func (kl *Kubelet) setNodeVolumesInUseStatus(node *v1.Node) {
-	// Make sure to only update node status after reconciler starts syncing up states
-	if kl.volumeManager.ReconcilerStatesHasBeenSynced() {
-		node.Status.VolumesInUse = kl.volumeManager.GetVolumesInUse()
-	}
-}
-
 // setNodeStatus fills in the Status fields of the given Node, overwriting
 // any fields that are currently set.
 // TODO(madhusudancs): Simplify the logic for setting node conditions and
@@ -719,7 +710,7 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
 		nodestatus.DiskPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderDiskPressure, kl.recordNodeStatusEvent),
 		nodestatus.PIDPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderPIDPressure, kl.recordNodeStatusEvent),
 		nodestatus.ReadyCondition(kl.clock.Now, kl.runtimeState.runtimeErrors, kl.runtimeState.networkErrors, validateHostFunc, kl.containerManager.Status, kl.recordNodeStatusEvent),
-		withoutError(kl.setNodeVolumesInUseStatus),
+		nodestatus.VolumesInUse(kl.volumeManager.ReconcilerStatesHasBeenSynced, kl.volumeManager.GetVolumesInUse),
 		withoutError(kl.recordNodeSchedulableEvent),
 	}
 }
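
The removed line needed withoutError because the old method returned nothing. As a sketch (the real helper is defined alongside defaultNodeStatusFuncs and may differ in detail), such an adapter looks like:

package kubeletsketch

import v1 "k8s.io/api/core/v1"

// withoutError adapts a setter that cannot fail to the error-returning
// signature that defaultNodeStatusFuncs collects.
func withoutError(f func(*v1.Node)) func(*v1.Node) error {
	return func(n *v1.Node) error {
		f(n)
		return nil
	}
}

The new nodestatus.VolumesInUse already satisfies the error-returning Setter signature, so it plugs into the slice directly with no adapter.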
@@ -457,3 +457,16 @@ func OutOfDiskCondition(nowFunc func() time.Time, // typically Kubelet.clock.Now
 		return nil
 	}
 }
+
+// VolumesInUse returns a Setter that updates the volumes in use on the node.
+func VolumesInUse(syncedFunc func() bool, // typically Kubelet.volumeManager.ReconcilerStatesHasBeenSynced
+	volumesInUseFunc func() []v1.UniqueVolumeName, // typically Kubelet.volumeManager.GetVolumesInUse
+) Setter {
+	return func(node *v1.Node) error {
+		// Make sure to only update node status after reconciler starts syncing up states
+		if syncedFunc() {
+			node.Status.VolumesInUse = volumesInUseFunc()
+		}
+		return nil
+	}
+}
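
Design note: the old method read kl.volumeManager directly, so testing it meant standing up a Kubelet. By taking ReconcilerStatesHasBeenSynced and GetVolumesInUse as plain function arguments, VolumesInUse can be exercised with simple closures, which is exactly what the table-driven test in the next hunk does.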
@@ -722,6 +722,57 @@ func TestDiskPressureCondition(t *testing.T) {
 	}
 }
 
+func TestVolumesInUse(t *testing.T) {
+	withVolumesInUse := &v1.Node{
+		Status: v1.NodeStatus{
+			VolumesInUse: []v1.UniqueVolumeName{"foo"},
+		},
+	}
+
+	cases := []struct {
+		desc               string
+		node               *v1.Node
+		synced             bool
+		volumesInUse       []v1.UniqueVolumeName
+		expectVolumesInUse []v1.UniqueVolumeName
+	}{
+		{
+			desc:               "synced",
+			node:               withVolumesInUse.DeepCopy(),
+			synced:             true,
+			volumesInUse:       []v1.UniqueVolumeName{"bar"},
+			expectVolumesInUse: []v1.UniqueVolumeName{"bar"},
+		},
+		{
+			desc:               "not synced",
+			node:               withVolumesInUse.DeepCopy(),
+			synced:             false,
+			volumesInUse:       []v1.UniqueVolumeName{"bar"},
+			expectVolumesInUse: []v1.UniqueVolumeName{"foo"},
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.desc, func(t *testing.T) {
+			syncedFunc := func() bool {
+				return tc.synced
+			}
+			volumesInUseFunc := func() []v1.UniqueVolumeName {
+				return tc.volumesInUse
+			}
+			// construct setter
+			setter := VolumesInUse(syncedFunc, volumesInUseFunc)
+			// call setter on node
+			if err := setter(tc.node); err != nil {
+				t.Fatalf("unexpected error: %v", err)
+			}
+			// check expected volumes
+			assert.True(t, apiequality.Semantic.DeepEqual(tc.expectVolumesInUse, tc.node.Status.VolumesInUse),
+				"Diff: %s", diff.ObjectDiff(tc.expectVolumesInUse, tc.node.Status.VolumesInUse))
+		})
+	}
+}
+
 // Test Helpers:
 
 // sortableNodeAddress is a type for sorting []v1.NodeAddress
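
Assuming the new setter and its test live in the pkg/kubelet/nodestatus package (the file paths are not shown on this page), the test can be run in isolation with: go test k8s.io/kubernetes/pkg/kubelet/nodestatus -run TestVolumesInUse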