[kubelet]: Sync OS and arch labels periodically

ravisantoshgudimetla 2021-09-29 12:41:02 -04:00
parent 4ce435cc95
commit 02c1bac0b6
4 changed files with 189 additions and 11 deletions

View File

@@ -509,11 +509,30 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
}
}
areRequiredLabelsNotPresent := false
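// Set the OS label if it is missing or does not match the runtime OS.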
osName, osLabelExists := node.Labels[v1.LabelOSStable]
if !osLabelExists || osName != goruntime.GOOS {
if len(node.Labels) == 0 {
node.Labels = make(map[string]string)
}
node.Labels[v1.LabelOSStable] = goruntime.GOOS
areRequiredLabelsNotPresent = true
}
// Set the arch if there is a mismatch
arch, archLabelExists := node.Labels[v1.LabelArchStable]
if !archLabelExists || arch != goruntime.GOARCH {
if len(node.Labels) == 0 {
node.Labels = make(map[string]string)
}
node.Labels[v1.LabelArchStable] = goruntime.GOARCH
areRequiredLabelsNotPresent = true
}
kl.setNodeStatus(node)
now := kl.clock.Now()
if now.Before(kl.lastStatusReportTime.Add(kl.nodeStatusReportFrequency)) {
if !podCIDRChanged && !nodeStatusHasChanged(&originalNode.Status, &node.Status) {
if !podCIDRChanged && !nodeStatusHasChanged(&originalNode.Status, &node.Status) && !areRequiredLabelsNotPresent {
// We must mark the volumes as ReportedInUse in volume manager's dsw even
// if no changes were made to the node status (no volumes were added or removed
// from the VolumesInUse list).
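Taken together, the hunk above reconciles the well-known kubernetes.io/os and kubernetes.io/arch labels against the kubelet's build-time GOOS/GOARCH and forces a node patch whenever either label had to be rewritten, even inside the normal nodeStatusReportFrequency window. A self-contained sketch of the same check, with a hypothetical helper name and factoring that are not part of this commit:

package main

import (
	"fmt"
	goruntime "runtime"

	v1 "k8s.io/api/core/v1"
)

// reconcileOSArchLabels makes sure the kubernetes.io/os and kubernetes.io/arch
// labels on the node match the kubelet's own GOOS/GOARCH, and reports whether
// any label had to be (re)written. Hypothetical helper, for illustration only.
func reconcileOSArchLabels(node *v1.Node) bool {
	required := map[string]string{
		v1.LabelOSStable:   goruntime.GOOS,
		v1.LabelArchStable: goruntime.GOARCH,
	}
	changed := false
	for key, want := range required {
		if node.Labels[key] != want {
			if node.Labels == nil {
				node.Labels = map[string]string{}
			}
			node.Labels[key] = want
			changed = true
		}
	}
	return changed
}

func main() {
	node := &v1.Node{} // a node with no labels at all
	fmt.Println(reconcileOSArchLabels(node), node.Labels)
	// true map[kubernetes.io/arch:<GOARCH> kubernetes.io/os:<GOOS>]
}

In the diff itself the check stays inline in tryUpdateNodeStatus, and its result feeds the existing short-circuit alongside nodeStatusHasChanged, so a missing or stale label alone is enough to trigger a patch.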

View File

@@ -217,7 +217,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
kubelet.setCachedMachineInfo(machineInfo)
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
@@ -395,7 +395,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
kubelet.setCachedMachineInfo(machineInfo)
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
@@ -601,7 +601,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
kubelet.setCachedMachineInfo(machineInfo)
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
@@ -822,7 +822,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
now := metav1.NewTime(clock.Now()).Rfc3339Copy()
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
@@ -1033,13 +1033,13 @@ func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
}{
{
desc: "no volumes and no update",
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}}},
},
{
desc: "volumes inuse on node and volumeManager",
existingVolumes: []v1.UniqueVolumeName{"vol1"},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Status: v1.NodeStatus{
VolumesInUse: []v1.UniqueVolumeName{"vol1"},
},
@@ -1054,14 +1054,14 @@ func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
VolumesInUse: []v1.UniqueVolumeName{"vol1"},
},
},
expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}}},
},
{
desc: "volumes inuse in volumeManager but not on node",
existingVolumes: []v1.UniqueVolumeName{"vol1"},
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
expectedNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Status: v1.NodeStatus{
VolumesInUse: []v1.UniqueVolumeName{"vol1"},
},
@@ -2819,7 +2819,7 @@ func TestUpdateNodeAddresses(t *testing.T) {
},
}
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Addresses: test.After,

View File

@@ -22,6 +22,7 @@ import (
"io/ioutil"
"os"
"reflect"
goruntime "runtime"
"sort"
"strconv"
"testing"
@@ -30,6 +31,7 @@ import (
cadvisorapi "github.com/google/cadvisor/info/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
core "k8s.io/client-go/testing"
"k8s.io/mount-utils"
v1 "k8s.io/api/core/v1"
@@ -858,6 +860,71 @@ func TestHandleNodeSelector(t *testing.T) {
checkPodStatus(t, kl, fittingPod, v1.PodPending)
}
// Tests that OS/arch-based node selectors are handled correctly: pods whose selector does not match get a failed status in the status map, matching pods are admitted.
func TestHandleNodeSelectorBasedOnOS(t *testing.T) {
tests := []struct {
name string
nodeLabels map[string]string
podSelector map[string]string
podStatus v1.PodPhase
}{
{
name: "correct OS label, wrong pod selector, admission denied",
nodeLabels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH},
podSelector: map[string]string{v1.LabelOSStable: "dummyOS"},
podStatus: v1.PodFailed,
},
{
name: "correct OS label, correct pod selector, admission denied",
nodeLabels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH},
podSelector: map[string]string{v1.LabelOSStable: goruntime.GOOS},
podStatus: v1.PodPending,
},
{
// Expect no patching to happen; the extra label ("key": "B") should be preserved and remain usable for nodeAffinity.
name: "new node label, correct pod selector, admitted",
nodeLabels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH, "key": "B"},
podSelector: map[string]string{"key": "B"},
podStatus: v1.PodPending,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
nodes := []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: test.nodeLabels},
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
}
kl.nodeLister = testNodeLister{nodes: nodes}
recorder := record.NewFakeRecorder(20)
nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: string("testNode"),
UID: types.UID("testNode"),
Namespace: "",
}
testClusterDNSDomain := "TEST"
kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
pod := podWithUIDNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: test.podSelector})
kl.HandlePodAdditions([]*v1.Pod{pod})
// Check pod status stored in the status map.
checkPodStatus(t, kl, pod, test.podStatus)
})
}
}
// Tests that we handle exceeded resources correctly by setting the failed status in status map.
func TestHandleMemExceeded(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
@@ -2291,6 +2358,97 @@ func TestPreInitRuntimeService(t *testing.T) {
}
}
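// TestSyncLabels verifies that syncNodeStatus reconciles the kubernetes.io/os and
// kubernetes.io/arch labels, patching the node only when they are missing or wrong.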
func TestSyncLabels(t *testing.T) {
tests := []struct {
name string
existingNode *v1.Node
isPatchingNeeded bool
}{
{
name: "no labels",
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{}}},
isPatchingNeeded: true,
},
{
name: "wrong labels",
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS", v1.LabelArchStable: "dummyArch"}}},
isPatchingNeeded: true,
},
{
name: "correct labels",
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}}},
isPatchingNeeded: false,
},
{
name: "partially correct labels",
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: "dummyArch"}}},
isPatchingNeeded: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
test.existingNode.Name = string(kl.nodeName)
kl.nodeLister = testNodeLister{nodes: []*v1.Node{test.existingNode}}
go func() { kl.syncNodeStatus() }()
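// syncNodeStatus runs asynchronously, so poll until the node seen by the fake client carries the expected labels.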
err := retryWithExponentialBackOff(
100*time.Millisecond,
func() (bool, error) {
var savedNode *v1.Node
if test.isPatchingNeeded {
actions := kubeClient.Actions()
if len(actions) < 2 {
t.Logf("No patch action yet")
return false, nil
}
action := actions[1]
if action.GetVerb() != "patch" {
t.Logf("expected patch action, got %q", action.GetVerb())
return false, nil
}
patchAction := action.(core.PatchActionImpl)
var err error
savedNode, err = applyNodeStatusPatch(test.existingNode, patchAction.GetPatch())
if err != nil {
t.Logf("node patching failed, %v", err)
return false, nil
}
} else {
savedNode = test.existingNode
}
val, ok := savedNode.Labels[v1.LabelOSStable]
if !ok {
t.Logf("expected kubernetes.io/os label to be present")
return false, nil
}
if val != goruntime.GOOS {
t.Logf("expected kubernetes.io/os to match runtime.GOOS but got %v", val)
return false, nil
}
val, ok = savedNode.Labels[v1.LabelArchStable]
if !ok {
t.Logf("expected kubernetes.io/arch label to be present")
return false, nil
}
if val != goruntime.GOARCH {
t.Logf("expected kubernetes.io/arch to match runtime.GOARCH but got %v", val)
return false, nil
}
return true, nil
},
)
if err != nil {
t.Fatalf("expected labels to be reconciled but it failed with %v", err)
}
})
}
}
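To run just the new test locally, something along these lines should work (the package path is assumed from where kubelet_test.go sits in the tree):
go test -run TestSyncLabels ./pkg/kubelet/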
func waitForVolumeUnmount(
volumeManager kubeletvolume.VolumeManager,
pod *v1.Pod) error {

View File

@@ -251,7 +251,7 @@ func PatchNodeCIDRs(c clientset.Interface, node types.NodeName, cidrs []string)
return nil
}
// PatchNodeStatus patches node status.
// PatchNodeStatus patches node status along with the object metadata
func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, []byte, error) {
patchBytes, err := preparePatchBytesforNodeStatus(nodeName, oldNode, newNode)
if err != nil {
@@ -265,6 +265,7 @@ func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode
return updatedNode, patchBytes, nil
}
// preparePatchBytesforNodeStatus builds the patch bytes for the node's object metadata and status
func preparePatchBytesforNodeStatus(nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) ([]byte, error) {
oldData, err := json.Marshal(oldNode)
if err != nil {
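The last hunk is cut off right after the old node is marshalled. For context, patch bytes like these are normally produced with the strategic-merge-patch utilities from k8s.io/apimachinery; the sketch below shows that general pattern in a stand-alone form and is not the exact body of preparePatchBytesforNodeStatus:

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

// buildNodePatch marshals the old and new node objects and computes a
// strategic two-way merge patch between them. Simplified stand-in for
// illustration; not the real helper.
func buildNodePatch(oldNode, newNode *v1.Node) ([]byte, error) {
	oldData, err := json.Marshal(oldNode)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal old node: %v", err)
	}
	newData, err := json.Marshal(newNode)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal new node: %v", err)
	}
	return strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
}

func main() {
	oldNode := &v1.Node{}
	newNode := oldNode.DeepCopy()
	newNode.Labels = map[string]string{v1.LabelOSStable: "linux"}
	patch, err := buildNodePatch(oldNode, newNode)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch))
	// prints something like {"metadata":{"labels":{"kubernetes.io/os":"linux"}}}
}

Because labels live under metadata, a label-only change still yields a non-empty patch, which is what lets the new areRequiredLabelsNotPresent flag force an update inside the normal report interval.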