Merge pull request #104613 from ravisantoshgudimetla/reconcile-labels

[kubelet]: Reconcile OS and arch labels periodically
Kubernetes Prow Robot 2021-11-08 14:15:19 -08:00 committed by GitHub
commit cda360c59f
7 changed files with 374 additions and 11 deletions
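In short: on each periodic node status sync (the reconcile that runs roughly every 10 seconds), the kubelet now checks that the node's kubernetes.io/os and kubernetes.io/arch labels match what the Go runtime reports, rewrites them on mismatch, and forces a patch even when the status itself has not changed. A minimal standalone sketch of that check follows; the helper name and package are illustrative, not the kubelet's actual API:

package main

import (
    "fmt"
    goruntime "runtime"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// reconcileOSArchLabels is an illustrative helper mirroring the check added to
// tryUpdateNodeStatus: set the stable OS/arch labels to the values the Go runtime
// reports and return true if the node object had to be changed.
func reconcileOSArchLabels(node *v1.Node) bool {
    if node.Labels == nil {
        node.Labels = map[string]string{}
    }
    changed := false
    if node.Labels[v1.LabelOSStable] != goruntime.GOOS {
        node.Labels[v1.LabelOSStable] = goruntime.GOOS
        changed = true
    }
    if node.Labels[v1.LabelArchStable] != goruntime.GOARCH {
        node.Labels[v1.LabelArchStable] = goruntime.GOARCH
        changed = true
    }
    return changed
}

func main() {
    node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1", Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}}
    fmt.Println(reconcileOSArchLabels(node), node.Labels) // true map[kubernetes.io/arch:amd64 kubernetes.io/os:linux] (on linux/amd64)
}

When the sketch returns true, the corresponding flag in tryUpdateNodeStatus (areRequiredLabelsNotPresent below) makes the kubelet send a patch even though nodeStatusHasChanged reports no change.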


@@ -509,11 +509,30 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
}
}
areRequiredLabelsNotPresent := false
osName, osLabelExists := node.Labels[v1.LabelOSStable]
if !osLabelExists || osName != goruntime.GOOS {
if len(node.Labels) == 0 {
node.Labels = make(map[string]string)
}
node.Labels[v1.LabelOSStable] = goruntime.GOOS
areRequiredLabelsNotPresent = true
}
// Set the arch if there is a mismatch
arch, archLabelExists := node.Labels[v1.LabelArchStable]
if !archLabelExists || arch != goruntime.GOARCH {
if len(node.Labels) == 0 {
node.Labels = make(map[string]string)
}
node.Labels[v1.LabelArchStable] = goruntime.GOARCH
areRequiredLabelsNotPresent = true
}
kl.setNodeStatus(node)
now := kl.clock.Now()
if now.Before(kl.lastStatusReportTime.Add(kl.nodeStatusReportFrequency)) {
if !podCIDRChanged && !nodeStatusHasChanged(&originalNode.Status, &node.Status) && !areRequiredLabelsNotPresent {
// We must mark the volumes as ReportedInUse in volume manager's dsw even
// if no changes were made to the node status (no volumes were added or removed
// from the VolumesInUse list).


@@ -217,7 +217,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
kubelet.setCachedMachineInfo(machineInfo)
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
@@ -395,7 +395,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
kubelet.setCachedMachineInfo(machineInfo)
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
@@ -601,7 +601,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
kubelet.setCachedMachineInfo(machineInfo)
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
@@ -822,7 +822,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
now := metav1.NewTime(clock.Now()).Rfc3339Copy()
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
@@ -1033,13 +1033,13 @@ func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
}{
{
desc: "no volumes and no update",
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}}},
},
{
desc: "volumes inuse on node and volumeManager",
existingVolumes: []v1.UniqueVolumeName{"vol1"},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Status: v1.NodeStatus{
VolumesInUse: []v1.UniqueVolumeName{"vol1"},
},
@@ -1054,14 +1054,14 @@ func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
VolumesInUse: []v1.UniqueVolumeName{"vol1"},
},
},
expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}}},
},
{
desc: "volumes inuse in volumeManager but not on node",
existingVolumes: []v1.UniqueVolumeName{"vol1"},
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
expectedNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Status: v1.NodeStatus{
VolumesInUse: []v1.UniqueVolumeName{"vol1"},
},
@@ -2819,7 +2819,7 @@ func TestUpdateNodeAddresses(t *testing.T) {
},
}
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Addresses: test.After,


@@ -22,6 +22,7 @@ import (
"io/ioutil"
"os"
"reflect"
goruntime "runtime"
"sort" "sort"
"strconv" "strconv"
"testing" "testing"
@ -30,6 +31,7 @@ import (
cadvisorapi "github.com/google/cadvisor/info/v1" cadvisorapi "github.com/google/cadvisor/info/v1"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
core "k8s.io/client-go/testing"
"k8s.io/mount-utils" "k8s.io/mount-utils"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
@ -858,6 +860,71 @@ func TestHandleNodeSelector(t *testing.T) {
checkPodStatus(t, kl, fittingPod, v1.PodPending) checkPodStatus(t, kl, fittingPod, v1.PodPending)
} }
// Tests that a pod with a non-matching OS label selector is handled correctly by setting the failed status in the status map.
func TestHandleNodeSelectorBasedOnOS(t *testing.T) {
tests := []struct {
name string
nodeLabels map[string]string
podSelector map[string]string
podStatus v1.PodPhase
}{
{
name: "correct OS label, wrong pod selector, admission denied",
nodeLabels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH},
podSelector: map[string]string{v1.LabelOSStable: "dummyOS"},
podStatus: v1.PodFailed,
},
{
name: "correct OS label, correct pod selector, admission denied",
nodeLabels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH},
podSelector: map[string]string{v1.LabelOSStable: goruntime.GOOS},
podStatus: v1.PodPending,
},
{
// Expect no patching to happen, label B should be preserved and can be used for nodeAffinity.
name: "new node label, correct pod selector, admitted",
nodeLabels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH, "key": "B"},
podSelector: map[string]string{"key": "B"},
podStatus: v1.PodPending,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
nodes := []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: test.nodeLabels},
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
}
kl.nodeLister = testNodeLister{nodes: nodes}
recorder := record.NewFakeRecorder(20)
nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: string("testNode"),
UID: types.UID("testNode"),
Namespace: "",
}
testClusterDNSDomain := "TEST"
kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
pod := podWithUIDNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: test.podSelector})
kl.HandlePodAdditions([]*v1.Pod{pod})
// Check pod status stored in the status map.
checkPodStatus(t, kl, pod, test.podStatus)
})
}
}
// Tests that we handle exceeded resources correctly by setting the failed status in status map.
func TestHandleMemExceeded(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
@@ -2291,6 +2358,97 @@ func TestPreInitRuntimeService(t *testing.T) {
}
}
func TestSyncLabels(t *testing.T) {
tests := []struct {
name string
existingNode *v1.Node
isPatchingNeeded bool
}{
{
name: "no labels",
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{}}},
isPatchingNeeded: true,
},
{
name: "wrong labels",
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS", v1.LabelArchStable: "dummyArch"}}},
isPatchingNeeded: true,
},
{
name: "correct labels",
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}}},
isPatchingNeeded: false,
},
{
name: "partially correct labels",
existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: "dummyArch"}}},
isPatchingNeeded: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testKubelet := newTestKubelet(t, false)
defer testKubelet.Cleanup()
kl := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
test.existingNode.Name = string(kl.nodeName)
kl.nodeLister = testNodeLister{nodes: []*v1.Node{test.existingNode}}
go func() { kl.syncNodeStatus() }()
err := retryWithExponentialBackOff(
100*time.Millisecond,
func() (bool, error) {
var savedNode *v1.Node
if test.isPatchingNeeded {
actions := kubeClient.Actions()
if len(actions) == 0 {
t.Logf("No action yet")
return false, nil
}
action := actions[1]
if action.GetVerb() == "patch" {
patchAction := action.(core.PatchActionImpl)
var err error
savedNode, err = applyNodeStatusPatch(test.existingNode, patchAction.GetPatch())
if err != nil {
t.Logf("node patching failed, %v", err)
return false, nil
}
}
} else {
savedNode = test.existingNode
}
val, ok := savedNode.Labels[v1.LabelOSStable]
if !ok {
t.Logf("expected kubernetes.io/os label to be present")
return false, nil
}
if val != goruntime.GOOS {
t.Logf("expected kubernetes.io/os to match runtime.GOOS but got %v", val)
return false, nil
}
val, ok = savedNode.Labels[v1.LabelArchStable]
if !ok {
t.Logf("expected kubernetes.io/arch label to be present")
return false, nil
}
if val != goruntime.GOARCH {
t.Logf("expected kubernetes.io/arch to match runtime.GOARCH but got %v", val)
return false, nil
}
return true, nil
},
)
if err != nil {
t.Fatalf("expected labels to be reconciled but it failed with %v", err)
}
})
}
}
func waitForVolumeUnmount(
volumeManager kubeletvolume.VolumeManager,
pod *v1.Pod) error {


@@ -18,6 +18,7 @@ package lifecycle
import (
"fmt"
"runtime"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apiserver/pkg/util/feature" "k8s.io/apiserver/pkg/util/feature"
@ -153,11 +154,39 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
Message: message, Message: message,
} }
} }
if rejectPodAdmissionBasedOnOSSelector(admitPod, node) {
return PodAdmitResult{
Admit: false,
Reason: "PodOSSelectorNodeLabelDoesNotMatch",
Message: "Failed to admit pod as the `kubernetes.io/os` label doesn't match node label",
}
}
return PodAdmitResult{ return PodAdmitResult{
Admit: true, Admit: true,
} }
} }
// rejectPodAdmissionBasedOnOSSelector rejects the pod if its kubernetes.io/os nodeSelector doesn't match the node's OS label.
// We expect the kubelet status reconcile, which happens every 10 seconds, to update the node labels if there is a mismatch.
func rejectPodAdmissionBasedOnOSSelector(pod *v1.Pod, node *v1.Node) bool {
labels := node.Labels
osName, osLabelExists := labels[v1.LabelOSStable]
if !osLabelExists || osName != runtime.GOOS {
if len(labels) == 0 {
labels = make(map[string]string)
}
labels[v1.LabelOSStable] = runtime.GOOS
}
podLabelSelector, podOSLabelExists := pod.Labels[v1.LabelOSStable]
if !podOSLabelExists {
// If the labelselector didn't exist, let's keep the current behavior as is
return false
} else if podOSLabelExists && podLabelSelector != labels[v1.LabelOSStable] {
return true
}
return false
}
func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulerframework.NodeInfo) *v1.Pod {
podCopy := pod.DeepCopy()
for i, c := range pod.Spec.Containers {


@@ -18,6 +18,7 @@ package lifecycle
import (
"reflect"
goruntime "runtime"
"testing" "testing"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
@ -267,3 +268,57 @@ func TestGeneralPredicates(t *testing.T) {
}) })
} }
} }
func TestRejectPodAdmissionBasedOnOSSelector(t *testing.T) {
tests := []struct {
name string
pod *v1.Pod
node *v1.Node
expectRejection bool
}{
{
name: "OS label match",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS}}},
node: &v1.Node{Spec: v1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS}}},
expectRejection: false,
},
{
name: "dummyOS label, but the underlying OS matches",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS}}},
node: &v1.Node{Spec: v1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
expectRejection: false,
},
{
name: "dummyOS label, but the underlying OS doesn't match",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
node: &v1.Node{Spec: v1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
expectRejection: true,
},
{
name: "dummyOS label, but the underlying OS doesn't match",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
node: &v1.Node{Spec: v1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
expectRejection: true,
},
{
name: "OS field mismatch, OS label on node object would be reset to correct value",
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
node: &v1.Node{Spec: v1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
expectRejection: true,
},
{
name: "No label selector on the pod, should be admitted",
pod: &v1.Pod{},
node: &v1.Node{Spec: v1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
expectRejection: false,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
actualResult := rejectPodAdmissionBasedOnOSSelector(test.pod, test.node)
if test.expectRejection != actualResult {
t.Errorf("unexpected result, expected %v but got %v", test.expectRejection, actualResult)
}
})
}
}


@@ -251,7 +251,7 @@ func PatchNodeCIDRs(c clientset.Interface, node types.NodeName, cidrs []string)
return nil
}
// PatchNodeStatus patches node status along with objectmetadata
func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, []byte, error) {
patchBytes, err := preparePatchBytesforNodeStatus(nodeName, oldNode, newNode)
if err != nil {
@@ -265,6 +265,7 @@ func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode
return updatedNode, patchBytes, nil
}
// preparePatchBytesforNodeStatus updates the node objectmetadata and status
func preparePatchBytesforNodeStatus(nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) ([]byte, error) {
oldData, err := json.Marshal(oldNode)
if err != nil {
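Because the reconciled labels live in ObjectMeta rather than NodeStatus, the comment change above is the point: the two-way strategic merge patch built here carries metadata changes as well as status changes. A self-contained sketch of what a label-only patch body comes out as, using the same strategic-merge-patch helper; the node name and label values are illustrative:

package main

import (
    "encoding/json"
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
    oldNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1", Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}}
    newNode := oldNode.DeepCopy()
    newNode.Labels[v1.LabelOSStable] = "linux"

    oldData, _ := json.Marshal(oldNode)
    newData, _ := json.Marshal(newNode)

    // The same helper preparePatchBytesforNodeStatus relies on; a label change
    // shows up under "metadata", not "status".
    patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
    if err != nil {
        panic(err)
    }
    fmt.Println(string(patch)) // e.g. {"metadata":{"labels":{"kubernetes.io/os":"linux"}}}
}

Sending that patch through the node status endpoint is how the periodic status sync can fix up the OS and arch labels without a separate node update call.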


@@ -0,0 +1,101 @@
//go:build cgo && linux
// +build cgo,linux
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2enode
import (
"context"
"fmt"
"runtime"
"time"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
node2 "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/test/e2e/framework"
)
var _ = SIGDescribe("OSArchLabelReconciliation [Serial] [Slow] [Disruptive]", func() {
f := framework.NewDefaultFramework("node-label-reconciliation")
ginkgo.Context("Kubelet", func() {
ginkgo.It("should reconcile the OS and Arch labels when restarted", func() {
node := getLocalNode(f)
framework.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS)
framework.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelArchStable, runtime.GOARCH)
ginkgo.By("killing and restarting kubelet")
// Let's kill the kubelet
startKubelet := stopKubelet()
// Update labels
newNode := node.DeepCopy()
newNode.Labels[v1.LabelOSStable] = "dummyOS"
newNode.Labels[v1.LabelArchStable] = "dummyArch"
_, _, err := node2.PatchNodeStatus(f.ClientSet.CoreV1(), types.NodeName(node.Name), node, newNode)
framework.ExpectNoError(err)
// Restart kubelet
startKubelet()
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, framework.RestartNodeReadyAgainTimeout))
// If reconciliation works as intended, the node should have all the labels reset properly
err = waitForNodeLabels(f.ClientSet.CoreV1(), node.Name, 5*time.Minute)
framework.ExpectNoError(err)
})
ginkgo.It("should reconcile the OS and Arch labels when running", func() {
node := getLocalNode(f)
framework.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS)
framework.ExpectNodeHasLabel(f.ClientSet, node.Name, v1.LabelArchStable, runtime.GOARCH)
// Update labels
newNode := node.DeepCopy()
newNode.Labels[v1.LabelOSStable] = "dummyOS"
newNode.Labels[v1.LabelArchStable] = "dummyArch"
_, _, err := node2.PatchNodeStatus(f.ClientSet.CoreV1(), types.NodeName(node.Name), node, newNode)
framework.ExpectNoError(err)
err = waitForNodeLabels(f.ClientSet.CoreV1(), node.Name, 5*time.Minute)
framework.ExpectNoError(err)
})
})
})
// waitForNodeLabels waits for the node to have the appropriate labels.
func waitForNodeLabels(c v1core.CoreV1Interface, nodeName string, timeout time.Duration) error {
ginkgo.By(fmt.Sprintf("Waiting for node %v to have appropriate labels", nodeName))
// Poll until the node has desired labels
return wait.Poll(framework.Poll, timeout,
func() (bool, error) {
node, err := c.Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}
osLabel, ok := node.Labels[v1.LabelOSStable]
if !ok || osLabel != runtime.GOOS {
return false, nil
}
archLabel, ok := node.Labels[v1.LabelArchStable]
if !ok || archLabel != runtime.GOARCH {
return false, nil
}
return true, nil
})
}