Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 03:41:45 +00:00)

Merge pull request #81431 from andrewsykim/ga-node-topology

Promote Node Zone/Region Topology Labels to GA

Commit: 3ee195f0e9
@@ -19,6 +19,7 @@ go_library(
"//pkg/scheduler/api:go_default_library",
"//pkg/util/node:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
@@ -23,6 +23,7 @@ import (
"time"

v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -42,6 +43,35 @@ import (
nodeutil "k8s.io/kubernetes/pkg/util/node"
)

// labelReconcileInfo lists Node labels to reconcile, and how to reconcile them.
// primaryKey and secondaryKey are keys of labels to reconcile.
// - If both keys exist, but their values don't match. Use the value from the
// primaryKey as the source of truth to reconcile.
// - If ensureSecondaryExists is true, and the secondaryKey does not
// exist, secondaryKey will be added with the value of the primaryKey.
var labelReconcileInfo = []struct {
primaryKey string
secondaryKey string
ensureSecondaryExists bool
}{
{
// Reconcile the beta and the GA zone label using the beta label as
// the source of truth
// TODO: switch the primary key to GA labels in v1.21
primaryKey: v1.LabelZoneFailureDomain,
secondaryKey: v1.LabelZoneFailureDomainStable,
ensureSecondaryExists: true,
},
{
// Reconcile the beta and the stable region label using the beta label as
// the source of truth
// TODO: switch the primary key to GA labels in v1.21
primaryKey: v1.LabelZoneRegion,
secondaryKey: v1.LabelZoneRegionStable,
ensureSecondaryExists: true,
},
}

var UpdateNodeSpecBackoff = wait.Backoff{
Steps: 20,
Duration: 50 * time.Millisecond,
@@ -125,6 +155,63 @@ func (cnc *CloudNodeController) UpdateNodeStatus(ctx context.Context) {
for i := range nodes.Items {
cnc.updateNodeAddress(ctx, &nodes.Items[i], instances)
}

for _, node := range nodes.Items {
err = cnc.reconcileNodeLabels(node.Name)
if err != nil {
klog.Errorf("Error reconciling node labels for node %q, err: %v", node.Name, err)
}
}
}

// reconcileNodeLabels reconciles node labels transitioning from beta to GA
func (cnc *CloudNodeController) reconcileNodeLabels(nodeName string) error {
node, err := cnc.nodeInformer.Lister().Get(nodeName)
if err != nil {
// If node not found, just ignore it.
if apierrors.IsNotFound(err) {
return nil
}

return err
}

if node.Labels == nil {
// Nothing to reconcile.
return nil
}

labelsToUpdate := map[string]string{}
for _, r := range labelReconcileInfo {
primaryValue, primaryExists := node.Labels[r.primaryKey]
secondaryValue, secondaryExists := node.Labels[r.secondaryKey]

if !primaryExists {
// The primary label key does not exist. This should not happen
// within our supported version skew range, when no external
// components/factors modifying the node object. Ignore this case.
continue
}
if secondaryExists && primaryValue != secondaryValue {
// Secondary label exists, but not consistent with the primary
// label. Need to reconcile.
labelsToUpdate[r.secondaryKey] = primaryValue

} else if !secondaryExists && r.ensureSecondaryExists {
// Apply secondary label based on primary label.
labelsToUpdate[r.secondaryKey] = primaryValue
}
}

if len(labelsToUpdate) == 0 {
return nil
}

if !cloudnodeutil.AddOrUpdateLabelsOnNode(cnc.kubeClient, labelsToUpdate, node) {
return fmt.Errorf("failed update labels for node %+v", node)
}

return nil
}

// UpdateNodeAddress updates the nodeAddress of a single node
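Illustration (not part of the diff): the reconciliation rules above can be exercised in isolation. The sketch below applies the same primary/secondary logic to a plain label map, writing the keys out literally instead of using the v1 constants.

package main

import "fmt"

// reconcileRule mirrors a labelReconcileInfo entry: copy the primary label
// onto the secondary key when the secondary is missing or disagrees.
type reconcileRule struct {
	primaryKey            string
	secondaryKey          string
	ensureSecondaryExists bool
}

var rules = []reconcileRule{
	{"failure-domain.beta.kubernetes.io/zone", "topology.kubernetes.io/zone", true},
	{"failure-domain.beta.kubernetes.io/region", "topology.kubernetes.io/region", true},
}

// labelsToReconcile returns the label updates such a controller would issue.
func labelsToReconcile(labels map[string]string) map[string]string {
	updates := map[string]string{}
	for _, r := range rules {
		primary, ok := labels[r.primaryKey]
		if !ok {
			continue // nothing to copy from; the controller skips this case too
		}
		secondary, exists := labels[r.secondaryKey]
		switch {
		case exists && secondary != primary:
			updates[r.secondaryKey] = primary // inconsistent: the beta label wins
		case !exists && r.ensureSecondaryExists:
			updates[r.secondaryKey] = primary // missing: add the GA label
		}
	}
	return updates
}

func main() {
	fmt.Println(labelsToReconcile(map[string]string{
		"failure-domain.beta.kubernetes.io/zone":   "us-west-1a",
		"failure-domain.beta.kubernetes.io/region": "us-west",
	}))
	// map[topology.kubernetes.io/region:us-west topology.kubernetes.io/zone:us-west-1a]
}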
@@ -298,10 +385,14 @@ func (cnc *CloudNodeController) initializeNode(ctx context.Context, node *v1.Nod
if zone.FailureDomain != "" {
klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneFailureDomain, zone.FailureDomain)
curNode.ObjectMeta.Labels[v1.LabelZoneFailureDomain] = zone.FailureDomain
klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneFailureDomainStable, zone.FailureDomain)
curNode.ObjectMeta.Labels[v1.LabelZoneFailureDomainStable] = zone.FailureDomain
}
if zone.Region != "" {
klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneRegion, zone.Region)
curNode.ObjectMeta.Labels[v1.LabelZoneRegion] = zone.Region
klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneRegionStable, zone.Region)
curNode.ObjectMeta.Labels[v1.LabelZoneRegionStable] = zone.Region
}
}
@@ -19,6 +19,7 @@ package cloud
import (
"context"
"errors"
"reflect"
"testing"
"time"
@@ -460,8 +461,12 @@ func TestZoneInitialized(t *testing.T) {

assert.Equal(t, 1, len(fnh.UpdatedNodes), "Node was not updated")
assert.Equal(t, "node0", fnh.UpdatedNodes[0].Name, "Node was not updated")
assert.Equal(t, 2, len(fnh.UpdatedNodes[0].ObjectMeta.Labels),
assert.Equal(t, 4, len(fnh.UpdatedNodes[0].ObjectMeta.Labels),
"Node label for Region and Zone were not set")
assert.Equal(t, "us-west", fnh.UpdatedNodes[0].ObjectMeta.Labels[v1.LabelZoneRegionStable],
"Node Region not correctly updated")
assert.Equal(t, "us-west-1a", fnh.UpdatedNodes[0].ObjectMeta.Labels[v1.LabelZoneFailureDomainStable],
"Node FailureDomain not correctly updated")
assert.Equal(t, "us-west", fnh.UpdatedNodes[0].ObjectMeta.Labels[v1.LabelZoneRegion],
"Node Region not correctly updated")
assert.Equal(t, "us-west-1a", fnh.UpdatedNodes[0].ObjectMeta.Labels[v1.LabelZoneFailureDomain],
@@ -672,6 +677,105 @@ func TestNodeProvidedIPAddresses(t *testing.T) {
assert.Equal(t, "10.0.0.1", updatedNodes[0].Status.Addresses[0].Address, "Node Addresses not correctly updated")
}

func Test_reconcileNodeLabels(t *testing.T) {
testcases := []struct {
name string
labels map[string]string
expectedLabels map[string]string
expectedErr error
}{
{
name: "requires reconcile",
labels: map[string]string{
v1.LabelZoneFailureDomain: "foo",
v1.LabelZoneRegion: "bar",
},
expectedLabels: map[string]string{
v1.LabelZoneFailureDomain: "foo",
v1.LabelZoneRegion: "bar",
v1.LabelZoneFailureDomainStable: "foo",
v1.LabelZoneRegionStable: "bar",
},
expectedErr: nil,
},
{
name: "doesn't require reconcile",
labels: map[string]string{
v1.LabelZoneFailureDomain: "foo",
v1.LabelZoneRegion: "bar",
v1.LabelZoneFailureDomainStable: "foo",
v1.LabelZoneRegionStable: "bar",
},
expectedLabels: map[string]string{
v1.LabelZoneFailureDomain: "foo",
v1.LabelZoneRegion: "bar",
v1.LabelZoneFailureDomainStable: "foo",
v1.LabelZoneRegionStable: "bar",
},
expectedErr: nil,
},
{
name: "require reconcile -- secondary labels are different from primary",
labels: map[string]string{
v1.LabelZoneFailureDomain: "foo",
v1.LabelZoneRegion: "bar",
v1.LabelZoneFailureDomainStable: "wrongfoo",
v1.LabelZoneRegionStable: "wrongbar",
},
expectedLabels: map[string]string{
v1.LabelZoneFailureDomain: "foo",
v1.LabelZoneRegion: "bar",
v1.LabelZoneFailureDomainStable: "foo",
v1.LabelZoneRegionStable: "bar",
},
expectedErr: nil,
},
}

for _, test := range testcases {
t.Run(test.name, func(t *testing.T) {
testNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node01",
Labels: test.labels,
},
}

clientset := fake.NewSimpleClientset(testNode)
factory := informers.NewSharedInformerFactory(clientset, 0)

cnc := &CloudNodeController{
kubeClient: clientset,
nodeInformer: factory.Core().V1().Nodes(),
}

// activate node informer
factory.Core().V1().Nodes().Informer()
factory.Start(nil)
factory.WaitForCacheSync(nil)

err := cnc.reconcileNodeLabels("node01")
if err != test.expectedErr {
t.Logf("actual err: %v", err)
t.Logf("expected err: %v", test.expectedErr)
t.Errorf("unexpected error")
}

actualNode, err := clientset.CoreV1().Nodes().Get("node01", metav1.GetOptions{})
if err != nil {
t.Fatalf("error getting updated node: %v", err)
}

if !reflect.DeepEqual(actualNode.Labels, test.expectedLabels) {
t.Logf("actual node labels: %v", actualNode.Labels)
t.Logf("expected node labels: %v", test.expectedLabels)
t.Errorf("updated node did not match expected node")
}
})
}

}

// Tests that node address changes are detected correctly
func TestNodeAddressesChangeDetected(t *testing.T) {
addressSet1 := []v1.NodeAddress{
@@ -197,8 +197,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
evictionTimeout := 10 * time.Minute
labels := map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
}

// Because of the logic that prevents NC from evicting anything when all Nodes are NotReady
@@ -234,8 +236,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
Name: "node0",
CreationTimestamp: fakeNow,
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
},
@@ -244,8 +248,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -314,8 +320,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -334,8 +342,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -378,8 +388,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -398,8 +410,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -469,8 +483,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -489,8 +505,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -533,8 +551,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -553,8 +573,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -597,8 +619,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -617,8 +641,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -762,8 +788,10 @@ func TestPodStatusChange(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -930,8 +958,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -950,8 +980,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -985,8 +1017,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -1005,8 +1039,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region2",
v1.LabelZoneFailureDomain: "zone2",
v1.LabelZoneRegionStable: "region2",
v1.LabelZoneFailureDomainStable: "zone2",
v1.LabelZoneRegion: "region2",
v1.LabelZoneFailureDomain: "zone2",
},
},
Status: v1.NodeStatus{
@@ -1047,8 +1083,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -1067,8 +1105,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone2",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone2",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone2",
},
},
Status: v1.NodeStatus{
@@ -1108,8 +1148,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -1128,8 +1170,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
Name: "node-master",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -1167,8 +1211,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -1187,8 +1233,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone2",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone2",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone2",
},
},
Status: v1.NodeStatus{
@@ -1229,8 +1277,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -1249,8 +1299,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -1269,8 +1321,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
Name: "node2",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -1289,8 +1343,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
Name: "node3",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -1309,8 +1365,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
Name: "node4",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -2584,8 +2642,10 @@ func TestApplyNoExecuteTaints(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -2606,8 +2666,10 @@ func TestApplyNoExecuteTaints(t *testing.T) {
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -2627,8 +2689,10 @@ func TestApplyNoExecuteTaints(t *testing.T) {
Name: "node2",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -2731,8 +2795,10 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -2754,8 +2820,10 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -2880,8 +2948,10 @@ func TestTaintsNodeByCondition(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -2939,8 +3009,10 @@ func TestTaintsNodeByCondition(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -2969,8 +3041,10 @@ func TestTaintsNodeByCondition(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -2999,8 +3073,10 @@ func TestTaintsNodeByCondition(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -3023,8 +3099,10 @@ func TestTaintsNodeByCondition(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -3145,8 +3223,10 @@ func TestReconcileNodeLabels(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegion: "region1",
v1.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
@@ -3201,12 +3281,12 @@ func TestReconcileNodeLabels(t *testing.T) {
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneRegionStable: "region1",
},
},
},
ExpectedLabels: map[string]string{
v1.LabelZoneRegion: "region1",
v1.LabelZoneRegionStable: "region1",
},
},
{
@@ -35,15 +35,14 @@ const (
// TODO: stop applying the beta Arch labels in Kubernetes 1.18.
LabelArch = "beta.kubernetes.io/arch"

// GA versions of the legacy beta labels.
// TODO: update kubelet and controllers to set both beta and GA labels, then export these constants
labelZoneFailureDomainGA = "failure-domain.kubernetes.io/zone"
labelZoneRegionGA = "failure-domain.kubernetes.io/region"
labelInstanceTypeGA = "kubernetes.io/instance-type"
labelInstanceTypeGA = "kubernetes.io/instance-type"
)

var kubeletLabels = sets.NewString(
v1.LabelHostname,
v1.LabelZoneFailureDomainStable,
v1.LabelZoneRegionStable,
v1.LabelZoneFailureDomain,
v1.LabelZoneRegion,
v1.LabelInstanceType,
@@ -53,8 +52,6 @@ var kubeletLabels = sets.NewString(
LabelOS,
LabelArch,

labelZoneFailureDomainGA,
labelZoneRegionGA,
labelInstanceTypeGA,
)
@@ -150,6 +150,8 @@ func (kl *Kubelet) reconcileExtendedResource(initialNode, node *v1.Node) bool {
func (kl *Kubelet) updateDefaultLabels(initialNode, existingNode *v1.Node) bool {
defaultLabels := []string{
v1.LabelHostname,
v1.LabelZoneFailureDomainStable,
v1.LabelZoneRegionStable,
v1.LabelZoneFailureDomain,
v1.LabelZoneRegion,
v1.LabelInstanceType,
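For context (not part of the diff): updateDefaultLabels walks the list above and copies those keys from the freshly built initial node onto the existing node object. A simplified stand-in using plain maps and literal keys, rather than the kubelet's *v1.Node plumbing:

package main

import "fmt"

// defaultLabels mirrors the slice above, now including the GA topology keys.
var defaultLabels = []string{
	"kubernetes.io/hostname",
	"topology.kubernetes.io/zone",
	"topology.kubernetes.io/region",
	"failure-domain.beta.kubernetes.io/zone",
	"failure-domain.beta.kubernetes.io/region",
	"beta.kubernetes.io/instance-type",
}

// syncDefaultLabels copies the default labels from initial onto existing and
// reports whether anything changed (the analogue of needsUpdate in the tests).
func syncDefaultLabels(initial, existing map[string]string) bool {
	changed := false
	for _, key := range defaultLabels {
		want, ok := initial[key]
		if !ok {
			continue
		}
		if existing[key] != want {
			existing[key] = want
			changed = true
		}
	}
	return changed
}

func main() {
	existing := map[string]string{"kubernetes.io/hostname": "old-hostname"}
	initial := map[string]string{
		"kubernetes.io/hostname":      "new-hostname",
		"topology.kubernetes.io/zone": "zone1",
	}
	fmt.Println(syncDefaultLabels(initial, existing), existing)
}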
@@ -342,10 +344,14 @@ func (kl *Kubelet) initialNode(ctx context.Context) (*v1.Node, error) {
if zone.FailureDomain != "" {
klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneFailureDomain, zone.FailureDomain)
node.ObjectMeta.Labels[v1.LabelZoneFailureDomain] = zone.FailureDomain
klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneFailureDomainStable, zone.FailureDomain)
node.ObjectMeta.Labels[v1.LabelZoneFailureDomainStable] = zone.FailureDomain
}
if zone.Region != "" {
klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneRegion, zone.Region)
node.ObjectMeta.Labels[v1.LabelZoneRegion] = zone.Region
klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneRegionStable, zone.Region)
node.ObjectMeta.Labels[v1.LabelZoneRegionStable] = zone.Region
}
}
}
@@ -1523,12 +1523,14 @@ func TestUpdateDefaultLabels(t *testing.T) {
initialNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
v1.LabelZoneRegionStable: "new-zone-region",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
},
},
},
@@ -1539,12 +1541,14 @@ func TestUpdateDefaultLabels(t *testing.T) {
},
needsUpdate: true,
finalLabels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
v1.LabelZoneRegionStable: "new-zone-region",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
},
},
{
@@ -1552,35 +1556,41 @@ func TestUpdateDefaultLabels(t *testing.T) {
initialNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
v1.LabelZoneRegionStable: "new-zone-region",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
},
},
},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "old-hostname",
v1.LabelZoneFailureDomain: "old-zone-failure-domain",
v1.LabelZoneRegion: "old-zone-region",
v1.LabelInstanceType: "old-instance-type",
kubeletapis.LabelOS: "old-os",
kubeletapis.LabelArch: "old-arch",
v1.LabelHostname: "old-hostname",
v1.LabelZoneFailureDomainStable: "old-zone-failure-domain",
v1.LabelZoneRegionStable: "old-zone-region",
v1.LabelZoneFailureDomain: "old-zone-failure-domain",
v1.LabelZoneRegion: "old-zone-region",
v1.LabelInstanceType: "old-instance-type",
kubeletapis.LabelOS: "old-os",
kubeletapis.LabelArch: "old-arch",
},
},
},
needsUpdate: true,
finalLabels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
v1.LabelZoneRegionStable: "new-zone-region",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
},
},
{
@@ -1588,37 +1598,43 @@ func TestUpdateDefaultLabels(t *testing.T) {
initialNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
v1.LabelZoneRegionStable: "new-zone-region",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
},
},
},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
"please-persist": "foo",
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
v1.LabelZoneRegionStable: "new-zone-region",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
"please-persist": "foo",
},
},
},
needsUpdate: false,
finalLabels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
"please-persist": "foo",
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
v1.LabelZoneRegionStable: "new-zone-region",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
"please-persist": "foo",
},
},
{
@@ -1631,25 +1647,29 @@ func TestUpdateDefaultLabels(t *testing.T) {
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
"please-persist": "foo",
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
v1.LabelZoneRegionStable: "new-zone-region",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
"please-persist": "foo",
},
},
},
needsUpdate: false,
finalLabels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
"please-persist": "foo",
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
v1.LabelZoneRegionStable: "new-zone-region",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
"please-persist": "foo",
},
},
{
@@ -1657,35 +1677,41 @@ func TestUpdateDefaultLabels(t *testing.T) {
initialNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
v1.LabelZoneRegionStable: "new-zone-region",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
},
},
},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
v1.LabelZoneRegionStable: "new-zone-region",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
},
},
},
needsUpdate: false,
finalLabels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
v1.LabelZoneRegionStable: "new-zone-region",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
},
},
{
@@ -1693,12 +1719,14 @@ func TestUpdateDefaultLabels(t *testing.T) {
initialNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
v1.LabelZoneRegionStable: "new-zone-region",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
},
},
},
@@ -1707,12 +1735,14 @@ func TestUpdateDefaultLabels(t *testing.T) {
},
needsUpdate: true,
finalLabels: map[string]string{
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
v1.LabelHostname: "new-hostname",
v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
v1.LabelZoneRegionStable: "new-zone-region",
v1.LabelZoneFailureDomain: "new-zone-failure-domain",
v1.LabelZoneRegion: "new-zone-region",
v1.LabelInstanceType: "new-instance-type",
kubeletapis.LabelOS: "new-os",
kubeletapis.LabelArch: "new-arch",
},
},
}
35 pkg/scheduler/internal/cache/node_tree_test.go (vendored)
@@ -108,7 +108,30 @@ var allNodes = []*v1.Node{
v1.LabelZoneFailureDomain: "zone-2",
},
},
}}
},
// Node 9: a node with zone + region label and the deprecated zone + region label
{
ObjectMeta: metav1.ObjectMeta{
Name: "node-9",
Labels: map[string]string{
v1.LabelZoneRegionStable: "region-2",
v1.LabelZoneFailureDomainStable: "zone-2",
v1.LabelZoneRegion: "region-2",
v1.LabelZoneFailureDomain: "zone-2",
},
},
},
// Node 10: a node with only the deprecated zone + region labels
{
ObjectMeta: metav1.ObjectMeta{
Name: "node-10",
Labels: map[string]string{
v1.LabelZoneRegion: "region-2",
v1.LabelZoneFailureDomain: "zone-3",
},
},
},
}

func verifyNodeTree(t *testing.T, nt *nodeTree, expectedTree map[string]*nodeArray) {
expectedNumNodes := int(0)
@@ -164,6 +187,14 @@ func TestNodeTree_AddNode(t *testing.T) {
"region-2:\x00:zone-2": {[]string{"node-6"}, 0},
},
},
{
name: "nodes also using deprecated zone/region label",
nodesToAdd: allNodes[9:],
expectedTree: map[string]*nodeArray{
"region-2:\x00:zone-2": {[]string{"node-9"}, 0},
"region-2:\x00:zone-3": {[]string{"node-10"}, 0},
},
},
}

for _, test := range tests {
@@ -400,7 +431,7 @@ func TestNodeTreeMultiOperations(t *testing.T) {
nodesToAdd: append(allNodes[4:9], allNodes[3]),
nodesToRemove: nil,
operations: []string{"add", "add", "add", "add", "add", "next", "next", "next", "next", "add", "next", "next", "next"},
expectedOutput: []string{"node-4", "node-5", "node-6", "node-7", "node-3", "node-8", "node-4"},
expectedOutput: []string{"node-4", "node-6", "node-7", "node-8", "node-3", "node-4", "node-6"},
},
{
name: "remove zone and add new to ensure exhausted is reset correctly",
@@ -139,23 +139,37 @@ func GetNodeIP(client clientset.Interface, hostname string) net.IP {

// GetZoneKey is a helper function that builds a string identifier that is unique per failure-zone;
// it returns empty-string for no zone.
// Since there are currently two separate zone keys:
// * "failure-domain.beta.kubernetes.io/zone"
// * "topology.kubernetes.io/zone"
// GetZoneKey will first check failure-domain.beta.kubernetes.io/zone and if not exists, will then check
// topology.kubernetes.io/zone
func GetZoneKey(node *v1.Node) string {
labels := node.Labels
if labels == nil {
return ""
}

region, _ := labels[v1.LabelZoneRegion]
failureDomain, _ := labels[v1.LabelZoneFailureDomain]
// TODO: prefer stable labels for zone in v1.18
zone, ok := labels[v1.LabelZoneFailureDomain]
if !ok {
zone, _ = labels[v1.LabelZoneFailureDomainStable]
}

if region == "" && failureDomain == "" {
// TODO: prefer stable labels for region in v1.18
region, ok := labels[v1.LabelZoneRegion]
if !ok {
region, _ = labels[v1.LabelZoneRegionStable]
}

if region == "" && zone == "" {
return ""
}

// We include the null character just in case region or failureDomain has a colon
// (We do assume there's no null characters in a region or failureDomain)
// As a nice side-benefit, the null character is not printed by fmt.Print or glog
return region + ":\x00:" + failureDomain
return region + ":\x00:" + zone
}

type nodeForConditionPatch struct {
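Because the hunk above interleaves the old and new bodies of GetZoneKey, here is a standalone mirror of the new lookup order (illustrative only; plain map, literal keys): the beta keys are consulted first and the GA topology keys are the fallback, which is exactly what the test cases below assert.

package main

import "fmt"

// zoneKey mirrors the new GetZoneKey: beta labels first, then GA labels.
func zoneKey(labels map[string]string) string {
	zone, ok := labels["failure-domain.beta.kubernetes.io/zone"]
	if !ok {
		zone = labels["topology.kubernetes.io/zone"]
	}
	region, ok := labels["failure-domain.beta.kubernetes.io/region"]
	if !ok {
		region = labels["topology.kubernetes.io/region"]
	}
	if region == "" && zone == "" {
		return ""
	}
	// NUL separator guards against values that contain a colon.
	return region + ":\x00:" + zone
}

func main() {
	fmt.Printf("%q\n", zoneKey(map[string]string{
		"topology.kubernetes.io/zone":   "zone1",
		"topology.kubernetes.io/region": "region1",
	})) // "region1:\x00:zone1"
}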
@@ -120,3 +120,84 @@ func TestGetHostname(t *testing.T) {

}
}

func Test_GetZoneKey(t *testing.T) {
tests := []struct {
name string
node *v1.Node
zone string
}{
{
name: "has no zone or region keys",
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{},
},
},
zone: "",
},
{
name: "has beta zone and region keys",
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegion: "region1",
},
},
},
zone: "region1:\x00:zone1",
},
{
name: "has GA zone and region keys",
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegionStable: "region1",
},
},
},
zone: "region1:\x00:zone1",
},
{
name: "has both beta and GA zone and region keys",
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomain: "zone1",
v1.LabelZoneRegion: "region1",
},
},
},
zone: "region1:\x00:zone1",
},
{
name: "has both beta and GA zone and region keys, beta labels take precedent",
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1.LabelZoneFailureDomainStable: "zone1",
v1.LabelZoneRegionStable: "region1",
v1.LabelZoneFailureDomain: "zone2",
v1.LabelZoneRegion: "region2",
},
},
},
zone: "region2:\x00:zone2",
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
zone := GetZoneKey(test.node)
if zone != test.zone {
t.Logf("actual zone key: %q", zone)
t.Logf("expected zone key: %q", test.zone)
t.Errorf("unexpected zone key")
}
})
}
}
@@ -163,11 +163,11 @@ func setAllowedUpdateLabels(node *api.Node, value string) *api.Node {
node.Labels["kubernetes.io/hostname"] = value
node.Labels["failure-domain.beta.kubernetes.io/zone"] = value
node.Labels["failure-domain.beta.kubernetes.io/region"] = value
node.Labels["topology.kubernetes.io/zone"] = value
node.Labels["topology.kubernetes.io/region"] = value
node.Labels["beta.kubernetes.io/instance-type"] = value
node.Labels["beta.kubernetes.io/os"] = value
node.Labels["beta.kubernetes.io/arch"] = value
node.Labels["failure-domain.kubernetes.io/zone"] = value
node.Labels["failure-domain.kubernetes.io/region"] = value
node.Labels["kubernetes.io/instance-type"] = value
node.Labels["kubernetes.io/os"] = value
node.Labels["kubernetes.io/arch"] = value
@@ -17,9 +17,12 @@ limitations under the License.
package v1

const (
LabelHostname = "kubernetes.io/hostname"
LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
LabelZoneRegion = "failure-domain.beta.kubernetes.io/region"
LabelHostname = "kubernetes.io/hostname"

LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
LabelZoneRegion = "failure-domain.beta.kubernetes.io/region"
LabelZoneFailureDomainStable = "topology.kubernetes.io/zone"
LabelZoneRegionStable = "topology.kubernetes.io/region"

LabelInstanceType = "beta.kubernetes.io/instance-type"
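Not part of the diff: once nodes carry the GA keys, consumers can select on them with the ordinary label-selector machinery, while the beta failure-domain.* keys keep working alongside them during the deprecation window. A minimal sketch using k8s.io/apimachinery:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Select by the GA topology labels promoted in this change.
	sel := labels.SelectorFromSet(labels.Set{
		"topology.kubernetes.io/zone":   "us-west-1a",
		"topology.kubernetes.io/region": "us-west",
	})

	// A node reconciled by this change carries both beta and GA keys.
	nodeLabels := labels.Set{
		"topology.kubernetes.io/zone":              "us-west-1a",
		"topology.kubernetes.io/region":            "us-west",
		"failure-domain.beta.kubernetes.io/zone":   "us-west-1a",
		"failure-domain.beta.kubernetes.io/region": "us-west",
	}
	fmt.Println(sel.Matches(nodeLabels)) // true
}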
@@ -5,6 +5,7 @@ go_library(
srcs = [
"address.go",
"conditions.go",
"labels.go",
"taints.go",
],
importmap = "k8s.io/kubernetes/vendor/k8s.io/cloud-provider/node/helpers",
@@ -15,10 +16,12 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
102 staging/src/k8s.io/cloud-provider/node/helpers/labels.go (new file)
@@ -0,0 +1,102 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package helpers

import (
"encoding/json"
"fmt"
"time"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
clientretry "k8s.io/client-go/util/retry"
"k8s.io/klog"
)

var updateLabelBackoff = wait.Backoff{
Steps: 5,
Duration: 100 * time.Millisecond,
Jitter: 1.0,
}

// AddOrUpdateLabelsOnNode updates the labels on the node and returns true on
// success and false on failure.
func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, labelsToUpdate map[string]string, node *v1.Node) bool {
err := addOrUpdateLabelsOnNode(kubeClient, node.Name, labelsToUpdate)
if err != nil {
utilruntime.HandleError(
fmt.Errorf(
"unable to update labels %+v for Node %q: %v",
labelsToUpdate,
node.Name,
err))
return false
}

klog.V(4).Infof("Updated labels %+v to Node %v", labelsToUpdate, node.Name)
return true
}

func addOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, labelsToUpdate map[string]string) error {
firstTry := true
return clientretry.RetryOnConflict(updateLabelBackoff, func() error {
var err error
var node *v1.Node
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
node, err = kubeClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
node, err = kubeClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
}
if err != nil {
return err
}

// Make a copy of the node and update the labels.
newNode := node.DeepCopy()
if newNode.Labels == nil {
newNode.Labels = make(map[string]string)
}
for key, value := range labelsToUpdate {
newNode.Labels[key] = value
}

oldData, err := json.Marshal(node)
if err != nil {
return fmt.Errorf("failed to marshal the existing node %#v: %v", node, err)
}
newData, err := json.Marshal(newNode)
if err != nil {
return fmt.Errorf("failed to marshal the new node %#v: %v", newNode, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{})
if err != nil {
return fmt.Errorf("failed to create a two-way merge patch: %v", err)
}
if _, err := kubeClient.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes); err != nil {
return fmt.Errorf("failed to patch the node: %v", err)
}
return nil
})
}
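A minimal usage sketch for the new helper (not part of the diff), run against the client-go fake clientset; it assumes the staging tree is importable as k8s.io/cloud-provider and uses the same context-free Get signature as the helper itself:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	nodehelpers "k8s.io/cloud-provider/node/helpers"
)

func main() {
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node0"}}
	client := fake.NewSimpleClientset(node)

	// Patch the GA topology labels onto the node; returns false on failure.
	ok := nodehelpers.AddOrUpdateLabelsOnNode(client, map[string]string{
		"topology.kubernetes.io/zone":   "us-west-1a",
		"topology.kubernetes.io/region": "us-west",
	}, node)

	updated, _ := client.CoreV1().Nodes().Get("node0", metav1.GetOptions{})
	fmt.Println(ok, updated.Labels)
}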
@@ -34,11 +34,19 @@ func RecreateNodes(c clientset.Interface, nodes []v1.Node) error {
nodeNamesByZone := make(map[string][]string)
for i := range nodes {
node := &nodes[i]
zone := framework.TestContext.CloudConfig.Zone
if z, ok := node.Labels[v1.LabelZoneFailureDomain]; ok {
zone = z

if zone, ok := node.Labels[v1.LabelZoneFailureDomain]; ok {
nodeNamesByZone[zone] = append(nodeNamesByZone[zone], node.Name)
continue
}
nodeNamesByZone[zone] = append(nodeNamesByZone[zone], node.Name)

if zone, ok := node.Labels[v1.LabelZoneFailureDomainStable]; ok {
nodeNamesByZone[zone] = append(nodeNamesByZone[zone], node.Name)
continue
}

defaultZone := framework.TestContext.CloudConfig.Zone
nodeNamesByZone[defaultZone] = append(nodeNamesByZone[defaultZone], node.Name)
}

// Find the sole managed instance group name
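The grouping above boils down to a small zone-resolution rule: beta label first, then the GA label, then the configured default zone. A hypothetical helper capturing just that rule (not the e2e framework code):

package main

import "fmt"

// zoneForNode picks the zone bucket a node belongs to.
func zoneForNode(labels map[string]string, defaultZone string) string {
	if z, ok := labels["failure-domain.beta.kubernetes.io/zone"]; ok {
		return z
	}
	if z, ok := labels["topology.kubernetes.io/zone"]; ok {
		return z
	}
	return defaultZone
}

func main() {
	fmt.Println(zoneForNode(map[string]string{"topology.kubernetes.io/zone": "zone-a"}, "default-zone"))
	fmt.Println(zoneForNode(map[string]string{}, "default-zone"))
}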
@@ -2355,6 +2355,10 @@ func GetClusterZones(c clientset.Interface) (sets.String, error) {
if zone, found := node.Labels[v1.LabelZoneFailureDomain]; found {
zones.Insert(zone)
}

if zone, found := node.Labels[v1.LabelZoneFailureDomainStable]; found {
zones.Insert(zone)
}
}
return zones, nil
}