diff --git a/pkg/controller/cloud/node_controller.go b/pkg/controller/cloud/node_controller.go
index 17fedb72b13..f69ce0b5a24 100644
--- a/pkg/controller/cloud/node_controller.go
+++ b/pkg/controller/cloud/node_controller.go
@@ -70,6 +70,14 @@ var labelReconcileInfo = []struct {
 		secondaryKey:          v1.LabelZoneRegionStable,
 		ensureSecondaryExists: true,
 	},
+	{
+		// Reconcile the beta and the stable instance-type label using the beta label as
+		// the source of truth
+		// TODO: switch the primary key to GA labels in v1.21
+		primaryKey:            v1.LabelInstanceType,
+		secondaryKey:          v1.LabelInstanceTypeStable,
+		ensureSecondaryExists: true,
+	},
 }
 
 var UpdateNodeSpecBackoff = wait.Backoff{
@@ -375,6 +383,8 @@ func (cnc *CloudNodeController) initializeNode(ctx context.Context, node *v1.Nod
 	} else if instanceType != "" {
 		klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelInstanceType, instanceType)
 		curNode.ObjectMeta.Labels[v1.LabelInstanceType] = instanceType
+		klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelInstanceTypeStable, instanceType)
+		curNode.ObjectMeta.Labels[v1.LabelInstanceTypeStable] = instanceType
 	}
 
 	if zones, ok := cnc.cloud.Zones(); ok {
diff --git a/pkg/controller/cloud/node_controller_test.go b/pkg/controller/cloud/node_controller_test.go
index 5e10eb9236a..37470060f78 100644
--- a/pkg/controller/cloud/node_controller_test.go
+++ b/pkg/controller/cloud/node_controller_test.go
@@ -689,12 +689,15 @@ func Test_reconcileNodeLabels(t *testing.T) {
 			labels: map[string]string{
 				v1.LabelZoneFailureDomain: "foo",
 				v1.LabelZoneRegion: "bar",
+				v1.LabelInstanceType: "the-best-type",
 			},
 			expectedLabels: map[string]string{
 				v1.LabelZoneFailureDomain: "foo",
 				v1.LabelZoneRegion: "bar",
 				v1.LabelZoneFailureDomainStable: "foo",
 				v1.LabelZoneRegionStable: "bar",
+				v1.LabelInstanceType: "the-best-type",
+				v1.LabelInstanceTypeStable: "the-best-type",
 			},
 			expectedErr: nil,
 		},
@@ -705,12 +708,16 @@ func Test_reconcileNodeLabels(t *testing.T) {
 			labels: map[string]string{
 				v1.LabelZoneFailureDomain: "foo",
 				v1.LabelZoneRegion: "bar",
 				v1.LabelZoneFailureDomainStable: "foo",
 				v1.LabelZoneRegionStable: "bar",
+				v1.LabelInstanceType: "the-best-type",
+				v1.LabelInstanceTypeStable: "the-best-type",
 			},
 			expectedLabels: map[string]string{
 				v1.LabelZoneFailureDomain: "foo",
 				v1.LabelZoneRegion: "bar",
 				v1.LabelZoneFailureDomainStable: "foo",
 				v1.LabelZoneRegionStable: "bar",
+				v1.LabelInstanceType: "the-best-type",
+				v1.LabelInstanceTypeStable: "the-best-type",
 			},
 			expectedErr: nil,
 		},
@@ -721,12 +728,16 @@ func Test_reconcileNodeLabels(t *testing.T) {
 			labels: map[string]string{
 				v1.LabelZoneFailureDomain: "foo",
 				v1.LabelZoneRegion: "bar",
 				v1.LabelZoneFailureDomainStable: "wrongfoo",
 				v1.LabelZoneRegionStable: "wrongbar",
+				v1.LabelInstanceType: "the-best-type",
+				v1.LabelInstanceTypeStable: "the-wrong-type",
 			},
 			expectedLabels: map[string]string{
 				v1.LabelZoneFailureDomain: "foo",
 				v1.LabelZoneRegion: "bar",
 				v1.LabelZoneFailureDomainStable: "foo",
 				v1.LabelZoneRegionStable: "bar",
+				v1.LabelInstanceType: "the-best-type",
+				v1.LabelInstanceTypeStable: "the-best-type",
 			},
 			expectedErr: nil,
 		},
diff --git a/pkg/kubelet/apis/well_known_labels.go b/pkg/kubelet/apis/well_known_labels.go
index 2f29f8d3d95..a38ad13b545 100644
--- a/pkg/kubelet/apis/well_known_labels.go
+++ b/pkg/kubelet/apis/well_known_labels.go
@@ -34,9 +34,6 @@ const (
 	// and GA labels to ensure backward compatibility.
 	// TODO: stop applying the beta Arch labels in Kubernetes 1.18.
 	LabelArch = "beta.kubernetes.io/arch"
-
-	// TODO: update kubelet and controllers to set both beta and GA labels, then export these constants
-	labelInstanceTypeGA = "kubernetes.io/instance-type"
 )
 
 var kubeletLabels = sets.NewString(
@@ -46,13 +43,12 @@ var kubeletLabels = sets.NewString(
 	v1.LabelZoneFailureDomain,
 	v1.LabelZoneRegion,
 	v1.LabelInstanceType,
+	v1.LabelInstanceTypeStable,
 	v1.LabelOSStable,
 	v1.LabelArchStable,
 	LabelOS,
 	LabelArch,
-
-	labelInstanceTypeGA,
 )
 
 var kubeletLabelNamespaces = sets.NewString(
diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go
index 6bd44ebd49a..af41b4856a7 100644
--- a/pkg/kubelet/kubelet_node_status.go
+++ b/pkg/kubelet/kubelet_node_status.go
@@ -154,6 +154,7 @@ func (kl *Kubelet) updateDefaultLabels(initialNode, existingNode *v1.Node) bool
 		v1.LabelZoneRegionStable,
 		v1.LabelZoneFailureDomain,
 		v1.LabelZoneRegion,
+		v1.LabelInstanceTypeStable,
 		v1.LabelInstanceType,
 		v1.LabelOSStable,
 		v1.LabelArchStable,
@@ -333,6 +334,8 @@ func (kl *Kubelet) initialNode(ctx context.Context) (*v1.Node, error) {
 		if instanceType != "" {
 			klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelInstanceType, instanceType)
 			node.ObjectMeta.Labels[v1.LabelInstanceType] = instanceType
+			klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelInstanceTypeStable, instanceType)
+			node.ObjectMeta.Labels[v1.LabelInstanceTypeStable] = instanceType
 		}
 		// If the cloud has zone information, label the node with the zone information
 		zones, ok := kl.cloud.Zones()
diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go
index a44f7e334f1..8bb0bab367b 100644
--- a/pkg/kubelet/kubelet_node_status_test.go
+++ b/pkg/kubelet/kubelet_node_status_test.go
@@ -1414,6 +1414,7 @@ func TestUpdateDefaultLabels(t *testing.T) {
 				v1.LabelZoneRegionStable: "new-zone-region",
 				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
 				v1.LabelZoneRegion: "new-zone-region",
+				v1.LabelInstanceTypeStable: "new-instance-type",
 				v1.LabelInstanceType: "new-instance-type",
 				kubeletapis.LabelOS: "new-os",
 				kubeletapis.LabelArch: "new-arch",
@@ -1432,6 +1433,7 @@ func TestUpdateDefaultLabels(t *testing.T) {
 				v1.LabelZoneRegionStable: "new-zone-region",
 				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
 				v1.LabelZoneRegion: "new-zone-region",
+				v1.LabelInstanceTypeStable: "new-instance-type",
 				v1.LabelInstanceType: "new-instance-type",
 				kubeletapis.LabelOS: "new-os",
 				kubeletapis.LabelArch: "new-arch",
@@ -1447,6 +1449,7 @@ func TestUpdateDefaultLabels(t *testing.T) {
 				v1.LabelZoneRegionStable: "new-zone-region",
 				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
 				v1.LabelZoneRegion: "new-zone-region",
+				v1.LabelInstanceTypeStable: "new-instance-type",
 				v1.LabelInstanceType: "new-instance-type",
 				kubeletapis.LabelOS: "new-os",
 				kubeletapis.LabelArch: "new-arch",
@@ -1461,6 +1464,7 @@ func TestUpdateDefaultLabels(t *testing.T) {
 				v1.LabelZoneRegionStable: "old-zone-region",
 				v1.LabelZoneFailureDomain: "old-zone-failure-domain",
 				v1.LabelZoneRegion: "old-zone-region",
+				v1.LabelInstanceTypeStable: "old-instance-type",
 				v1.LabelInstanceType: "old-instance-type",
 				kubeletapis.LabelOS: "old-os",
 				kubeletapis.LabelArch: "old-arch",
@@ -1474,6 +1478,7 @@ func TestUpdateDefaultLabels(t *testing.T) {
 				v1.LabelZoneRegionStable: "new-zone-region",
 				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
 				v1.LabelZoneRegion: "new-zone-region",
+				v1.LabelInstanceTypeStable: "new-instance-type",
 				v1.LabelInstanceType: "new-instance-type",
 				kubeletapis.LabelOS: "new-os",
 				kubeletapis.LabelArch: "new-arch",
@@ -1489,6 +1494,7 @@ func TestUpdateDefaultLabels(t *testing.T) {
 				v1.LabelZoneRegionStable: "new-zone-region",
 				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
 				v1.LabelZoneRegion: "new-zone-region",
+				v1.LabelInstanceTypeStable: "new-instance-type",
 				v1.LabelInstanceType: "new-instance-type",
 				kubeletapis.LabelOS: "new-os",
 				kubeletapis.LabelArch: "new-arch",
@@ -1503,6 +1509,7 @@ func TestUpdateDefaultLabels(t *testing.T) {
 				v1.LabelZoneRegionStable: "new-zone-region",
 				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
 				v1.LabelZoneRegion: "new-zone-region",
+				v1.LabelInstanceTypeStable: "new-instance-type",
 				v1.LabelInstanceType: "new-instance-type",
 				kubeletapis.LabelOS: "new-os",
 				kubeletapis.LabelArch: "new-arch",
@@ -1517,6 +1524,7 @@ func TestUpdateDefaultLabels(t *testing.T) {
 				v1.LabelZoneRegionStable: "new-zone-region",
 				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
 				v1.LabelZoneRegion: "new-zone-region",
+				v1.LabelInstanceTypeStable: "new-instance-type",
 				v1.LabelInstanceType: "new-instance-type",
 				kubeletapis.LabelOS: "new-os",
 				kubeletapis.LabelArch: "new-arch",
@@ -1538,6 +1546,7 @@ func TestUpdateDefaultLabels(t *testing.T) {
 				v1.LabelZoneRegionStable: "new-zone-region",
 				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
 				v1.LabelZoneRegion: "new-zone-region",
+				v1.LabelInstanceTypeStable: "new-instance-type",
 				v1.LabelInstanceType: "new-instance-type",
 				kubeletapis.LabelOS: "new-os",
 				kubeletapis.LabelArch: "new-arch",
@@ -1552,6 +1561,7 @@ func TestUpdateDefaultLabels(t *testing.T) {
 				v1.LabelZoneRegionStable: "new-zone-region",
 				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
 				v1.LabelZoneRegion: "new-zone-region",
+				v1.LabelInstanceTypeStable: "new-instance-type",
 				v1.LabelInstanceType: "new-instance-type",
 				kubeletapis.LabelOS: "new-os",
 				kubeletapis.LabelArch: "new-arch",
@@ -1568,6 +1578,7 @@ func TestUpdateDefaultLabels(t *testing.T) {
 				v1.LabelZoneRegionStable: "new-zone-region",
 				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
 				v1.LabelZoneRegion: "new-zone-region",
+				v1.LabelInstanceTypeStable: "new-instance-type",
 				v1.LabelInstanceType: "new-instance-type",
 				kubeletapis.LabelOS: "new-os",
 				kubeletapis.LabelArch: "new-arch",
@@ -1582,6 +1593,7 @@ func TestUpdateDefaultLabels(t *testing.T) {
 				v1.LabelZoneRegionStable: "new-zone-region",
 				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
 				v1.LabelZoneRegion: "new-zone-region",
+				v1.LabelInstanceTypeStable: "new-instance-type",
 				v1.LabelInstanceType: "new-instance-type",
 				kubeletapis.LabelOS: "new-os",
 				kubeletapis.LabelArch: "new-arch",
@@ -1595,6 +1607,7 @@ func TestUpdateDefaultLabels(t *testing.T) {
 				v1.LabelZoneRegionStable: "new-zone-region",
 				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
 				v1.LabelZoneRegion: "new-zone-region",
+				v1.LabelInstanceTypeStable: "new-instance-type",
 				v1.LabelInstanceType: "new-instance-type",
 				kubeletapis.LabelOS: "new-os",
 				kubeletapis.LabelArch: "new-arch",
@@ -1610,6 +1623,7 @@ func TestUpdateDefaultLabels(t *testing.T) {
 				v1.LabelZoneRegionStable: "new-zone-region",
 				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
 				v1.LabelZoneRegion: "new-zone-region",
+				v1.LabelInstanceTypeStable: "new-instance-type",
 				v1.LabelInstanceType: "new-instance-type",
 				kubeletapis.LabelOS: "new-os",
 				kubeletapis.LabelArch: "new-arch",
@@ -1626,11 +1640,58 @@ func TestUpdateDefaultLabels(t *testing.T) {
 				v1.LabelZoneRegionStable: "new-zone-region",
 				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
 				v1.LabelZoneRegion: "new-zone-region",
+				v1.LabelInstanceTypeStable: "new-instance-type",
 				v1.LabelInstanceType: "new-instance-type",
 				kubeletapis.LabelOS: "new-os",
 				kubeletapis.LabelArch: "new-arch",
 			},
 		},
+		{
+			name: "backfill required for new stable labels for os/arch/zones/regions/instance-type",
+			initialNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						v1.LabelHostname: "new-hostname",
+						v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
+						v1.LabelZoneRegionStable: "new-zone-region",
+						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
+						v1.LabelZoneRegion: "new-zone-region",
+						v1.LabelInstanceTypeStable: "new-instance-type",
+						v1.LabelInstanceType: "new-instance-type",
+						kubeletapis.LabelOS: "new-os",
+						kubeletapis.LabelArch: "new-arch",
+						v1.LabelOSStable: "new-os",
+						v1.LabelArchStable: "new-arch",
+					},
+				},
+			},
+			existingNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						v1.LabelHostname: "new-hostname",
+						v1.LabelZoneFailureDomain: "new-zone-failure-domain",
+						v1.LabelZoneRegion: "new-zone-region",
+						v1.LabelInstanceType: "new-instance-type",
+						kubeletapis.LabelOS: "new-os",
+						kubeletapis.LabelArch: "new-arch",
+					},
+				},
+			},
+			needsUpdate: true,
+			finalLabels: map[string]string{
+				v1.LabelHostname: "new-hostname",
+				v1.LabelZoneFailureDomainStable: "new-zone-failure-domain",
+				v1.LabelZoneRegionStable: "new-zone-region",
+				v1.LabelZoneFailureDomain: "new-zone-failure-domain",
+				v1.LabelZoneRegion: "new-zone-region",
+				v1.LabelInstanceTypeStable: "new-instance-type",
+				v1.LabelInstanceType: "new-instance-type",
+				kubeletapis.LabelOS: "new-os",
+				kubeletapis.LabelArch: "new-arch",
+				v1.LabelOSStable: "new-os",
+				v1.LabelArchStable: "new-arch",
+			},
+		},
 	}
 	for _, tc := range cases {
diff --git a/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go b/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go
index af658268a79..1ad456ac317 100644
--- a/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go
+++ b/pkg/scheduler/algorithm/predicates/max_attachable_volume_predicate_test.go
@@ -1067,6 +1067,41 @@ func TestMaxVolumeFuncM4(t *testing.T) {
 	}
 }
 
+func TestMaxVolumeFuncM4WithOnlyStableLabels(t *testing.T) {
+	node := &v1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "node-for-m4-instance",
+			Labels: map[string]string{
+				v1.LabelInstanceTypeStable: "m4.2xlarge",
+			},
+		},
+	}
+	os.Unsetenv(KubeMaxPDVols)
+	maxVolumeFunc := getMaxVolumeFunc(EBSVolumeFilterType)
+	maxVolume := maxVolumeFunc(node)
+	if maxVolume != volumeutil.DefaultMaxEBSVolumes {
+		t.Errorf("Expected max volume to be %d got %d", volumeutil.DefaultMaxEBSVolumes, maxVolume)
+	}
+}
+
+func TestMaxVolumeFuncM4WithBothBetaAndStableLabels(t *testing.T) {
+	node := &v1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "node-for-m4-instance",
+			Labels: map[string]string{
+				v1.LabelInstanceType: "m4.2xlarge",
+				v1.LabelInstanceTypeStable: "m4.2xlarge",
+			},
+		},
+	}
+	os.Unsetenv(KubeMaxPDVols)
+	maxVolumeFunc := getMaxVolumeFunc(EBSVolumeFilterType)
+	maxVolume := maxVolumeFunc(node)
+	if maxVolume != volumeutil.DefaultMaxEBSVolumes {
+		t.Errorf("Expected max volume to be %d got %d", volumeutil.DefaultMaxEBSVolumes, maxVolume)
+	}
+}
+
 func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*schedulernodeinfo.NodeInfo, *v1beta1.CSINode) {
 	nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
 	node := &v1.Node{
diff --git a/pkg/scheduler/algorithm/predicates/predicates.go b/pkg/scheduler/algorithm/predicates/predicates.go
index d4ff8cade1b..1005c5ac8f9 100644
--- a/pkg/scheduler/algorithm/predicates/predicates.go
+++ b/pkg/scheduler/algorithm/predicates/predicates.go
@@ -305,8 +305,9 @@ func getMaxVolumeFunc(filterName string) func(node *v1.Node) int {
 		var nodeInstanceType string
 		for k, v := range node.ObjectMeta.Labels {
-			if k == v1.LabelInstanceType {
+			if k == v1.LabelInstanceType || k == v1.LabelInstanceTypeStable {
 				nodeInstanceType = v
+				break
 			}
 		}
 		switch filterName {
diff --git a/plugin/pkg/admission/noderestriction/admission_test.go b/plugin/pkg/admission/noderestriction/admission_test.go
index d93a4bf5b90..1349ec7c952 100644
--- a/plugin/pkg/admission/noderestriction/admission_test.go
+++ b/plugin/pkg/admission/noderestriction/admission_test.go
@@ -158,9 +158,9 @@ func setAllowedUpdateLabels(node *api.Node, value string) *api.Node {
 	node.Labels["topology.kubernetes.io/zone"] = value
 	node.Labels["topology.kubernetes.io/region"] = value
 	node.Labels["beta.kubernetes.io/instance-type"] = value
+	node.Labels["node.kubernetes.io/instance-type"] = value
 	node.Labels["beta.kubernetes.io/os"] = value
 	node.Labels["beta.kubernetes.io/arch"] = value
-	node.Labels["kubernetes.io/instance-type"] = value
 	node.Labels["kubernetes.io/os"] = value
 	node.Labels["kubernetes.io/arch"] = value
diff --git a/staging/src/k8s.io/api/core/v1/well_known_labels.go b/staging/src/k8s.io/api/core/v1/well_known_labels.go
index 9057bfa5bef..9017cb17799 100644
--- a/staging/src/k8s.io/api/core/v1/well_known_labels.go
+++ b/staging/src/k8s.io/api/core/v1/well_known_labels.go
@@ -24,7 +24,8 @@ const (
 	LabelZoneFailureDomainStable = "topology.kubernetes.io/zone"
 	LabelZoneRegionStable        = "topology.kubernetes.io/region"
 
-	LabelInstanceType = "beta.kubernetes.io/instance-type"
+	LabelInstanceType       = "beta.kubernetes.io/instance-type"
+	LabelInstanceTypeStable = "node.kubernetes.io/instance-type"
 
 	LabelOSStable   = "kubernetes.io/os"
 	LabelArchStable = "kubernetes.io/arch"
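
The sketch below is not part of the diff; it only illustrates the consumption pattern the change enables once nodes carry both the beta `beta.kubernetes.io/instance-type` and the GA `node.kubernetes.io/instance-type` labels. The `instanceTypeOf` helper is hypothetical (the PR itself matches either label inline, as in `getMaxVolumeFunc` above), and it assumes the `v1.LabelInstanceTypeStable` constant introduced in this change is available in the vendored `k8s.io/api`.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// instanceTypeOf is a hypothetical helper: prefer the GA label
// node.kubernetes.io/instance-type and fall back to the beta label,
// which is the direction this PR migrates toward.
func instanceTypeOf(node *v1.Node) string {
	if t, ok := node.Labels[v1.LabelInstanceTypeStable]; ok && t != "" {
		return t
	}
	return node.Labels[v1.LabelInstanceType]
}

func main() {
	// After this change, the kubelet and the cloud node controller set both labels.
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "example-node",
			Labels: map[string]string{
				v1.LabelInstanceType:       "m4.2xlarge", // beta label
				v1.LabelInstanceTypeStable: "m4.2xlarge", // GA label added by this change
			},
		},
	}
	fmt.Println(instanceTypeOf(node)) // prints "m4.2xlarge"
}
```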