diff --git a/pkg/controller/nodelifecycle/BUILD b/pkg/controller/nodelifecycle/BUILD
index 4b6e8a869cd..72cfe1738b3 100644
--- a/pkg/controller/nodelifecycle/BUILD
+++ b/pkg/controller/nodelifecycle/BUILD
@@ -12,7 +12,6 @@ go_library(
         "//pkg/controller:go_default_library",
         "//pkg/controller/nodelifecycle/scheduler:go_default_library",
         "//pkg/controller/util/node:go_default_library",
-        "//pkg/features:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
         "//pkg/util/node:go_default_library",
         "//pkg/util/taints:go_default_library",
@@ -24,7 +23,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library",
         "//staging/src/k8s.io/client-go/informers/coordination/v1:go_default_library",
         "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
@@ -54,7 +52,6 @@ go_test(
         "//pkg/controller/nodelifecycle/scheduler:go_default_library",
         "//pkg/controller/testutil:go_default_library",
         "//pkg/controller/util/node:go_default_library",
-        "//pkg/features:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
         "//pkg/util/node:go_default_library",
         "//pkg/util/taints:go_default_library",
@@ -68,7 +65,6 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
-        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library",
         "//staging/src/k8s.io/client-go/informers/coordination/v1:go_default_library",
@@ -76,7 +72,6 @@ go_test(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
         "//staging/src/k8s.io/client-go/testing:go_default_library",
-        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
         "//vendor/k8s.io/utils/pointer:go_default_library",
     ],
 )
diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller.go b/pkg/controller/nodelifecycle/node_lifecycle_controller.go
index 33f4c35b59c..817c6241057 100644
--- a/pkg/controller/nodelifecycle/node_lifecycle_controller.go
+++ b/pkg/controller/nodelifecycle/node_lifecycle_controller.go
@@ -24,7 +24,6 @@ package nodelifecycle
 import (
 	"context"
 	"fmt"
-	"strings"
 	"sync"
 	"time"

@@ -38,7 +37,6 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	appsv1informers "k8s.io/client-go/informers/apps/v1"
 	coordinformers "k8s.io/client-go/informers/coordination/v1"
 	coreinformers "k8s.io/client-go/informers/core/v1"
@@ -56,7 +54,6 @@ import (
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler"
 	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
-	kubefeatures "k8s.io/kubernetes/pkg/features"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	utilnode "k8s.io/kubernetes/pkg/util/node"
 	taintutils "k8s.io/kubernetes/pkg/util/taints"
@@ -949,37 +946,9 @@ func (nc *Controller) processNoTaintBaseEviction(node *v1.Node, observedReadyCon
 const labelNodeDisruptionExclusion = "node.kubernetes.io/exclude-disruption"

 func isNodeExcludedFromDisruptionChecks(node *v1.Node) bool {
-	// DEPRECATED: will be removed in 1.19
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.LegacyNodeRoleBehavior) {
-		if legacyIsMasterNode(node.Name) {
-			return true
-		}
-	}
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.NodeDisruptionExclusion) {
-		if _, ok := node.Labels[labelNodeDisruptionExclusion]; ok {
-			return true
-		}
-	}
-	return false
-}
-
-// legacyIsMasterNode returns true if given node is a registered master according
-// to the logic historically used for this function. This code path is deprecated
-// and the node disruption exclusion label should be used in the future.
-// This code will not be allowed to update to use the node-role label, since
-// node-roles may not be used for feature enablement.
-// DEPRECATED: Will be removed in 1.19
-func legacyIsMasterNode(nodeName string) bool {
-	// We are trying to capture "master(-...)?$" regexp.
-	// However, using regexp.MatchString() results even in more than 35%
-	// of all space allocations in ControllerManager spent in this function.
-	// That's why we are trying to be a bit smarter.
-	if strings.HasSuffix(nodeName, "master") {
+	if _, ok := node.Labels[labelNodeDisruptionExclusion]; ok {
 		return true
 	}
-	if len(nodeName) >= 10 {
-		return strings.HasSuffix(nodeName[:len(nodeName)-3], "master-")
-	}
 	return false
 }

diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go
index acb14c9f14d..d81b13fb4b2 100644
--- a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go
+++ b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go
@@ -33,7 +33,6 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/diff"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	appsinformers "k8s.io/client-go/informers/apps/v1"
 	coordinformers "k8s.io/client-go/informers/coordination/v1"
@@ -41,12 +40,10 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
 	testcore "k8s.io/client-go/testing"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler"
 	"k8s.io/kubernetes/pkg/controller/testutil"
 	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
-	"k8s.io/kubernetes/pkg/features"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	"k8s.io/kubernetes/pkg/util/node"
 	taintutils "k8s.io/kubernetes/pkg/util/taints"
@@ -1140,7 +1137,8 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
 			description: "Network Disruption: One zone is down - eviction should take place.",
 		},
 		// NetworkDisruption: Node created long time ago, node controller posted Unknown for a long period
-		// of on first Node, eviction should stop even though -master Node is healthy.
+		// of on first Node, eviction should stop even though Node with label
+		// node.kubernetes.io/exclude-disruption is healthy.
 		{
 			nodeList: []*v1.Node{
 				{
@@ -1174,6 +1172,7 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
 						v1.LabelTopologyZone:            "zone1",
 						v1.LabelFailureDomainBetaRegion: "region1",
 						v1.LabelFailureDomainBetaZone:   "zone1",
+						labelNodeDisruptionExclusion:    "",
 					},
 				},
 				Status: v1.NodeStatus{
@@ -1200,7 +1199,7 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
 				testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
 			},
 			expectedEvictPods: false,
-			description:       "NetworkDisruption: eviction should stop, only -master Node is healthy",
+			description:       "NetworkDisruption: eviction should stop, only Node with label node.kubernetes.io/exclude-disruption is healthy",
 		},
 		// NetworkDisruption: Node created long time ago, node controller posted Unknown for a long period of time on both Nodes.
 		// Initially both zones down, one comes back - eviction should take place
@@ -3577,26 +3576,15 @@ func Test_isNodeExcludedFromDisruptionChecks(t *testing.T) {
 	tests := []struct {
 		name string

-		enableExclusion bool
-		enableLegacy    bool
-		input           *v1.Node
-		want            bool
+		input *v1.Node
+		want  bool
 	}{
 		{want: false, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{}}}},
 		{want: false, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Name: "master-abc"}}},
-		{want: false, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeDisruptionExclusion: ""}}}},
-
-		{want: false, enableExclusion: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Name: "master-abc"}}},
-		{want: false, enableLegacy: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeDisruptionExclusion: ""}}}},
-
-		{want: true, enableLegacy: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Name: "master-abc"}}},
-		{want: true, enableExclusion: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeDisruptionExclusion: ""}}}},
+		{want: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeDisruptionExclusion: ""}}}},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeDisruptionExclusion, tt.enableExclusion)()
-			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LegacyNodeRoleBehavior, tt.enableLegacy)()
-
 			if result := isNodeExcludedFromDisruptionChecks(tt.input); result != tt.want {
 				t.Errorf("isNodeExcludedFromDisruptionChecks() = %v, want %v", result, tt.want)
 			}
diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go
index fedddd75f2a..833cdfad490 100644
--- a/pkg/features/kube_features.go
+++ b/pkg/features/kube_features.go
@@ -140,13 +140,16 @@ const (
 	// owner @smarterclayton
 	// alpha: v1.16
 	// beta: v1.19
+	// ga: v1.21
 	//
 	// Enable legacy behavior to vary cluster functionality on the node-role.kubernetes.io labels. On by default (legacy), will be turned off in 1.18.
+	// Lock to false in v1.21 and remove in v1.22.
 	LegacyNodeRoleBehavior featuregate.Feature = "LegacyNodeRoleBehavior"

 	// owner @brendandburns
 	// alpha: v1.9
 	// beta: v1.19
+	// ga: v1.21
 	//
 	// Enable nodes to exclude themselves from service load balancers
 	ServiceNodeExclusion featuregate.Feature = "ServiceNodeExclusion"
@@ -154,6 +157,7 @@ const (
 	// owner @smarterclayton
 	// alpha: v1.16
 	// beta: v1.19
+	// ga: v1.21
 	//
 	// Enable nodes to exclude themselves from network disruption checks
 	NodeDisruptionExclusion featuregate.Feature = "NodeDisruptionExclusion"
@@ -753,8 +757,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 	CPUManager:               {Default: true, PreRelease: featuregate.Beta},
 	CPUCFSQuotaPeriod:        {Default: false, PreRelease: featuregate.Alpha},
 	TopologyManager:          {Default: true, PreRelease: featuregate.Beta},
-	ServiceNodeExclusion:     {Default: true, PreRelease: featuregate.Beta},
-	NodeDisruptionExclusion:  {Default: true, PreRelease: featuregate.Beta},
+	ServiceNodeExclusion:     {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.22
+	NodeDisruptionExclusion:  {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.22
 	CSIDriverRegistry:        {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
 	CSINodeInfo:              {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.19
 	BlockVolume:              {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
@@ -851,5 +855,5 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 	// features that enable backwards compatibility but are scheduled to be removed
 	// ...
 	HPAScaleToZero:         {Default: false, PreRelease: featuregate.Alpha},
-	LegacyNodeRoleBehavior: {Default: true, PreRelease: featuregate.Beta},
+	LegacyNodeRoleBehavior: {Default: false, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.22
 }
diff --git a/staging/src/k8s.io/cloud-provider/controllers/service/controller.go b/staging/src/k8s.io/cloud-provider/controllers/service/controller.go
index 9722439c2c5..5bd4a4211b3 100644
--- a/staging/src/k8s.io/cloud-provider/controllers/service/controller.go
+++ b/staging/src/k8s.io/cloud-provider/controllers/service/controller.go
@@ -56,28 +56,12 @@ const (
 	minRetryDelay = 5 * time.Second
 	maxRetryDelay = 300 * time.Second

-	// labelNodeRoleMaster specifies that a node is a master. The use of this label within the
-	// controller is deprecated and only considered when the LegacyNodeRoleBehavior feature gate
-	// is on.
-	labelNodeRoleMaster = "node-role.kubernetes.io/master"
-
 	// labelNodeRoleExcludeBalancer specifies that the node should not be considered as a target
 	// for external load-balancers which use nodes as a second hop (e.g. many cloud LBs which only
 	// understand nodes). For services that use externalTrafficPolicy=Local, this may mean that
 	// any backends on excluded nodes are not reachable by those external load-balancers.
-	// Implementations of this exclusion may vary based on provider. This label is honored starting
-	// in 1.16 when the ServiceNodeExclusion gate is on.
+	// Implementations of this exclusion may vary based on provider.
 	labelNodeRoleExcludeBalancer = "node.kubernetes.io/exclude-from-external-load-balancers"
-
-	// serviceNodeExclusionFeature is the feature gate name that
-	// enables nodes to exclude themselves from service load balancers
-	// originated from: https://github.com/kubernetes/kubernetes/blob/28e800245e/pkg/features/kube_features.go#L178
-	serviceNodeExclusionFeature = "ServiceNodeExclusion"
-
-	// legacyNodeRoleBehaviorFeature is the feature gate name that enables legacy
-	// behavior to vary cluster functionality on the node-role.kubernetes.io
-	// labels.
-	legacyNodeRoleBehaviorFeature = "LegacyNodeRoleBehavior"
 )

 type cachedService struct {
@@ -110,9 +94,6 @@ type Controller struct {
 	nodeListerSynced cache.InformerSynced
 	// services that need to be synced
 	queue workqueue.RateLimitingInterface
-	// feature gates stored in local field for better testability
-	legacyNodeRoleFeatureEnabled       bool
-	serviceNodeExclusionFeatureEnabled bool
 }

 // New returns a new service controller to keep cloud provider service resources
@@ -137,18 +118,16 @@ func New(
 	}

 	s := &Controller{
-		cloud:                              cloud,
-		knownHosts:                         []*v1.Node{},
-		kubeClient:                         kubeClient,
-		clusterName:                        clusterName,
-		cache:                              &serviceCache{serviceMap: make(map[string]*cachedService)},
-		eventBroadcaster:                   broadcaster,
-		eventRecorder:                      recorder,
-		nodeLister:                         nodeInformer.Lister(),
-		nodeListerSynced:                   nodeInformer.Informer().HasSynced,
-		queue:                              workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "service"),
-		legacyNodeRoleFeatureEnabled:       featureGate.Enabled(legacyNodeRoleBehaviorFeature),
-		serviceNodeExclusionFeatureEnabled: featureGate.Enabled(serviceNodeExclusionFeature),
+		cloud:            cloud,
+		knownHosts:       []*v1.Node{},
+		kubeClient:       kubeClient,
+		clusterName:      clusterName,
+		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
+		eventBroadcaster: broadcaster,
+		eventRecorder:    recorder,
+		nodeLister:       nodeInformer.Lister(),
+		nodeListerSynced: nodeInformer.Informer().HasSynced,
+		queue:            workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "service"),
 	}

 	serviceInformer.Informer().AddEventHandlerWithResyncPeriod(
@@ -635,17 +614,8 @@ func nodeSlicesEqualForLB(x, y []*v1.Node) bool {

 func (s *Controller) getNodeConditionPredicate() NodeConditionPredicate {
 	return func(node *v1.Node) bool {
-		if s.legacyNodeRoleFeatureEnabled {
-			// As of 1.6, we will taint the master, but not necessarily mark it unschedulable.
-			// Recognize nodes labeled as master, and filter them also, as we were doing previously.
-			if _, hasMasterRoleLabel := node.Labels[labelNodeRoleMaster]; hasMasterRoleLabel {
-				return false
-			}
-		}
-		if s.serviceNodeExclusionFeatureEnabled {
-			if _, hasExcludeBalancerLabel := node.Labels[labelNodeRoleExcludeBalancer]; hasExcludeBalancerLabel {
-				return false
-			}
+		if _, hasExcludeBalancerLabel := node.Labels[labelNodeRoleExcludeBalancer]; hasExcludeBalancerLabel {
+			return false
 		}

 		// If we have no info, don't accept
diff --git a/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go b/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go
index f2f92c0e8ef..1b308000a81 100644
--- a/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go
+++ b/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go
@@ -1373,10 +1373,8 @@ func Test_getNodeConditionPredicate(t *testing.T) {
 	tests := []struct {
 		name string

-		enableExclusion bool
-		enableLegacy    bool
-		input           *v1.Node
-		want            bool
+		input *v1.Node
+		want  bool
 	}{
 		{want: false, input: &v1.Node{}},
 		{want: true, input: &v1.Node{Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}}}},
@@ -1384,21 +1382,11 @@
 		{want: true, input: &v1.Node{Spec: v1.NodeSpec{Unschedulable: true}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}}}},

 		{want: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{}}}},
-		{want: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleMaster: ""}}}},
-		{want: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleExcludeBalancer: ""}}}},
-
-		{want: true, enableExclusion: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleMaster: ""}}}},
-		{want: true, enableLegacy: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleExcludeBalancer: ""}}}},
-
-		{want: false, enableLegacy: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleMaster: ""}}}},
-		{want: false, enableExclusion: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleExcludeBalancer: ""}}}},
+		{want: false, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleExcludeBalancer: ""}}}},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			c := &Controller{
-				legacyNodeRoleFeatureEnabled:       tt.enableLegacy,
-				serviceNodeExclusionFeatureEnabled: tt.enableExclusion,
-			}
+			c := &Controller{}
 			if result := c.getNodeConditionPredicate()(tt.input); result != tt.want {
 				t.Errorf("getNodeConditionPredicate() = %v, want %v", result, tt.want)
 			}
diff --git a/staging/src/k8s.io/controller-manager/pkg/features/kube_features.go b/staging/src/k8s.io/controller-manager/pkg/features/kube_features.go
index f1f550a7c2e..46805652f43 100644
--- a/staging/src/k8s.io/controller-manager/pkg/features/kube_features.go
+++ b/staging/src/k8s.io/controller-manager/pkg/features/kube_features.go
@@ -38,6 +38,7 @@ const (
 	// owner @brendandburns
 	// alpha: v1.9
 	// beta: v1.19
+	// ga: v1.21
 	//
 	// Enable nodes to exclude themselves from service load balancers
 	// Original copy from k8s.io/kubernetes/pkg/features/kube_features.go
@@ -58,7 +59,7 @@ func SetupCurrentKubernetesSpecificFeatureGates(featuregates featuregate.Mutable
 // cloudPublicFeatureGates consists of cloud-specific feature keys.
 // To add a new feature, define a key for it at k8s.io/api/pkg/features and add it here.
 var cloudPublicFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
-	LegacyNodeRoleBehavior: {Default: true, PreRelease: featuregate.Beta},
-	ServiceNodeExclusion:   {Default: true, PreRelease: featuregate.Beta},
+	LegacyNodeRoleBehavior: {Default: false, PreRelease: featuregate.GA, LockToDefault: true},
+	ServiceNodeExclusion:   {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
 	IPv6DualStack:          {Default: false, PreRelease: featuregate.Alpha},
 }
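
For reference, a minimal standalone sketch of the label-only behavior this patch leaves behind once the gates are GA and locked. It is not part of the patch: it uses plain label maps instead of *v1.Node, and the helper names and sample node names below are illustrative only; the two label keys are copied from the diff.

package main

import "fmt"

// Label keys as they appear in the patch.
const (
	labelNodeDisruptionExclusion = "node.kubernetes.io/exclude-disruption"
	labelNodeRoleExcludeBalancer = "node.kubernetes.io/exclude-from-external-load-balancers"
)

// excludedFromDisruptionChecks mirrors the simplified isNodeExcludedFromDisruptionChecks:
// only the presence of the exclusion label matters; node names ending in "master" are no
// longer special-cased.
func excludedFromDisruptionChecks(labels map[string]string) bool {
	_, ok := labels[labelNodeDisruptionExclusion]
	return ok
}

// excludedFromLoadBalancers mirrors the label check kept in getNodeConditionPredicate:
// the node-role.kubernetes.io/master label no longer removes a node from LB targets.
func excludedFromLoadBalancers(labels map[string]string) bool {
	_, ok := labels[labelNodeRoleExcludeBalancer]
	return ok
}

func main() {
	// Hypothetical nodes and their labels.
	nodes := map[string]map[string]string{
		"worker-1":        {},
		"control-plane-1": {labelNodeDisruptionExclusion: "", labelNodeRoleExcludeBalancer: ""},
		"master-abc":      {}, // name-based exclusion is gone; only labels count
	}
	for name, labels := range nodes {
		fmt.Printf("%s: excludeDisruption=%v excludeLB=%v\n",
			name, excludedFromDisruptionChecks(labels), excludedFromLoadBalancers(labels))
	}
}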