Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-03 09:22:44 +00:00
Merge pull request #97543 from pacoxu/fix/94495-ga
remove LegacyNodeRoleBehavior and mv ServiceNodeExclusion to GA
Commit: 0300aa712e
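
In short, this change deletes the LegacyNodeRoleBehavior code paths and graduates the ServiceNodeExclusion and NodeDisruptionExclusion gates to GA with LockToDefault, so per-node exclusion is now driven purely by node labels rather than feature-gate checks. A minimal standalone sketch of the resulting label-only behavior (illustrative code, not part of this commit; the excluded helper is hypothetical — both label names appear verbatim in the diff below):

    package main

    import "fmt"

    // The two exclusion labels honored after this change.
    const (
        labelNodeRoleExcludeBalancer = "node.kubernetes.io/exclude-from-external-load-balancers"
        labelNodeDisruptionExclusion = "node.kubernetes.io/exclude-disruption"
    )

    // excluded reports whether a node opts out of external load balancers
    // and/or disruption checks, based solely on its labels; no feature gate
    // is consulted anymore.
    func excluded(labels map[string]string) (fromLB, fromDisruption bool) {
        _, fromLB = labels[labelNodeRoleExcludeBalancer]
        _, fromDisruption = labels[labelNodeDisruptionExclusion]
        return fromLB, fromDisruption
    }

    func main() {
        fmt.Println(excluded(map[string]string{labelNodeDisruptionExclusion: ""})) // false true
    }
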
@@ -12,7 +12,6 @@ go_library(
         "//pkg/controller:go_default_library",
         "//pkg/controller/nodelifecycle/scheduler:go_default_library",
         "//pkg/controller/util/node:go_default_library",
-        "//pkg/features:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
         "//pkg/util/node:go_default_library",
         "//pkg/util/taints:go_default_library",
@@ -24,7 +23,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library",
         "//staging/src/k8s.io/client-go/informers/coordination/v1:go_default_library",
         "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
@@ -54,7 +52,6 @@ go_test(
         "//pkg/controller/nodelifecycle/scheduler:go_default_library",
         "//pkg/controller/testutil:go_default_library",
         "//pkg/controller/util/node:go_default_library",
-        "//pkg/features:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
         "//pkg/util/node:go_default_library",
         "//pkg/util/taints:go_default_library",
@@ -68,7 +65,6 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
-        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library",
         "//staging/src/k8s.io/client-go/informers/coordination/v1:go_default_library",
@@ -76,7 +72,6 @@ go_test(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
         "//staging/src/k8s.io/client-go/testing:go_default_library",
-        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
         "//vendor/k8s.io/utils/pointer:go_default_library",
     ],
 )
@@ -24,7 +24,6 @@ package nodelifecycle
 import (
     "context"
     "fmt"
-    "strings"
     "sync"
     "time"
 
@@ -38,7 +37,6 @@ import (
     "k8s.io/apimachinery/pkg/labels"
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/util/wait"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
     appsv1informers "k8s.io/client-go/informers/apps/v1"
     coordinformers "k8s.io/client-go/informers/coordination/v1"
     coreinformers "k8s.io/client-go/informers/core/v1"
@@ -56,7 +54,6 @@ import (
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler"
     nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
-    kubefeatures "k8s.io/kubernetes/pkg/features"
     kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
     utilnode "k8s.io/kubernetes/pkg/util/node"
     taintutils "k8s.io/kubernetes/pkg/util/taints"
@@ -949,37 +946,9 @@ func (nc *Controller) processNoTaintBaseEviction(node *v1.Node, observedReadyCon
 const labelNodeDisruptionExclusion = "node.kubernetes.io/exclude-disruption"
 
 func isNodeExcludedFromDisruptionChecks(node *v1.Node) bool {
-    // DEPRECATED: will be removed in 1.19
-    if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.LegacyNodeRoleBehavior) {
-        if legacyIsMasterNode(node.Name) {
-            return true
-        }
-    }
-    if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.NodeDisruptionExclusion) {
     if _, ok := node.Labels[labelNodeDisruptionExclusion]; ok {
         return true
     }
-    }
-    return false
-}
-
-// legacyIsMasterNode returns true if given node is a registered master according
-// to the logic historically used for this function. This code path is deprecated
-// and the node disruption exclusion label should be used in the future.
-// This code will not be allowed to update to use the node-role label, since
-// node-roles may not be used for feature enablement.
-// DEPRECATED: Will be removed in 1.19
-func legacyIsMasterNode(nodeName string) bool {
-    // We are trying to capture "master(-...)?$" regexp.
-    // However, using regexp.MatchString() results even in more than 35%
-    // of all space allocations in ControllerManager spent in this function.
-    // That's why we are trying to be a bit smarter.
-    if strings.HasSuffix(nodeName, "master") {
-        return true
-    }
-    if len(nodeName) >= 10 {
-        return strings.HasSuffix(nodeName[:len(nodeName)-3], "master-")
-    }
     return false
 }
 
@@ -33,7 +33,6 @@ import (
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/util/diff"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/client-go/informers"
     appsinformers "k8s.io/client-go/informers/apps/v1"
     coordinformers "k8s.io/client-go/informers/coordination/v1"
@@ -41,12 +40,10 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/kubernetes/fake"
     testcore "k8s.io/client-go/testing"
-    featuregatetesting "k8s.io/component-base/featuregate/testing"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler"
     "k8s.io/kubernetes/pkg/controller/testutil"
     nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
-    "k8s.io/kubernetes/pkg/features"
     kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
     "k8s.io/kubernetes/pkg/util/node"
     taintutils "k8s.io/kubernetes/pkg/util/taints"
@@ -1140,7 +1137,8 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
             description: "Network Disruption: One zone is down - eviction should take place.",
         },
         // NetworkDisruption: Node created long time ago, node controller posted Unknown for a long period
-        // of on first Node, eviction should stop even though -master Node is healthy.
+        // of on first Node, eviction should stop even though Node with label
+        // node.kubernetes.io/exclude-disruption is healthy.
         {
             nodeList: []*v1.Node{
                 {
@@ -1174,6 +1172,7 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
                     v1.LabelTopologyZone:            "zone1",
                     v1.LabelFailureDomainBetaRegion: "region1",
                     v1.LabelFailureDomainBetaZone:   "zone1",
+                    labelNodeDisruptionExclusion:    "",
                 },
             },
             Status: v1.NodeStatus{
@@ -1200,7 +1199,7 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
                 testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
             },
             expectedEvictPods: false,
-            description:       "NetworkDisruption: eviction should stop, only -master Node is healthy",
+            description:       "NetworkDisruption: eviction should stop, only Node with label node.kubernetes.io/exclude-disruption is healthy",
         },
         // NetworkDisruption: Node created long time ago, node controller posted Unknown for a long period of time on both Nodes.
         // Initially both zones down, one comes back - eviction should take place
@@ -3577,26 +3576,15 @@ func Test_isNodeExcludedFromDisruptionChecks(t *testing.T) {
     tests := []struct {
         name  string
-        enableExclusion bool
-        enableLegacy bool
         input *v1.Node
         want  bool
     }{
         {want: false, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{}}}},
         {want: false, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Name: "master-abc"}}},
-        {want: false, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeDisruptionExclusion: ""}}}},
-
-        {want: false, enableExclusion: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Name: "master-abc"}}},
-        {want: false, enableLegacy: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeDisruptionExclusion: ""}}}},
-
-        {want: true, enableLegacy: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Name: "master-abc"}}},
-        {want: true, enableExclusion: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeDisruptionExclusion: ""}}}},
+        {want: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeDisruptionExclusion: ""}}}},
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeDisruptionExclusion, tt.enableExclusion)()
-            defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LegacyNodeRoleBehavior, tt.enableLegacy)()
-
             if result := isNodeExcludedFromDisruptionChecks(tt.input); result != tt.want {
                 t.Errorf("isNodeExcludedFromDisruptionChecks() = %v, want %v", result, tt.want)
             }
@@ -132,13 +132,16 @@ const (
     // owner @smarterclayton
     // alpha: v1.16
     // beta: v1.19
+    // ga: v1.21
     //
     // Enable legacy behavior to vary cluster functionality on the node-role.kubernetes.io labels. On by default (legacy), will be turned off in 1.18.
+    // Lock to false in v1.21 and remove in v1.22.
     LegacyNodeRoleBehavior featuregate.Feature = "LegacyNodeRoleBehavior"
 
     // owner @brendandburns
     // alpha: v1.9
     // beta: v1.19
+    // ga: v1.21
     //
     // Enable nodes to exclude themselves from service load balancers
     ServiceNodeExclusion featuregate.Feature = "ServiceNodeExclusion"
@@ -146,6 +149,7 @@ const (
     // owner @smarterclayton
     // alpha: v1.16
     // beta: v1.19
+    // ga: v1.21
     //
     // Enable nodes to exclude themselves from network disruption checks
     NodeDisruptionExclusion featuregate.Feature = "NodeDisruptionExclusion"
@@ -711,8 +715,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
     CPUManager:        {Default: true, PreRelease: featuregate.Beta},
     CPUCFSQuotaPeriod: {Default: false, PreRelease: featuregate.Alpha},
     TopologyManager:   {Default: true, PreRelease: featuregate.Beta},
-    ServiceNodeExclusion:    {Default: true, PreRelease: featuregate.Beta},
-    NodeDisruptionExclusion: {Default: true, PreRelease: featuregate.Beta},
+    ServiceNodeExclusion:    {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.22
+    NodeDisruptionExclusion: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.22
     CSIDriverRegistry:            {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
     BlockVolume:                  {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
     StorageObjectInUseProtection: {Default: true, PreRelease: featuregate.GA},
@@ -806,5 +810,5 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
     // features that enable backwards compatibility but are scheduled to be removed
     // ...
     HPAScaleToZero: {Default: false, PreRelease: featuregate.Alpha},
-    LegacyNodeRoleBehavior: {Default: true, PreRelease: featuregate.Beta},
+    LegacyNodeRoleBehavior: {Default: false, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.22
 }
@@ -56,28 +56,12 @@ const (
     minRetryDelay = 5 * time.Second
     maxRetryDelay = 300 * time.Second
 
-    // labelNodeRoleMaster specifies that a node is a master. The use of this label within the
-    // controller is deprecated and only considered when the LegacyNodeRoleBehavior feature gate
-    // is on.
-    labelNodeRoleMaster = "node-role.kubernetes.io/master"
-
     // labelNodeRoleExcludeBalancer specifies that the node should not be considered as a target
     // for external load-balancers which use nodes as a second hop (e.g. many cloud LBs which only
     // understand nodes). For services that use externalTrafficPolicy=Local, this may mean that
     // any backends on excluded nodes are not reachable by those external load-balancers.
-    // Implementations of this exclusion may vary based on provider. This label is honored starting
-    // in 1.16 when the ServiceNodeExclusion gate is on.
+    // Implementations of this exclusion may vary based on provider.
     labelNodeRoleExcludeBalancer = "node.kubernetes.io/exclude-from-external-load-balancers"
-
-    // serviceNodeExclusionFeature is the feature gate name that
-    // enables nodes to exclude themselves from service load balancers
-    // originated from: https://github.com/kubernetes/kubernetes/blob/28e800245e/pkg/features/kube_features.go#L178
-    serviceNodeExclusionFeature = "ServiceNodeExclusion"
-
-    // legacyNodeRoleBehaviorFeature is the feature gate name that enables legacy
-    // behavior to vary cluster functionality on the node-role.kubernetes.io
-    // labels.
-    legacyNodeRoleBehaviorFeature = "LegacyNodeRoleBehavior"
 )
 
 type cachedService struct {
@@ -110,9 +94,6 @@ type Controller struct {
     nodeListerSynced cache.InformerSynced
     // services that need to be synced
     queue workqueue.RateLimitingInterface
-    // feature gates stored in local field for better testability
-    legacyNodeRoleFeatureEnabled bool
-    serviceNodeExclusionFeatureEnabled bool
 }
 
 // New returns a new service controller to keep cloud provider service resources
@@ -147,8 +128,6 @@ func New(
         nodeLister:       nodeInformer.Lister(),
         nodeListerSynced: nodeInformer.Informer().HasSynced,
         queue:            workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "service"),
-        legacyNodeRoleFeatureEnabled:       featureGate.Enabled(legacyNodeRoleBehaviorFeature),
-        serviceNodeExclusionFeatureEnabled: featureGate.Enabled(serviceNodeExclusionFeature),
     }
 
     serviceInformer.Informer().AddEventHandlerWithResyncPeriod(
@@ -635,18 +614,9 @@ func nodeSlicesEqualForLB(x, y []*v1.Node) bool {
 
 func (s *Controller) getNodeConditionPredicate() NodeConditionPredicate {
     return func(node *v1.Node) bool {
-        if s.legacyNodeRoleFeatureEnabled {
-            // As of 1.6, we will taint the master, but not necessarily mark it unschedulable.
-            // Recognize nodes labeled as master, and filter them also, as we were doing previously.
-            if _, hasMasterRoleLabel := node.Labels[labelNodeRoleMaster]; hasMasterRoleLabel {
-                return false
-            }
-        }
-        if s.serviceNodeExclusionFeatureEnabled {
         if _, hasExcludeBalancerLabel := node.Labels[labelNodeRoleExcludeBalancer]; hasExcludeBalancerLabel {
             return false
         }
-        }
 
         // If we have no info, don't accept
         if len(node.Status.Conditions) == 0 {
@@ -1373,8 +1373,6 @@ func Test_getNodeConditionPredicate(t *testing.T) {
     tests := []struct {
         name  string
-        enableExclusion bool
-        enableLegacy bool
         input *v1.Node
         want  bool
     }{
@@ -1384,21 +1382,11 @@ func Test_getNodeConditionPredicate(t *testing.T) {
         {want: true, input: &v1.Node{Spec: v1.NodeSpec{Unschedulable: true}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}}}},
 
         {want: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{}}}},
-        {want: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleMaster: ""}}}},
-        {want: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleExcludeBalancer: ""}}}},
-
-        {want: true, enableExclusion: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleMaster: ""}}}},
-        {want: true, enableLegacy: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleExcludeBalancer: ""}}}},
-
-        {want: false, enableLegacy: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleMaster: ""}}}},
-        {want: false, enableExclusion: true, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleExcludeBalancer: ""}}}},
+        {want: false, input: &v1.Node{Status: validNodeStatus, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{labelNodeRoleExcludeBalancer: ""}}}},
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            c := &Controller{
-                legacyNodeRoleFeatureEnabled:       tt.enableLegacy,
-                serviceNodeExclusionFeatureEnabled: tt.enableExclusion,
-            }
+            c := &Controller{}
 
             if result := c.getNodeConditionPredicate()(tt.input); result != tt.want {
                 t.Errorf("getNodeConditionPredicate() = %v, want %v", result, tt.want)
@@ -38,6 +38,7 @@ const (
     // owner @brendandburns
     // alpha: v1.9
     // beta: v1.19
+    // ga: v1.21
     //
     // Enable nodes to exclude themselves from service load balancers
     // Original copy from k8s.io/kubernetes/pkg/features/kube_features.go
@@ -58,7 +59,7 @@ func SetupCurrentKubernetesSpecificFeatureGates(featuregates featuregate.Mutable
 // cloudPublicFeatureGates consists of cloud-specific feature keys.
 // To add a new feature, define a key for it at k8s.io/api/pkg/features and add it here.
 var cloudPublicFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
-    LegacyNodeRoleBehavior: {Default: true, PreRelease: featuregate.Beta},
-    ServiceNodeExclusion:   {Default: true, PreRelease: featuregate.Beta},
+    LegacyNodeRoleBehavior: {Default: false, PreRelease: featuregate.GA, LockToDefault: true},
+    ServiceNodeExclusion:   {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
     IPv6DualStack:          {Default: false, PreRelease: featuregate.Alpha},
 }
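
With both gates locked to their GA defaults, the supported way to opt a node out is the label itself, e.g. kubectl label node <node-name> node.kubernetes.io/exclude-from-external-load-balancers= (the node name is a placeholder); node.kubernetes.io/exclude-disruption works the same way for disruption checks.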