Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-21 19:01:49 +00:00)

Merge pull request #115204 from alexanderConstantinescu/kccm-del-taint-pred

[KCCM - service controller]: KEP-3458 implementation

Commit 4b7bd457c4
@@ -739,6 +739,14 @@ const (
 	// Enables kubelet support to size memory backed volumes
 	SizeMemoryBackedVolumes featuregate.Feature = "SizeMemoryBackedVolumes"
 
+	// owner: @alexanderConstantinescu
+	// kep: http://kep.k8s.io/3458
+	// beta: v1.27
+	//
+	// Enables fewer load balancer re-configurations by the service controller
+	// (KCCM) as an effect of changing node state.
+	StableLoadBalancerNodeSet featuregate.Feature = "StableLoadBalancerNodeSet"
+
 	// owner: @mattcary
 	// alpha: v1.22
 	//
@@ -1057,6 +1065,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 
 	SizeMemoryBackedVolumes: {Default: true, PreRelease: featuregate.Beta},
 
+	StableLoadBalancerNodeSet: {Default: true, PreRelease: featuregate.Beta},
+
 	StatefulSetAutoDeletePVC: {Default: false, PreRelease: featuregate.Alpha},
 
 	StatefulSetStartOrdinal: {Default: true, PreRelease: featuregate.Beta},
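
For orientation, a minimal sketch of how a component consults this gate at runtime. The helper name is illustrative (not from this PR); the Enabled call is the same one the service controller uses later in this diff:

package example

import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/controller-manager/pkg/features"
)

// stableNodeSetEnabled is an illustrative helper: with the Beta default of
// true in the map above, this returns true unless the operator disables the
// gate, e.g. via --feature-gates=StableLoadBalancerNodeSet=false on the
// controller manager.
func stableNodeSetEnabled() bool {
	return utilfeature.DefaultFeatureGate.Enabled(features.StableLoadBalancerNodeSet)
}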
@@ -29,6 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
@@ -41,6 +42,7 @@ import (
 	servicehelper "k8s.io/cloud-provider/service/helpers"
 	"k8s.io/component-base/featuregate"
 	controllersmetrics "k8s.io/component-base/metrics/prometheus/controllers"
+	"k8s.io/controller-manager/pkg/features"
 	"k8s.io/klog/v2"
 )
 
@@ -658,6 +660,12 @@ func nodeNames(nodes []*v1.Node) sets.String {
 }
 
 func shouldSyncUpdatedNode(oldNode, newNode *v1.Node) bool {
+	if utilfeature.DefaultFeatureGate.Enabled(features.StableLoadBalancerNodeSet) {
+		// Only Nodes with changes to the label
+		// "node.kubernetes.io/exclude-from-external-load-balancers" will
+		// trigger a load balancer re-sync.
+		return respectsPredicates(oldNode, nodeIncludedPredicate) != respectsPredicates(newNode, nodeIncludedPredicate)
+	}
 	// Evaluate the individual node exclusion predicate before evaluating the
 	// compounded result of all predicates. We don't sync ETP=local services
 	// for changes on the readiness condition, hence if a node remains NotReady
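
A self-contained sketch of what the gated branch above computes: with the gate on, a node update triggers a re-sync only when the node flips in or out of the exclusion-label set. The predicate and helper bodies below are assumptions re-declared for a runnable example; only their names come from the diff:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// NodeConditionPredicate mirrors the type name used in the diff.
type NodeConditionPredicate func(node *v1.Node) bool

// nodeIncludedPredicate (assumed body): a node counts as included unless it
// carries the "node.kubernetes.io/exclude-from-external-load-balancers" label.
func nodeIncludedPredicate(node *v1.Node) bool {
	_, excluded := node.Labels[v1.LabelNodeExcludeBalancers]
	return !excluded
}

// respectsPredicates (assumed body): true iff the node satisfies every predicate.
func respectsPredicates(node *v1.Node, predicates ...NodeConditionPredicate) bool {
	for _, p := range predicates {
		if !p(node) {
			return false
		}
	}
	return true
}

func main() {
	oldNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "n1", Labels: map[string]string{}}}
	newNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "n1", Labels: map[string]string{v1.LabelNodeExcludeBalancers: ""}}}
	// Prints "true": the exclusion label flipped, so the update should sync.
	fmt.Println(respectsPredicates(oldNode, nodeIncludedPredicate) != respectsPredicates(newNode, nodeIncludedPredicate))
}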
@@ -712,6 +720,7 @@ func (c *Controller) nodeSyncService(svc *v1.Service, oldNodes, newNodes []*v1.N
 	klog.V(4).Infof("nodeSyncService started for service %s/%s", svc.Namespace, svc.Name)
 	if err := c.lockedUpdateLoadBalancerHosts(svc, newNodes); err != nil {
 		runtime.HandleError(fmt.Errorf("failed to update load balancer hosts for service %s/%s: %v", svc.Namespace, svc.Name, err))
+		nodeSyncErrorCount.Inc()
 		return retNeedRetry
 	}
 	klog.V(4).Infof("nodeSyncService finished successfully for service %s/%s", svc.Namespace, svc.Name)
@@ -755,6 +764,7 @@ func (c *Controller) updateLoadBalancerHosts(ctx context.Context, services []*v1
 // associated with the service.
 func (c *Controller) lockedUpdateLoadBalancerHosts(service *v1.Service, hosts []*v1.Node) error {
 	startTime := time.Now()
+	loadBalancerSyncCount.Inc()
 	defer func() {
 		latency := time.Since(startTime).Seconds()
 		klog.V(4).Infof("It took %v seconds to update load balancer hosts for service %s/%s", latency, service.Namespace, service.Name)
@@ -932,9 +942,21 @@ var (
 		nodeIncludedPredicate,
 		nodeUnTaintedPredicate,
 	}
+	stableNodeSetPredicates []NodeConditionPredicate = []NodeConditionPredicate{
+		nodeIncludedPredicate,
+		// This is not perfect, but probably good enough. We won't update the
+		// LBs just because the taint was added (see shouldSyncUpdatedNode),
+		// but if any other situation causes an LB sync, tainted nodes will
+		// be excluded at that time, causing connections on said node to not
+		// be drained.
+		nodeUnTaintedPredicate,
+	}
 )
 
 func getNodePredicatesForService(service *v1.Service) []NodeConditionPredicate {
+	if utilfeature.DefaultFeatureGate.Enabled(features.StableLoadBalancerNodeSet) {
+		return stableNodeSetPredicates
+	}
 	if service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyLocal {
 		return etpLocalNodePredicates
 	}
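
A hedged sketch of how these predicate slices are typically consumed: the filtering helper below is hypothetical (name and shape assumed, reusing the NodeConditionPredicate and respectsPredicates sketches above), showing a node list being filtered through the predicates returned by getNodePredicatesForService:

// filterWithPredicates is an illustrative helper, not from this diff: it
// keeps only the nodes that satisfy every predicate chosen for a service.
func filterWithPredicates(nodes []*v1.Node, predicates ...NodeConditionPredicate) []*v1.Node {
	var filtered []*v1.Node
	for _, node := range nodes {
		if respectsPredicates(node, predicates...) {
			filtered = append(filtered, node)
		}
	}
	return filtered
}

// Usage sketch: with the gate on, every service uses stableNodeSetPredicates,
// so ETP=Local and cluster-wide services see the same stable node set:
// hosts := filterWithPredicates(allNodes, getNodePredicatesForService(svc)...)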
@@ -36,6 +36,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/sets"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/kubernetes/scheme"
@@ -45,6 +46,9 @@ import (
 	"k8s.io/client-go/util/workqueue"
 	fakecloud "k8s.io/cloud-provider/fake"
 	servicehelper "k8s.io/cloud-provider/service/helpers"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
+	"k8s.io/controller-manager/pkg/features"
+	_ "k8s.io/controller-manager/pkg/features/register"
 
 	utilpointer "k8s.io/utils/pointer"
 )
@@ -573,6 +577,7 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
 }
 
 func TestNodeChangesForExternalTrafficPolicyLocalServices(t *testing.T) {
+	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StableLoadBalancerNodeSet, false)()
 	node1 := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}}}
 	node2 := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}}}
 	node2NotReady := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}}}
@@ -744,6 +749,174 @@ func TestNodeChangesForExternalTrafficPolicyLocalServices(t *testing.T) {
 	}
 }
 
+func TestNodeChangesForStableNodeSetEnabled(t *testing.T) {
+	node1 := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node0"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}}}
+	node2 := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}}}
+	node2NotReady := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}}}
+	node2Tainted := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Spec: v1.NodeSpec{Taints: []v1.Taint{{Key: ToBeDeletedTaint}}}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}}}
+	node2SpuriousChange := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Status: v1.NodeStatus{Phase: v1.NodeTerminated, Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}}}
+	node2Exclude := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{v1.LabelNodeExcludeBalancers: ""}}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}}}
+	node3 := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node73"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}}}
+
+	type stateChanges struct {
+		nodes       []*v1.Node
+		syncCallErr bool
+	}
+
+	etpLocalservice1 := newETPLocalService("s0", v1.ServiceTypeLoadBalancer)
+	etpLocalservice2 := newETPLocalService("s1", v1.ServiceTypeLoadBalancer)
+	service3 := defaultExternalService()
+
+	services := []*v1.Service{etpLocalservice1, etpLocalservice2, service3}
+
+	for _, tc := range []struct {
+		desc                string
+		expectedUpdateCalls []fakecloud.UpdateBalancerCall
+		stateChanges        []stateChanges
+		initialState        []*v1.Node
+	}{
+		{
+			desc:         "No node changes",
+			initialState: []*v1.Node{node1, node2, node3},
+			stateChanges: []stateChanges{
+				{
+					nodes: []*v1.Node{node1, node2, node3},
+				},
+			},
+			expectedUpdateCalls: []fakecloud.UpdateBalancerCall{},
+		},
+		{
+			desc:         "1 new node gets added",
+			initialState: []*v1.Node{node1, node2},
+			stateChanges: []stateChanges{
+				{
+					nodes: []*v1.Node{node1, node2, node3},
+				},
+			},
+			expectedUpdateCalls: []fakecloud.UpdateBalancerCall{
+				{Service: etpLocalservice1, Hosts: []*v1.Node{node1, node2, node3}},
+				{Service: etpLocalservice2, Hosts: []*v1.Node{node1, node2, node3}},
+				{Service: service3, Hosts: []*v1.Node{node1, node2, node3}},
+			},
+		},
+		{
+			desc:         "1 new node gets added - with retries",
+			initialState: []*v1.Node{node1, node2},
+			stateChanges: []stateChanges{
+				{
+					nodes:       []*v1.Node{node1, node2, node3},
+					syncCallErr: true,
+				},
+				{
+					nodes: []*v1.Node{node1, node2, node3},
+				},
+			},
+			expectedUpdateCalls: []fakecloud.UpdateBalancerCall{
+				{Service: etpLocalservice1, Hosts: []*v1.Node{node1, node2, node3}},
+				{Service: etpLocalservice2, Hosts: []*v1.Node{node1, node2, node3}},
+				{Service: service3, Hosts: []*v1.Node{node1, node2, node3}},
+			},
+		},
+		{
+			desc:         "1 node goes NotReady",
+			initialState: []*v1.Node{node1, node2, node3},
+			stateChanges: []stateChanges{
+				{
+					nodes: []*v1.Node{node1, node2NotReady, node3},
+				},
+			},
+			expectedUpdateCalls: []fakecloud.UpdateBalancerCall{},
+		},
+		{
+			desc:         "1 node gets Tainted",
+			initialState: []*v1.Node{node1, node2, node3},
+			stateChanges: []stateChanges{
+				{
+					nodes: []*v1.Node{node1, node2Tainted, node3},
+				},
+			},
+			expectedUpdateCalls: []fakecloud.UpdateBalancerCall{
+				{Service: etpLocalservice1, Hosts: []*v1.Node{node1, node3}},
+				{Service: etpLocalservice2, Hosts: []*v1.Node{node1, node3}},
+				{Service: service3, Hosts: []*v1.Node{node1, node3}},
+			},
+		},
+		{
+			desc:         "1 node goes Ready",
+			initialState: []*v1.Node{node1, node2NotReady, node3},
+			stateChanges: []stateChanges{
+				{
+					nodes: []*v1.Node{node1, node2, node3},
+				},
+			},
+			expectedUpdateCalls: []fakecloud.UpdateBalancerCall{},
+		},
+		{
+			desc:         "1 node gets excluded",
+			initialState: []*v1.Node{node1, node2, node3},
+			stateChanges: []stateChanges{
+				{
+					nodes: []*v1.Node{node1, node2Exclude, node3},
+				},
+			},
+			expectedUpdateCalls: []fakecloud.UpdateBalancerCall{
+				{Service: etpLocalservice1, Hosts: []*v1.Node{node1, node3}},
+				{Service: etpLocalservice2, Hosts: []*v1.Node{node1, node3}},
+				{Service: service3, Hosts: []*v1.Node{node1, node3}},
+			},
+		},
+		{
+			desc:         "1 old node gets deleted",
+			initialState: []*v1.Node{node1, node2, node3},
+			stateChanges: []stateChanges{
+				{
+					nodes: []*v1.Node{node1, node2},
+				},
+			},
+			expectedUpdateCalls: []fakecloud.UpdateBalancerCall{
+				{Service: etpLocalservice1, Hosts: []*v1.Node{node1, node2}},
+				{Service: etpLocalservice2, Hosts: []*v1.Node{node1, node2}},
+				{Service: service3, Hosts: []*v1.Node{node1, node2}},
+			},
+		},
+		{
+			desc:         "1 spurious node update",
+			initialState: []*v1.Node{node1, node2, node3},
+			stateChanges: []stateChanges{
+				{
+					nodes: []*v1.Node{node1, node2SpuriousChange, node3},
+				},
+			},
+			expectedUpdateCalls: []fakecloud.UpdateBalancerCall{},
+		},
+	} {
+		t.Run(tc.desc, func(t *testing.T) {
+			controller, cloud, _ := newController()
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			controller.lastSyncedNodes = tc.initialState
+
+			for _, state := range tc.stateChanges {
+				setupState := func() {
+					controller.nodeLister = newFakeNodeLister(nil, state.nodes...)
+					if state.syncCallErr {
+						cloud.Err = fmt.Errorf("error please")
+					}
+				}
+				cleanupState := func() {
+					cloud.Err = nil
+				}
+				setupState()
+				controller.updateLoadBalancerHosts(ctx, services, 3)
+				cleanupState()
+			}
+
+			compareUpdateCalls(t, tc.expectedUpdateCalls, cloud.UpdateCalls)
+		})
+	}
+}
+
 func TestNodeChangesInExternalLoadBalancer(t *testing.T) {
 	node1 := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}}}
 	node2 := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}}}
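
The assertion helper compareUpdateCalls is used in the test above but not shown in any hunk here. A hedged re-implementation sketch of what it plausibly checks, with names and semantics assumed from usage (the real helper lives elsewhere in controller_test.go):

// compareUpdateCallsSketch is hypothetical, not the real helper: it asserts
// that the fake cloud recorded exactly the expected LB update calls,
// comparing service names and host counts per call.
func compareUpdateCallsSketch(t *testing.T, expected, actual []fakecloud.UpdateBalancerCall) {
	t.Helper()
	if len(expected) != len(actual) {
		t.Fatalf("expected %d update calls, got %d", len(expected), len(actual))
	}
	for i := range expected {
		if expected[i].Service.Name != actual[i].Service.Name {
			t.Errorf("call %d: expected service %q, got %q", i, expected[i].Service.Name, actual[i].Service.Name)
		}
		if len(expected[i].Hosts) != len(actual[i].Hosts) {
			t.Errorf("call %d: expected %d hosts, got %d", i, len(expected[i].Hosts), len(actual[i].Hosts))
		}
	}
}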
@@ -1891,6 +2064,7 @@ func Test_shouldSyncUpdatedNode_individualPredicates(t *testing.T) {
 		oldNode    *v1.Node
 		newNode    *v1.Node
 		shouldSync bool
+		stableNodeSetEnabled bool
 	}{
 		{
 			name: "taint F->T",
@@ -2268,6 +2442,146 @@ func Test_shouldSyncUpdatedNode_individualPredicates(t *testing.T) {
 			},
 			shouldSync: false,
 		},
+		{
+			name: "excluded F->T",
+			oldNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:   "node",
+					Labels: map[string]string{},
+				},
+				Status: v1.NodeStatus{
+					Conditions: []v1.NodeCondition{
+						{
+							Type:   v1.NodeReady,
+							Status: v1.ConditionTrue,
+						},
+					},
+				},
+			},
+			newNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "node",
+					Labels: map[string]string{
+						v1.LabelNodeExcludeBalancers: "",
+					},
+				},
+				Status: v1.NodeStatus{
+					Conditions: []v1.NodeCondition{
+						{
+							Type:   v1.NodeReady,
+							Status: v1.ConditionTrue,
+						},
+					},
+				},
+			},
+			shouldSync:           true,
+			stableNodeSetEnabled: true,
+		},
+		{
+			name: "excluded changed T->F",
+			oldNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "node",
+					Labels: map[string]string{
+						v1.LabelNodeExcludeBalancers: "",
+					},
+				},
+				Status: v1.NodeStatus{
+					Conditions: []v1.NodeCondition{
+						{
+							Type:   v1.NodeReady,
+							Status: v1.ConditionTrue,
+						},
+					},
+				},
+			},
+			newNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:   "node",
+					Labels: map[string]string{},
+				},
+				Status: v1.NodeStatus{
+					Conditions: []v1.NodeCondition{
+						{
+							Type:   v1.NodeReady,
+							Status: v1.ConditionTrue,
+						},
+					},
+				},
+			},
+			shouldSync:           true,
+			stableNodeSetEnabled: true,
+		},
+		{
+			name: "excluded changed T->T",
+			oldNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "node",
+					Labels: map[string]string{
+						v1.LabelNodeExcludeBalancers: "",
+					},
+				},
+				Status: v1.NodeStatus{
+					Conditions: []v1.NodeCondition{
+						{
+							Type:   v1.NodeReady,
+							Status: v1.ConditionTrue,
+						},
+					},
+				},
+			},
+			newNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "node",
+					Labels: map[string]string{
+						v1.LabelNodeExcludeBalancers: "",
+					},
+				},
+				Status: v1.NodeStatus{
+					Conditions: []v1.NodeCondition{
+						{
+							Type:   v1.NodeReady,
+							Status: v1.ConditionTrue,
+						},
+					},
+				},
+			},
+			shouldSync:           false,
+			stableNodeSetEnabled: true,
+		},
+		{
+			name: "excluded changed F->F",
+			oldNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:   "node",
+					Labels: map[string]string{},
+				},
+				Status: v1.NodeStatus{
+					Conditions: []v1.NodeCondition{
+						{
+							Type:   v1.NodeReady,
+							Status: v1.ConditionTrue,
+						},
+					},
+				},
+			},
+			newNode: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:   "node",
+					Labels: map[string]string{},
+				},
+				Status: v1.NodeStatus{
+					Conditions: []v1.NodeCondition{
+						{
+							Type:   v1.NodeReady,
+							Status: v1.ConditionTrue,
+						},
+					},
+				},
+			},
+			shouldSync:           false,
+			stableNodeSetEnabled: true,
+		},
 		{
 			name: "other label changed F->T",
 			oldNode: &v1.Node{
@@ -2730,7 +3044,8 @@ func Test_shouldSyncUpdatedNode_individualPredicates(t *testing.T) {
 		},
 	}
 	for _, testcase := range testcases {
-		t.Run(testcase.name, func(t *testing.T) {
+		t.Run(fmt.Sprintf("%s - StableLoadBalancerNodeSet: %v", testcase.name, testcase.stableNodeSetEnabled), func(t *testing.T) {
+			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StableLoadBalancerNodeSet, testcase.stableNodeSetEnabled)()
 			shouldSync := shouldSyncUpdatedNode(testcase.oldNode, testcase.newNode)
 			if shouldSync != testcase.shouldSync {
 				t.Errorf("unexpected result from shouldSyncNode, expected: %v, actual: %v", testcase.shouldSync, shouldSync)
@@ -2740,6 +3055,8 @@ func Test_shouldSyncUpdatedNode_individualPredicates(t *testing.T) {
 }
 
 func Test_shouldSyncUpdatedNode_compoundedPredicates(t *testing.T) {
+	for _, fgEnabled := range []bool{true, false} {
+		defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StableLoadBalancerNodeSet, fgEnabled)()
 	testcases := []struct {
 		name    string
 		oldNode *v1.Node
@@ -3244,7 +3561,7 @@ func Test_shouldSyncUpdatedNode_compoundedPredicates(t *testing.T) {
 		},
 	}
 	for _, testcase := range testcases {
-		t.Run(testcase.name, func(t *testing.T) {
+		t.Run(fmt.Sprintf("%s - StableLoadBalancerNodeSet: %v", testcase.name, fgEnabled), func(t *testing.T) {
 			shouldSync := shouldSyncUpdatedNode(testcase.oldNode, testcase.newNode)
 			if shouldSync != testcase.shouldSync {
 				t.Errorf("unexpected result from shouldSyncNode, expected: %v, actual: %v", testcase.shouldSync, shouldSync)
@@ -3252,6 +3569,7 @@ func Test_shouldSyncUpdatedNode_compoundedPredicates(t *testing.T) {
 		})
 	}
+	}
 }
 
 type fakeNodeLister struct {
 	cache []*v1.Node
@@ -17,9 +17,10 @@ limitations under the License.
 package service
 
 import (
+	"sync"
+
 	"k8s.io/component-base/metrics"
 	"k8s.io/component-base/metrics/legacyregistry"
-	"sync"
 )
 
 const (
@@ -32,12 +33,26 @@ var register sync.Once
 // registerMetrics registers service-controller metrics.
 func registerMetrics() {
 	register.Do(func() {
+		legacyregistry.MustRegister(loadBalancerSyncCount)
 		legacyregistry.MustRegister(nodeSyncLatency)
+		legacyregistry.MustRegister(nodeSyncErrorCount)
 		legacyregistry.MustRegister(updateLoadBalancerHostLatency)
 	})
 }
 
 var (
+	loadBalancerSyncCount = metrics.NewCounter(&metrics.CounterOpts{
+		Name:           "loadbalancer_sync_total",
+		Subsystem:      subSystemName,
+		Help:           "A metric counting the number of times any load balancer has been configured, as an effect of service/node changes on the cluster",
+		StabilityLevel: metrics.ALPHA,
+	})
+	nodeSyncErrorCount = metrics.NewCounter(&metrics.CounterOpts{
+		Name:           "nodesync_error_total",
+		Subsystem:      subSystemName,
+		Help:           "A metric counting the number of times any load balancer has been configured and errored, as an effect of node changes on the cluster",
+		StabilityLevel: metrics.ALPHA,
+	})
 	nodeSyncLatency = metrics.NewHistogram(&metrics.HistogramOpts{
 		Name:      "nodesync_latency_seconds",
 		Subsystem: subSystemName,
@@ -46,7 +61,6 @@ var (
 		Buckets:        metrics.ExponentialBuckets(1, 2, 15),
 		StabilityLevel: metrics.ALPHA,
 	})
-
 	updateLoadBalancerHostLatency = metrics.NewHistogram(&metrics.HistogramOpts{
 		Name:      "update_loadbalancer_host_latency_seconds",
 		Subsystem: subSystemName,
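
As a hedged illustration of how these instruments are driven, mirroring the increments this PR adds to nodeSyncService and lockedUpdateLoadBalancerHosts. The wrapper function is hypothetical and assumes it sits in the same package service with "time" imported:

// instrumentedSync is an assumed wrapper, not from this diff: count every LB
// sync attempt, record errors, and observe wall-clock latency, following the
// same pattern as the controller code in the hunks above.
func instrumentedSync(update func() error) error {
	startTime := time.Now()
	loadBalancerSyncCount.Inc()
	defer func() {
		updateLoadBalancerHostLatency.Observe(time.Since(startTime).Seconds())
	}()
	if err := update(); err != nil {
		nodeSyncErrorCount.Inc()
		return err
	}
	return nil
}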
@@ -31,6 +31,14 @@ const (
 	// (upper before any lower case character) order. This reduces the risk
 	// of code conflicts because changes are more likely to be scattered
 	// across the file.
+
+	// owner: @alexanderConstantinescu
+	// kep: http://kep.k8s.io/3458
+	// beta: v1.27
+	//
+	// Enables fewer load balancer re-configurations by the service controller
+	// (KCCM) as an effect of changing node state.
+	StableLoadBalancerNodeSet featuregate.Feature = "StableLoadBalancerNodeSet"
 )
 
 func SetupCurrentKubernetesSpecificFeatureGates(featuregates featuregate.MutableFeatureGate) error {
@@ -39,4 +47,6 @@ func SetupCurrentKubernetesSpecificFeatureGates(featuregates featuregate.Mutable
 
 // cloudPublicFeatureGates consists of cloud-specific feature keys.
 // To add a new feature, define a key for it at k8s.io/api/pkg/features and add it here.
-var cloudPublicFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{}
+var cloudPublicFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
+	StableLoadBalancerNodeSet: {Default: true, PreRelease: featuregate.Beta},
+}
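
A hedged wiring sketch, not part of the PR: an out-of-tree cloud-controller-manager could surface the new gate by registering the cloud-public gates on a mutable feature gate, assuming (as its placement suggests) that SetupCurrentKubernetesSpecificFeatureGates adds the cloudPublicFeatureGates map:

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
	"k8s.io/controller-manager/pkg/features"
)

func main() {
	// Register cloud-public gates (including StableLoadBalancerNodeSet) on a
	// fresh mutable gate; SetupCurrentKubernetesSpecificFeatureGates is the
	// function shown in the hunk above.
	gate := featuregate.NewFeatureGate()
	if err := features.SetupCurrentKubernetesSpecificFeatureGates(gate); err != nil {
		panic(err)
	}
	fmt.Println(gate.Enabled(features.StableLoadBalancerNodeSet)) // true (Beta default)
}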