fix: remove VMSS and VMSS instances from SLB backend pool only when necessary
This commit is contained in:
parent 313b43a8cb
commit 81eb757430
@@ -278,6 +278,8 @@ type Cloud struct {
 	nodeResourceGroups map[string]string
 	// unmanagedNodes holds a list of nodes not managed by Azure cloud provider.
 	unmanagedNodes sets.String
+	// excludeLoadBalancerNodes holds a list of nodes that should be excluded from LoadBalancer.
+	excludeLoadBalancerNodes sets.String
 	// nodeInformerSynced is for determining if the informer has synced.
 	nodeInformerSynced cache.InformerSynced
 
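The new cache is a plain string set keyed by node name. As a minimal standalone sketch (not part of the commit), this is how the sets.String type from k8s.io/apimachinery behaves for the insert/delete/lookup operations the rest of this diff relies on:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// excludeLoadBalancerNodes in the diff is exactly this kind of set,
	// maintained by the node informer handlers shown below.
	excluded := sets.NewString()

	excluded.Insert("vm-0") // node gains the exclude label or is unmanaged
	excluded.Delete("vm-0") // node loses the label again
	excluded.Insert("vm-1")

	fmt.Println(excluded.Has("vm-1")) // true
	fmt.Println(excluded.List())      // [vm-1] (sorted)
}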
@@ -339,11 +341,12 @@ func NewCloudWithoutFeatureGates(configReader io.Reader) (*Cloud, error) {
 	}
 
 	az := &Cloud{
 		nodeNames:          sets.NewString(),
 		nodeZones:          map[string]sets.String{},
 		nodeResourceGroups: map[string]string{},
 		unmanagedNodes:     sets.NewString(),
-		routeCIDRs:         map[string]string{},
+		excludeLoadBalancerNodes: sets.NewString(),
+		routeCIDRs:               map[string]string{},
 	}
 
 	err = az.InitializeCloudFromConfig(config, false)
@@ -746,10 +749,6 @@ func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
 		UpdateFunc: func(prev, obj interface{}) {
 			prevNode := prev.(*v1.Node)
 			newNode := obj.(*v1.Node)
-			if newNode.Labels[v1.LabelFailureDomainBetaZone] ==
-				prevNode.Labels[v1.LabelFailureDomainBetaZone] {
-				return
-			}
 			if newNode.Labels[v1.LabelTopologyZone] ==
 				prevNode.Labels[v1.LabelTopologyZone] {
 				return
@@ -817,6 +816,12 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
 		managed, ok := prevNode.ObjectMeta.Labels[managedByAzureLabel]
 		if ok && managed == "false" {
 			az.unmanagedNodes.Delete(prevNode.ObjectMeta.Name)
+			az.excludeLoadBalancerNodes.Delete(prevNode.ObjectMeta.Name)
+		}
+
+		// Remove from excludeLoadBalancerNodes cache.
+		if _, hasExcludeBalancerLabel := prevNode.ObjectMeta.Labels[v1.LabelNodeExcludeBalancers]; hasExcludeBalancerLabel {
+			az.excludeLoadBalancerNodes.Delete(prevNode.ObjectMeta.Name)
 		}
 	}
 
@@ -843,6 +848,12 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
 		managed, ok := newNode.ObjectMeta.Labels[managedByAzureLabel]
 		if ok && managed == "false" {
 			az.unmanagedNodes.Insert(newNode.ObjectMeta.Name)
+			az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name)
+		}
+
+		// Add to excludeLoadBalancerNodes cache.
+		if _, hasExcludeBalancerLabel := newNode.ObjectMeta.Labels[v1.LabelNodeExcludeBalancers]; hasExcludeBalancerLabel {
+			az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name)
 		}
 	}
 }
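The two hunks above encode one membership rule: a node belongs in excludeLoadBalancerNodes if it is unmanaged (managedByAzureLabel set to "false") or carries v1.LabelNodeExcludeBalancers. A hedged sketch of that rule as a standalone predicate — shouldExclude and the value of managedByAzureLabel here are illustrative assumptions, not code from the commit:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Assumed value of the provider's managedByAzureLabel constant.
const managedByAzureLabel = "kubernetes.azure.com/managed"

// shouldExclude mirrors the rule updateNodeCaches applies on each add/update:
// unmanaged nodes and label-excluded nodes both land in the exclusion set.
func shouldExclude(node *v1.Node) bool {
	if managed, ok := node.Labels[managedByAzureLabel]; ok && managed == "false" {
		return true
	}
	_, hasExcludeBalancerLabel := node.Labels[v1.LabelNodeExcludeBalancers]
	return hasExcludeBalancerLabel
}

func main() {
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{
		Name:   "vm2",
		Labels: map[string]string{v1.LabelNodeExcludeBalancers: "true"},
	}}
	fmt.Println(shouldExclude(node)) // true
}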
@@ -948,16 +959,23 @@ func (az *Cloud) GetUnmanagedNodes() (sets.String, error) {
 	return sets.NewString(az.unmanagedNodes.List()...), nil
 }
 
-// ShouldNodeExcludedFromLoadBalancer returns true if node is unmanaged or in external resource group.
-func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(node *v1.Node) bool {
-	labels := node.ObjectMeta.Labels
-	if rg, ok := labels[externalResourceGroupLabel]; ok && !strings.EqualFold(rg, az.ResourceGroup) {
-		return true
-	}
-
-	if managed, ok := labels[managedByAzureLabel]; ok && managed == "false" {
-		return true
-	}
-
-	return false
+// ShouldNodeExcludedFromLoadBalancer returns true if node is unmanaged, in external resource group or labeled with "node.kubernetes.io/exclude-from-external-load-balancers".
+func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(nodeName string) (bool, error) {
+	// Kubelet won't set az.nodeInformerSynced, always return nil.
+	if az.nodeInformerSynced == nil {
+		return false, nil
+	}
+
+	az.nodeCachesLock.RLock()
+	defer az.nodeCachesLock.RUnlock()
+	if !az.nodeInformerSynced() {
+		return false, fmt.Errorf("node informer is not synced when trying to fetch node caches")
+	}
+
+	// Return true if the node is in external resource group.
+	if cachedRG, ok := az.nodeResourceGroups[nodeName]; ok && !strings.EqualFold(cachedRG, az.ResourceGroup) {
+		return true, nil
+	}
+
+	return az.excludeLoadBalancerNodes.Has(nodeName), nil
 }
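The signature change above, from (bool) to (bool, error), ripples through every VMSet implementation later in this diff. A sketch of the resulting calling pattern, assuming the azure package scope of this diff — filterLoadBalancerNodes itself is hypothetical:

// filterLoadBalancerNodes is a hypothetical helper in the same package,
// showing how callers consume the new two-value result.
func filterLoadBalancerNodes(az *Cloud, nodes []*v1.Node) ([]*v1.Node, error) {
	kept := make([]*v1.Node, 0, len(nodes))
	for _, node := range nodes {
		excluded, err := az.ShouldNodeExcludedFromLoadBalancer(node.Name)
		if err != nil {
			// New failure mode: the node informer cache is not synced yet.
			return nil, err
		}
		if excluded {
			continue
		}
		kept = append(kept, node)
	}
	return kept, nil
}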
@@ -66,12 +66,13 @@ func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) {
 			MaximumLoadBalancerRuleCount: 250,
 			VMType:                       vmTypeStandard,
 		},
 		nodeZones:          map[string]sets.String{},
 		nodeInformerSynced: func() bool { return true },
 		nodeResourceGroups: map[string]string{},
 		unmanagedNodes:     sets.NewString(),
-		routeCIDRs:         map[string]string{},
-		eventRecorder:      &record.FakeRecorder{},
+		excludeLoadBalancerNodes: sets.NewString(),
+		routeCIDRs:               map[string]string{},
+		eventRecorder:            &record.FakeRecorder{},
 	}
 	az.DisksClient = mockdiskclient.NewMockInterface(ctrl)
 	az.InterfacesClient = mockinterfaceclient.NewMockInterface(ctrl)
@@ -335,7 +335,7 @@ func (az *Cloud) cleanBackendpoolForPrimarySLB(primarySLB *network.LoadBalancer,
 			},
 		}
 		// decouple the backendPool from the node
-		err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted)
+		err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted, true)
 		if err != nil {
 			return nil, err
 		}
@@ -1075,15 +1075,6 @@ func (az *Cloud) findFrontendIPConfigOfService(
 	return nil, false, nil
 }
 
-func nodeNameInNodes(nodeName string, nodes []*v1.Node) bool {
-	for _, node := range nodes {
-		if strings.EqualFold(nodeName, node.Name) {
-			return true
-		}
-	}
-	return false
-}
-
 // reconcileLoadBalancer ensures load balancer exists and the frontend ip config is setup.
 // This also reconciles the Service's Ports with the LoadBalancer config.
 // This entails adding rules/probes for expected Ports and removing stale rules/ports.
@@ -1139,7 +1130,12 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 					// would not be in the `nodes` slice. We need to check the nodes that
 					// have been added to the LB's backendpool, find the unwanted ones and
 					// delete them from the pool.
-					if !nodeNameInNodes(nodeName, nodes) {
+					shouldExcludeLoadBalancer, err := az.ShouldNodeExcludedFromLoadBalancer(nodeName)
+					if err != nil {
+						klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", nodeName, err)
+						return nil, err
+					}
+					if shouldExcludeLoadBalancer {
 						klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found unwanted node %s, decouple it from the LB", serviceName, wantLb, nodeName)
 						// construct a backendPool that only contains the IP config of the node to be deleted
 						backendIPConfigurationsToBeDeleted = append(backendIPConfigurationsToBeDeleted, network.InterfaceIPConfiguration{ID: to.StringPtr(ipConfID)})
@@ -1156,7 +1152,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 				}
 				vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
 				// decouple the backendPool from the node
-				err = az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted)
+				err = az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted, false)
 				if err != nil {
 					return nil, err
 				}
@@ -1440,7 +1436,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 			// do nothing for availability set
 			lb.BackendAddressPools = nil
 		}
-		err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools)
+		err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools, true)
 		if err != nil {
 			klog.Errorf("EnsureBackendPoolDeleted(%s) for service %s failed: %v", lbBackendPoolID, serviceName, err)
 			return nil, err
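Note on the deleteFromVMSet flag across these call sites: cleanBackendpoolForPrimarySLB and the load-balancer deletion path above pass true, since the whole backend pool is going away and the VMSS itself should be detached from it as well, while the per-node decouple path in reconcileLoadBalancer passes false, removing only the unwanted instances and leaving the VMSS in the pool. That split is the "only when necessary" behavior named in the commit title; the flag is acted on in scaleSet.EnsureBackendPoolDeleted further down.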
@@ -915,7 +915,12 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No
 			continue
 		}
 
-		if as.ShouldNodeExcludedFromLoadBalancer(node) {
+		shouldExcludeLoadBalancer, err := as.ShouldNodeExcludedFromLoadBalancer(localNodeName)
+		if err != nil {
+			klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", localNodeName, err)
+			return err
+		}
+		if shouldExcludeLoadBalancer {
 			klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
 			continue
 		}
@@ -940,7 +945,7 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No
 	}
 
 // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
-func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
+func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
 	// Returns nil if backend address pools already deleted.
 	if backendAddressPools == nil {
 		return nil
@@ -34,6 +34,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/sets"
 	cloudprovider "k8s.io/cloud-provider"
 	"k8s.io/legacy-cloud-providers/azure/clients/interfaceclient/mockinterfaceclient"
 	"k8s.io/legacy-cloud-providers/azure/clients/publicipclient/mockpublicipclient"
@@ -1414,6 +1415,7 @@ func TestStandardEnsureHostsInPool(t *testing.T) {
 		name           string
 		service        *v1.Service
 		nodes          []*v1.Node
+		excludeLBNodes []string
 		nodeName       string
 		backendPoolID  string
 		nicName        string
@@ -1439,9 +1441,10 @@ func TestStandardEnsureHostsInPool(t *testing.T) {
 			vmSetName: "availabilityset-1",
 		},
 		{
 			name:           "EnsureHostsInPool should skip if node is master node",
 			service:        &v1.Service{},
 			nodeName:       "vm2",
+			excludeLBNodes: []string{"vm2"},
 			nodes: []*v1.Node{
 				{
 					ObjectMeta: meta.ObjectMeta{
@@ -1455,9 +1458,10 @@ func TestStandardEnsureHostsInPool(t *testing.T) {
 			vmSetName: "availabilityset-1",
 		},
 		{
 			name:           "EnsureHostsInPool should skip if node is in external resource group",
 			service:        &v1.Service{},
 			nodeName:       "vm3",
+			excludeLBNodes: []string{"vm3"},
 			nodes: []*v1.Node{
 				{
 					ObjectMeta: meta.ObjectMeta{
@@ -1471,9 +1475,10 @@ func TestStandardEnsureHostsInPool(t *testing.T) {
 			vmSetName: "availabilityset-1",
 		},
 		{
 			name:           "EnsureHostsInPool should skip if node is unmanaged",
 			service:        &v1.Service{},
 			nodeName:       "vm4",
+			excludeLBNodes: []string{"vm4"},
 			nodes: []*v1.Node{
 				{
 					ObjectMeta: meta.ObjectMeta{
@@ -1518,6 +1523,7 @@ func TestStandardEnsureHostsInPool(t *testing.T) {
 	for _, test := range testCases {
 		cloud.Config.LoadBalancerSku = loadBalancerSkuStandard
 		cloud.Config.ExcludeMasterFromStandardLB = to.BoolPtr(true)
+		cloud.excludeLoadBalancerNodes = sets.NewString(test.excludeLBNodes...)
 
 		testVM := buildDefaultTestVirtualMachine(availabilitySetID, []string{test.nicID})
 		testNIC := buildDefaultTestInterface(false, []string{backendAddressPoolID})
@@ -1744,7 +1750,7 @@ func TestStandardEnsureBackendPoolDeleted(t *testing.T) {
 		mockNICClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
 		cloud.InterfacesClient = mockNICClient
 
-		err := cloud.VMSet.EnsureBackendPoolDeleted(&service, backendPoolID, vmSetName, test.backendAddressPools)
+		err := cloud.VMSet.EnsureBackendPoolDeleted(&service, backendPoolID, vmSetName, test.backendAddressPools, true)
 		assert.NoError(t, err, test.desc)
 	}
 }
@@ -3070,17 +3070,6 @@ func TestCanCombineSharedAndPrivateRulesInSameGroup(t *testing.T) {
 	}
 }
 
-// TODO: sanity check if the same IP address incorrectly gets put in twice?
-// (shouldn't happen but...)
-
-// func TestIfServiceIsEditedFromOwnRuleToSharedRuleThenOwnRuleIsDeletedAndSharedRuleIsCreated(t *testing.T) {
-// 	t.Error()
-// }
-
-// func TestIfServiceIsEditedFromSharedRuleToOwnRuleThenItIsRemovedFromSharedRuleAndOwnRuleIsCreated(t *testing.T) {
-// 	t.Error()
-// }
-
 func TestGetResourceGroupFromDiskURI(t *testing.T) {
 	tests := []struct {
 		diskURL string
@@ -3245,6 +3234,7 @@ func TestUpdateNodeCaches(t *testing.T) {
 	az.nodeZones = map[string]sets.String{zone: nodesInZone}
 	az.nodeResourceGroups = map[string]string{"prevNode": "rg"}
 	az.unmanagedNodes = sets.NewString("prevNode")
+	az.excludeLoadBalancerNodes = sets.NewString("prevNode")
 	az.nodeNames = sets.NewString("prevNode")
 
 	prevNode := v1.Node{
@@ -3263,14 +3253,16 @@ func TestUpdateNodeCaches(t *testing.T) {
 	assert.Equal(t, 0, len(az.nodeZones[zone]))
 	assert.Equal(t, 0, len(az.nodeResourceGroups))
 	assert.Equal(t, 0, len(az.unmanagedNodes))
+	assert.Equal(t, 0, len(az.excludeLoadBalancerNodes))
 	assert.Equal(t, 0, len(az.nodeNames))
 
 	newNode := v1.Node{
 		ObjectMeta: metav1.ObjectMeta{
 			Labels: map[string]string{
 				v1.LabelTopologyZone:         zone,
 				externalResourceGroupLabel:   "true",
 				managedByAzureLabel:          "false",
+				v1.LabelNodeExcludeBalancers: "true",
 			},
 			Name: "newNode",
 		},
@@ -3280,6 +3272,7 @@ func TestUpdateNodeCaches(t *testing.T) {
 	assert.Equal(t, 1, len(az.nodeZones[zone]))
 	assert.Equal(t, 1, len(az.nodeResourceGroups))
 	assert.Equal(t, 1, len(az.unmanagedNodes))
+	assert.Equal(t, 1, len(az.excludeLoadBalancerNodes))
 	assert.Equal(t, 1, len(az.nodeNames))
 }
 
@@ -65,7 +65,7 @@ type VMSet interface {
 	// participating in the specified LoadBalancer Backend Pool.
 	EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) (string, string, string, *compute.VirtualMachineScaleSetVM, error)
 	// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
-	EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error
+	EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error
 
 	// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
 	AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string, writeAcceleratorEnabled bool) error
@@ -745,11 +745,16 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
 			continue
 		}
 
-		if ss.ShouldNodeExcludedFromLoadBalancer(nodes[nx]) {
+		nodeName := nodes[nx].Name
+		shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(nodeName)
+		if err != nil {
+			klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", nodeName, err)
+			return nil, err
+		}
+		if shouldExcludeLoadBalancer {
 			continue
 		}
 
-		nodeName := nodes[nx].Name
 		ssName, _, _, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault)
 		if err != nil {
 			return nil, err
@@ -1106,7 +1111,12 @@ func (ss *scaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back
 			continue
 		}
 
-		if ss.ShouldNodeExcludedFromLoadBalancer(node) {
+		shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(node.Name)
+		if err != nil {
+			klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", node.Name, err)
+			return err
+		}
+		if shouldExcludeLoadBalancer {
 			klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", node.Name)
 			continue
 		}
@@ -1249,7 +1259,12 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
 			continue
 		}
 
-		if ss.ShouldNodeExcludedFromLoadBalancer(node) {
+		shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(localNodeName)
+		if err != nil {
+			klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", localNodeName, err)
+			return err
+		}
+		if shouldExcludeLoadBalancer {
 			klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
 			continue
 		}
@@ -1569,7 +1584,7 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
 }
 
 // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
-func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
+func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
 	// Returns nil if backend address pools already deleted.
 	if backendAddressPools == nil {
 		return nil
@@ -1681,9 +1696,11 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID,
 	}
 
 	// Ensure the backendPoolID is also deleted on VMSS itself.
-	err := ss.ensureBackendPoolDeletedFromVMSS(service, backendPoolID, vmSetName, ipConfigurationIDs)
-	if err != nil {
-		return err
+	if deleteFromVMSet {
+		err := ss.ensureBackendPoolDeletedFromVMSS(service, backendPoolID, vmSetName, ipConfigurationIDs)
+		if err != nil {
+			return err
+		}
 	}
 
 	isOperationSucceeded = true
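This is where the deleteFromVMSet flag threaded through the interface finally takes effect: per-instance removal (the UpdateVMs path earlier in this function) always runs, while the scale-set-level removal is now conditional. A minimal standalone sketch of the gating pattern, with illustrative names:

// ensurePoolDeleted sketches the control flow of the updated
// scaleSet.EnsureBackendPoolDeleted; both funcs stand in for the real steps.
func ensurePoolDeleted(deleteFromVMSet bool, deleteFromInstances, deleteFromScaleSetModel func() error) error {
	// Instance-level removal is unconditional.
	if err := deleteFromInstances(); err != nil {
		return err
	}
	// Scale-set-model removal only happens when the caller asked for it,
	// e.g. when the whole backend pool is being torn down.
	if deleteFromVMSet {
		return deleteFromScaleSetModel()
	}
	return nil
}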
@@ -28,6 +28,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
 	cloudprovider "k8s.io/cloud-provider"
 	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 	"k8s.io/legacy-cloud-providers/azure/clients/interfaceclient/mockinterfaceclient"
@@ -1406,12 +1407,14 @@ func TestGetAgentPoolScaleSets(t *testing.T) {
 
 	testCases := []struct {
 		description       string
+		excludeLBNodes    []string
 		nodes             []*v1.Node
 		expectedVMSSNames *[]string
 		expectedErr       error
 	}{
 		{
 			description: "getAgentPoolScaleSets should return the correct vmss names",
+			excludeLBNodes: []string{"vmss-vm-000000", "vmss-vm-000001"},
 			nodes: []*v1.Node{
 				{
 					ObjectMeta: metav1.ObjectMeta{
@@ -1433,11 +1436,30 @@ func TestGetAgentPoolScaleSets(t *testing.T) {
 			},
 			expectedVMSSNames: &[]string{"vmss"},
 		},
+		{
+			description:    "getAgentPoolScaleSets should return the correct vmss names",
+			excludeLBNodes: []string{"vmss-vm-000001"},
+			nodes: []*v1.Node{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:   "vmss-vm-000001",
+						Labels: map[string]string{v1.LabelNodeExcludeBalancers: "true"},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "vmss-vm-000002",
+					},
+				},
+			},
+			expectedVMSSNames: &[]string{"vmss"},
+		},
 	}
 
 	for _, test := range testCases {
 		ss, err := newTestScaleSet(ctrl)
 		assert.NoError(t, err, "unexpected error when creating test VMSS")
+		ss.excludeLoadBalancerNodes = sets.NewString(test.excludeLBNodes...)
 
 		expectedVMSS := buildTestVMSS(testVMSSName, "vmss-vm-")
 		mockVMSSClient := ss.cloud.VirtualMachineScaleSetsClient.(*mockvmssclient.MockInterface)
@@ -2471,7 +2493,7 @@ func TestEnsureBackendPoolDeleted(t *testing.T) {
 		mockVMSSVMClient.EXPECT().List(gomock.Any(), ss.ResourceGroup, testVMSSName, gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes()
 		mockVMSSVMClient.EXPECT().UpdateVMs(gomock.Any(), ss.ResourceGroup, testVMSSName, gomock.Any(), gomock.Any()).Return(test.vmClientErr).Times(test.expectedVMSSVMPutTimes)
 
-		err = ss.EnsureBackendPoolDeleted(&v1.Service{}, test.backendpoolID, testVMSSName, test.backendAddressPools)
+		err = ss.EnsureBackendPoolDeleted(&v1.Service{}, test.backendpoolID, testVMSSName, test.backendAddressPools, true)
 		assert.Equal(t, test.expectedErr, err != nil, test.description+", but an error occurs")
 	}
 }
@@ -2551,7 +2573,7 @@ func TestEnsureBackendPoolDeletedConcurrently(t *testing.T) {
 		i := i
 		id := id
 		testFunc = append(testFunc, func() error {
-			return ss.EnsureBackendPoolDeleted(&v1.Service{}, id, testVMSSNames[i], backendAddressPools)
+			return ss.EnsureBackendPoolDeleted(&v1.Service{}, id, testVMSSNames[i], backendAddressPools, true)
 		})
 	}
 	errs := utilerrors.AggregateGoroutines(testFunc...)
@@ -210,17 +210,17 @@ func (mr *MockVMSetMockRecorder) EnsureHostInPool(service, nodeName, backendPool
 }
 
 // EnsureBackendPoolDeleted mocks base method
-func (m *MockVMSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
+func (m *MockVMSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "EnsureBackendPoolDeleted", service, backendPoolID, vmSetName, backendAddressPools)
+	ret := m.ctrl.Call(m, "EnsureBackendPoolDeleted", service, backendPoolID, vmSetName, backendAddressPools, deleteFromVMSet)
 	ret0, _ := ret[0].(error)
 	return ret0
 }
 
 // EnsureBackendPoolDeleted indicates an expected call of EnsureBackendPoolDeleted
-func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeleted(service, backendPoolID, vmSetName, backendAddressPools interface{}) *gomock.Call {
+func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeleted(service, backendPoolID, vmSetName, backendAddressPools, deleteFromVMSet interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureBackendPoolDeleted", reflect.TypeOf((*MockVMSet)(nil).EnsureBackendPoolDeleted), service, backendPoolID, vmSetName, backendAddressPools)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureBackendPoolDeleted", reflect.TypeOf((*MockVMSet)(nil).EnsureBackendPoolDeleted), service, backendPoolID, vmSetName, backendAddressPools, deleteFromVMSet)
 }
 
 // AttachDisk mocks base method
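Because the mock is regenerated with the fifth parameter, any test stubbing EnsureBackendPoolDeleted must now record and match five arguments. A hedged sketch of that wiring, assuming the scope of this generated mock package (the test name and literal values are illustrative):

func TestEnsureBackendPoolDeletedMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockVMSet := NewMockVMSet(ctrl)
	// The fifth argument can be matched exactly, so a test can assert that a
	// code path passed deleteFromVMSet=false rather than true.
	mockVMSet.EXPECT().
		EnsureBackendPoolDeleted(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), false).
		Return(nil)

	if err := mockVMSet.EnsureBackendPoolDeleted(&v1.Service{}, "pool-id", "vmss", nil, false); err != nil {
		t.Fatal(err)
	}
}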