Mirror of https://github.com/k3s-io/kubernetes.git

Merge pull request #96111 from nilo19/feature/support-multiple-slb

Azure: Support multiple standard load balancers in one cluster

Commit 3e7e6c3455
@@ -197,6 +197,12 @@ type Config struct {
// "external": for external LoadBalancer
// "all": for both internal and external LoadBalancer
PreConfiguredBackendPoolLoadBalancerTypes string `json:"preConfiguredBackendPoolLoadBalancerTypes,omitempty" yaml:"preConfiguredBackendPoolLoadBalancerTypes,omitempty"`
// EnableMultipleStandardLoadBalancers determines the behavior of the standard load balancer. If set to true
// there would be one standard load balancer per VMAS or VMSS, which is similar to the behavior of the basic
// load balancer. Users could select the specific standard load balancer for their service by the service
// annotation `service.beta.kubernetes.io/azure-load-balancer-mode`. If set to false, the same standard load balancer
// would be shared by all services in the cluster. In this case, the mode selection annotation would be ignored.
EnableMultipleStandardLoadBalancers bool `json:"enableMultipleStandardLoadBalancers,omitempty" yaml:"enableMultipleStandardLoadBalancers,omitempty"`

// AvailabilitySetNodesCacheTTLInSeconds sets the Cache TTL for availabilitySetNodesCache
// if not set, will use default value
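To illustrate the new field: here is a minimal sketch (not part of this change) of how a Service could pin itself to one specific standard load balancer once `enableMultipleStandardLoadBalancers` is true in the cloud-provider config. The availability-set name used as the annotation value is hypothetical.

    package main

    import (
        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // exampleService asks the Azure cloud provider to place this Service's rules
    // on the standard LB belonging to the (hypothetical) VMAS named
    // "agentpool2-availabilitySet-00000000", via the mode annotation that
    // EnableMultipleStandardLoadBalancers honours.
    func exampleService() *v1.Service {
        return &v1.Service{
            ObjectMeta: metav1.ObjectMeta{
                Name: "my-service",
                Annotations: map[string]string{
                    "service.beta.kubernetes.io/azure-load-balancer-mode": "agentpool2-availabilitySet-00000000",
                },
            },
            Spec: v1.ServiceSpec{Type: v1.ServiceTypeLoadBalancer},
        }
    }

With a single standard load balancer (the default), the same annotation is simply ignored, as the comment above notes.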
@@ -274,6 +274,65 @@ func (az *Cloud) getLoadBalancerResourceGroup() string {
return az.ResourceGroup
}

// cleanBackendpoolForPrimarySLB decouples the unwanted nodes from the standard load balancer.
// This is needed because when migrating from single SLB to multiple SLBs, the existing
// SLB's backend pool contains nodes from different agent pools, while we only want the
// nodes from the primary agent pool to join the backend pool.
func (az *Cloud) cleanBackendpoolForPrimarySLB(primarySLB *network.LoadBalancer, service *v1.Service, clusterName string) (*network.LoadBalancer, error) {
lbBackendPoolName := getBackendPoolName(clusterName, service)
lbResourceGroup := az.getLoadBalancerResourceGroup()
lbBackendPoolID := az.getBackendPoolID(to.String(primarySLB.Name), lbResourceGroup, lbBackendPoolName)
newBackendPools := make([]network.BackendAddressPool, 0)
if primarySLB.LoadBalancerPropertiesFormat != nil && primarySLB.BackendAddressPools != nil {
newBackendPools = *primarySLB.BackendAddressPools
}
vmSetNameToBackendIPConfigurationsToBeDeleted := make(map[string][]network.InterfaceIPConfiguration)
for j, bp := range newBackendPools {
if strings.EqualFold(to.String(bp.Name), lbBackendPoolName) {
klog.V(2).Infof("cleanBackendpoolForPrimarySLB: checking the backend pool %s from standard load balancer %s", to.String(bp.Name), to.String(primarySLB.Name))
if bp.BackendAddressPoolPropertiesFormat != nil && bp.BackendIPConfigurations != nil {
for i := len(*bp.BackendIPConfigurations) - 1; i >= 0; i-- {
ipConf := (*bp.BackendIPConfigurations)[i]
ipConfigID := to.String(ipConf.ID)
_, vmSetName, err := az.VMSet.GetNodeNameByIPConfigurationID(ipConfigID)
if err != nil {
return nil, err
}
primaryVMSetName := az.VMSet.GetPrimaryVMSetName()
if !strings.EqualFold(primaryVMSetName, vmSetName) {
klog.V(2).Infof("cleanBackendpoolForPrimarySLB: found unwanted vmSet %s, decouple it from the LB", vmSetName)
// construct a backendPool that only contains the IP config of the node to be deleted
interfaceIPConfigToBeDeleted := network.InterfaceIPConfiguration{
ID: to.StringPtr(ipConfigID),
}
vmSetNameToBackendIPConfigurationsToBeDeleted[vmSetName] = append(vmSetNameToBackendIPConfigurationsToBeDeleted[vmSetName], interfaceIPConfigToBeDeleted)
*bp.BackendIPConfigurations = append((*bp.BackendIPConfigurations)[:i], (*bp.BackendIPConfigurations)[i+1:]...)
}
}
}
newBackendPools[j] = bp
break
}
}
for vmSetName, backendIPConfigurationsToBeDeleted := range vmSetNameToBackendIPConfigurationsToBeDeleted {
backendpoolToBeDeleted := &[]network.BackendAddressPool{
{
ID: to.StringPtr(lbBackendPoolID),
BackendAddressPoolPropertiesFormat: &network.BackendAddressPoolPropertiesFormat{
BackendIPConfigurations: &backendIPConfigurationsToBeDeleted,
},
},
}
// decouple the backendPool from the node
err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted)
if err != nil {
return nil, err
}
primarySLB.BackendAddressPools = &newBackendPools
}
return primarySLB, nil
}

// getServiceLoadBalancer gets the loadbalancer for the service if it already exists.
// If wantLb is TRUE then it selects a new load balancer.
// In case the selected load balancer does not exist it returns network.LoadBalancer struct
@@ -284,6 +343,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
var defaultLB *network.LoadBalancer
primaryVMSetName := az.VMSet.GetPrimaryVMSetName()
defaultLBName := az.getAzureLoadBalancerName(clusterName, primaryVMSetName, isInternal)
useMultipleSLBs := az.useStandardLoadBalancer() && az.EnableMultipleStandardLoadBalancers

existingLBs, err := az.ListLB(service)
if err != nil {
@@ -293,6 +353,13 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
// check if the service already has a load balancer
for i := range existingLBs {
existingLB := existingLBs[i]
if strings.EqualFold(to.String(existingLB.Name), clusterName) && useMultipleSLBs {
cleanedLB, err := az.cleanBackendpoolForPrimarySLB(&existingLB, service, clusterName)
if err != nil {
return nil, nil, false, err
}
existingLB = *cleanedLB
}
if strings.EqualFold(*existingLB.Name, defaultLBName) {
defaultLB = &existingLB
}
@@ -312,13 +379,15 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
}

hasMode, _, _ := getServiceLoadBalancerMode(service)
if az.useStandardLoadBalancer() && hasMode {
return nil, nil, false, fmt.Errorf("standard load balancer doesn't work with annotation %q", ServiceAnnotationLoadBalancerMode)
useSingleSLB := az.useStandardLoadBalancer() && !az.EnableMultipleStandardLoadBalancers
if useSingleSLB && hasMode {
klog.Warningf("single standard load balancer doesn't work with annotation %q, would ignore it", ServiceAnnotationLoadBalancerMode)
}

// service does not have a basic load balancer, select one.
// Standard load balancer doesn't need this because all backends nodes should be added to same LB.
if wantLb && !az.useStandardLoadBalancer() {
// Service does not have a load balancer, select one.
// Single standard load balancer doesn't need this because
// all backends nodes should be added to same LB.
if wantLb && !useSingleSLB {
// select new load balancer for service
selectedLB, exists, err := az.selectLoadBalancer(clusterName, service, &existingLBs, nodes)
if err != nil {
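The same SKU/flag combination recurs throughout this change. As a reading aid, the two checks the diff keeps inlining are equivalent to the following hypothetical helpers (they do not exist in the code itself):

    // useSingleSLB: one standard LB is shared by the whole cluster; the mode
    // annotation is ignored in this configuration.
    func useSingleSLB(isStandardSKU, enableMultipleSLBs bool) bool {
        return isStandardSKU && !enableMultipleSLBs
    }

    // useMultipleSLBs: one standard LB per VMAS/VMSS, selected per Service via
    // the azure-load-balancer-mode annotation, similar to the basic LB.
    func useMultipleSLBs(isStandardSKU, enableMultipleSLBs bool) bool {
        return isStandardSKU && enableMultipleSLBs
    }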
@@ -358,7 +427,7 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi
klog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
return nil, false, err
}
klog.Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - vmSetNames %v", clusterName, serviceName, isInternal, *vmSetNames)
klog.V(2).Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - vmSetNames %v", clusterName, serviceName, isInternal, *vmSetNames)

mapExistingLBs := map[string]network.LoadBalancer{}
for _, lb := range *existingLBs {
@@ -371,9 +440,16 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi
if !exists {
// select this LB as this is a new LB and will have minimum rules
// create tmp lb struct to hold metadata for the new load-balancer
var loadBalancerSKU network.LoadBalancerSkuName
if az.useStandardLoadBalancer() {
loadBalancerSKU = network.LoadBalancerSkuNameStandard
} else {
loadBalancerSKU = network.LoadBalancerSkuNameBasic
}
selectedLB = &network.LoadBalancer{
Name: &currLBName,
Location: &az.Location,
Sku: &network.LoadBalancerSku{Name: loadBalancerSKU},
LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{},
}

@@ -852,17 +928,17 @@ func (az *Cloud) isFrontendIPChanged(clusterName string, config network.Frontend
return config.PublicIPAddress != nil && !strings.EqualFold(to.String(pip.ID), to.String(config.PublicIPAddress.ID)), nil
}

// isFrontendIPConfigIsUnsafeToDelete checks if a frontend IP config is safe to be deleted.
// isFrontendIPConfigUnsafeToDelete checks if a frontend IP config is safe to be deleted.
// It is safe to be deleted if and only if there is no reference from other
// loadBalancing resources, including loadBalancing rules, outbound rules, inbound NAT rules
// and inbound NAT pools.
func (az *Cloud) isFrontendIPConfigIsUnsafeToDelete(
func (az *Cloud) isFrontendIPConfigUnsafeToDelete(
lb *network.LoadBalancer,
service *v1.Service,
fipConfigID *string,
) (bool, error) {
if lb == nil || fipConfigID == nil || *fipConfigID == "" {
return false, fmt.Errorf("isFrontendIPConfigIsUnsafeToDelete: incorrect parameters")
return false, fmt.Errorf("isFrontendIPConfigUnsafeToDelete: incorrect parameters")
}

var (
@@ -896,7 +972,7 @@ func (az *Cloud) isFrontendIPConfigIsUnsafeToDelete(
lbRule.FrontendIPConfiguration.ID != nil &&
strings.EqualFold(*lbRule.FrontendIPConfiguration.ID, *fipConfigID) {
if !az.serviceOwnsRule(service, *lbRule.Name) {
warningMsg := fmt.Sprintf("isFrontendIPConfigIsUnsafeToDelete: frontend IP configuration with ID %s on LB %s cannot be deleted because it is being referenced by load balancing rules of other services", *fipConfigID, *lb.Name)
warningMsg := fmt.Sprintf("isFrontendIPConfigUnsafeToDelete: frontend IP configuration with ID %s on LB %s cannot be deleted because it is being referenced by load balancing rules of other services", *fipConfigID, *lb.Name)
klog.Warning(warningMsg)
az.Event(service, v1.EventTypeWarning, "DeletingFrontendIPConfiguration", warningMsg)
unsafe = true
@@ -911,7 +987,7 @@ func (az *Cloud) isFrontendIPConfigIsUnsafeToDelete(
if outboundRule.OutboundRulePropertiesFormat != nil && outboundRule.FrontendIPConfigurations != nil {
outboundRuleFIPConfigs := *outboundRule.FrontendIPConfigurations
if found := findMatchedOutboundRuleFIPConfig(fipConfigID, outboundRuleFIPConfigs); found {
warningMsg := fmt.Sprintf("isFrontendIPConfigIsUnsafeToDelete: frontend IP configuration with ID %s on LB %s cannot be deleted because it is being referenced by the outbound rule %s", *fipConfigID, *lb.Name, *outboundRule.Name)
warningMsg := fmt.Sprintf("isFrontendIPConfigUnsafeToDelete: frontend IP configuration with ID %s on LB %s cannot be deleted because it is being referenced by the outbound rule %s", *fipConfigID, *lb.Name, *outboundRule.Name)
klog.Warning(warningMsg)
az.Event(service, v1.EventTypeWarning, "DeletingFrontendIPConfiguration", warningMsg)
unsafe = true
@@ -927,7 +1003,7 @@ func (az *Cloud) isFrontendIPConfigIsUnsafeToDelete(
inboundNatRule.FrontendIPConfiguration != nil &&
inboundNatRule.FrontendIPConfiguration.ID != nil &&
strings.EqualFold(*inboundNatRule.FrontendIPConfiguration.ID, *fipConfigID) {
warningMsg := fmt.Sprintf("isFrontendIPConfigIsUnsafeToDelete: frontend IP configuration with ID %s on LB %s cannot be deleted because it is being referenced by the inbound NAT rule %s", *fipConfigID, *lb.Name, *inboundNatRule.Name)
warningMsg := fmt.Sprintf("isFrontendIPConfigUnsafeToDelete: frontend IP configuration with ID %s on LB %s cannot be deleted because it is being referenced by the inbound NAT rule %s", *fipConfigID, *lb.Name, *inboundNatRule.Name)
klog.Warning(warningMsg)
az.Event(service, v1.EventTypeWarning, "DeletingFrontendIPConfiguration", warningMsg)
unsafe = true
@@ -942,7 +1018,7 @@ func (az *Cloud) isFrontendIPConfigIsUnsafeToDelete(
inboundNatPool.FrontendIPConfiguration != nil &&
inboundNatPool.FrontendIPConfiguration.ID != nil &&
strings.EqualFold(*inboundNatPool.FrontendIPConfiguration.ID, *fipConfigID) {
warningMsg := fmt.Sprintf("isFrontendIPConfigIsUnsafeToDelete: frontend IP configuration with ID %s on LB %s cannot be deleted because it is being referenced by the inbound NAT pool %s", *fipConfigID, *lb.Name, *inboundNatPool.Name)
warningMsg := fmt.Sprintf("isFrontendIPConfigUnsafeToDelete: frontend IP configuration with ID %s on LB %s cannot be deleted because it is being referenced by the inbound NAT pool %s", *fipConfigID, *lb.Name, *inboundNatPool.Name)
klog.Warning(warningMsg)
az.Event(service, v1.EventTypeWarning, "DeletingFrontendIPConfiguration", warningMsg)
unsafe = true
@@ -989,7 +1065,7 @@ func nodeNameInNodes(nodeName string, nodes []*v1.Node) bool {
return false
}

// This ensures load balancer exists and the frontend ip config is setup.
// reconcileLoadBalancer ensures load balancer exists and the frontend ip config is setup.
// This also reconciles the Service's Ports with the LoadBalancer config.
// This entails adding rules/probes for expected Ports and removing stale rules/ports.
// nodes only used if wantLb is true
@@ -1035,7 +1111,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
if bp.BackendAddressPoolPropertiesFormat != nil && bp.BackendIPConfigurations != nil {
for _, ipConf := range *bp.BackendIPConfigurations {
ipConfID := to.String(ipConf.ID)
nodeName, err := az.VMSet.GetNodeNameByIPConfigurationID(ipConfID)
nodeName, _, err := az.VMSet.GetNodeNameByIPConfigurationID(ipConfID)
if err != nil {
return nil, err
}
@@ -1106,7 +1182,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
return nil, err
}
if isServiceOwnsFrontendIP {
unsafe, err := az.isFrontendIPConfigIsUnsafeToDelete(lb, service, config.ID)
unsafe, err := az.isFrontendIPConfigUnsafeToDelete(lb, service, config.ID)
if err != nil {
return nil, err
}
@@ -37,10 +37,12 @@ import (

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/legacy-cloud-providers/azure/clients/interfaceclient/mockinterfaceclient"
"k8s.io/legacy-cloud-providers/azure/clients/loadbalancerclient/mockloadbalancerclient"
"k8s.io/legacy-cloud-providers/azure/clients/publicipclient/mockpublicipclient"
"k8s.io/legacy-cloud-providers/azure/clients/securitygroupclient/mocksecuritygroupclient"
"k8s.io/legacy-cloud-providers/azure/clients/subnetclient/mocksubnetclient"
"k8s.io/legacy-cloud-providers/azure/clients/vmclient/mockvmclient"
"k8s.io/legacy-cloud-providers/azure/retry"
)

@@ -1169,14 +1171,6 @@ func TestGetServiceLoadBalancer(t *testing.T) {
expectedExists: true,
expectedError: false,
},
{
desc: "getServiceLoadBalancer shall report error if there are loadbalancer mode annotations on a standard lb",
service: getTestService("service1", v1.ProtocolTCP, nil, false, 80),
annotations: map[string]string{ServiceAnnotationLoadBalancerMode: "__auto__"},
sku: "standard",
expectedExists: false,
expectedError: true,
},
{
desc: "getServiceLoadBalancer shall select the lb with minimum lb rules if wantLb is true, the sku is " +
"not standard and there are existing lbs already",
@@ -3310,7 +3304,7 @@ func TestIsFrontendIPConfigIsUnsafeToDelete(t *testing.T) {
unsafe bool
}{
{
desc: "isFrontendIPConfigIsUnsafeToDelete should return true if there is a " +
desc: "isFrontendIPConfigUnsafeToDelete should return true if there is a " +
"loadBalancing rule from other service referencing the frontend IP config",
existingLB: &network.LoadBalancer{
Name: to.StringPtr("lb"),
@@ -3328,7 +3322,7 @@ func TestIsFrontendIPConfigIsUnsafeToDelete(t *testing.T) {
unsafe: true,
},
{
desc: "isFrontendIPConfigIsUnsafeToDelete should return false if there is a " +
desc: "isFrontendIPConfigUnsafeToDelete should return false if there is a " +
"loadBalancing rule from this service referencing the frontend IP config",
existingLB: &network.LoadBalancer{
Name: to.StringPtr("lb"),
@@ -3348,7 +3342,7 @@ func TestIsFrontendIPConfigIsUnsafeToDelete(t *testing.T) {
unsafe: true,
},
{
desc: "isFrontendIPConfigIsUnsafeToDelete should return false if there is a " +
desc: "isFrontendIPConfigUnsafeToDelete should return false if there is a " +
"outbound rule referencing the frontend IP config",
existingLB: &network.LoadBalancer{
Name: to.StringPtr("lb"),
@@ -3365,7 +3359,7 @@ func TestIsFrontendIPConfigIsUnsafeToDelete(t *testing.T) {
},
},
{
desc: "isFrontendIPConfigIsUnsafeToDelete should return true if there is a " +
desc: "isFrontendIPConfigUnsafeToDelete should return true if there is a " +
"inbound NAT rule referencing the frontend IP config",
existingLB: &network.LoadBalancer{
Name: to.StringPtr("lb"),
@@ -3383,7 +3377,7 @@ func TestIsFrontendIPConfigIsUnsafeToDelete(t *testing.T) {
unsafe: true,
},
{
desc: "isFrontendIPConfigIsUnsafeToDelete should return true if there is a " +
desc: "isFrontendIPConfigUnsafeToDelete should return true if there is a " +
"inbound NAT pool referencing the frontend IP config",
existingLB: &network.LoadBalancer{
Name: to.StringPtr("lb"),
@@ -3403,7 +3397,7 @@ func TestIsFrontendIPConfigIsUnsafeToDelete(t *testing.T) {
}

for _, testCase := range testCases {
unsafe, _ := az.isFrontendIPConfigIsUnsafeToDelete(testCase.existingLB, &service, fipID)
unsafe, _ := az.isFrontendIPConfigUnsafeToDelete(testCase.existingLB, &service, fipID)
assert.Equal(t, testCase.unsafe, unsafe, testCase.desc)
}
}
@@ -3529,3 +3523,71 @@ func TestCheckLoadBalancerResourcesConflicted(t *testing.T) {
assert.Equal(t, testCase.expectedErr, err != nil, testCase.desc)
}
}

func TestCleanBackendpoolForPrimarySLB(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cloud := GetTestCloud(ctrl)
cloud.LoadBalancerSku = loadBalancerSkuStandard
cloud.EnableMultipleStandardLoadBalancers = true
cloud.PrimaryAvailabilitySetName = "agentpool1-availabilitySet-00000000"
clusterName := "testCluster"
service := getTestService("test", v1.ProtocolTCP, nil, false, 80)
lb := buildDefaultTestLB("testCluster", []string{
"/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/k8s-agentpool1-00000000-nic-1/ipConfigurations/ipconfig1",
"/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/k8s-agentpool2-00000000-nic-1/ipConfigurations/ipconfig1",
})
existingVMForAS1 := buildDefaultTestVirtualMachine("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/availabilitySets/agentpool1-availabilitySet-00000000", []string{"/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/k8s-agentpool1-00000000-nic-1"})
existingVMForAS2 := buildDefaultTestVirtualMachine("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/availabilitySets/agentpool2-availabilitySet-00000000", []string{"/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/k8s-agentpool2-00000000-nic-1"})
existingNIC := buildDefaultTestInterface(true, []string{"/subscriptions/sub/resourceGroups/gh/providers/Microsoft.Network/loadBalancers/testCluster/backendAddressPools/testCluster"})
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, "k8s-agentpool1-00000000-1", gomock.Any()).Return(existingVMForAS1, nil)
mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, "k8s-agentpool2-00000000-1", gomock.Any()).Return(existingVMForAS2, nil)
cloud.VirtualMachinesClient = mockVMClient
mockNICClient := mockinterfaceclient.NewMockInterface(ctrl)
mockNICClient.EXPECT().Get(gomock.Any(), "rg", "k8s-agentpool2-00000000-nic-1", gomock.Any()).Return(existingNIC, nil)
mockNICClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
cloud.InterfacesClient = mockNICClient
cleanedLB, err := cloud.cleanBackendpoolForPrimarySLB(&lb, &service, clusterName)
assert.NoError(t, err)
expectedLB := network.LoadBalancer{
Name: to.StringPtr("testCluster"),
LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{
BackendAddressPools: &[]network.BackendAddressPool{
{
Name: to.StringPtr("testCluster"),
BackendAddressPoolPropertiesFormat: &network.BackendAddressPoolPropertiesFormat{
BackendIPConfigurations: &[]network.InterfaceIPConfiguration{
{
ID: to.StringPtr("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/k8s-agentpool1-00000000-nic-1/ipConfigurations/ipconfig1"),
},
},
},
},
},
},
}
assert.Equal(t, expectedLB, *cleanedLB)
}

func buildDefaultTestLB(name string, backendIPConfigs []string) network.LoadBalancer {
expectedLB := network.LoadBalancer{
Name: to.StringPtr(name),
LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{
BackendAddressPools: &[]network.BackendAddressPool{
{
Name: to.StringPtr(name),
BackendAddressPoolPropertiesFormat: &network.BackendAddressPoolPropertiesFormat{
BackendIPConfigurations: &[]network.InterfaceIPConfiguration{},
},
},
},
},
}
backendIPConfigurations := make([]network.InterfaceIPConfiguration, 0)
for _, ipConfig := range backendIPConfigs {
backendIPConfigurations = append(backendIPConfigurations, network.InterfaceIPConfiguration{ID: to.StringPtr(ipConfig)})
}
(*expectedLB.BackendAddressPools)[0].BackendIPConfigurations = &backendIPConfigurations
return expectedLB
}
@@ -154,7 +154,11 @@ func (az *Cloud) getAzureLoadBalancerName(clusterName string, vmSetName string,
clusterName = az.LoadBalancerName
}
lbNamePrefix := vmSetName
if strings.EqualFold(vmSetName, az.VMSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() {
// The LB name prefix is set to the name of the cluster when:
// 1. the LB belongs to the primary agent pool.
// 2. using the single SLB;
useSingleSLB := az.useStandardLoadBalancer() && !az.EnableMultipleStandardLoadBalancers
if strings.EqualFold(vmSetName, az.VMSet.GetPrimaryVMSetName()) || useSingleSLB {
lbNamePrefix = clusterName
}
if isInternal {
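Condensed, the naming rule introduced here is: the LB name prefix falls back to the cluster name for the primary agent pool or when a single standard LB is used, and to the VMAS/VMSS name otherwise. A standalone sketch of that decision (the real code inlines it in getAzureLoadBalancerName; assumes the standard-library strings package is imported):

    func lbNamePrefix(clusterName, vmSetName, primaryVMSetName string, useSingleSLB bool) string {
        // Primary agent pool or single-SLB mode: reuse the cluster name.
        if strings.EqualFold(vmSetName, primaryVMSetName) || useSingleSLB {
            return clusterName
        }
        // Multiple SLBs or basic LB: each agent pool gets its own prefix.
        return vmSetName
    }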
@@ -215,7 +219,7 @@ func getPrimaryInterfaceID(machine compute.VirtualMachine) (string, error) {
}

for _, ref := range *machine.NetworkProfile.NetworkInterfaces {
if *ref.Primary {
if to.Bool(ref.Primary) {
return *ref.ID, nil
}
}
@@ -258,7 +262,7 @@ func getIPConfigByIPFamily(nic network.Interface, IPv6 bool) (*network.Interface
return &ref, nil
}
}
return nil, fmt.Errorf("failed to determine the ipconfig(IPv6=%v). nicname=%q", IPv6, *nic.Name)
return nil, fmt.Errorf("failed to determine the ipconfig(IPv6=%v). nicname=%q", IPv6, to.String(nic.Name))
}

func isInternalLoadBalancer(lb *network.LoadBalancer) bool {
@@ -658,11 +662,14 @@ func (as *availabilitySet) getAgentPoolAvailabilitySets(nodes []*v1.Node) (agent
// GetVMSetNames selects all possible availability sets or scale sets
// (depending vmType configured) for service load balancer, if the service has
// no loadbalancer mode annotation returns the primary VMSet. If service annotation
// for loadbalancer exists then return the eligible VMSet.
// for loadbalancer exists then returns the eligible VMSet. The mode selection
// annotation would be ignored when using one SLB per cluster.
func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) {
hasMode, isAuto, serviceAvailabilitySetNames := getServiceLoadBalancerMode(service)
if !hasMode {
// no mode specified in service annotation default to PrimaryAvailabilitySetName
useSingleSLB := as.useStandardLoadBalancer() && !as.EnableMultipleStandardLoadBalancers
if !hasMode || useSingleSLB {
// no mode specified in service annotation or use single SLB mode
// default to PrimaryAvailabilitySetName
availabilitySetNames = &[]string{as.Config.PrimaryAvailabilitySetName}
return availabilitySetNames, nil
}
@@ -746,11 +753,20 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri
// Node's real availability set name:
// - For basic SKU load balancer, errNotInVMSet should be returned if the node's
// availability set is mismatched with vmSetName.
// - For standard SKU load balancer, backend could belong to multiple VMAS, so we
// - For single standard SKU load balancer, backend could belong to multiple VMAS, so we
// don't check vmSet for it.
if vmSetName != "" && !as.useStandardLoadBalancer() {
expectedAvailabilitySetName := as.getAvailabilitySetID(nodeResourceGroup, vmSetName)
if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) {
// - For multiple standard SKU load balancers, the behavior is similar to the basic LB.
needCheck := false
if !as.useStandardLoadBalancer() {
// need to check the vmSet name when using the basic LB
needCheck = true
} else if as.EnableMultipleStandardLoadBalancers {
// need to check the vmSet name when using multiple standard LBs
needCheck = true
}
if vmSetName != "" && needCheck {
expectedAvailabilitySetID := as.getAvailabilitySetID(nodeResourceGroup, vmSetName)
if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetID) {
klog.V(3).Infof(
"GetPrimaryInterface: nic (%s) is not in the availabilitySet(%s)", nicName, vmSetName)
return network.Interface{}, "", errNotInVMSet
@@ -933,7 +949,7 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backend
errors := make([]error, 0)
for i := range ipConfigurationIDs {
ipConfigurationID := ipConfigurationIDs[i]
nodeName, err := as.GetNodeNameByIPConfigurationID(ipConfigurationID)
nodeName, _, err := as.GetNodeNameByIPConfigurationID(ipConfigurationID)
if err != nil {
klog.Errorf("Failed to GetNodeNameByIPConfigurationID(%s): %v", ipConfigurationID, err)
errors = append(errors, err)
@@ -951,11 +967,10 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backend
klog.Errorf("error: az.EnsureBackendPoolDeleted(%s), az.VMSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
return err
}
matches := vmasIDRE.FindStringSubmatch(vmasID)
if len(matches) != 2 {
vmasName, err := getAvailabilitySetNameByID(vmasID)
if err != nil {
return fmt.Errorf("EnsureBackendPoolDeleted: failed to parse the VMAS ID %s: %v", vmasID, err)
}
vmasName := matches[1]
// Only remove nodes belonging to specified vmSet to basic LB backends.
if !strings.EqualFold(vmasName, vmSetName) {
klog.V(2).Infof("EnsureBackendPoolDeleted: skipping the node %s belonging to another vm set %s", nodeName, vmasName)
@@ -976,7 +991,8 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backend
// found primary ip configuration
if ipConf.LoadBalancerBackendAddressPools != nil {
newLBAddressPools := *ipConf.LoadBalancerBackendAddressPools
for k, pool := range newLBAddressPools {
for k := len(newLBAddressPools) - 1; k >= 0; k-- {
pool := newLBAddressPools[k]
if strings.EqualFold(to.String(pool.ID), backendPoolID) {
newLBAddressPools = append(newLBAddressPools[:k], newLBAddressPools[k+1:]...)
break
@@ -1012,6 +1028,15 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backend
return nil
}

func getAvailabilitySetNameByID(asID string) (string, error) {
matches := vmasIDRE.FindStringSubmatch(asID)
if len(matches) != 2 {
return "", fmt.Errorf("getAvailabilitySetNameByID: failed to parse the VMAS ID %s", asID)
}
vmasName := matches[1]
return vmasName, nil
}

// get a storage account by UUID
func generateStorageAccountName(accountNamePrefix string) string {
uniqueID := strings.Replace(string(uuid.NewUUID()), "-", "", -1)
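A quick, hedged example of the extracted helper above: given a standard ARM availability-set ID (the subscription and resource-group values below are illustrative), it returns the trailing resource name, which callers then compare case-insensitively against the configured vmSetName.

    asID := "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/availabilitySets/agentpool1-availabilitySet-00000000"
    vmasName, err := getAvailabilitySetNameByID(asID)
    // vmasName == "agentpool1-availabilitySet-00000000", err == nil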
@@ -1022,15 +1047,32 @@ func generateStorageAccountName(accountNamePrefix string) string {
return accountName
}

// GetNodeNameByIPConfigurationID gets the node name by IP configuration ID.
func (as *availabilitySet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, error) {
// GetNodeNameByIPConfigurationID gets the node name and the availabilitySet name by IP configuration ID.
func (as *availabilitySet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, string, error) {
matches := nicIDRE.FindStringSubmatch(ipConfigurationID)
if len(matches) != 3 {
klog.V(4).Infof("Can not extract VM name from ipConfigurationID (%s)", ipConfigurationID)
return "", fmt.Errorf("invalid ip config ID %s", ipConfigurationID)
return "", "", fmt.Errorf("invalid ip config ID %s", ipConfigurationID)
}

prefix := matches[1]
suffix := matches[2]
return fmt.Sprintf("%s-%s", prefix, suffix), nil
nodeName := fmt.Sprintf("%s-%s", prefix, suffix)

vm, err := as.getVirtualMachine(types.NodeName(nodeName), azcache.CacheReadTypeDefault)
if err != nil {
return "", "", fmt.Errorf("cannot get the virtual machine by node name %s", nodeName)
}
asID := ""
if vm.VirtualMachineProperties != nil && vm.AvailabilitySet != nil {
asID = to.String(vm.AvailabilitySet.ID)
}
if asID == "" {
return "", "", fmt.Errorf("cannot get the availability set ID from the virtual machine with node name %s", nodeName)
}
asName, err := getAvailabilitySetNameByID(asID)
if err != nil {
return "", "", fmt.Errorf("cannot get the availability set name by the availability set ID %s", asID)
}
return nodeName, strings.ToLower(asName), nil
}
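To make the new return values concrete: for an availability-set node, the method now derives the node name from the NIC portion of the IP configuration ID and then looks up the VM to report its lower-cased availability-set name as well. The values below are illustrative, mirroring the new unit test further down.

    id := "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/k8s-agentpool1-00000000-nic-0/ipConfigurations/ipconfig1"
    nodeName, asName, err := as.GetNodeNameByIPConfigurationID(id)
    // nodeName == "k8s-agentpool1-00000000-0"
    // asName   == "agentpool1-availabilityset-00000000" (lower-cased), err == nil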
@@ -583,19 +583,8 @@ func TestGetStandardVMPrimaryInterfaceID(t *testing.T) {
expectedErrMsg error
}{
{
name: "GetPrimaryInterfaceID should get the only NIC ID",
vm: compute.VirtualMachine{
Name: to.StringPtr("vm1"),
VirtualMachineProperties: &compute.VirtualMachineProperties{
NetworkProfile: &compute.NetworkProfile{
NetworkInterfaces: &[]compute.NetworkInterfaceReference{
{
ID: to.StringPtr("/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic"),
},
},
},
},
},
name: "GetPrimaryInterfaceID should get the only NIC ID",
vm: buildDefaultTestVirtualMachine("", []string{"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic"}),
expectedNicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic",
},
{
@@ -1074,7 +1063,6 @@ func TestGetStandardVMZoneByNodeName(t *testing.T) {
func TestGetStandardVMSetNames(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cloud := GetTestCloud(ctrl)

asID := "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/availabilitySets/myAvailabilitySet"
testVM := compute.VirtualMachine{
@@ -1092,6 +1080,7 @@ func TestGetStandardVMSetNames(t *testing.T) {
vm []compute.VirtualMachine
service *v1.Service
nodes []*v1.Node
usingSingleSLBS bool
expectedVMSetNames *[]string
expectedErrMsg error
}{
@@ -1101,6 +1090,15 @@ func TestGetStandardVMSetNames(t *testing.T) {
service: &v1.Service{},
expectedVMSetNames: &[]string{"as"},
},
{
name: "GetVMSetNames should return the primary vm set name when using the single SLB",
vm: []compute.VirtualMachine{testVM},
service: &v1.Service{
ObjectMeta: meta.ObjectMeta{Annotations: map[string]string{ServiceAnnotationLoadBalancerMode: ServiceAnnotationLoadBalancerAutoModeValue}},
},
usingSingleSLBS: true,
expectedVMSetNames: &[]string{"as"},
},
{
name: "GetVMSetNames should return the correct as names if the service has auto mode annotation",
vm: []compute.VirtualMachine{testVM},
@@ -1164,6 +1162,11 @@ func TestGetStandardVMSetNames(t *testing.T) {
}

for _, test := range testCases {
cloud := GetTestCloud(ctrl)
if test.usingSingleSLBS {
cloud.EnableMultipleStandardLoadBalancers = false
cloud.LoadBalancerSku = loadBalancerSkuStandard
}
mockVMClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
mockVMClient.EXPECT().List(gomock.Any(), cloud.ResourceGroup).Return(test.vm, nil).AnyTimes()

@@ -1217,6 +1220,7 @@ func TestStandardEnsureHostInPool(t *testing.T) {
vmSetName string
nicProvisionState string
isStandardLB bool
useMultipleSLBs bool
expectedErrMsg error
}{
{
@@ -1227,6 +1231,16 @@ func TestStandardEnsureHostInPool(t *testing.T) {
nicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic1",
vmSetName: "availabilityset-1",
},
{
name: "EnsureHostInPool should return nil if node is not in VMSet when using multiple SLBs",
service: &v1.Service{},
nodeName: "vm1",
nicName: "nic1",
nicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic1",
vmSetName: "availabilityset-1",
isStandardLB: true,
useMultipleSLBs: true,
},
{
name: "EnsureHostInPool should report error if last segment of nicID is nil",
service: &v1.Service{},
@@ -1300,39 +1314,17 @@ func TestStandardEnsureHostInPool(t *testing.T) {
cloud.Config.LoadBalancerSku = loadBalancerSkuStandard
}

testVM := compute.VirtualMachine{
Name: to.StringPtr(string(test.nodeName)),
VirtualMachineProperties: &compute.VirtualMachineProperties{
AvailabilitySet: &compute.SubResource{ID: to.StringPtr(availabilitySetID)},
NetworkProfile: &compute.NetworkProfile{
NetworkInterfaces: &[]compute.NetworkInterfaceReference{
{
ID: to.StringPtr(test.nicID),
},
},
},
},
}
testNIC := network.Interface{
Name: to.StringPtr(test.nicName),
ID: to.StringPtr(test.nicID),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
ProvisioningState: to.StringPtr(test.nicProvisionState),
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
Name: to.StringPtr("ifconfig1"),
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
LoadBalancerBackendAddressPools: &[]network.BackendAddressPool{
{
ID: to.StringPtr(backendAddressPoolID),
},
},
},
},
},
},
if test.useMultipleSLBs {
cloud.EnableMultipleStandardLoadBalancers = true
}

testVM := buildDefaultTestVirtualMachine(availabilitySetID, []string{test.nicID})
testVM.Name = to.StringPtr(string(test.nodeName))
testNIC := buildDefaultTestInterface(false, []string{backendAddressPoolID})
testNIC.Name = to.StringPtr(test.nicName)
testNIC.ID = to.StringPtr(test.nicID)
testNIC.ProvisioningState = to.StringPtr(test.nicProvisionState)

mockVMClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, string(test.nodeName), gomock.Any()).Return(testVM, nil).AnyTimes()

@@ -1463,37 +1455,10 @@ func TestStandardEnsureHostsInPool(t *testing.T) {
cloud.Config.LoadBalancerSku = loadBalancerSkuStandard
cloud.Config.ExcludeMasterFromStandardLB = to.BoolPtr(true)

testVM := compute.VirtualMachine{
Name: to.StringPtr(string(test.nodeName)),
VirtualMachineProperties: &compute.VirtualMachineProperties{
AvailabilitySet: &compute.SubResource{ID: to.StringPtr(availabilitySetID)},
NetworkProfile: &compute.NetworkProfile{
NetworkInterfaces: &[]compute.NetworkInterfaceReference{
{
ID: to.StringPtr(test.nicID),
},
},
},
},
}
testNIC := network.Interface{
Name: to.StringPtr(test.nicName),
ID: to.StringPtr(test.nicID),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
Name: to.StringPtr("ifconfig1"),
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
LoadBalancerBackendAddressPools: &[]network.BackendAddressPool{
{
ID: to.StringPtr(backendAddressPoolID),
},
},
},
},
},
},
}
testVM := buildDefaultTestVirtualMachine(availabilitySetID, []string{test.nicID})
testNIC := buildDefaultTestInterface(false, []string{backendAddressPoolID})
testNIC.Name = to.StringPtr(test.nicName)
testNIC.ID = to.StringPtr(test.nicID)

mockVMClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, test.nodeName, gomock.Any()).Return(testVM, nil).AnyTimes()
@@ -1682,7 +1647,7 @@ func TestStandardEnsureBackendPoolDeleted(t *testing.T) {
existingNIC network.Interface
}{
{
desc: "",
desc: "EnsureBackendPoolDeleted should decouple the nic and the load balancer properly",
backendAddressPools: &[]network.BackendAddressPool{
{
ID: to.StringPtr(backendPoolID),
@@ -1695,37 +1660,10 @@ func TestStandardEnsureBackendPoolDeleted(t *testing.T) {
},
},
},
existingVM: compute.VirtualMachine{
VirtualMachineProperties: &compute.VirtualMachineProperties{
AvailabilitySet: &compute.SubResource{
ID: to.StringPtr("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/availabilitySets/as"),
},
NetworkProfile: &compute.NetworkProfile{
NetworkInterfaces: &[]compute.NetworkInterfaceReference{
{
ID: to.StringPtr("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/k8s-agentpool1-00000000-nic-1"),
},
},
},
},
},
existingNIC: network.Interface{
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
ProvisioningState: to.StringPtr("Succeeded"),
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
Primary: to.BoolPtr(true),
LoadBalancerBackendAddressPools: &[]network.BackendAddressPool{
{
ID: to.StringPtr("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/k8s-agentpool1-00000000-nic-1/ipConfigurations/ipconfig1"),
},
},
},
},
},
},
existingVM: buildDefaultTestVirtualMachine("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/availabilitySets/as", []string{
"/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/k8s-agentpool1-00000000-nic-1",
}),
existingNIC: buildDefaultTestInterface(true, []string{"/subscriptions/sub/resourceGroups/gh/providers/Microsoft.Network/loadBalancers/testCluster/backendAddressPools/testCluster"}),
},
}

@@ -1743,3 +1681,59 @@ func TestStandardEnsureBackendPoolDeleted(t *testing.T) {
assert.NoError(t, err, test.desc)
}
}

func buildDefaultTestInterface(isPrimary bool, lbBackendpoolIDs []string) network.Interface {
expectedNIC := network.Interface{
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
ProvisioningState: to.StringPtr("Succeeded"),
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
Primary: to.BoolPtr(isPrimary),
},
},
},
},
}
backendAddressPool := make([]network.BackendAddressPool, 0)
for _, id := range lbBackendpoolIDs {
backendAddressPool = append(backendAddressPool, network.BackendAddressPool{
ID: to.StringPtr(id),
})
}
(*expectedNIC.IPConfigurations)[0].LoadBalancerBackendAddressPools = &backendAddressPool
return expectedNIC
}

func buildDefaultTestVirtualMachine(asID string, nicIDs []string) compute.VirtualMachine {
expectedVM := compute.VirtualMachine{
VirtualMachineProperties: &compute.VirtualMachineProperties{
AvailabilitySet: &compute.SubResource{
ID: to.StringPtr(asID),
},
NetworkProfile: &compute.NetworkProfile{},
},
}
networkInterfaces := make([]compute.NetworkInterfaceReference, 0)
for _, nicID := range nicIDs {
networkInterfaces = append(networkInterfaces, compute.NetworkInterfaceReference{
ID: to.StringPtr(nicID),
})
}
expectedVM.VirtualMachineProperties.NetworkProfile.NetworkInterfaces = &networkInterfaces
return expectedVM
}

func TestStandardGetNodeNameByIPConfigurationID(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cloud := GetTestCloud(ctrl)
expectedVM := buildDefaultTestVirtualMachine("/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/availabilitySets/AGENTPOOL1-AVAILABILITYSET-00000000", []string{})
mockVMClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
mockVMClient.EXPECT().Get(gomock.Any(), "rg", "k8s-agentpool1-00000000-0", gomock.Any()).Return(expectedVM, nil)
ipConfigurationID := `/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/k8s-agentpool1-00000000-nic-0/ipConfigurations/ipconfig1`
nodeName, asName, err := cloud.VMSet.GetNodeNameByIPConfigurationID(ipConfigurationID)
assert.NoError(t, err)
assert.Equal(t, nodeName, "k8s-agentpool1-00000000-0")
assert.Equal(t, asName, "agentpool1-availabilityset-00000000")
}
@@ -79,6 +79,6 @@ type VMSet interface {
// GetPrivateIPsByNodeName returns a slice of all private ips assigned to node (ipv6 and ipv4)
GetPrivateIPsByNodeName(name string) ([]string, error)

// GetNodeNameByIPConfigurationID gets the node name by IP configuration ID.
GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, error)
// GetNodeNameByIPConfigurationID gets the nodeName and vmSetName by IP configuration ID.
GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, string, error)
}
@@ -742,8 +742,10 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
// for loadbalancer exists then return the eligible VMSet.
func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetNames *[]string, err error) {
hasMode, isAuto, serviceVMSetNames := getServiceLoadBalancerMode(service)
if !hasMode {
// no mode specified in service annotation default to PrimaryScaleSetName.
useSingleSLB := ss.useStandardLoadBalancer() && !ss.EnableMultipleStandardLoadBalancers
if !hasMode || useSingleSLB {
// no mode specified in service annotation or use single SLB mode
// default to PrimaryScaleSetName
scaleSetNames := &[]string{ss.Config.PrimaryScaleSetName}
return scaleSetNames, nil
}
@@ -938,9 +940,18 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam

// Check scale set name:
// - For basic SKU load balancer, return nil if the node's scale set is mismatched with vmSetName.
// - For standard SKU load balancer, backend could belong to multiple VMSS, so we
// - For single standard SKU load balancer, backend could belong to multiple VMSS, so we
// don't check vmSet for it.
if vmSetName != "" && !ss.useStandardLoadBalancer() && !strings.EqualFold(vmSetName, ssName) {
// - For multiple standard SKU load balancers, the behavior is similar to the basic load balancer
needCheck := false
if !ss.useStandardLoadBalancer() {
// need to check the vmSet name when using the basic LB
needCheck = true
} else if ss.EnableMultipleStandardLoadBalancers {
// need to check the vmSet name when using multiple standard LBs
needCheck = true
}
if vmSetName != "" && needCheck && !strings.EqualFold(vmSetName, ssName) {
klog.V(3).Infof("EnsureHostInPool skips node %s because it is not in the scaleSet %s", vmName, vmSetName)
return "", "", "", nil, nil
}
@@ -1053,8 +1064,9 @@ func (ss *scaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back
klog.V(2).Infof("ensureVMSSInPool: ensuring VMSS with backendPoolID %s", backendPoolID)
vmssNamesMap := make(map[string]bool)

// the standard load balancer supports multiple vmss in its backend while the basic sku doesn't
if ss.useStandardLoadBalancer() {
// the single standard load balancer supports multiple vmss in its backend while
// multiple standard load balancers and the basic load balancer doesn't
if ss.useStandardLoadBalancer() && !ss.EnableMultipleStandardLoadBalancers {
for _, node := range nodes {
if ss.excludeMasterNodesFromStandardLB() && isMasterNode(node) {
continue
@@ -1363,12 +1375,12 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromNode(nodeName, backendPoolID str
return nodeResourceGroup, ssName, instanceID, newVM, nil
}

// GetNodeNameByIPConfigurationID gets the node name by IP configuration ID.
func (ss *scaleSet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, error) {
// GetNodeNameByIPConfigurationID gets the node name and the VMSS name by IP configuration ID.
func (ss *scaleSet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, string, error) {
matches := vmssIPConfigurationRE.FindStringSubmatch(ipConfigurationID)
if len(matches) != 4 {
klog.V(4).Infof("Can not extract scale set name from ipConfigurationID (%s), assuming it is mananaged by availability set", ipConfigurationID)
return "", ErrorNotVmssInstance
klog.V(4).Infof("Can not extract scale set name from ipConfigurationID (%s), assuming it is managed by availability set", ipConfigurationID)
return "", "", ErrorNotVmssInstance
}

resourceGroup := matches[1]
@@ -1376,20 +1388,20 @@ func (ss *scaleSet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (st
instanceID := matches[3]
vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, azcache.CacheReadTypeUnsafe)
if err != nil {
return "", err
return "", "", err
}

if vm.OsProfile != nil && vm.OsProfile.ComputerName != nil {
return strings.ToLower(*vm.OsProfile.ComputerName), nil
return strings.ToLower(*vm.OsProfile.ComputerName), scaleSetName, nil
}

return "", nil
return "", "", nil
}

func getScaleSetAndResourceGroupNameByIPConfigurationID(ipConfigurationID string) (string, string, error) {
matches := vmssIPConfigurationRE.FindStringSubmatch(ipConfigurationID)
if len(matches) != 4 {
klog.V(4).Infof("Can not extract scale set name from ipConfigurationID (%s), assuming it is mananaged by availability set", ipConfigurationID)
klog.V(4).Infof("Can not extract scale set name from ipConfigurationID (%s), assuming it is managed by availability set", ipConfigurationID)
return "", "", ErrorNotVmssInstance
}

@@ -1529,7 +1541,7 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID,
}
}

nodeName, err := ss.GetNodeNameByIPConfigurationID(ipConfigurationID)
nodeName, _, err := ss.GetNodeNameByIPConfigurationID(ipConfigurationID)
if err != nil {
if err == ErrorNotVmssInstance { // Do nothing for the VMAS nodes.
continue
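For VMSS-managed nodes the analogous call now also surfaces the scale set name parsed out of the IP configuration ID, while the node name still comes from the VM's OS profile. The values below are illustrative, matching the updated test expectations that follow.

    id := "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/scaleset1/virtualMachines/0/networkInterfaces/scaleset1/ipConfigurations/ipconfig1"
    nodeName, vmssName, err := ss.GetNodeNameByIPConfigurationID(id)
    // nodeName == "vmssee6c2000000" (the VM's computer name in the test fixture)
    // vmssName == "scaleset1", err == nil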
@@ -573,19 +573,21 @@ func TestGetNodeNameByIPConfigurationID(t *testing.T) {
ipConfigurationIDTemplate := "/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s/networkInterfaces/%s/ipConfigurations/ipconfig1"

testCases := []struct {
description string
scaleSet string
vmList []string
ipConfigurationID string
expected string
expectError bool
description string
scaleSet string
vmList []string
ipConfigurationID string
expectedNodeName string
expectedScaleSetName string
expectError bool
}{
{
description: "GetNodeNameByIPConfigurationID should get node's Name when the node is existing",
scaleSet: "scaleset1",
ipConfigurationID: fmt.Sprintf(ipConfigurationIDTemplate, "scaleset1", "0", "scaleset1"),
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
expected: "vmssee6c2000000",
description: "GetNodeNameByIPConfigurationID should get node's Name when the node is existing",
scaleSet: "scaleset1",
ipConfigurationID: fmt.Sprintf(ipConfigurationIDTemplate, "scaleset1", "0", "scaleset1"),
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
expectedNodeName: "vmssee6c2000000",
expectedScaleSetName: "scaleset1",
},
{
description: "GetNodeNameByIPConfigurationID should return error for non-exist nodes",
@@ -618,14 +620,15 @@ func TestGetNodeNameByIPConfigurationID(t *testing.T) {
expectedVMs, _, _ := buildTestVirtualMachineEnv(ss.cloud, test.scaleSet, "", 0, test.vmList, "", false)
mockVMSSVMClient.EXPECT().List(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(expectedVMs, nil).AnyTimes()

nodeName, err := ss.GetNodeNameByIPConfigurationID(test.ipConfigurationID)
nodeName, scalesetName, err := ss.GetNodeNameByIPConfigurationID(test.ipConfigurationID)
if test.expectError {
assert.Error(t, err, test.description)
continue
}

assert.NoError(t, err, test.description)
assert.Equal(t, test.expected, nodeName, test.description)
assert.Equal(t, test.expectedNodeName, nodeName, test.description)
assert.Equal(t, test.expectedScaleSetName, scalesetName, test.description)
}
}

@@ -1415,6 +1418,7 @@ func TestGetVMSetNames(t *testing.T) {
description string
service *v1.Service
nodes []*v1.Node
useSingleSLB bool
expectedVMSetNames *[]string
expectedErr error
}{
@@ -1423,6 +1427,14 @@ func TestGetVMSetNames(t *testing.T) {
service: &v1.Service{},
expectedVMSetNames: &[]string{"vmss"},
},
{
description: "GetVMSetNames should return the primary vm set name when using the single SLB",
service: &v1.Service{
ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{ServiceAnnotationLoadBalancerMode: ServiceAnnotationLoadBalancerAutoModeValue}},
},
useSingleSLB: true,
expectedVMSetNames: &[]string{"vmss"},
},
{
description: "GetVMSetNames should return nil if the service has auto mode annotation",
service: &v1.Service{
@@ -1470,6 +1482,11 @@ func TestGetVMSetNames(t *testing.T) {
ss, err := newTestScaleSet(ctrl)
assert.NoError(t, err, "unexpected error when creating test VMSS")

if test.useSingleSLB {
ss.EnableMultipleStandardLoadBalancers = false
ss.LoadBalancerSku = loadBalancerSkuStandard
}

expectedVMSS := buildTestVMSS(testVMSSName, "vmss-vm-")
mockVMSSClient := ss.cloud.VirtualMachineScaleSetsClient.(*mockvmssclient.MockInterface)
mockVMSSClient.EXPECT().List(gomock.Any(), ss.ResourceGroup).Return([]compute.VirtualMachineScaleSet{expectedVMSS}, nil).AnyTimes()
@@ -1663,6 +1680,7 @@ func TestEnsureHostInPool(t *testing.T) {
vmSetName string
isBasicLB bool
isNilVMNetworkConfigs bool
useMultipleSLBs bool
expectedNodeResourceGroup string
expectedVMSSName string
expectedInstanceID string
@@ -1675,6 +1693,12 @@ func TestEnsureHostInPool(t *testing.T) {
vmSetName: "vmss-1",
isBasicLB: true,
},
{
description: "EnsureHostInPool should skip the current node if the vmSetName is not equal to the node's vmss name and multiple SLBs are used",
nodeName: "vmss-vm-000000",
vmSetName: "vmss-1",
useMultipleSLBs: true,
},
{
description: "EnsureHostInPool should skip the current node if the network configs of the VMSS VM is nil",
nodeName: "vmss-vm-000000",
@@ -1749,6 +1773,10 @@ func TestEnsureHostInPool(t *testing.T) {
ss.LoadBalancerSku = loadBalancerSkuStandard
}

if test.useMultipleSLBs {
ss.EnableMultipleStandardLoadBalancers = true
}

expectedVMSS := buildTestVMSS(testVMSSName, "vmss-vm-")
mockVMSSClient := ss.cloud.VirtualMachineScaleSetsClient.(*mockvmssclient.MockInterface)
mockVMSSClient.EXPECT().List(gomock.Any(), ss.ResourceGroup).Return([]compute.VirtualMachineScaleSet{expectedVMSS}, nil).AnyTimes()
@@ -1798,6 +1826,7 @@ func TestEnsureVMSSInPool(t *testing.T) {
expectedPutVMSS bool
setIPv6Config bool
clusterIP string
useMultipleSLBs bool
expectedErr error
}{
{
@@ -1907,6 +1936,34 @@ func TestEnsureVMSSInPool(t *testing.T) {
clusterIP: "fd00::e68b",
expectedPutVMSS: true,
},
{
description: "ensureVMSSInPool should work for the basic load balancer",
nodes: []*v1.Node{
{
Spec: v1.NodeSpec{
ProviderID: "azure:///subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/vmss/virtualMachines/0",
},
},
},
vmSetName: testVMSSName,
isBasicLB: true,
backendPoolID: testLBBackendpoolID1,
expectedPutVMSS: true,
},
{
description: "ensureVMSSInPool should work for multiple standard load balancers",
nodes: []*v1.Node{
{
Spec: v1.NodeSpec{
ProviderID: "azure:///subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/vmss/virtualMachines/0",
},
},
},
vmSetName: testVMSSName,
backendPoolID: testLBBackendpoolID1,
useMultipleSLBs: true,
expectedPutVMSS: true,
},
}

for _, test := range testCases {
@@ -293,12 +293,13 @@ func (mr *MockVMSetMockRecorder) GetPrivateIPsByNodeName(name interface{}) *gomo
}

// GetNodeNameByIPConfigurationID mocks base method
func (m *MockVMSet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, error) {
func (m *MockVMSet) GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetNodeNameByIPConfigurationID", ipConfigurationID)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
ret1, _ := ret[1].(string)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}

// GetNodeNameByIPConfigurationID indicates an expected call of GetNodeNameByIPConfigurationID