Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 19:56:01 +00:00

Merge pull request #64180 from feiskyer/verbose

Automatic merge from submit-queue (batch tested with PRs 63434, 64172, 63975, 64180, 63755). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Check SessionAffinity for Azure load balancer

**What this PR does / why we need it**:
Currently, Azure load balancer rules are compared by name only, and the name does not carry affinity information, so the loadDistribution property is never updated when a Service's SessionAffinity changes. This PR fixes that by also comparing rule properties, and adds more verbose logs to make it easier to understand what is happening.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #63908

**Special notes for your reviewer**:
Cherry pick to 1.10.

**Release note**:

```release-note
Fix SessionAffinity not updated issue for Azure load balancer
```

commit 2feb093783
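The core of the change is in the findRule/equalLoadBalancingRulePropertiesFormat hunks below: rules used to be matched by name alone, so a change to a Service's SessionAffinity (which surfaces as the Azure rule's LoadDistribution) never made the rules compare unequal, and the update was skipped. As an illustrative sketch only (this helper does not appear in the diff; `network` is the Azure SDK package and `v1` is k8s.io/api/core/v1, both used throughout the patch), the mapping being reconciled is roughly:

```go
// loadDistributionFor is a hypothetical helper, named here only for
// illustration: ClientIP session affinity corresponds to Azure's SourceIP
// load distribution; anything else gets the default 5-tuple hash.
func loadDistributionFor(service *v1.Service) network.LoadDistribution {
	if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
		return network.SourceIP
	}
	return network.Default
}
```

Because the old findRule returned true on a bare name match, a rule whose only difference was LoadDistribution was treated as already up to date.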
@@ -55,10 +55,10 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua
 			return true, cloudprovider.InstanceNotFound
 		}
 		if retryErr != nil {
-			glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
+			glog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr)
 			return false, nil
 		}
-		glog.V(2).Info("backoff: success")
+		glog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name)
 		return true, nil
 	})
 	if err == wait.ErrWaitTimeout {

@@ -99,10 +99,10 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string,
 		var retryErr error
 		ip, publicIP, retryErr = az.getIPForMachine(name)
 		if retryErr != nil {
-			glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
+			glog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
 			return false, nil
 		}
-		glog.V(2).Info("backoff: success")
+		glog.V(2).Infof("GetIPForMachineWithRetry(%s): backoff success", name)
 		return true, nil
 	})
 	return ip, publicIP, err

@@ -304,11 +304,11 @@ func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName st
 // A wait.ConditionFunc function to deal with common HTTP backoff response conditions
 func processRetryResponse(resp autorest.Response, err error) (bool, error) {
 	if isSuccessHTTPResponse(resp) {
-		glog.V(2).Infof("backoff: success, HTTP response=%d", resp.StatusCode)
+		glog.V(2).Infof("processRetryResponse: backoff success, HTTP response=%d", resp.StatusCode)
 		return true, nil
 	}
 	if shouldRetryAPIRequest(resp, err) {
-		glog.Errorf("backoff: failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
+		glog.Errorf("processRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
 		// suppress the error object so that backoff process continues
 		return false, nil
 	}

@@ -361,7 +361,7 @@ func processHTTPRetryResponse(resp *http.Response, err error) (bool, error) {
 	}

 	if shouldRetryHTTPRequest(resp, err) {
-		glog.Errorf("backoff: failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
+		glog.Errorf("processHTTPRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
 		// suppress the error object so that backoff process continues
 		return false, nil
 	}
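For readers unfamiliar with the pattern in these hunks: each closure is a `wait.ConditionFunc` driven by the backoff helpers in k8s.io/apimachinery, where returning `(false, nil)` suppresses the error and retries, `(true, nil)` stops with success, and a non-nil error aborts immediately. A minimal, self-contained sketch (not code from this PR; the backoff parameters are invented for the example):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	// Example-only backoff: 4 steps starting at 100ms, doubling each time.
	backoff := wait.Backoff{Duration: 100 * time.Millisecond, Factor: 2.0, Steps: 4}
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempts++
		if attempts < 3 {
			return false, nil // transient failure: suppress the error, retry
		}
		return true, nil // success: stop retrying
	})
	if err == wait.ErrWaitTimeout {
		fmt.Println("condition never succeeded within the step budget")
	}
	fmt.Printf("attempts=%d err=%v\n", attempts, err)
}
```

This is also why the `if err == wait.ErrWaitTimeout` check appears right after the closure in the first hunk.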
@@ -33,7 +33,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
 	addressGetter := func(nodeName types.NodeName) ([]v1.NodeAddress, error) {
 		ip, publicIP, err := az.GetIPForMachineWithRetry(nodeName)
 		if err != nil {
-			glog.V(2).Infof("NodeAddresses(%s) abort backoff", nodeName)
+			glog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err)
 			return nil, err
 		}

@@ -20,6 +20,7 @@ import (
 	"context"
 	"fmt"
 	"math"
+	"reflect"
 	"strconv"
 	"strings"

@@ -134,7 +135,7 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser
 	if lbStatus != nil && len(lbStatus.Ingress) > 0 {
 		serviceIP = &lbStatus.Ingress[0].IP
 	}
-	glog.V(10).Infof("Calling reconcileSecurityGroup from EnsureLoadBalancer for %s with IP %s, wantLb = true", service.Name, logSafe(serviceIP))
+	glog.V(2).Infof("EnsureLoadBalancer: reconciling security group for service %q with IP %q, wantLb = true", serviceName, logSafe(serviceIP))
 	if _, err := az.reconcileSecurityGroup(clusterName, service, serviceIP, true /* wantLb */); err != nil {
 		return nil, err
 	}

@@ -164,7 +165,7 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri
 		return err
 	}

-	glog.V(10).Infof("Calling reconcileSecurityGroup from EnsureLoadBalancerDeleted for %s with IP %s, wantLb = false", service.Name, serviceIPToCleanup)
+	glog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IP %q, wantLb = false", serviceName, serviceIPToCleanup)
 	if _, err := az.reconcileSecurityGroup(clusterName, service, &serviceIPToCleanup, false /* wantLb */); err != nil {
 		return err
 	}

@@ -261,7 +262,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
 func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) {
 	isInternal := requiresInternalLoadBalancer(service)
 	serviceName := getServiceName(service)
-	glog.V(3).Infof("selectLoadBalancer(%s): isInternal(%s) - start", serviceName, isInternal)
+	glog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%s) - start", serviceName, isInternal)
 	vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes)
 	if err != nil {
 		glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)

@@ -317,10 +318,11 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi

 func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.LoadBalancer) (status *v1.LoadBalancerStatus, err error) {
 	if lb == nil {
-		glog.V(10).Info("getServiceLoadBalancerStatus lb is nil")
+		glog.V(10).Info("getServiceLoadBalancerStatus: lb is nil")
 		return nil, nil
 	}
 	if lb.FrontendIPConfigurations == nil || *lb.FrontendIPConfigurations == nil {
+		glog.V(10).Info("getServiceLoadBalancerStatus: lb.FrontendIPConfigurations is nil")
 		return nil, nil
 	}
 	isInternal := requiresInternalLoadBalancer(service)

@@ -352,6 +354,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L
 			}
 		}

+			glog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration %q for service %q", *lbIP, lbFrontendIPConfigName, serviceName)
 			return &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: *lbIP}}}, nil
 		}
 	}

@@ -445,7 +448,7 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
 		}
 	}

-	glog.V(3).Infof("ensure(%s): pip(%s) - creating", serviceName, *pip.Name)
+	glog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name)
 	glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name)
 	err = az.CreateOrUpdatePIPWithRetry(pipResourceGroup, pip)
 	if err != nil {

@@ -471,13 +474,14 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
 func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error) {
 	isInternal := requiresInternalLoadBalancer(service)
 	serviceName := getServiceName(service)
-	glog.V(2).Infof("reconcileLoadBalancer(%s) - wantLb(%t): started", serviceName, wantLb)
+	glog.V(2).Infof("reconcileLoadBalancer for service(%s) - wantLb(%t): started", serviceName, wantLb)
 	lb, _, _, err := az.getServiceLoadBalancer(service, clusterName, nodes, wantLb)
 	if err != nil {
+		glog.Errorf("reconcileLoadBalancer: failed to get load balancer for service %q, error: %v", serviceName, err)
 		return nil, err
 	}
 	lbName := *lb.Name
-	glog.V(2).Infof("reconcileLoadBalancer(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb)
+	glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb)
 	lbFrontendIPConfigName := getFrontendIPConfigName(service, subnet(service))
 	lbFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbFrontendIPConfigName)
 	lbBackendPoolName := getBackendPoolName(clusterName)

@@ -495,18 +499,18 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 	foundBackendPool := false
 	for _, bp := range newBackendPools {
 		if strings.EqualFold(*bp.Name, lbBackendPoolName) {
-			glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb)
+			glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb)
 			foundBackendPool = true
 			break
 		} else {
-			glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name)
+			glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name)
 		}
 	}
 	if !foundBackendPool {
 		newBackendPools = append(newBackendPools, network.BackendAddressPool{
 			Name: to.StringPtr(lbBackendPoolName),
 		})
-		glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb)
+		glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb)

 		dirtyLb = true
 		lb.BackendAddressPools = &newBackendPools

@@ -524,7 +528,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 	for i := len(newConfigs) - 1; i >= 0; i-- {
 		config := newConfigs[i]
 		if serviceOwnsFrontendIP(config, service) {
-			glog.V(3).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName)
+			glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName)
 			newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
 			dirtyConfigs = true
 		}

@@ -534,7 +538,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 	for i := len(newConfigs) - 1; i >= 0; i-- {
 		config := newConfigs[i]
 		if serviceOwnsFrontendIP(config, service) && !strings.EqualFold(*config.Name, lbFrontendIPConfigName) {
-			glog.V(3).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name)
+			glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name)
 			newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
 			dirtyConfigs = true
 		}

@@ -598,7 +602,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 			Name: to.StringPtr(lbFrontendIPConfigName),
 			FrontendIPConfigurationPropertiesFormat: fipConfigurationProperties,
 		})
-		glog.V(10).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName)
+		glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName)
 		dirtyConfigs = true
 	}
 }

@@ -699,15 +703,15 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 	for i := len(updatedProbes) - 1; i >= 0; i-- {
 		existingProbe := updatedProbes[i]
 		if serviceOwnsRule(service, *existingProbe.Name) {
-			glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name)
+			glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name)
 			keepProbe := false
 			if findProbe(expectedProbes, existingProbe) {
-				glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name)
+				glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name)
 				keepProbe = true
 			}
 			if !keepProbe {
 				updatedProbes = append(updatedProbes[:i], updatedProbes[i+1:]...)
-				glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name)
+				glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name)
 				dirtyProbes = true
 			}
 		}

@@ -716,11 +720,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 	for _, expectedProbe := range expectedProbes {
 		foundProbe := false
 		if findProbe(updatedProbes, expectedProbe) {
-			glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name)
+			glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name)
 			foundProbe = true
 		}
 		if !foundProbe {
-			glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name)
+			glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name)
 			updatedProbes = append(updatedProbes, expectedProbe)
 			dirtyProbes = true
 		}

@@ -741,13 +745,13 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 		existingRule := updatedRules[i]
 		if serviceOwnsRule(service, *existingRule.Name) {
 			keepRule := false
-			glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
+			glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
 			if findRule(expectedRules, existingRule) {
-				glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
+				glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
 				keepRule = true
 			}
 			if !keepRule {
-				glog.V(3).Infof("reconcile(%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
+				glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
 				updatedRules = append(updatedRules[:i], updatedRules[i+1:]...)
 				dirtyRules = true
 			}

@@ -757,11 +761,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 	for _, expectedRule := range expectedRules {
 		foundRule := false
 		if findRule(updatedRules, expectedRule) {
-			glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
+			glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
 			foundRule = true
 		}
 		if !foundRule {
-			glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name)
+			glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name)
 			updatedRules = append(updatedRules, expectedRule)
 			dirtyRules = true
 		}

@@ -778,7 +782,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 	if lb.FrontendIPConfigurations == nil || len(*lb.FrontendIPConfigurations) == 0 {
 		// When FrontendIPConfigurations is empty, we need to delete the Azure load balancer resource itself,
 		// because an Azure load balancer cannot have an empty FrontendIPConfigurations collection
-		glog.V(3).Infof("delete(%s): lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName)
+		glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName)

 		// Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB.
 		vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)

@@ -791,18 +795,18 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 		glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): end", lbBackendPoolID, vmSetName)

 		// Remove the LB.
-		glog.V(10).Infof("az.DeleteLBWithRetry(%q): start", lbName)
+		glog.V(10).Infof("reconcileLoadBalancer: az.DeleteLBWithRetry(%q): start", lbName)
 		err = az.DeleteLBWithRetry(lbName)
 		if err != nil {
-			glog.V(2).Infof("delete(%s) abort backoff: lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName)
+			glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName)
 			return nil, err
 		}
 		glog.V(10).Infof("az.DeleteLBWithRetry(%q): end", lbName)
 	} else {
-		glog.V(3).Infof("ensure(%s): lb(%s) - updating", serviceName, lbName)
+		glog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName)
 		err := az.CreateOrUpdateLBWithRetry(*lb)
 		if err != nil {
-			glog.V(2).Infof("ensure(%s) abort backoff: lb(%s) - updating", serviceName, lbName)
+			glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - updating", serviceName, lbName)
 			return nil, err
 		}

@@ -810,7 +814,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 		// Refresh updated lb which will be used later in other places.
 		newLB, exist, err := az.getAzureLoadBalancer(lbName)
 		if err != nil {
-			glog.V(2).Infof("getAzureLoadBalancer(%s) failed: %v", lbName, err)
+			glog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err)
 			return nil, err
 		}
 		if !exist {

@@ -830,7 +834,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 		}
 	}

-	glog.V(2).Infof("ensure(%s): lb(%s) finished", serviceName, lbName)
+	glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) finished", serviceName, lbName)
 	return lb, nil
 }

@@ -1024,7 +1028,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,

 	if dirtySg {
 		sg.SecurityRules = &updatedRules
-		glog.V(3).Infof("ensure(%s): sg(%s) - updating", serviceName, *sg.Name)
+		glog.V(2).Infof("reconcileSecurityGroup for service(%s): sg(%s) - updating", serviceName, *sg.Name)
 		glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name)
 		err := az.CreateOrUpdateSGWithRetry(sg)
 		if err != nil {

@@ -1212,7 +1216,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want
 			// This is the only case we should preserve the
 			// Public ip resource with match service tag
 		} else {
-			glog.V(2).Infof("ensure(%s): pip(%s) - deleting", serviceName, pipName)
+			glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - deleting", serviceName, pipName)
 			glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName)
 			err = az.DeletePublicIPWithRetry(pipResourceGroup, pipName)
 			if err != nil {

@@ -1226,7 +1230,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want
 			if err != nil {
 				return nil, err
 			}
-			glog.V(2).Infof("ensure(%s): pip(%s) - finished", serviceName, pipName)
+			glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - finished", serviceName, pipName)
 		}
 	}

@@ -1255,13 +1259,30 @@ func findProbe(probes []network.Probe, probe network.Probe) bool {

 func findRule(rules []network.LoadBalancingRule, rule network.LoadBalancingRule) bool {
 	for _, existingRule := range rules {
-		if strings.EqualFold(*existingRule.Name, *rule.Name) {
+		if strings.EqualFold(*existingRule.Name, *rule.Name) &&
+			equalLoadBalancingRulePropertiesFormat(existingRule.LoadBalancingRulePropertiesFormat, rule.LoadBalancingRulePropertiesFormat) {
 			return true
 		}
 	}
 	return false
 }

+// equalLoadBalancingRulePropertiesFormat checks whether the provided LoadBalancingRulePropertiesFormat are equal.
+// Note: only fields used in reconcileLoadBalancer are considered.
+func equalLoadBalancingRulePropertiesFormat(s, t *network.LoadBalancingRulePropertiesFormat) bool {
+	if s == nil || t == nil {
+		return false
+	}
+
+	return reflect.DeepEqual(s.Protocol, t.Protocol) &&
+		reflect.DeepEqual(s.FrontendIPConfiguration, t.FrontendIPConfiguration) &&
+		reflect.DeepEqual(s.BackendAddressPool, t.BackendAddressPool) &&
+		reflect.DeepEqual(s.LoadDistribution, t.LoadDistribution) &&
+		reflect.DeepEqual(s.FrontendPort, t.FrontendPort) &&
+		reflect.DeepEqual(s.BackendPort, t.BackendPort) &&
+		reflect.DeepEqual(s.EnableFloatingIP, t.EnableFloatingIP)
+}
+
 // This compares rule's Name, Protocol, SourcePortRange, DestinationPortRange, SourceAddressPrefix, Access, and Direction.
 // Note that it compares rule's DestinationAddressPrefix only when it's not consolidated rule as such rule does not have DestinationAddressPrefix defined.
 // We intentionally do not compare DestinationAddressPrefixes in consolidated case because reconcileSecurityRule has to consider the two rules equal,
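One detail worth noting about `equalLoadBalancingRulePropertiesFormat` above: the compared fields (FrontendPort, BackendPort, EnableFloatingIP, and so on) are pointers, and `reflect.DeepEqual` on pointers compares the pointed-to values rather than the addresses, which is what makes the comparison behave as intended. A standalone sketch of that semantics (illustration only, not code from this PR):

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	a, b := int32(80), int32(80)
	// Distinct pointers to equal values still compare equal.
	fmt.Println(reflect.DeepEqual(&a, &b)) // true

	c := int32(443)
	fmt.Println(reflect.DeepEqual(&a, &c)) // false

	// A typed nil never DeepEquals a non-nil pointer, consistent with the
	// helper returning false when either side is nil.
	var d *int32
	fmt.Println(reflect.DeepEqual(&a, d)) // false
}
```

Returning false when either side is nil errs toward treating the rules as different, which triggers an update rather than silently keeping a stale rule.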
@@ -97,3 +97,116 @@ func TestFindProbe(t *testing.T) {
 		assert.Equal(t, test.expected, findResult, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
 	}
 }
+
+func TestFindRule(t *testing.T) {
+	tests := []struct {
+		msg          string
+		existingRule []network.LoadBalancingRule
+		curRule      network.LoadBalancingRule
+		expected     bool
+	}{
+		{
+			msg:      "empty existing rules should return false",
+			expected: false,
+		},
+		{
+			msg: "rule names unmatch should return false",
+			existingRule: []network.LoadBalancingRule{
+				{
+					Name: to.StringPtr("httpProbe1"),
+					LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
+						FrontendPort: to.Int32Ptr(1),
+					},
+				},
+			},
+			curRule: network.LoadBalancingRule{
+				Name: to.StringPtr("httpProbe2"),
+				LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
+					FrontendPort: to.Int32Ptr(1),
+				},
+			},
+			expected: false,
+		},
+		{
+			msg: "rule names match while frontend ports unmatch should return false",
+			existingRule: []network.LoadBalancingRule{
+				{
+					Name: to.StringPtr("httpProbe"),
+					LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
+						FrontendPort: to.Int32Ptr(1),
+					},
+				},
+			},
+			curRule: network.LoadBalancingRule{
+				Name: to.StringPtr("httpProbe"),
+				LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
+					FrontendPort: to.Int32Ptr(2),
+				},
+			},
+			expected: false,
+		},
+		{
+			msg: "rule names match while backend ports unmatch should return false",
+			existingRule: []network.LoadBalancingRule{
+				{
+					Name: to.StringPtr("httpProbe"),
+					LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
+						BackendPort: to.Int32Ptr(1),
+					},
+				},
+			},
+			curRule: network.LoadBalancingRule{
+				Name: to.StringPtr("httpProbe"),
+				LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
+					BackendPort: to.Int32Ptr(2),
+				},
+			},
+			expected: false,
+		},
+		{
+			msg: "rule names match while LoadDistribution unmatch should return false",
+			existingRule: []network.LoadBalancingRule{
+				{
+					Name: to.StringPtr("probe1"),
+					LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
+						LoadDistribution: network.Default,
+					},
+				},
+			},
+			curRule: network.LoadBalancingRule{
+				Name: to.StringPtr("probe2"),
+				LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
+					LoadDistribution: network.SourceIP,
+				},
+			},
+			expected: false,
+		},
+		{
+			msg: "both rule names and LoadBalancingRulePropertiesFormats match should return true",
+			existingRule: []network.LoadBalancingRule{
+				{
+					Name: to.StringPtr("matchName"),
+					LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
+						BackendPort:      to.Int32Ptr(2),
+						FrontendPort:     to.Int32Ptr(2),
+						LoadDistribution: network.SourceIP,
+					},
+				},
+			},
+			curRule: network.LoadBalancingRule{
+				Name: to.StringPtr("matchName"),
+				LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
+					BackendPort:      to.Int32Ptr(2),
+					FrontendPort:     to.Int32Ptr(2),
+					LoadDistribution: network.SourceIP,
+				},
+			},
+			expected: true,
+		},
+	}
+
+	for i, test := range tests {
+		findResult := findRule(test.existingRule, test.curRule)
+		assert.Equal(t, test.expected, findResult, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
+	}
+}
@@ -30,7 +30,7 @@ import (

 // ListRoutes lists all managed routes that belong to the specified clusterName
 func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
-	glog.V(10).Infof("list: START clusterName=%q", clusterName)
+	glog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName)
 	routeTable, existsRouteTable, err := az.getRouteTable()
 	return processRoutes(routeTable, existsRouteTable, err)
 }

@@ -50,7 +50,7 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl
 	for i, route := range *routeTable.Routes {
 		instance := mapRouteNameToNodeName(*route.Name)
 		cidr := *route.AddressPrefix
-		glog.V(10).Infof("list: * instance=%q, cidr=%q", instance, cidr)
+		glog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr)

 		kubeRoutes[i] = &cloudprovider.Route{
 			Name: *route.Name,

@@ -60,13 +60,13 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl
 		}
 	}

-	glog.V(10).Info("list: FINISH")
+	glog.V(10).Info("ListRoutes: FINISH")
 	return kubeRoutes, nil
 }

 func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error {
 	if _, existsRouteTable, err := az.getRouteTable(); err != nil {
-		glog.V(2).Infof("create error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+		glog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 		return err
 	} else if existsRouteTable {
 		return nil

@@ -81,17 +81,17 @@ func (az *Cloud) createRouteTable() error {
 		RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
 	}

-	glog.V(3).Infof("create: creating routetable. routeTableName=%q", az.RouteTableName)
+	glog.V(3).Infof("createRouteTableIfNotExists: creating routetable. routeTableName=%q", az.RouteTableName)
 	ctx, cancel := getContextWithCancel()
 	defer cancel()
 	resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable)
 	glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName)
 	if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
-		glog.V(2).Infof("create backing off: creating routetable. routeTableName=%q", az.RouteTableName)
+		glog.V(2).Infof("createRouteTableIfNotExists backing off: creating routetable. routeTableName=%q", az.RouteTableName)
 		retryErr := az.CreateOrUpdateRouteTableWithRetry(routeTable)
 		if retryErr != nil {
 			err = retryErr
-			glog.V(2).Infof("create abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
+			glog.V(2).Infof("createRouteTableIfNotExists abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
 		}
 	}
 	if err != nil {

@@ -107,7 +107,7 @@ func (az *Cloud) createRouteTable() error {
 // route.Name will be ignored, although the cloud-provider may use nameHint
 // to create a more user-meaningful name.
 func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
-	glog.V(2).Infof("create: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+	glog.V(2).Infof("CreateRoute: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 	if err := az.createRouteTableIfNotExists(clusterName, kubeRoute); err != nil {
 		return err
 	}

@@ -126,31 +126,31 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s
 		},
 	}

-	glog.V(3).Infof("create: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+	glog.V(3).Infof("CreateRoute: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 	ctx, cancel := getContextWithCancel()
 	defer cancel()
 	resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route)
 	glog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): end", az.RouteTableName)
 	if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
-		glog.V(2).Infof("create backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+		glog.V(2).Infof("CreateRoute backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 		retryErr := az.CreateOrUpdateRouteWithRetry(route)
 		if retryErr != nil {
 			err = retryErr
-			glog.V(2).Infof("create abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+			glog.V(2).Infof("CreateRoute abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 		}
 	}
 	if err != nil {
 		return err
 	}

-	glog.V(2).Infof("create: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+	glog.V(2).Infof("CreateRoute: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 	return nil
 }

 // DeleteRoute deletes the specified managed route
 // Route should be as returned by ListRoutes
 func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute *cloudprovider.Route) error {
-	glog.V(2).Infof("delete: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+	glog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)

 	ctx, cancel := getContextWithCancel()
 	defer cancel()

@@ -159,18 +159,18 @@ func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute
 	glog.V(10).Infof("RoutesClient.Delete(%q): end", az.RouteTableName)

 	if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
-		glog.V(2).Infof("delete backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+		glog.V(2).Infof("DeleteRoute backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 		retryErr := az.DeleteRouteWithRetry(routeName)
 		if retryErr != nil {
 			err = retryErr
-			glog.V(2).Infof("delete abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+			glog.V(2).Infof("DeleteRoute abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 		}
 	}
 	if err != nil {
 		return err
 	}

-	glog.V(2).Infof("delete: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+	glog.V(2).Infof("DeleteRoute: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 	return nil
 }

@@ -173,7 +173,7 @@ func getProtocolsFromKubernetesProtocol(protocol v1.Protocol) (*network.Transpor
 		securityProto = network.SecurityRuleProtocolUDP
 		return &transportProto, &securityProto, nil, nil
 	default:
-		return &transportProto, &securityProto, &probeProto, fmt.Errorf("Only TCP and UDP are supported for Azure LoadBalancers")
+		return &transportProto, &securityProto, &probeProto, fmt.Errorf("only TCP and UDP are supported for Azure LoadBalancers")
 	}

 }

@@ -285,7 +285,7 @@ outer:
 		return smallest, nil
 	}

-	return -1, fmt.Errorf("SecurityGroup priorities are exhausted")
+	return -1, fmt.Errorf("securityGroup priorities are exhausted")
 }

 func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, string, error) {

@@ -372,10 +372,10 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)
 	}
 	if err != nil {
 		if as.CloudProviderBackoff {
-			glog.V(2).Infof("InstanceID(%s) backing off", name)
+			glog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name)
 			machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name))
 			if err != nil {
-				glog.V(2).Infof("InstanceID(%s) abort backoff", name)
+				glog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name)
 				return "", err
 			}
 		} else {

@@ -400,7 +400,7 @@ func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.Nod
 func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) {
 	machine, err := as.getVirtualMachine(types.NodeName(name))
 	if err != nil {
-		glog.Errorf("error: as.GetInstanceTypeByNodeName(%s), as.getVirtualMachine(%s) err=%v", name, name, err)
+		glog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err)
 		return "", err
 	}

@@ -437,7 +437,7 @@ func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error)

 	ipConfig, err := getPrimaryIPConfig(nic)
 	if err != nil {
-		glog.Errorf("error: as.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", name, nic, err)
+		glog.Errorf("as.GetIPByNodeName(%s) failed: getPrimaryIPConfig(%v), err=%v", name, nic, err)
 		return "", "", err
 	}