Add verbose logs for azure cloud provider
This commit is contained in:
parent 00dc6b5ed8
commit b9b6a9e98d
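The pattern applied throughout this diff is to prefix each glog message with the name of the enclosing function and the resource it is processing, and to log routine progress at V(2) so that a -v=2 run tells a complete story. A minimal sketch of that style, using a hypothetical helper that is illustrative only and not part of the commit:

package main

import (
	"flag"
	"fmt"

	"github.com/golang/glog"
)

// ensurePublicIP is a hypothetical stand-in for one of the cloud provider helpers.
// The message format mirrors the diff: "<function> for service(<name>): <resource> - <event>".
func ensurePublicIP(serviceName, pipName string) error {
	glog.V(2).Infof("ensurePublicIP for service(%s): pip(%s) - creating", serviceName, pipName)
	if pipName == "" {
		err := fmt.Errorf("empty public IP name")
		glog.Errorf("ensurePublicIP for service(%s): pip creation failed, err=%v", serviceName, err)
		return err
	}
	glog.V(2).Infof("ensurePublicIP for service(%s): pip(%s) - finished", serviceName, pipName)
	return nil
}

func main() {
	flag.Parse() // glog registers -v and -logtostderr on the default flag set
	_ = ensurePublicIP("default/frontend", "frontend-pip")
	glog.Flush()
}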
@@ -55,10 +55,10 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua
 return true, cloudprovider.InstanceNotFound
 }
 if retryErr != nil {
-glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
+glog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr)
 return false, nil
 }
-glog.V(2).Info("backoff: success")
+glog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name)
 return true, nil
 })
 if err == wait.ErrWaitTimeout {
@@ -99,10 +99,10 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string,
 var retryErr error
 ip, publicIP, retryErr = az.getIPForMachine(name)
 if retryErr != nil {
-glog.Errorf("backoff: failure, will retry,err=%v", retryErr)
+glog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
 return false, nil
 }
-glog.V(2).Info("backoff: success")
+glog.V(2).Infof("GetIPForMachineWithRetry(%s): backoff success", name)
 return true, nil
 })
 return ip, publicIP, err
@@ -304,11 +304,11 @@ func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName st
 // A wait.ConditionFunc function to deal with common HTTP backoff response conditions
 func processRetryResponse(resp autorest.Response, err error) (bool, error) {
 if isSuccessHTTPResponse(resp) {
-glog.V(2).Infof("backoff: success, HTTP response=%d", resp.StatusCode)
+glog.V(2).Infof("processRetryResponse: backoff success, HTTP response=%d", resp.StatusCode)
 return true, nil
 }
 if shouldRetryAPIRequest(resp, err) {
-glog.Errorf("backoff: failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
+glog.Errorf("processRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
 // suppress the error object so that backoff process continues
 return false, nil
 }
@@ -361,7 +361,7 @@ func processHTTPRetryResponse(resp *http.Response, err error) (bool, error) {
 }

 if shouldRetryHTTPRequest(resp, err) {
-glog.Errorf("backoff: failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
+glog.Errorf("processHTTPRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
 // suppress the error object so that backoff process continues
 return false, nil
 }
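The retry helpers above wrap Azure calls in a wait.ConditionFunc: returning (false, nil) after logging keeps the exponential backoff going, and only wait.ErrWaitTimeout surfaces once retries are exhausted. A minimal sketch of that pattern, with a hypothetical fetchVM standing in for the Azure SDK call (not code from this commit):

package main

import (
	"flag"
	"fmt"
	"time"

	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/util/wait"
)

// fetchVM is a placeholder for the Azure API call being retried.
func fetchVM(name string) error {
	return fmt.Errorf("transient API error")
}

// getWithRetry shows the shape of GetVirtualMachineWithRetry: log the failure,
// return (false, nil) so the backoff continues, and report success at V(2).
func getWithRetry(name string) error {
	backoff := wait.Backoff{Duration: time.Second, Factor: 2, Steps: 4}
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		if retryErr := fetchVM(name); retryErr != nil {
			glog.Errorf("getWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr)
			return false, nil // suppress the error so that the backoff process continues
		}
		glog.V(2).Infof("getWithRetry(%s): backoff success", name)
		return true, nil
	})
	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("getWithRetry(%s): retries exhausted", name)
	}
	return err
}

func main() {
	flag.Parse()
	if err := getWithRetry("node-0"); err != nil {
		glog.Error(err)
	}
	glog.Flush()
}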
@@ -33,7 +33,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
 addressGetter := func(nodeName types.NodeName) ([]v1.NodeAddress, error) {
 ip, publicIP, err := az.GetIPForMachineWithRetry(nodeName)
 if err != nil {
-glog.V(2).Infof("NodeAddresses(%s) abort backoff", nodeName)
+glog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err)
 return nil, err
 }

@@ -135,7 +135,7 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser
 if lbStatus != nil && len(lbStatus.Ingress) > 0 {
 serviceIP = &lbStatus.Ingress[0].IP
 }
-glog.V(10).Infof("Calling reconcileSecurityGroup from EnsureLoadBalancer for %s with IP %s, wantLb = true", service.Name, logSafe(serviceIP))
+glog.V(2).Infof("EnsureLoadBalancer: reconciling security group for service %q with IP %q, wantLb = true", serviceName, logSafe(serviceIP))
 if _, err := az.reconcileSecurityGroup(clusterName, service, serviceIP, true /* wantLb */); err != nil {
 return nil, err
 }
@@ -165,7 +165,7 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri
 return err
 }

-glog.V(10).Infof("Calling reconcileSecurityGroup from EnsureLoadBalancerDeleted for %s with IP %s, wantLb = false", service.Name, serviceIPToCleanup)
+glog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IP %q, wantLb = false", serviceName, serviceIPToCleanup)
 if _, err := az.reconcileSecurityGroup(clusterName, service, &serviceIPToCleanup, false /* wantLb */); err != nil {
 return err
 }
@@ -262,7 +262,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
 func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) {
 isInternal := requiresInternalLoadBalancer(service)
 serviceName := getServiceName(service)
-glog.V(3).Infof("selectLoadBalancer(%s): isInternal(%s) - start", serviceName, isInternal)
+glog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%s) - start", serviceName, isInternal)
 vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes)
 if err != nil {
 glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
@@ -318,10 +318,11 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi

 func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.LoadBalancer) (status *v1.LoadBalancerStatus, err error) {
 if lb == nil {
-glog.V(10).Info("getServiceLoadBalancerStatus lb is nil")
+glog.V(10).Info("getServiceLoadBalancerStatus: lb is nil")
 return nil, nil
 }
 if lb.FrontendIPConfigurations == nil || *lb.FrontendIPConfigurations == nil {
+glog.V(10).Info("getServiceLoadBalancerStatus: lb.FrontendIPConfigurations is nil")
 return nil, nil
 }
 isInternal := requiresInternalLoadBalancer(service)
@@ -353,6 +354,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L
 }
 }

+glog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration %q for service %q", *lbIP, lbFrontendIPConfigName, serviceName)
 return &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: *lbIP}}}, nil
 }
 }
@@ -446,7 +448,7 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
 }
 }

-glog.V(3).Infof("ensure(%s): pip(%s) - creating", serviceName, *pip.Name)
+glog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name)
 glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name)
 err = az.CreateOrUpdatePIPWithRetry(pipResourceGroup, pip)
 if err != nil {
@@ -472,13 +474,14 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
 func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error) {
 isInternal := requiresInternalLoadBalancer(service)
 serviceName := getServiceName(service)
-glog.V(2).Infof("reconcileLoadBalancer(%s) - wantLb(%t): started", serviceName, wantLb)
+glog.V(2).Infof("reconcileLoadBalancer for service(%s) - wantLb(%t): started", serviceName, wantLb)
 lb, _, _, err := az.getServiceLoadBalancer(service, clusterName, nodes, wantLb)
 if err != nil {
+glog.Errorf("reconcileLoadBalancer: failed to get load balancer for service %q, error: %v", serviceName, err)
 return nil, err
 }
 lbName := *lb.Name
-glog.V(2).Infof("reconcileLoadBalancer(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb)
+glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb)
 lbFrontendIPConfigName := getFrontendIPConfigName(service, subnet(service))
 lbFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbFrontendIPConfigName)
 lbBackendPoolName := getBackendPoolName(clusterName)
@@ -496,18 +499,18 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 foundBackendPool := false
 for _, bp := range newBackendPools {
 if strings.EqualFold(*bp.Name, lbBackendPoolName) {
-glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb)
+glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb)
 foundBackendPool = true
 break
 } else {
-glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name)
+glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name)
 }
 }
 if !foundBackendPool {
 newBackendPools = append(newBackendPools, network.BackendAddressPool{
 Name: to.StringPtr(lbBackendPoolName),
 })
-glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb)
+glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb)

 dirtyLb = true
 lb.BackendAddressPools = &newBackendPools
@@ -525,7 +528,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 for i := len(newConfigs) - 1; i >= 0; i-- {
 config := newConfigs[i]
 if serviceOwnsFrontendIP(config, service) {
-glog.V(3).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName)
+glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName)
 newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
 dirtyConfigs = true
 }
@@ -535,7 +538,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 for i := len(newConfigs) - 1; i >= 0; i-- {
 config := newConfigs[i]
 if serviceOwnsFrontendIP(config, service) && !strings.EqualFold(*config.Name, lbFrontendIPConfigName) {
-glog.V(3).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name)
+glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name)
 newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
 dirtyConfigs = true
 }
@@ -599,7 +602,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 Name: to.StringPtr(lbFrontendIPConfigName),
 FrontendIPConfigurationPropertiesFormat: fipConfigurationProperties,
 })
-glog.V(10).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName)
+glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName)
 dirtyConfigs = true
 }
 }
@@ -700,15 +703,15 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 for i := len(updatedProbes) - 1; i >= 0; i-- {
 existingProbe := updatedProbes[i]
 if serviceOwnsRule(service, *existingProbe.Name) {
-glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name)
+glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name)
 keepProbe := false
 if findProbe(expectedProbes, existingProbe) {
-glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name)
+glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name)
 keepProbe = true
 }
 if !keepProbe {
 updatedProbes = append(updatedProbes[:i], updatedProbes[i+1:]...)
-glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name)
+glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name)
 dirtyProbes = true
 }
 }
@@ -717,11 +720,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 for _, expectedProbe := range expectedProbes {
 foundProbe := false
 if findProbe(updatedProbes, expectedProbe) {
-glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name)
+glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name)
 foundProbe = true
 }
 if !foundProbe {
-glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name)
+glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name)
 updatedProbes = append(updatedProbes, expectedProbe)
 dirtyProbes = true
 }
@@ -742,13 +745,13 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 existingRule := updatedRules[i]
 if serviceOwnsRule(service, *existingRule.Name) {
 keepRule := false
-glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
+glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
 if findRule(expectedRules, existingRule) {
-glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
+glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name)
 keepRule = true
 }
 if !keepRule {
-glog.V(3).Infof("reconcile(%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
+glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name)
 updatedRules = append(updatedRules[:i], updatedRules[i+1:]...)
 dirtyRules = true
 }
@@ -758,11 +761,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 for _, expectedRule := range expectedRules {
 foundRule := false
 if findRule(updatedRules, expectedRule) {
-glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
+glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name)
 foundRule = true
 }
 if !foundRule {
-glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name)
+glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name)
 updatedRules = append(updatedRules, expectedRule)
 dirtyRules = true
 }
@@ -779,7 +782,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 if lb.FrontendIPConfigurations == nil || len(*lb.FrontendIPConfigurations) == 0 {
 // When FrontendIPConfigurations is empty, we need to delete the Azure load balancer resource itself,
 // because an Azure load balancer cannot have an empty FrontendIPConfigurations collection
-glog.V(3).Infof("delete(%s): lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName)
+glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName)

 // Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB.
 vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
@@ -792,18 +795,18 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): end", lbBackendPoolID, vmSetName)

 // Remove the LB.
-glog.V(10).Infof("az.DeleteLBWithRetry(%q): start", lbName)
+glog.V(10).Infof("reconcileLoadBalancer: az.DeleteLBWithRetry(%q): start", lbName)
 err = az.DeleteLBWithRetry(lbName)
 if err != nil {
-glog.V(2).Infof("delete(%s) abort backoff: lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName)
+glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName)
 return nil, err
 }
 glog.V(10).Infof("az.DeleteLBWithRetry(%q): end", lbName)
 } else {
-glog.V(3).Infof("ensure(%s): lb(%s) - updating", serviceName, lbName)
+glog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName)
 err := az.CreateOrUpdateLBWithRetry(*lb)
 if err != nil {
-glog.V(2).Infof("ensure(%s) abort backoff: lb(%s) - updating", serviceName, lbName)
+glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - updating", serviceName, lbName)
 return nil, err
 }

@@ -811,7 +814,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 // Refresh updated lb which will be used later in other places.
 newLB, exist, err := az.getAzureLoadBalancer(lbName)
 if err != nil {
-glog.V(2).Infof("getAzureLoadBalancer(%s) failed: %v", lbName, err)
+glog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err)
 return nil, err
 }
 if !exist {
@@ -831,7 +834,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 }
 }

-glog.V(2).Infof("ensure(%s): lb(%s) finished", serviceName, lbName)
+glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) finished", serviceName, lbName)
 return lb, nil
 }

@@ -1025,7 +1028,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,

 if dirtySg {
 sg.SecurityRules = &updatedRules
-glog.V(3).Infof("ensure(%s): sg(%s) - updating", serviceName, *sg.Name)
+glog.V(2).Infof("reconcileSecurityGroup for service(%s): sg(%s) - updating", serviceName, *sg.Name)
 glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name)
 err := az.CreateOrUpdateSGWithRetry(sg)
 if err != nil {
@@ -1213,7 +1216,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want
 // This is the only case we should preserve the
 // Public ip resource with match service tag
 } else {
-glog.V(2).Infof("ensure(%s): pip(%s) - deleting", serviceName, pipName)
+glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - deleting", serviceName, pipName)
 glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName)
 err = az.DeletePublicIPWithRetry(pipResourceGroup, pipName)
 if err != nil {
@@ -1227,7 +1230,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want
 if err != nil {
 return nil, err
 }
-glog.V(2).Infof("ensure(%s): pip(%s) - finished", serviceName, pipName)
+glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - finished", serviceName, pipName)
 }
 }

@@ -30,7 +30,7 @@ import (

 // ListRoutes lists all managed routes that belong to the specified clusterName
 func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
-glog.V(10).Infof("list: START clusterName=%q", clusterName)
+glog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName)
 routeTable, existsRouteTable, err := az.getRouteTable()
 return processRoutes(routeTable, existsRouteTable, err)
 }
@@ -50,7 +50,7 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl
 for i, route := range *routeTable.Routes {
 instance := mapRouteNameToNodeName(*route.Name)
 cidr := *route.AddressPrefix
-glog.V(10).Infof("list: * instance=%q, cidr=%q", instance, cidr)
+glog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr)

 kubeRoutes[i] = &cloudprovider.Route{
 Name: *route.Name,
@@ -60,13 +60,13 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl
 }
 }

-glog.V(10).Info("list: FINISH")
+glog.V(10).Info("ListRoutes: FINISH")
 return kubeRoutes, nil
 }

 func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error {
 if _, existsRouteTable, err := az.getRouteTable(); err != nil {
-glog.V(2).Infof("create error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+glog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 return err
 } else if existsRouteTable {
 return nil
@@ -81,17 +81,17 @@ func (az *Cloud) createRouteTable() error {
 RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
 }

-glog.V(3).Infof("create: creating routetable. routeTableName=%q", az.RouteTableName)
+glog.V(3).Infof("createRouteTableIfNotExists: creating routetable. routeTableName=%q", az.RouteTableName)
 ctx, cancel := getContextWithCancel()
 defer cancel()
 resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable)
 glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName)
 if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
-glog.V(2).Infof("create backing off: creating routetable. routeTableName=%q", az.RouteTableName)
+glog.V(2).Infof("createRouteTableIfNotExists backing off: creating routetable. routeTableName=%q", az.RouteTableName)
 retryErr := az.CreateOrUpdateRouteTableWithRetry(routeTable)
 if retryErr != nil {
 err = retryErr
-glog.V(2).Infof("create abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
+glog.V(2).Infof("createRouteTableIfNotExists abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
 }
 }
 if err != nil {
@@ -107,7 +107,7 @@ func (az *Cloud) createRouteTable() error {
 // route.Name will be ignored, although the cloud-provider may use nameHint
 // to create a more user-meaningful name.
 func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
-glog.V(2).Infof("create: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+glog.V(2).Infof("CreateRoute: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 if err := az.createRouteTableIfNotExists(clusterName, kubeRoute); err != nil {
 return err
 }
@@ -126,31 +126,31 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s
 },
 }

-glog.V(3).Infof("create: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+glog.V(3).Infof("CreateRoute: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 ctx, cancel := getContextWithCancel()
 defer cancel()
 resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route)
 glog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): end", az.RouteTableName)
 if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
-glog.V(2).Infof("create backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+glog.V(2).Infof("CreateRoute backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 retryErr := az.CreateOrUpdateRouteWithRetry(route)
 if retryErr != nil {
 err = retryErr
-glog.V(2).Infof("create abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+glog.V(2).Infof("CreateRoute abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 }
 }
 if err != nil {
 return err
 }

-glog.V(2).Infof("create: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+glog.V(2).Infof("CreateRoute: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 return nil
 }

 // DeleteRoute deletes the specified managed route
 // Route should be as returned by ListRoutes
 func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute *cloudprovider.Route) error {
-glog.V(2).Infof("delete: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+glog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)

 ctx, cancel := getContextWithCancel()
 defer cancel()
@@ -159,18 +159,18 @@ func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute
 glog.V(10).Infof("RoutesClient.Delete(%q): end", az.RouteTableName)

 if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
-glog.V(2).Infof("delete backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+glog.V(2).Infof("DeleteRoute backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 retryErr := az.DeleteRouteWithRetry(routeName)
 if retryErr != nil {
 err = retryErr
-glog.V(2).Infof("delete abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+glog.V(2).Infof("DeleteRoute abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 }
 }
 if err != nil {
 return err
 }

-glog.V(2).Infof("delete: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+glog.V(2).Infof("DeleteRoute: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 return nil
 }

@@ -173,7 +173,7 @@ func getProtocolsFromKubernetesProtocol(protocol v1.Protocol) (*network.Transpor
 securityProto = network.SecurityRuleProtocolUDP
 return &transportProto, &securityProto, nil, nil
 default:
-return &transportProto, &securityProto, &probeProto, fmt.Errorf("Only TCP and UDP are supported for Azure LoadBalancers")
+return &transportProto, &securityProto, &probeProto, fmt.Errorf("only TCP and UDP are supported for Azure LoadBalancers")
 }

 }
@@ -285,7 +285,7 @@ outer:
 return smallest, nil
 }

-return -1, fmt.Errorf("SecurityGroup priorities are exhausted")
+return -1, fmt.Errorf("securityGroup priorities are exhausted")
 }

 func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, string, error) {
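The two fmt.Errorf changes just above also lowercase the error text, following the Go convention (flagged by golint) that error strings should not be capitalized, since callers usually wrap them into longer messages. A tiny illustration, not taken from the repository:

package main

import (
	"errors"
	"fmt"
)

// pickProtocol mimics the style of the corrected error strings.
func pickProtocol(p string) error {
	switch p {
	case "TCP", "UDP":
		return nil
	default:
		// lowercase and no trailing punctuation, so it reads well when wrapped
		return errors.New("only TCP and UDP are supported for Azure LoadBalancers")
	}
}

func main() {
	if err := pickProtocol("SCTP"); err != nil {
		fmt.Printf("reconcile failed: %v\n", err) // prints: reconcile failed: only TCP and UDP are supported ...
	}
}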
@@ -372,10 +372,10 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)
 }
 if err != nil {
 if as.CloudProviderBackoff {
-glog.V(2).Infof("InstanceID(%s) backing off", name)
+glog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name)
 machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name))
 if err != nil {
-glog.V(2).Infof("InstanceID(%s) abort backoff", name)
+glog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name)
 return "", err
 }
 } else {
@@ -400,7 +400,7 @@ func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.Nod
 func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) {
 machine, err := as.getVirtualMachine(types.NodeName(name))
 if err != nil {
-glog.Errorf("error: as.GetInstanceTypeByNodeName(%s), as.getVirtualMachine(%s) err=%v", name, name, err)
+glog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err)
 return "", err
 }

@@ -437,7 +437,7 @@ func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error)

 ipConfig, err := getPrimaryIPConfig(nic)
 if err != nil {
-glog.Errorf("error: as.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", name, nic, err)
+glog.Errorf("as.GetIPByNodeName(%s) failed: getPrimaryIPConfig(%v), err=%v", name, nic, err)
 return "", "", err
 }