Fix a lot of typos in Azure codes
This commit is contained in: parent 16e18a590f, commit ecdc1ba57c
@@ -213,7 +213,7 @@ func GetNetworkResourceServicePrincipalToken(config *AzureAuthConfig, env *azure
 }
 
 // ParseAzureEnvironment returns the azure environment.
-// If 'resourceManagerEndpoint' is set, the environment is computed by quering the cloud's resource manager endpoint.
+// If 'resourceManagerEndpoint' is set, the environment is computed by querying the cloud's resource manager endpoint.
 // Otherwise, a pre-defined Environment is looked up by name.
 func ParseAzureEnvironment(cloudName, resourceManagerEndpoint, identitySystem string) (*azure.Environment, error) {
     var env azure.Environment
@@ -192,7 +192,7 @@ func TestGetNetworkResourceServicePrincipalTokenNegative(t *testing.T) {
     }
 }
 
-func TestParseAzureEngironment(t *testing.T) {
+func TestParseAzureEnvironment(t *testing.T) {
     cases := []struct {
         cloudName               string
         resourceManagerEndpoint string
@@ -96,8 +96,8 @@ const (
 const (
     // PreConfiguredBackendPoolLoadBalancerTypesNone means that the load balancers are not pre-configured
     PreConfiguredBackendPoolLoadBalancerTypesNone = ""
-    // PreConfiguredBackendPoolLoadBalancerTypesInteral means that the `internal` load balancers are pre-configured
-    PreConfiguredBackendPoolLoadBalancerTypesInteral = "internal"
+    // PreConfiguredBackendPoolLoadBalancerTypesInternal means that the `internal` load balancers are pre-configured
+    PreConfiguredBackendPoolLoadBalancerTypesInternal = "internal"
     // PreConfiguredBackendPoolLoadBalancerTypesExternal means that the `external` load balancers are pre-configured
     PreConfiguredBackendPoolLoadBalancerTypesExternal = "external"
     // PreConfiguredBackendPoolLoadBalancerTypesAll means that all load balancers are pre-configured
@@ -383,7 +383,7 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret bool) erro
     if err == auth.ErrorNoAuth {
         // Only controller-manager would lazy-initialize from secret, and credentials are required for such case.
         if fromSecret {
-            err := fmt.Errorf("No credentials provided for Azure cloud provider")
+            err := fmt.Errorf("no credentials provided for Azure cloud provider")
             klog.Fatalf("%v", err)
             return err
         }
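Most of the message rewrites in this commit follow the Go convention (flagged by golint and Staticcheck's ST1005) that error strings start lowercase and omit trailing punctuation, because callers wrap them into longer messages. A minimal, self-contained sketch of the effect; the helper name is hypothetical:

```go
package main

import (
	"errors"
	"fmt"
)

// loadCredentials is a hypothetical stand-in for the provider's config path;
// it returns an error written in the recommended lowercase form.
func loadCredentials() error {
	return errors.New("no credentials provided for Azure cloud provider")
}

func main() {
	if err := loadCredentials(); err != nil {
		// The wrapped output reads naturally mid-sentence:
		// "initializing cloud provider: no credentials provided for Azure cloud provider"
		fmt.Println(fmt.Errorf("initializing cloud provider: %w", err))
	}
}
```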
@@ -810,7 +810,7 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
 // GetActiveZones returns all the zones in which k8s nodes are currently running.
 func (az *Cloud) GetActiveZones() (sets.String, error) {
     if az.nodeInformerSynced == nil {
-        return nil, fmt.Errorf("Azure cloud provider doesn't have informers set")
+        return nil, fmt.Errorf("azure cloud provider doesn't have informers set")
     }
 
     az.nodeCachesLock.RLock()
@@ -68,9 +68,9 @@ func (az *Cloud) RequestBackoff() (resourceRequestBackoff wait.Backoff) {
 }
 
 // Event creates a event for the specified object.
-func (az *Cloud) Event(obj runtime.Object, eventtype, reason, message string) {
+func (az *Cloud) Event(obj runtime.Object, eventType, reason, message string) {
     if obj != nil && reason != "" {
-        az.eventRecorder.Event(obj, eventtype, reason, message)
+        az.eventRecorder.Event(obj, eventType, reason, message)
     }
 }
 
@@ -359,7 +359,7 @@ func (az *Cloud) CreateOrUpdateRouteTable(routeTable network.RouteTable) error {
     }
     // Invalidate the cache because another new operation has canceled the current request.
     if strings.Contains(strings.ToLower(rerr.Error().Error()), operationCanceledErrorMessage) {
-        klog.V(3).Infof("Route table cache for %s is cleanup because CreateOrUpdateRouteTable is canceld by another operation", *routeTable.Name)
+        klog.V(3).Infof("Route table cache for %s is cleanup because CreateOrUpdateRouteTable is canceled by another operation", *routeTable.Name)
         az.rtCache.Delete(*routeTable.Name)
     }
     klog.Errorf("RouteTablesClient.CreateOrUpdate(%s) failed: %v", az.RouteTableName, rerr.Error())
@@ -384,7 +384,7 @@ func (az *Cloud) CreateOrUpdateRoute(route network.Route) error {
     }
     // Invalidate the cache because another new operation has canceled the current request.
     if strings.Contains(strings.ToLower(rerr.Error().Error()), operationCanceledErrorMessage) {
-        klog.V(3).Infof("Route cache for %s is cleanup because CreateOrUpdateRouteTable is canceld by another operation", *route.Name)
+        klog.V(3).Infof("Route cache for %s is cleanup because CreateOrUpdateRouteTable is canceled by another operation", *route.Name)
         az.rtCache.Delete(az.RouteTableName)
     }
     return rerr.Error()
@@ -254,7 +254,7 @@ func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountT
 
 //DeleteBlobDisk : delete a blob disk from a node
 func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
-    storageAccountName, vhdName, err := diskNameandSANameFromURI(diskURI)
+    storageAccountName, vhdName, err := diskNameAndSANameFromURI(diskURI)
     if err != nil {
         return err
     }
@@ -468,13 +468,13 @@ func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccount
         }
         klog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name)
 
-        sastate := &storageAccountState{
+        saState := &storageAccountState{
             name:      *v.Name,
             saType:    (*v.Sku).Name,
             diskCount: -1,
         }
 
-        accounts[*v.Name] = sastate
+        accounts[*v.Name] = saState
     }
 
     return accounts, nil
@@ -578,7 +578,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
     disksAfter := totalDiskCounts + 1 // with the new one!
 
     avgUtilization := float64(disksAfter) / float64(countAccounts*maxDisksPerStorageAccounts)
-    aboveAvg := (avgUtilization > storageAccountUtilizationBeforeGrowing)
+    aboveAvg := avgUtilization > storageAccountUtilizationBeforeGrowing
 
     // avg are not create and we should create more accounts if we can
     if aboveAvg && countAccounts < maxStorageAccounts {
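For context on the threshold check in this hunk, a worked example with illustrative constants; the real values of maxDisksPerStorageAccounts and storageAccountUtilizationBeforeGrowing live elsewhere in this file and are assumed here:

```go
package main

import "fmt"

func main() {
	// Illustrative numbers only; the controller's actual limits are defined
	// as package constants, not shown in this hunk.
	const (
		maxDisksPerStorageAccounts             = 60
		storageAccountUtilizationBeforeGrowing = 0.5
	)
	countAccounts, totalDiskCounts := 2, 70
	disksAfter := totalDiskCounts + 1 // with the new one!

	// 71 disks across 2*60 slots = 0.59 utilization, above the 0.5 threshold,
	// so the controller would try to create another storage account.
	avgUtilization := float64(disksAfter) / float64(countAccounts*maxDisksPerStorageAccounts)
	aboveAvg := avgUtilization > storageAccountUtilizationBeforeGrowing
	fmt.Printf("utilization %.2f, above threshold: %v\n", avgUtilization, aboveAvg)
}
```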
@@ -631,7 +631,7 @@ func createVHDHeader(size uint64) ([]byte, error) {
     return b.Bytes(), nil
 }
 
-func diskNameandSANameFromURI(diskURI string) (string, string, error) {
+func diskNameAndSANameFromURI(diskURI string) (string, string, error) {
     uri, err := url.Parse(diskURI)
     if err != nil {
         return "", "", err
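The rename fixes camel case in diskNameAndSANameFromURI. As a sketch of what such a parser extracts, assuming the usual unmanaged-disk blob URI shape https://&lt;storageAccount&gt;.blob.core.windows.net/vhds/&lt;vhdName&gt;.vhd (the URI layout and container name are assumptions for illustration, not taken from this hunk):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// diskNameAndSANameFromURI-style parsing, sketched under the assumed URI
// shape above: the storage account is the first host label, the VHD name is
// the last path segment.
func diskNameAndSANameFromURI(diskURI string) (string, string, error) {
	uri, err := url.Parse(diskURI)
	if err != nil {
		return "", "", err
	}
	storageAccountName := strings.Split(uri.Host, ".")[0]

	segments := strings.Split(strings.Trim(uri.Path, "/"), "/")
	vhdName := segments[len(segments)-1]
	return storageAccountName, vhdName, nil
}

func main() {
	sa, vhd, err := diskNameAndSANameFromURI("https://mysa.blob.core.windows.net/vhds/disk1.vhd")
	fmt.Println(sa, vhd, err) // mysa disk1.vhd <nil>
}
```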
@@ -294,7 +294,7 @@ func TestCommonDetachDisk(t *testing.T) {
             expectedErr: false,
         },
         {
-            desc:     "no error shall be returned if the disk exsists",
+            desc:     "no error shall be returned if the disk exists",
             vmList:   map[string]string{"vm1": "PowerState/Running"},
             nodeName: "vm1",
             diskName: "disk1",
@@ -153,7 +153,7 @@ func (ims *InstanceMetadataService) GetMetadata(crt azcache.AzureCacheReadType)
         return nil, err
     }
 
-    // Cache shouldn't be nil, but added a check incase something wrong.
+    // Cache shouldn't be nil, but added a check in case something is wrong.
     if cache == nil {
         return nil, fmt.Errorf("failure of getting instance metadata")
     }
@@ -258,7 +258,7 @@ func (az *Cloud) isCurrentInstance(name types.NodeName, metadataVMName string) (
     }
 
     metadataVMName = strings.ToLower(metadataVMName)
-    return (metadataVMName == nodeName), nil
+    return metadataVMName == nodeName, nil
 }
 
 // InstanceID returns the cloud provider ID of the specified instance.
@@ -1609,7 +1609,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
     if ports == nil {
         if useSharedSecurityRule(service) {
             klog.V(2).Infof("Attempting to reconcile security group for service %s, but service uses shared rule and we don't know which port it's for", service.Name)
-            return nil, fmt.Errorf("No port info for reconciling shared rule for service %s", service.Name)
+            return nil, fmt.Errorf("no port info for reconciling shared rule for service %s", service.Name)
         }
         ports = []v1.ServicePort{}
     }
@@ -1621,7 +1621,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
 
     destinationIPAddress := ""
     if wantLb && lbIP == nil {
-        return nil, fmt.Errorf("No load balancer IP for setting up security rules for service %s", service.Name)
+        return nil, fmt.Errorf("no load balancer IP for setting up security rules for service %s", service.Name)
     }
     if lbIP != nil {
         destinationIPAddress = *lbIP
@@ -1724,17 +1724,17 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
     sharedIndex, sharedRule, sharedRuleFound := findSecurityRuleByName(updatedRules, sharedRuleName)
     if !sharedRuleFound {
         klog.V(4).Infof("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name)
-        return nil, fmt.Errorf("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name)
+        return nil, fmt.Errorf("expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name)
     }
     if sharedRule.DestinationAddressPrefixes == nil {
         klog.V(4).Infof("Expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name)
-        return nil, fmt.Errorf("Expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name)
+        return nil, fmt.Errorf("expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name)
     }
     existingPrefixes := *sharedRule.DestinationAddressPrefixes
     addressIndex, found := findIndex(existingPrefixes, destinationIPAddress)
     if !found {
         klog.V(4).Infof("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name)
-        return nil, fmt.Errorf("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name)
+        return nil, fmt.Errorf("expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name)
     }
     if len(existingPrefixes) == 1 {
         updatedRules = append(updatedRules[:sharedIndex], updatedRules[sharedIndex+1:]...)
@@ -2282,7 +2282,7 @@ func (az *Cloud) isBackendPoolPreConfigured(service *v1.Service) bool {
     if az.PreConfiguredBackendPoolLoadBalancerTypes == PreConfiguredBackendPoolLoadBalancerTypesAll {
         preConfigured = true
     }
-    if (az.PreConfiguredBackendPoolLoadBalancerTypes == PreConfiguredBackendPoolLoadBalancerTypesInteral) && isInternal {
+    if (az.PreConfiguredBackendPoolLoadBalancerTypes == PreConfiguredBackendPoolLoadBalancerTypesInternal) && isInternal {
         preConfigured = true
     }
     if (az.PreConfiguredBackendPoolLoadBalancerTypes == PreConfiguredBackendPoolLoadBalancerTypesExternal) && !isInternal {
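The three comparisons in this hunk reduce to a small pure function. A sketch, with the none/internal/external literals taken from the const block earlier in this commit and the "all" value assumed for illustration:

```go
package main

import "fmt"

const (
	typesNone     = ""
	typesInternal = "internal"
	typesExternal = "external"
	typesAll      = "all" // assumed value; the hunks show only the internal/external literals
)

// isBackendPoolPreConfigured reduced to a pure function over the three checks
// visible in the hunk: "all" matches everything, "internal"/"external" match
// only the corresponding load balancer kind.
func isBackendPoolPreConfigured(configured string, isInternal bool) bool {
	preConfigured := false
	if configured == typesAll {
		preConfigured = true
	}
	if configured == typesInternal && isInternal {
		preConfigured = true
	}
	if configured == typesExternal && !isInternal {
		preConfigured = true
	}
	return preConfigured
}

func main() {
	fmt.Println(isBackendPoolPreConfigured(typesInternal, true))  // true
	fmt.Println(isBackendPoolPreConfigured(typesInternal, false)) // false
}
```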
@@ -56,7 +56,7 @@ func TestFindProbe(t *testing.T) {
             expected: false,
         },
         {
-            msg: "probe names match while ports unmatch should return false",
+            msg: "probe names match while ports don't should return false",
             existingProbe: []network.Probe{
                 {
                     Name: to.StringPtr("httpProbe"),
@@ -74,7 +74,7 @@ func TestFindProbe(t *testing.T) {
             expected: false,
         },
         {
-            msg: "probe ports match while names unmatch should return false",
+            msg: "probe ports match while names don't should return false",
             existingProbe: []network.Probe{
                 {
                     Name: to.StringPtr("probe1"),
@@ -129,7 +129,7 @@ func TestFindRule(t *testing.T) {
             expected: false,
         },
         {
-            msg: "rule names unmatch should return false",
+            msg: "rule names don't match should return false",
             existingRule: []network.LoadBalancingRule{
                 {
                     Name: to.StringPtr("httpProbe1"),
@@ -147,7 +147,7 @@ func TestFindRule(t *testing.T) {
             expected: false,
         },
         {
-            msg: "rule names match while frontend ports unmatch should return false",
+            msg: "rule names match while frontend ports don't should return false",
             existingRule: []network.LoadBalancingRule{
                 {
                     Name: to.StringPtr("httpProbe"),
@@ -165,7 +165,7 @@ func TestFindRule(t *testing.T) {
             expected: false,
         },
         {
-            msg: "rule names match while backend ports unmatch should return false",
+            msg: "rule names match while backend ports don't should return false",
             existingRule: []network.LoadBalancingRule{
                 {
                     Name: to.StringPtr("httpProbe"),
@@ -183,7 +183,7 @@ func TestFindRule(t *testing.T) {
             expected: false,
         },
         {
-            msg: "rule names match while idletimeout unmatch should return false",
+            msg: "rule names match while idletimeout don't should return false",
             existingRule: []network.LoadBalancingRule{
                 {
                     Name: to.StringPtr("httpRule"),
@@ -217,7 +217,7 @@ func TestFindRule(t *testing.T) {
             expected: true,
         },
         {
-            msg: "rule names match while LoadDistribution unmatch should return false",
+            msg: "rule names match while LoadDistribution don't should return false",
             existingRule: []network.LoadBalancingRule{
                 {
                     Name: to.StringPtr("probe1"),
@@ -722,7 +722,7 @@ func TestShouldReleaseExistingOwnedPublicIP(t *testing.T) {
     }
 }
 
-func TestgetIPTagMap(t *testing.T) {
+func TestGetIPTagMap(t *testing.T) {
     tests := []struct {
         desc  string
         input string
@@ -1170,7 +1170,7 @@ func TestGetServiceLoadBalancer(t *testing.T) {
             expectedError: false,
         },
         {
-            desc:        "getServiceLoadBalancer shall report error if there're loadbalancer mode annotations on a standard lb",
+            desc:        "getServiceLoadBalancer shall report error if there are loadbalancer mode annotations on a standard lb",
             service:     getTestService("service1", v1.ProtocolTCP, nil, false, 80),
             annotations: map[string]string{ServiceAnnotationLoadBalancerMode: "__auto__"},
             sku:         "standard",
@@ -56,7 +56,7 @@ func InitializeCloudProviderRateLimitConfig(config *CloudProviderRateLimitConfig
     if config.CloudProviderRateLimitBucket == 0 {
         config.CloudProviderRateLimitBucket = rateLimitBucketDefault
     }
-    // Assing write rate limit defaults if no configuration was passed in.
+    // Assign write rate limit defaults if no configuration was passed in.
     if config.CloudProviderRateLimitQPSWrite == 0 {
         config.CloudProviderRateLimitQPSWrite = config.CloudProviderRateLimitQPS
     }
@@ -151,7 +151,7 @@ func TestCreateRoute(t *testing.T) {
         routeTableName        string
         initialRoute          *[]network.Route
         updatedRoute          *[]network.Route
-        hasUnmangedNodes      bool
+        hasUnmanagedNodes     bool
         nodeInformerNotSynced bool
         ipv6DualStackEnabled  bool
         routeTableNotExist    bool
@@ -230,14 +230,14 @@ func TestCreateRoute(t *testing.T) {
         {
             name:               "CreateRoute should add route to cloud.RouteCIDRs if node is unmanaged",
             routeTableName:     "rt8",
-            hasUnmangedNodes:   true,
+            hasUnmanagedNodes:  true,
             unmanagedNodeName:  "node",
             routeCIDRs:         map[string]string{},
             expectedRouteCIDRs: map[string]string{"node": "1.2.3.4/24"},
         },
         {
             name:                 "CreateRoute should report error if node is unmanaged and cloud.ipv6DualStackEnabled is true",
-            hasUnmangedNodes:     true,
+            hasUnmanagedNodes:    true,
             ipv6DualStackEnabled: true,
             unmanagedNodeName:    "node",
             expectedErrMsg:       fmt.Errorf("unmanaged nodes are not supported in dual stack mode"),
@@ -282,7 +282,7 @@ func TestCreateRoute(t *testing.T) {
 
         cloud.RouteTableName = test.routeTableName
         cloud.ipv6DualStackEnabled = test.ipv6DualStackEnabled
-        if test.hasUnmangedNodes {
+        if test.hasUnmanagedNodes {
             cloud.unmanagedNodes.Insert(test.unmanagedNodeName)
             cloud.routeCIDRs = test.routeCIDRs
         } else {
@@ -544,7 +544,7 @@ func TestListRoutes(t *testing.T) {
         name                  string
         routeTableName        string
         routeTable            network.RouteTable
-        hasUnmangedNodes      bool
+        hasUnmanagedNodes     bool
         nodeInformerNotSynced bool
         unmanagedNodeName     string
         routeCIDRs            map[string]string
@@ -580,9 +580,9 @@ func TestListRoutes(t *testing.T) {
         {
             name:              "ListRoutes should return correct routes if there's unmanaged nodes",
             routeTableName:    "rt2",
-            hasUnmangedNodes:  true,
-            unmanagedNodeName: "umanaged-node",
-            routeCIDRs:        map[string]string{"umanaged-node": "2.2.3.4/24"},
+            hasUnmanagedNodes: true,
+            unmanagedNodeName: "unmanaged-node",
+            routeCIDRs:        map[string]string{"unmanaged-node": "2.2.3.4/24"},
             routeTable: network.RouteTable{
                 Name:     to.StringPtr("rt2"),
                 Location: &cloud.Location,
@@ -604,14 +604,14 @@ func TestListRoutes(t *testing.T) {
                     DestinationCIDR: "1.2.3.4/24",
                 },
                 {
-                    Name:            "umanaged-node",
-                    TargetNode:      mapRouteNameToNodeName(false, "umanaged-node"),
+                    Name:            "unmanaged-node",
+                    TargetNode:      mapRouteNameToNodeName(false, "unmanaged-node"),
                     DestinationCIDR: "2.2.3.4/24",
                 },
             },
         },
         {
-            name:           "ListRoutes should return nil if routeTabel don't exist",
+            name:           "ListRoutes should return nil if routeTable don't exist",
             routeTableName: "rt3",
             routeTable:     network.RouteTable{},
             getErr: &retry.Error{
@@ -640,7 +640,7 @@ func TestListRoutes(t *testing.T) {
     }
 
     for _, test := range testCases {
-        if test.hasUnmangedNodes {
+        if test.hasUnmanagedNodes {
             cloud.unmanagedNodes.Insert(test.unmanagedNodeName)
             cloud.routeCIDRs = test.routeCIDRs
         } else {
@@ -59,7 +59,7 @@ const (
     backendPoolIDTemplate       = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/%s"
     loadBalancerProbeIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/probes/%s"
 
-    // InternalLoadBalancerNameSuffix is load balancer posfix
+    // InternalLoadBalancerNameSuffix is load balancer suffix
     InternalLoadBalancerNameSuffix = "-internal"
 
     // nodeLabelRole specifies the role of a node
@@ -609,9 +609,9 @@ func (as *availabilitySet) GetPrivateIPsByNodeName(name string) ([]string, error
     return ips, nil
 }
 
-// getAgentPoolAvailabiliySets lists the virtual machines for the resource group and then builds
+// getAgentPoolAvailabilitySets lists the virtual machines for the resource group and then builds
 // a list of availability sets that match the nodes available to k8s.
-func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
+func (as *availabilitySet) getAgentPoolAvailabilitySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
     vms, err := as.ListVirtualMachines(as.ResourceGroup)
     if err != nil {
         klog.Errorf("as.getNodeAvailabilitySet - ListVirtualMachines failed, err=%v", err)
@@ -634,7 +634,7 @@ func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentP
     asID, ok := vmNameToAvailabilitySetID[nodeName]
     if !ok {
         klog.Errorf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName)
-        return nil, fmt.Errorf("Node (%s) - has no availability sets", nodeName)
+        return nil, fmt.Errorf("node (%s) - has no availability sets", nodeName)
     }
     if availabilitySetIDs.Has(asID) {
         // already added in the list
@@ -645,7 +645,7 @@ func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentP
         klog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err)
         return nil, err
     }
-    // AvailabilitySet ID is currently upper cased in a indeterministic way
+    // AvailabilitySet ID is currently upper cased in a non-deterministic way
     // We want to keep it lower case, before the ID get fixed
     asName = strings.ToLower(asName)
 
@@ -657,7 +657,7 @@ func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentP
 
 // GetVMSetNames selects all possible availability sets or scale sets
 // (depending vmType configured) for service load balancer, if the service has
-// no loadbalancer mode annotaion returns the primary VMSet. If service annotation
+// no loadbalancer mode annotation returns the primary VMSet. If service annotation
 // for loadbalancer exists then return the eligible VMSet.
 func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) {
     hasMode, isAuto, serviceAvailabilitySetNames := getServiceLoadBalancerMode(service)
@@ -666,14 +666,14 @@ func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node)
         availabilitySetNames = &[]string{as.Config.PrimaryAvailabilitySetName}
         return availabilitySetNames, nil
     }
-    availabilitySetNames, err = as.getAgentPoolAvailabiliySets(nodes)
+    availabilitySetNames, err = as.getAgentPoolAvailabilitySets(nodes)
     if err != nil {
-        klog.Errorf("as.GetVMSetNames - getAgentPoolAvailabiliySets failed err=(%v)", err)
+        klog.Errorf("as.GetVMSetNames - getAgentPoolAvailabilitySets failed err=(%v)", err)
         return nil, err
     }
     if len(*availabilitySetNames) == 0 {
         klog.Errorf("as.GetVMSetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes))
-        return nil, fmt.Errorf("No availability sets found for nodes, node count(%d)", len(nodes))
+        return nil, fmt.Errorf("no availability sets found for nodes, node count(%d)", len(nodes))
     }
     // sort the list to have deterministic selection
     sort.Strings(*availabilitySetNames)
@@ -1129,7 +1129,7 @@ func TestGetStandardVMSetNames(t *testing.T) {
                 },
             },
         },
-        expectedErrMsg: fmt.Errorf("Node (vm2) - has no availability sets"),
+        expectedErrMsg: fmt.Errorf("node (vm2) - has no availability sets"),
     },
     {
         name: "GetVMSetNames should report the error if there's no such availability set",
@@ -179,7 +179,7 @@ func (az *Cloud) EnsureStorageAccount(accountOptions *AccountOptions, genAccount
         accountOptions.Tags = make(map[string]string)
     }
     accountOptions.Tags["created-by"] = "azure"
-    tags := convertMaptoMapPointer(accountOptions.Tags)
+    tags := convertMapToMapPointer(accountOptions.Tags)
 
     klog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s, accountKind: %s, tags: %+v",
         accountName, resourceGroup, location, accountType, kind, accountOptions.Tags)
@@ -947,7 +947,7 @@ func findLBRuleForPort(lbRules []network.LoadBalancingRule, port int32) (network
             return lbRule, nil
         }
     }
-    return network.LoadBalancingRule{}, fmt.Errorf("Expected LB rule with port %d but none found", port)
+    return network.LoadBalancingRule{}, fmt.Errorf("expected LB rule with port %d but none found", port)
 }
 
 func TestServiceDefaultsToNoSessionPersistence(t *testing.T) {
@@ -1883,7 +1883,7 @@ func TestSecurityRulePriorityFailsIfExhausted(t *testing.T) {
 
     _, err := getNextAvailablePriority(rules)
     if err == nil {
-        t.Error("Expectected an error. There are no priority levels left.")
+        t.Error("Expect an error. There are no priority levels left.")
     }
 }
 
@@ -3092,7 +3092,7 @@ func TestGetResourceGroupFromDiskURI(t *testing.T) {
             expectError: false,
         },
         {
-            // case insentive check
+            // case insensitive check
            diskURL:        "/subscriptions/4be8920b-2978-43d7-axyz-04d8549c1d05/resourcegroups/azure-k8s1102/providers/Microsoft.Compute/disks/andy-mghyb1102-dynamic-pvc-f7f014c9-49f4-11e8-ab5c-000d3af7b38e",
            expectedResult: "azure-k8s1102",
            expectError:    false,
@@ -3285,7 +3285,7 @@ func TestGetActiveZones(t *testing.T) {
 
     az.nodeInformerSynced = nil
     zones, err := az.GetActiveZones()
-    expectedErr := fmt.Errorf("Azure cloud provider doesn't have informers set")
+    expectedErr := fmt.Errorf("azure cloud provider doesn't have informers set")
     assert.Equal(t, expectedErr, err)
     assert.Nil(t, zones)
 
@@ -83,7 +83,7 @@ func getContextWithCancel() (context.Context, context.CancelFunc) {
 }
 
 // ConvertTagsToMap convert the tags from string to map
-// the valid tags fomat is "key1=value1,key2=value2", which could be converted to
+// the valid tags format is "key1=value1,key2=value2", which could be converted to
 // {"key1": "value1", "key2": "value2"}
 func ConvertTagsToMap(tags string) (map[string]string, error) {
     m := make(map[string]string)
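A sketch of the contract documented by the fixed comment; the parsing loop and error message here are assumptions for illustration, not the provider's exact code:

```go
package main

import (
	"fmt"
	"strings"
)

// ConvertTagsToMap sketched from its doc comment: "key1=value1,key2=value2"
// becomes {"key1": "value1", "key2": "value2"}; malformed pairs yield an error.
func ConvertTagsToMap(tags string) (map[string]string, error) {
	m := make(map[string]string)
	if tags == "" {
		return m, nil
	}
	for _, pair := range strings.Split(tags, ",") {
		kv := strings.Split(pair, "=")
		if len(kv) != 2 || kv[0] == "" {
			return nil, fmt.Errorf("tags %q are invalid, the format should be: 'key1=value1,key2=value2'", tags)
		}
		m[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
	}
	return m, nil
}

func main() {
	m, err := ConvertTagsToMap("key1=value1,key2=value2")
	fmt.Println(m, err) // map[key1:value1 key2:value2] <nil>
}
```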
@@ -107,7 +107,7 @@ func ConvertTagsToMap(tags string) (map[string]string, error) {
     return m, nil
 }
 
-func convertMaptoMapPointer(origin map[string]string) map[string]*string {
+func convertMapToMapPointer(origin map[string]string) map[string]*string {
     newly := make(map[string]*string)
     for k, v := range origin {
         value := v
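The hunk cuts off just before the loop body. A sketch of the completed helper shows why the value := v copy matters: it gives each map entry its own address instead of aliasing the single range variable (which, before Go 1.22, was reused across iterations):

```go
package main

import "fmt"

// convertMapToMapPointer completed as a sketch from the visible loop header:
// copy each value so &value points at fresh storage per iteration.
func convertMapToMapPointer(origin map[string]string) map[string]*string {
	newly := make(map[string]*string)
	for k, v := range origin {
		value := v
		newly[k] = &value
	}
	return newly
}

func main() {
	tags := convertMapToMapPointer(map[string]string{"created-by": "azure"})
	fmt.Println(*tags["created-by"]) // azure
}
```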
@@ -755,7 +755,7 @@ func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetN
     }
     if len(*scaleSetNames) == 0 {
         klog.Errorf("ss.GetVMSetNames - No scale sets found for nodes in the cluster, node count(%d)", len(nodes))
-        return nil, fmt.Errorf("No scale sets found for nodes, node count(%d)", len(nodes))
+        return nil, fmt.Errorf("no scale sets found for nodes, node count(%d)", len(nodes))
     }
 
     // sort the list to have deterministic selection
@@ -1279,7 +1279,7 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
         return utilerrors.Flatten(errs)
     }
 
-    // Fail if there're other errors.
+    // Fail if there are other errors.
     if len(errors) > 0 {
         return utilerrors.Flatten(utilerrors.NewAggregate(errors))
     }
@@ -1588,7 +1588,7 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID,
         return utilerrors.Flatten(errs)
     }
 
-    // Fail if there're other errors.
+    // Fail if there are other errors.
     if len(errors) > 0 {
         return utilerrors.Flatten(utilerrors.NewAggregate(errors))
     }
@@ -152,7 +152,7 @@ func (ss *scaleSet) gcVMSSVMCache() error {
     return nil
 }
 
-// newVMSSVirtualMachinesCache instanciates a new VMs cache for VMs belonging to the provided VMSS.
+// newVMSSVirtualMachinesCache instantiates a new VMs cache for VMs belonging to the provided VMSS.
 func (ss *scaleSet) newVMSSVirtualMachinesCache(resourceGroupName, vmssName, cacheKey string) (*azcache.TimedCache, error) {
     vmssVirtualMachinesCacheTTL := time.Duration(ss.Config.VmssVirtualMachinesCacheTTLInSeconds) * time.Second
 
@@ -62,7 +62,7 @@ func checkResourceExistsFromError(err *retry.Error) (bool, *retry.Error) {
 }
 
 /// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache
-/// The service side has throttling control that delays responses if there're multiple requests onto certain vm
+/// The service side has throttling control that delays responses if there are multiple requests onto certain vm
 /// resource request in short period.
 func (az *Cloud) getVirtualMachine(nodeName types.NodeName, crt azcache.AzureCacheReadType) (vm compute.VirtualMachine, err error) {
     vmName := string(nodeName)
@@ -52,7 +52,7 @@ func TestIsAvailabilityZone(t *testing.T) {
         expected bool
     }{
         {"empty string should return false", "", false},
-        {"wrong farmat should return false", "123", false},
+        {"wrong format should return false", "123", false},
         {"wrong location should return false", "chinanorth-1", false},
         {"correct zone should return true", "eastus-1", true},
     }
@@ -78,7 +78,7 @@ func TestGetZoneID(t *testing.T) {
         expected string
     }{
         {"empty string should return empty string", "", ""},
-        {"wrong farmat should return empty string", "123", ""},
+        {"wrong format should return empty string", "123", ""},
         {"wrong location should return empty string", "chinanorth-1", ""},
         {"correct zone should return zone ID", "eastus-1", "1"},
     }
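The two zone tests above imply the format "&lt;location&gt;-&lt;zoneID&gt;". A sketch consistent with those expectations, with the configured location ("eastus") assumed for the example:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// getZoneID sketched from the test table: a zoned value must start with the
// configured location, and the suffix after the dash must be numeric;
// anything else yields the empty string.
func getZoneID(location, zone string) string {
	prefix := location + "-"
	z := strings.ToLower(zone)
	if !strings.HasPrefix(z, prefix) {
		return ""
	}
	id := strings.TrimPrefix(z, prefix)
	if _, err := strconv.Atoi(id); err != nil {
		return ""
	}
	return id
}

func main() {
	for _, z := range []string{"", "123", "chinanorth-1", "eastus-1"} {
		fmt.Printf("%q -> %q\n", z, getZoneID("eastus", z)) // only "eastus-1" -> "1"
	}
}
```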
@@ -167,7 +167,7 @@ func (c *Client) Send(ctx context.Context, request *http.Request) (*http.Respons
 
     // only use the result if the regional request actually goes through and returns 2xx status code, for two reasons:
     // 1. the retry on regional ARM host approach is a hack.
-    // 2. the concatted regional uri could be wrong as the rule is not officially declared by ARM.
+    // 2. the concatenated regional uri could be wrong as the rule is not officially declared by ARM.
     if regionalResponse == nil || regionalResponse.StatusCode > 299 {
         regionalErrStr := ""
         if regionalError != nil {
@@ -68,7 +68,7 @@ type Interface interface {
     PutResource(ctx context.Context, resourceID string, parameters interface{}) (*http.Response, *retry.Error)
 
     // PutResources puts a list of resources from resources map[resourceID]parameters.
-    // Those resources sync requests are sequential while async requests are concurent. It 's especially
+    // Those resources sync requests are sequential while async requests are concurrent. It 's especially
     // useful when the ARM API doesn't support concurrent requests.
     PutResources(ctx context.Context, resources map[string]interface{}) map[string]*PutResourcesResponse
 
@@ -87,7 +87,7 @@ func TestCreateOrUpdateWithNeverRateLimiter(t *testing.T) {
     ctrl := gomock.NewController(t)
     defer ctrl.Finish()
 
-    rcCreateOrUpdatetErr := &retry.Error{
+    rcCreateOrUpdateErr := &retry.Error{
         RawError:  fmt.Errorf("azure cloud provider rate limited(%s) for operation %q", "write", "RouteCreateOrUpdate"),
         Retriable: true,
     }
@@ -97,7 +97,7 @@ func TestCreateOrUpdateWithNeverRateLimiter(t *testing.T) {
 
     routeClient := getTestRouteClientWithNeverRateLimiter(armClient)
     rerr := routeClient.CreateOrUpdate(context.TODO(), "rg", "rt", "r1", r, "")
-    assert.Equal(t, rcCreateOrUpdatetErr, rerr)
+    assert.Equal(t, rcCreateOrUpdateErr, rerr)
 }
 
 func TestCreateOrUpdateRetryAfterReader(t *testing.T) {
@@ -214,7 +214,7 @@ func TestCreateOrUpdateWithNeverRateLimiter(t *testing.T) {
     ctrl := gomock.NewController(t)
     defer ctrl.Finish()
 
-    rcCreateOrUpdatetErr := &retry.Error{
+    rcCreateOrUpdateErr := &retry.Error{
         RawError:  fmt.Errorf("azure cloud provider rate limited(%s) for operation %q", "write", "RouteTableCreateOrUpdate"),
         Retriable: true,
     }
@@ -224,7 +224,7 @@ func TestCreateOrUpdateWithNeverRateLimiter(t *testing.T) {
 
     routetableClient := getTestRouteTableClientWithNeverRateLimiter(armClient)
     rerr := routetableClient.CreateOrUpdate(context.TODO(), "rg", "rt1", rt1, "")
-    assert.Equal(t, rcCreateOrUpdatetErr, rerr)
+    assert.Equal(t, rcCreateOrUpdateErr, rerr)
 }
 
 func TestCreateOrUpdateRetryAfterReader(t *testing.T) {
@@ -179,7 +179,7 @@ func getRawError(resp *http.Response, err error) error {
         return fmt.Errorf("empty HTTP response")
     }
 
-    // return the http status if unabled to get response body.
+    // return the http status if it is unable to get response body.
     defer resp.Body.Close()
     respBody, _ := ioutil.ReadAll(resp.Body)
     resp.Body = ioutil.NopCloser(bytes.NewReader(respBody))
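The surrounding code demonstrates a useful idiom: drain resp.Body to build the error message, then restore it with a NopCloser so later readers still see the bytes. A self-contained sketch; the function name and error formatting are illustrative:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

// rawErrorFromResponse reads the body once, then replaces it so callers
// downstream can read it again.
func rawErrorFromResponse(resp *http.Response) error {
	defer resp.Body.Close()
	respBody, _ := ioutil.ReadAll(resp.Body)
	resp.Body = ioutil.NopCloser(bytes.NewReader(respBody))
	if len(respBody) == 0 {
		// return the http status if it is unable to get response body.
		return fmt.Errorf("HTTP status code (%d)", resp.StatusCode)
	}
	return fmt.Errorf("%s", string(respBody))
}

func main() {
	resp := &http.Response{
		StatusCode: http.StatusInternalServerError,
		Body:       ioutil.NopCloser(strings.NewReader("boom")),
	}
	fmt.Println(rawErrorFromResponse(resp)) // boom
}
```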
@@ -31,25 +31,25 @@ import (
 
 func TestNewError(t *testing.T) {
     rawErr := fmt.Errorf("HTTP status code (404)")
-    newerr := NewError(true, rawErr)
-    assert.Equal(t, true, newerr.Retriable)
-    assert.Equal(t, rawErr, newerr.RawError)
+    newErr := NewError(true, rawErr)
+    assert.Equal(t, true, newErr.Retriable)
+    assert.Equal(t, rawErr, newErr.RawError)
 }
 
 func TestGetRetriableError(t *testing.T) {
     rawErr := fmt.Errorf("HTTP status code (404)")
-    newerr := GetRetriableError(rawErr)
-    assert.Equal(t, true, newerr.Retriable)
-    assert.Equal(t, rawErr, newerr.RawError)
+    newErr := GetRetriableError(rawErr)
+    assert.Equal(t, true, newErr.Retriable)
+    assert.Equal(t, rawErr, newErr.RawError)
 }
 
 func TestGetRateLimitError(t *testing.T) {
     opType := "write"
     opName := "opNameTest"
     rawErr := fmt.Errorf("azure cloud provider rate limited(%s) for operation %q", opType, opName)
-    newerr := GetRateLimitError(true, opName)
-    assert.Equal(t, true, newerr.Retriable)
-    assert.Equal(t, rawErr, newerr.RawError)
+    newErr := GetRateLimitError(true, opName)
+    assert.Equal(t, true, newErr.Retriable)
+    assert.Equal(t, rawErr, newErr.RawError)
 }
 
 func TestGetThrottlingError(t *testing.T) {
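The renamed newErr variables all assert against the same three fields. A sketch of the retry.Error helpers as these tests exercise them; only the asserted behavior (field names and message formats) is taken from this diff, the rest is assumption:

```go
package main

import (
	"fmt"
	"time"
)

// Error mirrors the fields the tests assert on: Retriable, RetryAfter, RawError.
type Error struct {
	Retriable  bool
	RetryAfter time.Time
	RawError   error
}

func NewError(retriable bool, err error) *Error {
	return &Error{Retriable: retriable, RawError: err}
}

func GetRetriableError(err error) *Error { return NewError(true, err) }

// GetRateLimitError formats the message TestGetRateLimitError expects; the
// read/write switch on the bool argument is inferred from the "write" literal.
func GetRateLimitError(isWrite bool, opName string) *Error {
	opType := "read"
	if isWrite {
		opType = "write"
	}
	return GetRetriableError(fmt.Errorf("azure cloud provider rate limited(%s) for operation %q", opType, opName))
}

func main() {
	e := GetRateLimitError(true, "opNameTest")
	fmt.Println(e.Retriable, e.RawError)
}
```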
@@ -57,10 +57,10 @@ func TestGetThrottlingError(t *testing.T) {
     reason := "reasontest"
     rawErr := fmt.Errorf("azure cloud provider throttled for operation %s with reason %q", operation, reason)
     onehourlater := time.Now().Add(time.Hour * 1)
-    newerr := GetThrottlingError(operation, reason, onehourlater)
-    assert.Equal(t, true, newerr.Retriable)
-    assert.Equal(t, rawErr, newerr.RawError)
-    assert.Equal(t, onehourlater, newerr.RetryAfter)
+    newErr := GetThrottlingError(operation, reason, onehourlater)
+    assert.Equal(t, true, newErr.Retriable)
+    assert.Equal(t, rawErr, newErr.RawError)
+    assert.Equal(t, onehourlater, newErr.RetryAfter)
 }
 
 func TestGetError(t *testing.T) {
@@ -344,7 +344,7 @@ func TestIsThrottled(t *testing.T) {
 }
 
 func TestIsErrorRetriable(t *testing.T) {
-    // flase case
+    // false case
     result := IsErrorRetriable(nil)
     assert.Equal(t, false, result)
 
@@ -144,7 +144,7 @@ func jitter(duration time.Duration, maxFactor float64) time.Duration {
     return wait
 }
 
-// DoExponentialBackoffRetry reprents an autorest.SendDecorator with backoff retry.
+// DoExponentialBackoffRetry represents an autorest.SendDecorator with backoff retry.
 func DoExponentialBackoffRetry(backoff *Backoff) autorest.SendDecorator {
     return func(s autorest.Sender) autorest.Sender {
         return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
@@ -80,9 +80,9 @@ func TestJitterWithNegativeMaxFactor(t *testing.T) {
     // If maxFactor is 0.0 or less than 0.0, a suggested default value will be chosen.
     // rand.Float64() returns, as a float64, a pseudo-random number in [0.0,1.0).
     duration := time.Duration(time.Second)
-    maxFactor := float64(-3.0)
+    maxFactor := -3.0
     res := jitter(duration, maxFactor)
-    defaultMaxFactor := float64(1.0)
+    defaultMaxFactor := 1.0
     expected := jitter(duration, defaultMaxFactor)
     assert.Equal(t, expected-res >= time.Duration(0.0*float64(duration)), true)
     assert.Equal(t, expected-res < time.Duration(1.0*float64(duration)), true)
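The test comments above pin down jitter's contract: the result is the duration plus a random fraction of maxFactor*duration, and a non-positive maxFactor falls back to a default of 1.0 (mirroring k8s.io/apimachinery's wait.Jitter). A runnable sketch:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitter returns a duration in [duration, duration*(1+maxFactor)); a
// non-positive maxFactor is replaced by the suggested default of 1.0.
func jitter(duration time.Duration, maxFactor float64) time.Duration {
	if maxFactor <= 0.0 {
		maxFactor = 1.0
	}
	wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
	return wait
}

func main() {
	d := jitter(time.Second, -3.0)
	fmt.Println(d >= time.Second && d < 2*time.Second) // true: default factor applied
}
```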
@@ -156,7 +156,7 @@ func TestDoBackoffRetry(t *testing.T) {
             Path: "/api",
         },
     }
-    r := mocks.NewResponseWithStatus("500 InternelServerError", http.StatusInternalServerError)
+    r := mocks.NewResponseWithStatus("500 InternalServerError", http.StatusInternalServerError)
     client := mocks.NewSender()
     client.AppendAndRepeatResponse(r, 3)
 