Fix a lot of typos in the Azure cloud provider code

Qi Ni 2020-10-26 22:47:38 +08:00 committed by qini
parent 16e18a590f
commit ecdc1ba57c
29 changed files with 95 additions and 95 deletions

View File

@@ -213,7 +213,7 @@ func GetNetworkResourceServicePrincipalToken(config *AzureAuthConfig, env *azure
}

// ParseAzureEnvironment returns the azure environment.
-// If 'resourceManagerEndpoint' is set, the environment is computed by quering the cloud's resource manager endpoint.
+// If 'resourceManagerEndpoint' is set, the environment is computed by querying the cloud's resource manager endpoint.
// Otherwise, a pre-defined Environment is looked up by name.
func ParseAzureEnvironment(cloudName, resourceManagerEndpoint, identitySystem string) (*azure.Environment, error) {
    var env azure.Environment
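For context, the corrected comment describes the lookup order: an explicit resourceManagerEndpoint is queried first, otherwise the named cloud is resolved against the SDK's pre-defined environments. A minimal, self-contained sketch of that behavior (illustrative only, not the provider's actual implementation; it uses the go-autorest helpers EnvironmentFromURL and EnvironmentFromName, drops the identitySystem parameter, and assumes an empty cloud name defaults to the public cloud):

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

// parseAzureEnvironment mirrors the documented order: an explicit resource
// manager endpoint wins; otherwise the named pre-defined environment is used.
func parseAzureEnvironment(cloudName, resourceManagerEndpoint string) (*azure.Environment, error) {
	if resourceManagerEndpoint != "" {
		env, err := azure.EnvironmentFromURL(resourceManagerEndpoint)
		return &env, err
	}
	if cloudName == "" {
		env := azure.PublicCloud // assumed default when no cloud name is given
		return &env, nil
	}
	env, err := azure.EnvironmentFromName(cloudName)
	return &env, err
}

func main() {
	env, err := parseAzureEnvironment("AZURECHINACLOUD", "")
	if err != nil {
		panic(err)
	}
	fmt.Println(env.ResourceManagerEndpoint)
}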

View File

@@ -192,7 +192,7 @@ func TestGetNetworkResourceServicePrincipalTokenNegative(t *testing.T) {
    }
}

-func TestParseAzureEngironment(t *testing.T) {
+func TestParseAzureEnvironment(t *testing.T) {
    cases := []struct {
        cloudName               string
        resourceManagerEndpoint string

View File

@@ -96,8 +96,8 @@ const (
const (
    // PreConfiguredBackendPoolLoadBalancerTypesNone means that the load balancers are not pre-configured
    PreConfiguredBackendPoolLoadBalancerTypesNone = ""
-   // PreConfiguredBackendPoolLoadBalancerTypesInteral means that the `internal` load balancers are pre-configured
-   PreConfiguredBackendPoolLoadBalancerTypesInteral = "internal"
+   // PreConfiguredBackendPoolLoadBalancerTypesInternal means that the `internal` load balancers are pre-configured
+   PreConfiguredBackendPoolLoadBalancerTypesInternal = "internal"
    // PreConfiguredBackendPoolLoadBalancerTypesExternal means that the `external` load balancers are pre-configured
    PreConfiguredBackendPoolLoadBalancerTypesExternal = "external"
    // PreConfiguredBackendPoolLoadBalancerTypesAll means that all load balancers are pre-configured
@@ -383,7 +383,7 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret bool) erro
    if err == auth.ErrorNoAuth {
        // Only controller-manager would lazy-initialize from secret, and credentials are required for such case.
        if fromSecret {
-           err := fmt.Errorf("No credentials provided for Azure cloud provider")
+           err := fmt.Errorf("no credentials provided for Azure cloud provider")
            klog.Fatalf("%v", err)
            return err
        }
@@ -810,7 +810,7 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
// GetActiveZones returns all the zones in which k8s nodes are currently running.
func (az *Cloud) GetActiveZones() (sets.String, error) {
    if az.nodeInformerSynced == nil {
-       return nil, fmt.Errorf("Azure cloud provider doesn't have informers set")
+       return nil, fmt.Errorf("azure cloud provider doesn't have informers set")
    }

    az.nodeCachesLock.RLock()

View File

@@ -68,9 +68,9 @@ func (az *Cloud) RequestBackoff() (resourceRequestBackoff wait.Backoff) {
}

// Event creates a event for the specified object.
-func (az *Cloud) Event(obj runtime.Object, eventtype, reason, message string) {
+func (az *Cloud) Event(obj runtime.Object, eventType, reason, message string) {
    if obj != nil && reason != "" {
-       az.eventRecorder.Event(obj, eventtype, reason, message)
+       az.eventRecorder.Event(obj, eventType, reason, message)
    }
}

@@ -359,7 +359,7 @@ func (az *Cloud) CreateOrUpdateRouteTable(routeTable network.RouteTable) error {
    }
    // Invalidate the cache because another new operation has canceled the current request.
    if strings.Contains(strings.ToLower(rerr.Error().Error()), operationCanceledErrorMessage) {
-       klog.V(3).Infof("Route table cache for %s is cleanup because CreateOrUpdateRouteTable is canceld by another operation", *routeTable.Name)
+       klog.V(3).Infof("Route table cache for %s is cleanup because CreateOrUpdateRouteTable is canceled by another operation", *routeTable.Name)
        az.rtCache.Delete(*routeTable.Name)
    }
    klog.Errorf("RouteTablesClient.CreateOrUpdate(%s) failed: %v", az.RouteTableName, rerr.Error())
@@ -384,7 +384,7 @@ func (az *Cloud) CreateOrUpdateRoute(route network.Route) error {
    }
    // Invalidate the cache because another new operation has canceled the current request.
    if strings.Contains(strings.ToLower(rerr.Error().Error()), operationCanceledErrorMessage) {
-       klog.V(3).Infof("Route cache for %s is cleanup because CreateOrUpdateRouteTable is canceld by another operation", *route.Name)
+       klog.V(3).Infof("Route cache for %s is cleanup because CreateOrUpdateRouteTable is canceled by another operation", *route.Name)
        az.rtCache.Delete(az.RouteTableName)
    }
    return rerr.Error()

View File

@@ -254,7 +254,7 @@ func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountT

//DeleteBlobDisk : delete a blob disk from a node
func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
-   storageAccountName, vhdName, err := diskNameandSANameFromURI(diskURI)
+   storageAccountName, vhdName, err := diskNameAndSANameFromURI(diskURI)
    if err != nil {
        return err
    }
@@ -468,13 +468,13 @@ func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccount
        }

        klog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name)
-       sastate := &storageAccountState{
+       saState := &storageAccountState{
            name:      *v.Name,
            saType:    (*v.Sku).Name,
            diskCount: -1,
        }

-       accounts[*v.Name] = sastate
+       accounts[*v.Name] = saState
    }

    return accounts, nil
@@ -578,7 +578,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
    disksAfter := totalDiskCounts + 1 // with the new one!

    avgUtilization := float64(disksAfter) / float64(countAccounts*maxDisksPerStorageAccounts)
-   aboveAvg := (avgUtilization > storageAccountUtilizationBeforeGrowing)
+   aboveAvg := avgUtilization > storageAccountUtilizationBeforeGrowing

    // avg are not create and we should create more accounts if we can
    if aboveAvg && countAccounts < maxStorageAccounts {
@@ -631,7 +631,7 @@ func createVHDHeader(size uint64) ([]byte, error) {
    return b.Bytes(), nil
}

-func diskNameandSANameFromURI(diskURI string) (string, string, error) {
+func diskNameAndSANameFromURI(diskURI string) (string, string, error) {
    uri, err := url.Parse(diskURI)
    if err != nil {
        return "", "", err

View File

@@ -294,7 +294,7 @@ func TestCommonDetachDisk(t *testing.T) {
            expectedErr: false,
        },
        {
-           desc:     "no error shall be returned if the disk exsists",
+           desc:     "no error shall be returned if the disk exists",
            vmList:   map[string]string{"vm1": "PowerState/Running"},
            nodeName: "vm1",
            diskName: "disk1",

View File

@@ -153,7 +153,7 @@ func (ims *InstanceMetadataService) GetMetadata(crt azcache.AzureCacheReadType)
        return nil, err
    }

-   // Cache shouldn't be nil, but added a check incase something wrong.
+   // Cache shouldn't be nil, but added a check in case something is wrong.
    if cache == nil {
        return nil, fmt.Errorf("failure of getting instance metadata")
    }

View File

@@ -258,7 +258,7 @@ func (az *Cloud) isCurrentInstance(name types.NodeName, metadataVMName string) (
    }

    metadataVMName = strings.ToLower(metadataVMName)
-   return (metadataVMName == nodeName), nil
+   return metadataVMName == nodeName, nil
}

// InstanceID returns the cloud provider ID of the specified instance.

View File

@@ -1609,7 +1609,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
    if ports == nil {
        if useSharedSecurityRule(service) {
            klog.V(2).Infof("Attempting to reconcile security group for service %s, but service uses shared rule and we don't know which port it's for", service.Name)
-           return nil, fmt.Errorf("No port info for reconciling shared rule for service %s", service.Name)
+           return nil, fmt.Errorf("no port info for reconciling shared rule for service %s", service.Name)
        }
        ports = []v1.ServicePort{}
    }
@@ -1621,7 +1621,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
    destinationIPAddress := ""
    if wantLb && lbIP == nil {
-       return nil, fmt.Errorf("No load balancer IP for setting up security rules for service %s", service.Name)
+       return nil, fmt.Errorf("no load balancer IP for setting up security rules for service %s", service.Name)
    }

    if lbIP != nil {
        destinationIPAddress = *lbIP
@@ -1724,17 +1724,17 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
            sharedIndex, sharedRule, sharedRuleFound := findSecurityRuleByName(updatedRules, sharedRuleName)
            if !sharedRuleFound {
                klog.V(4).Infof("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name)
-               return nil, fmt.Errorf("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name)
+               return nil, fmt.Errorf("expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name)
            }
            if sharedRule.DestinationAddressPrefixes == nil {
                klog.V(4).Infof("Expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name)
-               return nil, fmt.Errorf("Expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name)
+               return nil, fmt.Errorf("expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name)
            }
            existingPrefixes := *sharedRule.DestinationAddressPrefixes
            addressIndex, found := findIndex(existingPrefixes, destinationIPAddress)
            if !found {
                klog.V(4).Infof("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name)
-               return nil, fmt.Errorf("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name)
+               return nil, fmt.Errorf("expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name)
            }
            if len(existingPrefixes) == 1 {
                updatedRules = append(updatedRules[:sharedIndex], updatedRules[sharedIndex+1:]...)
@@ -2282,7 +2282,7 @@ func (az *Cloud) isBackendPoolPreConfigured(service *v1.Service) bool {
    if az.PreConfiguredBackendPoolLoadBalancerTypes == PreConfiguredBackendPoolLoadBalancerTypesAll {
        preConfigured = true
    }
-   if (az.PreConfiguredBackendPoolLoadBalancerTypes == PreConfiguredBackendPoolLoadBalancerTypesInteral) && isInternal {
+   if (az.PreConfiguredBackendPoolLoadBalancerTypes == PreConfiguredBackendPoolLoadBalancerTypesInternal) && isInternal {
        preConfigured = true
    }
    if (az.PreConfiguredBackendPoolLoadBalancerTypes == PreConfiguredBackendPoolLoadBalancerTypesExternal) && !isInternal {
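Read together with the constants renamed earlier in this commit, the three checks above reduce to one decision on the configured type and on whether the service is internal. A condensed sketch (illustrative only; it reuses the package constants named above rather than the actual Cloud method):

// preConfigured reports whether the backend pool should be treated as
// pre-configured, given the configured load balancer types and whether
// the service is internal; it mirrors the branches shown above.
func preConfigured(configuredTypes string, isInternal bool) bool {
	switch configuredTypes {
	case PreConfiguredBackendPoolLoadBalancerTypesAll:
		return true
	case PreConfiguredBackendPoolLoadBalancerTypesInternal:
		return isInternal
	case PreConfiguredBackendPoolLoadBalancerTypesExternal:
		return !isInternal
	default:
		return false
	}
}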

View File

@@ -56,7 +56,7 @@ func TestFindProbe(t *testing.T) {
            expected: false,
        },
        {
-           msg: "probe names match while ports unmatch should return false",
+           msg: "probe names match while ports don't should return false",
            existingProbe: []network.Probe{
                {
                    Name: to.StringPtr("httpProbe"),
@@ -74,7 +74,7 @@ func TestFindProbe(t *testing.T) {
            expected: false,
        },
        {
-           msg: "probe ports match while names unmatch should return false",
+           msg: "probe ports match while names don't should return false",
            existingProbe: []network.Probe{
                {
                    Name: to.StringPtr("probe1"),
@@ -129,7 +129,7 @@ func TestFindRule(t *testing.T) {
            expected: false,
        },
        {
-           msg: "rule names unmatch should return false",
+           msg: "rule names don't match should return false",
            existingRule: []network.LoadBalancingRule{
                {
                    Name: to.StringPtr("httpProbe1"),
@@ -147,7 +147,7 @@ func TestFindRule(t *testing.T) {
            expected: false,
        },
        {
-           msg: "rule names match while frontend ports unmatch should return false",
+           msg: "rule names match while frontend ports don't should return false",
            existingRule: []network.LoadBalancingRule{
                {
                    Name: to.StringPtr("httpProbe"),
@@ -165,7 +165,7 @@ func TestFindRule(t *testing.T) {
            expected: false,
        },
        {
-           msg: "rule names match while backend ports unmatch should return false",
+           msg: "rule names match while backend ports don't should return false",
            existingRule: []network.LoadBalancingRule{
                {
                    Name: to.StringPtr("httpProbe"),
@@ -183,7 +183,7 @@ func TestFindRule(t *testing.T) {
            expected: false,
        },
        {
-           msg: "rule names match while idletimeout unmatch should return false",
+           msg: "rule names match while idletimeout don't should return false",
            existingRule: []network.LoadBalancingRule{
                {
                    Name: to.StringPtr("httpRule"),
@@ -217,7 +217,7 @@ func TestFindRule(t *testing.T) {
            expected: true,
        },
        {
-           msg: "rule names match while LoadDistribution unmatch should return false",
+           msg: "rule names match while LoadDistribution don't should return false",
            existingRule: []network.LoadBalancingRule{
                {
                    Name: to.StringPtr("probe1"),
@@ -722,7 +722,7 @@ func TestShouldReleaseExistingOwnedPublicIP(t *testing.T) {
    }
}

-func TestgetIPTagMap(t *testing.T) {
+func TestGetIPTagMap(t *testing.T) {
    tests := []struct {
        desc  string
        input string
@@ -1170,7 +1170,7 @@ func TestGetServiceLoadBalancer(t *testing.T) {
            expectedError: false,
        },
        {
-           desc:        "getServiceLoadBalancer shall report error if there're loadbalancer mode annotations on a standard lb",
+           desc:        "getServiceLoadBalancer shall report error if there are loadbalancer mode annotations on a standard lb",
            service:     getTestService("service1", v1.ProtocolTCP, nil, false, 80),
            annotations: map[string]string{ServiceAnnotationLoadBalancerMode: "__auto__"},
            sku:         "standard",

View File

@@ -56,7 +56,7 @@ func InitializeCloudProviderRateLimitConfig(config *CloudProviderRateLimitConfig
    if config.CloudProviderRateLimitBucket == 0 {
        config.CloudProviderRateLimitBucket = rateLimitBucketDefault
    }
-   // Assing write rate limit defaults if no configuration was passed in.
+   // Assign write rate limit defaults if no configuration was passed in.
    if config.CloudProviderRateLimitQPSWrite == 0 {
        config.CloudProviderRateLimitQPSWrite = config.CloudProviderRateLimitQPS
    }

View File

@@ -151,7 +151,7 @@ func TestCreateRoute(t *testing.T) {
        routeTableName        string
        initialRoute          *[]network.Route
        updatedRoute          *[]network.Route
-       hasUnmangedNodes      bool
+       hasUnmanagedNodes     bool
        nodeInformerNotSynced bool
        ipv6DualStackEnabled  bool
        routeTableNotExist    bool
@@ -230,14 +230,14 @@ func TestCreateRoute(t *testing.T) {
        {
            name:               "CreateRoute should add route to cloud.RouteCIDRs if node is unmanaged",
            routeTableName:     "rt8",
-           hasUnmangedNodes:   true,
+           hasUnmanagedNodes:  true,
            unmanagedNodeName:  "node",
            routeCIDRs:         map[string]string{},
            expectedRouteCIDRs: map[string]string{"node": "1.2.3.4/24"},
        },
        {
            name:                 "CreateRoute should report error if node is unmanaged and cloud.ipv6DualStackEnabled is true",
-           hasUnmangedNodes:     true,
+           hasUnmanagedNodes:    true,
            ipv6DualStackEnabled: true,
            unmanagedNodeName:    "node",
            expectedErrMsg:       fmt.Errorf("unmanaged nodes are not supported in dual stack mode"),
@@ -282,7 +282,7 @@ func TestCreateRoute(t *testing.T) {
        cloud.RouteTableName = test.routeTableName
        cloud.ipv6DualStackEnabled = test.ipv6DualStackEnabled

-       if test.hasUnmangedNodes {
+       if test.hasUnmanagedNodes {
            cloud.unmanagedNodes.Insert(test.unmanagedNodeName)
            cloud.routeCIDRs = test.routeCIDRs
        } else {
@@ -544,7 +544,7 @@ func TestListRoutes(t *testing.T) {
        name                  string
        routeTableName        string
        routeTable            network.RouteTable
-       hasUnmangedNodes      bool
+       hasUnmanagedNodes     bool
        nodeInformerNotSynced bool
        unmanagedNodeName     string
        routeCIDRs            map[string]string
@@ -580,9 +580,9 @@ func TestListRoutes(t *testing.T) {
        {
            name:              "ListRoutes should return correct routes if there's unmanaged nodes",
            routeTableName:    "rt2",
-           hasUnmangedNodes:  true,
-           unmanagedNodeName: "umanaged-node",
-           routeCIDRs:        map[string]string{"umanaged-node": "2.2.3.4/24"},
+           hasUnmanagedNodes: true,
+           unmanagedNodeName: "unmanaged-node",
+           routeCIDRs:        map[string]string{"unmanaged-node": "2.2.3.4/24"},
            routeTable: network.RouteTable{
                Name:     to.StringPtr("rt2"),
                Location: &cloud.Location,
@@ -604,14 +604,14 @@ func TestListRoutes(t *testing.T) {
                    DestinationCIDR: "1.2.3.4/24",
                },
                {
-                   Name:            "umanaged-node",
-                   TargetNode:      mapRouteNameToNodeName(false, "umanaged-node"),
+                   Name:            "unmanaged-node",
+                   TargetNode:      mapRouteNameToNodeName(false, "unmanaged-node"),
                    DestinationCIDR: "2.2.3.4/24",
                },
            },
        },
        {
-           name:           "ListRoutes should return nil if routeTabel don't exist",
+           name:           "ListRoutes should return nil if routeTable don't exist",
            routeTableName: "rt3",
            routeTable:     network.RouteTable{},
            getErr: &retry.Error{
@@ -640,7 +640,7 @@ func TestListRoutes(t *testing.T) {
    }

    for _, test := range testCases {
-       if test.hasUnmangedNodes {
+       if test.hasUnmanagedNodes {
            cloud.unmanagedNodes.Insert(test.unmanagedNodeName)
            cloud.routeCIDRs = test.routeCIDRs
        } else {

View File

@@ -59,7 +59,7 @@ const (
    backendPoolIDTemplate       = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/%s"
    loadBalancerProbeIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/probes/%s"

-   // InternalLoadBalancerNameSuffix is load balancer posfix
+   // InternalLoadBalancerNameSuffix is load balancer suffix
    InternalLoadBalancerNameSuffix = "-internal"

    // nodeLabelRole specifies the role of a node
@@ -609,9 +609,9 @@ func (as *availabilitySet) GetPrivateIPsByNodeName(name string) ([]string, error
    return ips, nil
}

-// getAgentPoolAvailabiliySets lists the virtual machines for the resource group and then builds
+// getAgentPoolAvailabilitySets lists the virtual machines for the resource group and then builds
// a list of availability sets that match the nodes available to k8s.
-func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
+func (as *availabilitySet) getAgentPoolAvailabilitySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
    vms, err := as.ListVirtualMachines(as.ResourceGroup)
    if err != nil {
        klog.Errorf("as.getNodeAvailabilitySet - ListVirtualMachines failed, err=%v", err)
@@ -634,7 +634,7 @@ func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentP
        asID, ok := vmNameToAvailabilitySetID[nodeName]
        if !ok {
            klog.Errorf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName)
-           return nil, fmt.Errorf("Node (%s) - has no availability sets", nodeName)
+           return nil, fmt.Errorf("node (%s) - has no availability sets", nodeName)
        }
        if availabilitySetIDs.Has(asID) {
            // already added in the list
@@ -645,7 +645,7 @@ func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentP
            klog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err)
            return nil, err
        }

-       // AvailabilitySet ID is currently upper cased in a indeterministic way
+       // AvailabilitySet ID is currently upper cased in a non-deterministic way
        // We want to keep it lower case, before the ID get fixed
        asName = strings.ToLower(asName)
@@ -657,7 +657,7 @@ func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentP

// GetVMSetNames selects all possible availability sets or scale sets
// (depending vmType configured) for service load balancer, if the service has
-// no loadbalancer mode annotaion returns the primary VMSet. If service annotation
+// no loadbalancer mode annotation returns the primary VMSet. If service annotation
// for loadbalancer exists then return the eligible VMSet.
func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) {
    hasMode, isAuto, serviceAvailabilitySetNames := getServiceLoadBalancerMode(service)
@@ -666,14 +666,14 @@ func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node)
        availabilitySetNames = &[]string{as.Config.PrimaryAvailabilitySetName}
        return availabilitySetNames, nil
    }
-   availabilitySetNames, err = as.getAgentPoolAvailabiliySets(nodes)
+   availabilitySetNames, err = as.getAgentPoolAvailabilitySets(nodes)
    if err != nil {
-       klog.Errorf("as.GetVMSetNames - getAgentPoolAvailabiliySets failed err=(%v)", err)
+       klog.Errorf("as.GetVMSetNames - getAgentPoolAvailabilitySets failed err=(%v)", err)
        return nil, err
    }
    if len(*availabilitySetNames) == 0 {
        klog.Errorf("as.GetVMSetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes))
-       return nil, fmt.Errorf("No availability sets found for nodes, node count(%d)", len(nodes))
+       return nil, fmt.Errorf("no availability sets found for nodes, node count(%d)", len(nodes))
    }
    // sort the list to have deterministic selection
    sort.Strings(*availabilitySetNames)

View File

@@ -1129,7 +1129,7 @@ func TestGetStandardVMSetNames(t *testing.T) {
                },
            },
        },
-       expectedErrMsg: fmt.Errorf("Node (vm2) - has no availability sets"),
+       expectedErrMsg: fmt.Errorf("node (vm2) - has no availability sets"),
    },
    {
        name: "GetVMSetNames should report the error if there's no such availability set",

View File

@@ -179,7 +179,7 @@ func (az *Cloud) EnsureStorageAccount(accountOptions *AccountOptions, genAccount
            accountOptions.Tags = make(map[string]string)
        }
        accountOptions.Tags["created-by"] = "azure"
-       tags := convertMaptoMapPointer(accountOptions.Tags)
+       tags := convertMapToMapPointer(accountOptions.Tags)

        klog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s, accountKind: %s, tags: %+v",
            accountName, resourceGroup, location, accountType, kind, accountOptions.Tags)

View File

@@ -947,7 +947,7 @@ func findLBRuleForPort(lbRules []network.LoadBalancingRule, port int32) (network
            return lbRule, nil
        }
    }
-   return network.LoadBalancingRule{}, fmt.Errorf("Expected LB rule with port %d but none found", port)
+   return network.LoadBalancingRule{}, fmt.Errorf("expected LB rule with port %d but none found", port)
}

func TestServiceDefaultsToNoSessionPersistence(t *testing.T) {
@@ -1883,7 +1883,7 @@ func TestSecurityRulePriorityFailsIfExhausted(t *testing.T) {

    _, err := getNextAvailablePriority(rules)
    if err == nil {
-       t.Error("Expectected an error. There are no priority levels left.")
+       t.Error("Expect an error. There are no priority levels left.")
    }
}
@@ -3092,7 +3092,7 @@ func TestGetResourceGroupFromDiskURI(t *testing.T) {
            expectError:    false,
        },
        {
-           // case insentive check
+           // case insensitive check
            diskURL:        "/subscriptions/4be8920b-2978-43d7-axyz-04d8549c1d05/resourcegroups/azure-k8s1102/providers/Microsoft.Compute/disks/andy-mghyb1102-dynamic-pvc-f7f014c9-49f4-11e8-ab5c-000d3af7b38e",
            expectedResult: "azure-k8s1102",
            expectError:    false,
@@ -3285,7 +3285,7 @@ func TestGetActiveZones(t *testing.T) {
    az.nodeInformerSynced = nil
    zones, err := az.GetActiveZones()
-   expectedErr := fmt.Errorf("Azure cloud provider doesn't have informers set")
+   expectedErr := fmt.Errorf("azure cloud provider doesn't have informers set")
    assert.Equal(t, expectedErr, err)
    assert.Nil(t, zones)

View File

@@ -83,7 +83,7 @@ func getContextWithCancel() (context.Context, context.CancelFunc) {
}

// ConvertTagsToMap convert the tags from string to map
-// the valid tags fomat is "key1=value1,key2=value2", which could be converted to
+// the valid tags format is "key1=value1,key2=value2", which could be converted to
// {"key1": "value1", "key2": "value2"}
func ConvertTagsToMap(tags string) (map[string]string, error) {
    m := make(map[string]string)
@@ -107,7 +107,7 @@ func ConvertTagsToMap(tags string) (map[string]string, error) {
    return m, nil
}

-func convertMaptoMapPointer(origin map[string]string) map[string]*string {
+func convertMapToMapPointer(origin map[string]string) map[string]*string {
    newly := make(map[string]*string)
    for k, v := range origin {
        value := v
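The two helpers touched here form a small pipeline: parse a comma-separated "key=value" string into a map, then convert it to the map[string]*string shape the Azure SDK expects for tags. A self-contained sketch of that flow (hypothetical names; the real ConvertTagsToMap performs stricter validation than this):

package main

import (
	"fmt"
	"strings"
)

// parseTags converts "key1=value1,key2=value2" into a map, in the spirit of
// ConvertTagsToMap above.
func parseTags(tags string) (map[string]string, error) {
	m := make(map[string]string)
	if tags == "" {
		return m, nil
	}
	for _, pair := range strings.Split(tags, ",") {
		kv := strings.SplitN(pair, "=", 2)
		if len(kv) != 2 {
			return nil, fmt.Errorf("invalid tag %q, expected key=value", pair)
		}
		m[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
	}
	return m, nil
}

// toPointerMap mirrors convertMapToMapPointer: the SDK models tags as
// map[string]*string, so each value is copied before its address is taken.
func toPointerMap(origin map[string]string) map[string]*string {
	newly := make(map[string]*string)
	for k, v := range origin {
		value := v // copy; taking &v directly would alias the loop variable
		newly[k] = &value
	}
	return newly
}

func main() {
	m, _ := parseTags("env=prod,team=infra")
	fmt.Println(len(toPointerMap(m))) // 2
}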

View File

@@ -755,7 +755,7 @@ func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetN
    }
    if len(*scaleSetNames) == 0 {
        klog.Errorf("ss.GetVMSetNames - No scale sets found for nodes in the cluster, node count(%d)", len(nodes))
-       return nil, fmt.Errorf("No scale sets found for nodes, node count(%d)", len(nodes))
+       return nil, fmt.Errorf("no scale sets found for nodes, node count(%d)", len(nodes))
    }

    // sort the list to have deterministic selection
@@ -1279,7 +1279,7 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
        return utilerrors.Flatten(errs)
    }

-   // Fail if there're other errors.
+   // Fail if there are other errors.
    if len(errors) > 0 {
        return utilerrors.Flatten(utilerrors.NewAggregate(errors))
    }
@@ -1588,7 +1588,7 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID,
        return utilerrors.Flatten(errs)
    }

-   // Fail if there're other errors.
+   // Fail if there are other errors.
    if len(errors) > 0 {
        return utilerrors.Flatten(utilerrors.NewAggregate(errors))
    }

View File

@@ -152,7 +152,7 @@ func (ss *scaleSet) gcVMSSVMCache() error {
    return nil
}

-// newVMSSVirtualMachinesCache instanciates a new VMs cache for VMs belonging to the provided VMSS.
+// newVMSSVirtualMachinesCache instantiates a new VMs cache for VMs belonging to the provided VMSS.
func (ss *scaleSet) newVMSSVirtualMachinesCache(resourceGroupName, vmssName, cacheKey string) (*azcache.TimedCache, error) {
    vmssVirtualMachinesCacheTTL := time.Duration(ss.Config.VmssVirtualMachinesCacheTTLInSeconds) * time.Second

View File

@@ -62,7 +62,7 @@ func checkResourceExistsFromError(err *retry.Error) (bool, *retry.Error) {
}

/// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache
-/// The service side has throttling control that delays responses if there're multiple requests onto certain vm
+/// The service side has throttling control that delays responses if there are multiple requests onto certain vm
/// resource request in short period.
func (az *Cloud) getVirtualMachine(nodeName types.NodeName, crt azcache.AzureCacheReadType) (vm compute.VirtualMachine, err error) {
    vmName := string(nodeName)

View File

@@ -52,7 +52,7 @@ func TestIsAvailabilityZone(t *testing.T) {
        expected bool
    }{
        {"empty string should return false", "", false},
-       {"wrong farmat should return false", "123", false},
+       {"wrong format should return false", "123", false},
        {"wrong location should return false", "chinanorth-1", false},
        {"correct zone should return true", "eastus-1", true},
    }
@@ -78,7 +78,7 @@ func TestGetZoneID(t *testing.T) {
        expected string
    }{
        {"empty string should return empty string", "", ""},
-       {"wrong farmat should return empty string", "123", ""},
+       {"wrong format should return empty string", "123", ""},
        {"wrong location should return empty string", "chinanorth-1", ""},
        {"correct zone should return zone ID", "eastus-1", "1"},
    }
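The two tables above pin down the expected zone format: an availability zone is "<location>-<id>" for the cloud's own location ("eastus-1" passes, while "123" and "chinanorth-1" do not), and the zone ID is the suffix. A minimal sketch of checks consistent with those expectations (hypothetical helpers, not the provider's actual implementation):

package main

import (
	"fmt"
	"strings"
)

// isAvailabilityZone reports whether zone has the "<location>-<id>" form for
// the given location, e.g. "eastus-1" when the cloud's location is "eastus".
func isAvailabilityZone(zone, location string) bool {
	return strings.HasPrefix(zone, location+"-") && len(zone) > len(location)+1
}

// zoneID returns the suffix of a well-formed zone, or "" otherwise, matching
// the table above ("eastus-1" -> "1", "123" and "chinanorth-1" -> "").
func zoneID(zone, location string) string {
	if !isAvailabilityZone(zone, location) {
		return ""
	}
	return strings.TrimPrefix(zone, location+"-")
}

func main() {
	fmt.Println(isAvailabilityZone("eastus-1", "eastus")) // true
	fmt.Println(zoneID("chinanorth-1", "eastus"))         // ""
}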

View File

@@ -167,7 +167,7 @@ func (c *Client) Send(ctx context.Context, request *http.Request) (*http.Respons

        // only use the result if the regional request actually goes through and returns 2xx status code, for two reasons:
        // 1. the retry on regional ARM host approach is a hack.
-       // 2. the concatted regional uri could be wrong as the rule is not officially declared by ARM.
+       // 2. the concatenated regional uri could be wrong as the rule is not officially declared by ARM.
        if regionalResponse == nil || regionalResponse.StatusCode > 299 {
            regionalErrStr := ""
            if regionalError != nil {

View File

@@ -68,7 +68,7 @@ type Interface interface {
    PutResource(ctx context.Context, resourceID string, parameters interface{}) (*http.Response, *retry.Error)

    // PutResources puts a list of resources from resources map[resourceID]parameters.
-   // Those resources sync requests are sequential while async requests are concurent. It 's especially
+   // Those resources sync requests are sequential while async requests are concurrent. It 's especially
    // useful when the ARM API doesn't support concurrent requests.
    PutResources(ctx context.Context, resources map[string]interface{}) map[string]*PutResourcesResponse

View File

@@ -87,7 +87,7 @@ func TestCreateOrUpdateWithNeverRateLimiter(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

-   rcCreateOrUpdatetErr := &retry.Error{
+   rcCreateOrUpdateErr := &retry.Error{
        RawError:  fmt.Errorf("azure cloud provider rate limited(%s) for operation %q", "write", "RouteCreateOrUpdate"),
        Retriable: true,
    }
@@ -97,7 +97,7 @@ func TestCreateOrUpdateWithNeverRateLimiter(t *testing.T) {

    routeClient := getTestRouteClientWithNeverRateLimiter(armClient)
    rerr := routeClient.CreateOrUpdate(context.TODO(), "rg", "rt", "r1", r, "")
-   assert.Equal(t, rcCreateOrUpdatetErr, rerr)
+   assert.Equal(t, rcCreateOrUpdateErr, rerr)
}

func TestCreateOrUpdateRetryAfterReader(t *testing.T) {

View File

@@ -214,7 +214,7 @@ func TestCreateOrUpdateWithNeverRateLimiter(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

-   rcCreateOrUpdatetErr := &retry.Error{
+   rcCreateOrUpdateErr := &retry.Error{
        RawError:  fmt.Errorf("azure cloud provider rate limited(%s) for operation %q", "write", "RouteTableCreateOrUpdate"),
        Retriable: true,
    }
@@ -224,7 +224,7 @@ func TestCreateOrUpdateWithNeverRateLimiter(t *testing.T) {

    routetableClient := getTestRouteTableClientWithNeverRateLimiter(armClient)
    rerr := routetableClient.CreateOrUpdate(context.TODO(), "rg", "rt1", rt1, "")
-   assert.Equal(t, rcCreateOrUpdatetErr, rerr)
+   assert.Equal(t, rcCreateOrUpdateErr, rerr)
}

func TestCreateOrUpdateRetryAfterReader(t *testing.T) {

View File

@@ -179,7 +179,7 @@ func getRawError(resp *http.Response, err error) error {
        return fmt.Errorf("empty HTTP response")
    }

-   // return the http status if unabled to get response body.
+   // return the http status if it is unable to get response body.
    defer resp.Body.Close()
    respBody, _ := ioutil.ReadAll(resp.Body)
    resp.Body = ioutil.NopCloser(bytes.NewReader(respBody))

View File

@@ -31,25 +31,25 @@ import (

func TestNewError(t *testing.T) {
    rawErr := fmt.Errorf("HTTP status code (404)")
-   newerr := NewError(true, rawErr)
-   assert.Equal(t, true, newerr.Retriable)
-   assert.Equal(t, rawErr, newerr.RawError)
+   newErr := NewError(true, rawErr)
+   assert.Equal(t, true, newErr.Retriable)
+   assert.Equal(t, rawErr, newErr.RawError)
}

func TestGetRetriableError(t *testing.T) {
    rawErr := fmt.Errorf("HTTP status code (404)")
-   newerr := GetRetriableError(rawErr)
-   assert.Equal(t, true, newerr.Retriable)
-   assert.Equal(t, rawErr, newerr.RawError)
+   newErr := GetRetriableError(rawErr)
+   assert.Equal(t, true, newErr.Retriable)
+   assert.Equal(t, rawErr, newErr.RawError)
}

func TestGetRateLimitError(t *testing.T) {
    opType := "write"
    opName := "opNameTest"
    rawErr := fmt.Errorf("azure cloud provider rate limited(%s) for operation %q", opType, opName)
-   newerr := GetRateLimitError(true, opName)
-   assert.Equal(t, true, newerr.Retriable)
-   assert.Equal(t, rawErr, newerr.RawError)
+   newErr := GetRateLimitError(true, opName)
+   assert.Equal(t, true, newErr.Retriable)
+   assert.Equal(t, rawErr, newErr.RawError)
}

func TestGetThrottlingError(t *testing.T) {
@@ -57,10 +57,10 @@ func TestGetThrottlingError(t *testing.T) {
    reason := "reasontest"
    rawErr := fmt.Errorf("azure cloud provider throttled for operation %s with reason %q", operation, reason)
    onehourlater := time.Now().Add(time.Hour * 1)
-   newerr := GetThrottlingError(operation, reason, onehourlater)
-   assert.Equal(t, true, newerr.Retriable)
-   assert.Equal(t, rawErr, newerr.RawError)
-   assert.Equal(t, onehourlater, newerr.RetryAfter)
+   newErr := GetThrottlingError(operation, reason, onehourlater)
+   assert.Equal(t, true, newErr.Retriable)
+   assert.Equal(t, rawErr, newErr.RawError)
+   assert.Equal(t, onehourlater, newErr.RetryAfter)
}

func TestGetError(t *testing.T) {
@@ -344,7 +344,7 @@ func TestIsThrottled(t *testing.T) {
}

func TestIsErrorRetriable(t *testing.T) {
-   // flase case
+   // false case
    result := IsErrorRetriable(nil)
    assert.Equal(t, false, result)
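The assertions in this file document the retry.Error surface used throughout the client code: constructors stamp Retriable, RawError and, for throttling, RetryAfter, while IsErrorRetriable guards nil. A hedged sketch of how a caller might react to such an error (illustrative only; it assumes the retry and time imports already used by the surrounding files and relies solely on fields exercised above):

// handleRetryError sketches a caller's decision tree over a *retry.Error,
// using only the fields the tests above assert on.
func handleRetryError(rerr *retry.Error) error {
	if rerr == nil {
		return nil // nothing to do; IsErrorRetriable(nil) is false as well
	}
	if !rerr.Retriable {
		return rerr.Error() // permanent failure: surface the raw error
	}
	if wait := time.Until(rerr.RetryAfter); wait > 0 {
		time.Sleep(wait) // throttled: honor the suggested retry-after time
	}
	return nil // retriable: let the surrounding backoff loop try again
}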

View File

@@ -144,7 +144,7 @@ func jitter(duration time.Duration, maxFactor float64) time.Duration {
    return wait
}

-// DoExponentialBackoffRetry reprents an autorest.SendDecorator with backoff retry.
+// DoExponentialBackoffRetry represents an autorest.SendDecorator with backoff retry.
func DoExponentialBackoffRetry(backoff *Backoff) autorest.SendDecorator {
    return func(s autorest.Sender) autorest.Sender {
        return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {

View File

@@ -80,9 +80,9 @@ func TestJitterWithNegativeMaxFactor(t *testing.T) {
    // If maxFactor is 0.0 or less than 0.0, a suggested default value will be chosen.
    // rand.Float64() returns, as a float64, a pseudo-random number in [0.0,1.0).
    duration := time.Duration(time.Second)
-   maxFactor := float64(-3.0)
+   maxFactor := -3.0
    res := jitter(duration, maxFactor)
-   defaultMaxFactor := float64(1.0)
+   defaultMaxFactor := 1.0
    expected := jitter(duration, defaultMaxFactor)
    assert.Equal(t, expected-res >= time.Duration(0.0*float64(duration)), true)
    assert.Equal(t, expected-res < time.Duration(1.0*float64(duration)), true)
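The comments preserved in this test describe the jitter contract: the result is the base duration plus a pseudo-random extra in [0, maxFactor*duration), and a maxFactor of 0.0 or less falls back to a suggested default (1.0 here, per defaultMaxFactor). A self-contained sketch consistent with those assertions (not the provider's exact code):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitter returns d plus a pseudo-random extra in [0, maxFactor*d); a
// non-positive maxFactor falls back to 1.0, so jitter(d, -3.0) behaves
// like jitter(d, 1.0), which is what the test above relies on.
func jitter(d time.Duration, maxFactor float64) time.Duration {
	if maxFactor <= 0.0 {
		maxFactor = 1.0
	}
	return d + time.Duration(rand.Float64()*maxFactor*float64(d))
}

func main() {
	d := time.Second
	fmt.Println(jitter(d, 0.5) >= d) // true: jitter only ever adds delay
}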
@@ -156,7 +156,7 @@ func TestDoBackoffRetry(t *testing.T) {
            Path: "/api",
        },
    }

-   r := mocks.NewResponseWithStatus("500 InternelServerError", http.StatusInternalServerError)
+   r := mocks.NewResponseWithStatus("500 InternalServerError", http.StatusInternalServerError)
    client := mocks.NewSender()
    client.AppendAndRepeatResponse(r, 3)