diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index 34f14fda2c0..9958f744565 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -76,7 +76,7 @@ type Config struct { // the cloudprovider will try to add all nodes to a single backend pool which is forbidden. // In other words, if you use multiple agent pools (availability sets), you MUST set this field. PrimaryAvailabilitySetName string `json:"primaryAvailabilitySetName" yaml:"primaryAvailabilitySetName"` - // The type of azure nodes. Candidate valudes are: vmss and standard. + // The type of azure nodes. Candidate values are: vmss and standard. // If not set, it will be default to standard. VMType string `json:"vmType" yaml:"vmType"` // The name of the scale set that should be used as the load balancer backend. diff --git a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go index d9d151d2e71..0d22ff84926 100644 --- a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go +++ b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go @@ -292,7 +292,7 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error { c.accounts[storageAccountName].diskCount = int32(diskCount) } else { glog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName) - return nil // we have failed to aquire a new count. not an error condition + return nil // we have failed to acquire a new count. not an error condition } } atomic.AddInt32(&c.accounts[storageAccountName].diskCount, -1) @@ -602,7 +602,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam // avg are not create and we should create more accounts if we can if aboveAvg && countAccounts < maxStorageAccounts { - glog.V(2).Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing) + glog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing) SAName = getAccountNameForNum(c.getNextAccountNum()) err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true) if err != nil { @@ -613,7 +613,7 @@ // averages are not ok and we are at capacity (max storage accounts allowed) if aboveAvg && countAccounts == maxStorageAccounts { - glog.Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts", + glog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts", avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts) } diff --git a/pkg/cloudprovider/providers/azure/azure_fakes.go b/pkg/cloudprovider/providers/azure/azure_fakes.go index ac30a533abb..5233f605776 100644 --- a/pkg/cloudprovider/providers/azure/azure_fakes.go +++ b/pkg/cloudprovider/providers/azure/azure_fakes.go @@ -66,7 +66,7 @@ func (fLBC *fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBala for idx, config := range *parameters.FrontendIPConfigurations { if config.PrivateIPAllocationMethod == network.Dynamic { // Here we randomly assign an ip as private ip - // It dosen't smart enough to know whether it is in the subnet's range + // It doesn't smart enough to know whether it is in the subnet's range (*parameters.FrontendIPConfigurations)[idx].PrivateIPAddress = getRandomIPPtr() } } diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index 9d4551eefab..e2d6147b398 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -231,10 +231,10 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, return defaultLB, nil, false, nil } -// select load balancer for the service in the cluster -// the selection algorithm selectes the the load balancer with currently has -// the minimum lb rules, there there are multiple LB's with same number of rules -// it selects the first one (sorted based on name) +// selectLoadBalancer selects load balancer for the service in the cluster. +// The selection algorithm selects the the load balancer with currently has +// the minimum lb rules. If there are multiple LBs with same number of rules, +// then selects the first one (sorted based on name).
func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) { isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) diff --git a/pkg/cloudprovider/providers/azure/azure_managedDiskController.go b/pkg/cloudprovider/providers/azure/azure_managedDiskController.go index 71b341d9d3f..207392680f6 100644 --- a/pkg/cloudprovider/providers/azure/azure_managedDiskController.go +++ b/pkg/cloudprovider/providers/azure/azure_managedDiskController.go @@ -73,7 +73,7 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun diskID := "" err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) { - provisonState, id, err := c.getDisk(diskName) + provisionState, id, err := c.getDisk(diskName) diskID = id // We are waiting for provisioningState==Succeeded // We don't want to hand-off managed disks to k8s while they are @@ -81,7 +81,7 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun if err != nil { return false, err } - if strings.ToLower(provisonState) == "succeeded" { + if strings.ToLower(provisionState) == "succeeded" { return true, nil } return false, nil diff --git a/pkg/cloudprovider/providers/azure/azure_metrics.go b/pkg/cloudprovider/providers/azure/azure_metrics.go index 908ce7a5944..cae35c594b6 100644 --- a/pkg/cloudprovider/providers/azure/azure_metrics.go +++ b/pkg/cloudprovider/providers/azure/azure_metrics.go @@ -42,10 +42,10 @@ type metricContext struct { attributes []string } -func newMetricContext(prefix, request, resouceGroup, subscriptionID string) *metricContext { +func newMetricContext(prefix, request, resourceGroup, subscriptionID string) *metricContext { return &metricContext{ start: time.Now(), - attributes: []string{prefix + "_" + request, resouceGroup, subscriptionID}, + attributes: []string{prefix + "_" + request, resourceGroup, subscriptionID}, } } diff --git a/pkg/cloudprovider/providers/azure/azure_standard_test.go b/pkg/cloudprovider/providers/azure/azure_standard_test.go index b8febcc8940..169f0cea0e6 100644 --- a/pkg/cloudprovider/providers/azure/azure_standard_test.go +++ b/pkg/cloudprovider/providers/azure/azure_standard_test.go @@ -34,7 +34,7 @@ func TestIsMasterNode(t *testing.T) { }, }, }) { - t.Errorf("Node labelled 'workerk' should not be master!") + t.Errorf("Node labelled 'worker' should not be master!") } if !isMasterNode(&v1.Node{ ObjectMeta: meta.ObjectMeta{ diff --git a/pkg/cloudprovider/providers/azure/azure_vmsets.go b/pkg/cloudprovider/providers/azure/azure_vmsets.go index dd5cc308dcd..38e347d8268 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmsets.go +++ b/pkg/cloudprovider/providers/azure/azure_vmsets.go @@ -24,7 +24,7 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider" ) -// VMSet defines functions all vmsets (including scale set and availabitlity +// VMSet defines functions all vmsets (including scale set and availability // set) should be implemented. type VMSet interface { // GetInstanceIDByNodeName gets the cloud provider ID by node name. @@ -48,7 +48,7 @@ type VMSet interface { GetPrimaryVMSetName() string // GetVMSetNames selects all possible availability sets or scale sets // (depending vmType configured) for service load balancer, if the service has - // no loadbalancer mode annotaion returns the primary VMSet. If service annotation + // no loadbalancer mode annotation returns the primary VMSet. If service annotation // for loadbalancer exists then return the eligible VMSet.
GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) // EnsureHostsInPool ensures the given Node's primary IP configurations are diff --git a/pkg/cloudprovider/providers/azure/azure_vmss.go b/pkg/cloudprovider/providers/azure/azure_vmss.go index 6b603be07b5..9eafe0b71d2 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss.go @@ -539,7 +539,7 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) { // GetVMSetNames selects all possible availability sets or scale sets // (depending vmType configured) for service load balancer. If the service has -// no loadbalancer mode annotaion returns the primary VMSet. If service annotation +// no loadbalancer mode annotation returns the primary VMSet. If service annotation // for loadbalancer exists then return the eligible VMSet. func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetNames *[]string, err error) { hasMode, isAuto, serviceVMSetNames := getServiceLoadBalancerMode(service)