Fix typo and comments

This commit is contained in:
Pengfei Ni 2018-01-31 10:55:41 +08:00
parent 84408378f9
commit 6e453d7d31
9 changed files with 17 additions and 17 deletions

View File

@ -76,7 +76,7 @@ type Config struct {
// the cloudprovider will try to add all nodes to a single backend pool which is forbidden. // the cloudprovider will try to add all nodes to a single backend pool which is forbidden.
// In other words, if you use multiple agent pools (availability sets), you MUST set this field. // In other words, if you use multiple agent pools (availability sets), you MUST set this field.
PrimaryAvailabilitySetName string `json:"primaryAvailabilitySetName" yaml:"primaryAvailabilitySetName"` PrimaryAvailabilitySetName string `json:"primaryAvailabilitySetName" yaml:"primaryAvailabilitySetName"`
// The type of azure nodes. Candidate valudes are: vmss and standard. // The type of azure nodes. Candidate values are: vmss and standard.
// If not set, it will be default to standard. // If not set, it will be default to standard.
VMType string `json:"vmType" yaml:"vmType"` VMType string `json:"vmType" yaml:"vmType"`
// The name of the scale set that should be used as the load balancer backend. // The name of the scale set that should be used as the load balancer backend.

View File

@ -292,7 +292,7 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
c.accounts[storageAccountName].diskCount = int32(diskCount) c.accounts[storageAccountName].diskCount = int32(diskCount)
} else { } else {
glog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName) glog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName)
return nil // we have failed to aquire a new count. not an error condition return nil // we have failed to acquire a new count. not an error condition
} }
} }
atomic.AddInt32(&c.accounts[storageAccountName].diskCount, -1) atomic.AddInt32(&c.accounts[storageAccountName].diskCount, -1)
@ -602,7 +602,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
// avg are not create and we should create more accounts if we can // avg are not create and we should create more accounts if we can
if aboveAvg && countAccounts < maxStorageAccounts { if aboveAvg && countAccounts < maxStorageAccounts {
glog.V(2).Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing) glog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
SAName = getAccountNameForNum(c.getNextAccountNum()) SAName = getAccountNameForNum(c.getNextAccountNum())
err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true) err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
if err != nil { if err != nil {
@ -613,7 +613,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
// averages are not ok and we are at capacity (max storage accounts allowed) // averages are not ok and we are at capacity (max storage accounts allowed)
if aboveAvg && countAccounts == maxStorageAccounts { if aboveAvg && countAccounts == maxStorageAccounts {
glog.Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts", glog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts) avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts)
} }

View File

@ -66,7 +66,7 @@ func (fLBC *fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBala
for idx, config := range *parameters.FrontendIPConfigurations { for idx, config := range *parameters.FrontendIPConfigurations {
if config.PrivateIPAllocationMethod == network.Dynamic { if config.PrivateIPAllocationMethod == network.Dynamic {
// Here we randomly assign an ip as private ip // Here we randomly assign an ip as private ip
// It dosen't smart enough to know whether it is in the subnet's range // It isn't smart enough to know whether it is in the subnet's range
(*parameters.FrontendIPConfigurations)[idx].PrivateIPAddress = getRandomIPPtr() (*parameters.FrontendIPConfigurations)[idx].PrivateIPAddress = getRandomIPPtr()
} }
} }

View File

@ -231,10 +231,10 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
return defaultLB, nil, false, nil return defaultLB, nil, false, nil
} }
// select load balancer for the service in the cluster // selectLoadBalancer selects load balancer for the service in the cluster.
// the selection algorithm selectes the the load balancer with currently has // The selection algorithm selects the load balancer which currently has
// the minimum lb rules, there there are multiple LB's with same number of rules // the minimum lb rules. If there are multiple LBs with same number of rules,
// it selects the first one (sorted based on name) // then selects the first one (sorted based on name).
func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) { func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) {
isInternal := requiresInternalLoadBalancer(service) isInternal := requiresInternalLoadBalancer(service)
serviceName := getServiceName(service) serviceName := getServiceName(service)

View File

@ -73,7 +73,7 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
diskID := "" diskID := ""
err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) { err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
provisonState, id, err := c.getDisk(diskName) provisionState, id, err := c.getDisk(diskName)
diskID = id diskID = id
// We are waiting for provisioningState==Succeeded // We are waiting for provisioningState==Succeeded
// We don't want to hand-off managed disks to k8s while they are // We don't want to hand-off managed disks to k8s while they are
@ -81,7 +81,7 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
if err != nil { if err != nil {
return false, err return false, err
} }
if strings.ToLower(provisonState) == "succeeded" { if strings.ToLower(provisionState) == "succeeded" {
return true, nil return true, nil
} }
return false, nil return false, nil

View File

@ -42,10 +42,10 @@ type metricContext struct {
attributes []string attributes []string
} }
func newMetricContext(prefix, request, resouceGroup, subscriptionID string) *metricContext { func newMetricContext(prefix, request, resourceGroup, subscriptionID string) *metricContext {
return &metricContext{ return &metricContext{
start: time.Now(), start: time.Now(),
attributes: []string{prefix + "_" + request, resouceGroup, subscriptionID}, attributes: []string{prefix + "_" + request, resourceGroup, subscriptionID},
} }
} }

View File

@ -34,7 +34,7 @@ func TestIsMasterNode(t *testing.T) {
}, },
}, },
}) { }) {
t.Errorf("Node labelled 'workerk' should not be master!") t.Errorf("Node labelled 'worker' should not be master!")
} }
if !isMasterNode(&v1.Node{ if !isMasterNode(&v1.Node{
ObjectMeta: meta.ObjectMeta{ ObjectMeta: meta.ObjectMeta{

View File

@ -24,7 +24,7 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
) )
// VMSet defines functions all vmsets (including scale set and availabitlity // VMSet defines functions all vmsets (including scale set and availability
// set) should be implemented. // set) should be implemented.
type VMSet interface { type VMSet interface {
// GetInstanceIDByNodeName gets the cloud provider ID by node name. // GetInstanceIDByNodeName gets the cloud provider ID by node name.
@ -48,7 +48,7 @@ type VMSet interface {
GetPrimaryVMSetName() string GetPrimaryVMSetName() string
// GetVMSetNames selects all possible availability sets or scale sets // GetVMSetNames selects all possible availability sets or scale sets
// (depending vmType configured) for service load balancer, if the service has // (depending vmType configured) for service load balancer, if the service has
// no loadbalancer mode annotaion returns the primary VMSet. If service annotation // no loadbalancer mode annotation returns the primary VMSet. If service annotation
// for loadbalancer exists then return the eligible VMSet. // for loadbalancer exists then return the eligible VMSet.
GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error)
// EnsureHostsInPool ensures the given Node's primary IP configurations are // EnsureHostsInPool ensures the given Node's primary IP configurations are

View File

@ -539,7 +539,7 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
// GetVMSetNames selects all possible availability sets or scale sets // GetVMSetNames selects all possible availability sets or scale sets
// (depending vmType configured) for service load balancer. If the service has // (depending vmType configured) for service load balancer. If the service has
// no loadbalancer mode annotaion returns the primary VMSet. If service annotation // no loadbalancer mode annotation returns the primary VMSet. If service annotation
// for loadbalancer exists then return the eligible VMSet. // for loadbalancer exists then return the eligible VMSet.
func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetNames *[]string, err error) { func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetNames *[]string, err error) {
hasMode, isAuto, serviceVMSetNames := getServiceLoadBalancerMode(service) hasMode, isAuto, serviceVMSetNames := getServiceLoadBalancerMode(service)