diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_cache.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_cache.go
index f5216e61ae3..38f21378b49 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_cache.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_cache.go
@@ -30,14 +30,14 @@ import (
 type cacheReadType int
 
 const (
-	// cachedData returns data from cache if cache entry not expired
+	// cacheReadTypeDefault returns data from cache if cache entry not expired
 	// if cache entry expired, then it will refetch the data using getter
 	// save the entry in cache and then return
-	cachedData cacheReadType = iota
-	// allowUnsafeRead returns data from cache even if the cache entry is
+	cacheReadTypeDefault cacheReadType = iota
+	// cacheReadTypeUnsafe returns data from cache even if the cache entry is
 	// active/expired. If entry doesn't exist in cache, then data is fetched
 	// using getter, saved in cache and returned
-	allowUnsafeRead
+	cacheReadTypeUnsafe
 )
 
 // getFunc defines a getter function for timedCache.
@@ -90,16 +90,16 @@ func (t *timedCache) getInternal(key string) (*cacheEntry, error) {
 	if err != nil {
 		return nil, err
 	}
-	// lock here to ensure if entry doesn't exist, we add a new entry
-	// avoiding overwrites
-	t.lock.Lock()
-	defer t.lock.Unlock()
-
 	// if entry exists, return the entry
 	if exists {
 		return entry.(*cacheEntry), nil
 	}
 
+	// lock here to ensure if entry doesn't exist, we add a new entry
+	// avoiding overwrites
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
 	// Still not found, add new entry with nil data.
 	// Note the data will be filled later by getter.
 	newEntry := &cacheEntry{
@@ -122,12 +122,12 @@ func (t *timedCache) Get(key string, crt cacheReadType) (interface{}, error) {
 
 	// entry exists
 	if entry.data != nil {
-		// allow dirty, so return data even if expired
-		if crt == allowUnsafeRead {
+		// allow unsafe read, so return data even if expired
+		if crt == cacheReadTypeUnsafe {
 			return entry.data, nil
 		}
 		// if cached data is not expired, return cached data
-		if time.Since(entry.createdOn) < t.ttl && crt == cachedData {
+		if time.Since(entry.createdOn) < t.ttl {
 			return entry.data, nil
 		}
 	}
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_cache_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_cache_test.go
index af82027bda0..08b8f3d48d7 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_cache_test.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_cache_test.go
@@ -99,7 +99,7 @@ func TestCacheGet(t *testing.T) {
 	for _, c := range cases {
 		dataSource, cache := newFakeCache(t)
 		dataSource.set(c.data)
-		val, err := cache.Get(c.key, cachedData)
+		val, err := cache.Get(c.key, cacheReadTypeDefault)
 		assert.NoError(t, err, c.name)
 		assert.Equal(t, c.expected, val, c.name)
 	}
@@ -113,7 +113,7 @@ func TestCacheGetError(t *testing.T) {
 	cache, err := newTimedcache(fakeCacheTTL, getter)
 	assert.NoError(t, err)
 
-	val, err := cache.Get("key", cachedData)
+	val, err := cache.Get("key", cacheReadTypeDefault)
 	assert.Error(t, err)
 	assert.Equal(t, getError, err)
 	assert.Nil(t, val)
@@ -128,13 +128,13 @@ func TestCacheDelete(t *testing.T) {
 	dataSource, cache := newFakeCache(t)
 	dataSource.set(data)
 
-	v, err := cache.Get(key, cachedData)
+	v, err := cache.Get(key, cacheReadTypeDefault)
 	assert.NoError(t, err)
 	assert.Equal(t, val, v, "cache should get correct data")
 
 	dataSource.set(nil)
 	cache.Delete(key)
-	v, err = cache.Get(key, cachedData)
+	v, err = cache.Get(key, cacheReadTypeDefault)
 	assert.NoError(t, err)
 	assert.Equal(t, 1, dataSource.called)
 	assert.Equal(t, nil, v, "cache should get nil after data is removed")
@@ -149,13 +149,13 @@ func TestCacheExpired(t *testing.T) {
 	dataSource, cache := newFakeCache(t)
 	dataSource.set(data)
 
-	v, err := cache.Get(key, cachedData)
+	v, err := cache.Get(key, cacheReadTypeDefault)
 	assert.NoError(t, err)
 	assert.Equal(t, 1, dataSource.called)
 	assert.Equal(t, val, v, "cache should get correct data")
 
 	time.Sleep(fakeCacheTTL)
-	v, err = cache.Get(key, cachedData)
+	v, err = cache.Get(key, cacheReadTypeDefault)
 	assert.NoError(t, err)
 	assert.Equal(t, 2, dataSource.called)
 	assert.Equal(t, val, v, "cache should get correct data even after expired")
@@ -170,13 +170,13 @@ func TestCacheAllowUnsafeRead(t *testing.T) {
 	dataSource, cache := newFakeCache(t)
 	dataSource.set(data)
 
-	v, err := cache.Get(key, cachedData)
+	v, err := cache.Get(key, cacheReadTypeDefault)
 	assert.NoError(t, err)
 	assert.Equal(t, 1, dataSource.called)
 	assert.Equal(t, val, v, "cache should get correct data")
 
 	time.Sleep(fakeCacheTTL)
-	v, err = cache.Get(key, allowUnsafeRead)
+	v, err = cache.Get(key, cacheReadTypeUnsafe)
 	assert.NoError(t, err)
 	assert.Equal(t, 1, dataSource.called)
 	assert.Equal(t, val, v, "cache should return expired as allow unsafe read is allowed")
@@ -195,10 +195,10 @@ func TestCacheNoConcurrentGet(t *testing.T) {
 	var wg sync.WaitGroup
 	for i := 0; i < 5; i++ {
 		wg.Add(1)
-		go cache.Get(key, cachedData)
+		go cache.Get(key, cacheReadTypeDefault)
 		wg.Done()
 	}
-	v, err := cache.Get(key, cachedData)
+	v, err := cache.Get(key, cacheReadTypeDefault)
 	wg.Wait()
 	assert.NoError(t, err)
 	assert.Equal(t, 1, dataSource.called)
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go
index e16539ae4c0..9697950a349 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go
@@ -124,7 +124,7 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri
 		}
 	}
 
-	vmset, err := c.getNodeVMSet(nodeName, allowUnsafeRead)
+	vmset, err := c.getNodeVMSet(nodeName, cacheReadTypeUnsafe)
 	if err != nil {
 		return -1, err
 	}
@@ -162,7 +162,7 @@ func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.N
 		return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
 	}
 
-	vmset, err := c.getNodeVMSet(nodeName, allowUnsafeRead)
+	vmset, err := c.getNodeVMSet(nodeName, cacheReadTypeUnsafe)
 	if err != nil {
 		return err
 	}
@@ -210,7 +210,7 @@ func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName, crt cacheRe
 func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
 	// getNodeDataDisks need to fetch the cached data/fresh data if cache expired here
 	// to ensure we get LUN based on latest entry.
-	disks, err := c.getNodeDataDisks(nodeName, cachedData)
+	disks, err := c.getNodeDataDisks(nodeName, cacheReadTypeDefault)
 	if err != nil {
 		klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
 		return -1, err
@@ -230,7 +230,7 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N
 
 // GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
 func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
-	disks, err := c.getNodeDataDisks(nodeName, cachedData)
+	disks, err := c.getNodeDataDisks(nodeName, cacheReadTypeDefault)
 	if err != nil {
 		klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
 		return -1, err
@@ -261,7 +261,7 @@ func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.N
 	// for every reconcile call. The cache is invalidated after Attach/Detach
 	// disk. So the new entry will be fetched and cached the first time reconcile
 	// loop runs after the Attach/Disk OP which will reflect the latest model.
-	disks, err := c.getNodeDataDisks(nodeName, allowUnsafeRead)
+	disks, err := c.getNodeDataDisks(nodeName, cacheReadTypeUnsafe)
 	if err != nil {
 		if err == cloudprovider.InstanceNotFound {
 			// if host doesn't exist, no need to detach
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go
index 7cdf980b2fa..9b80280b102 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go
@@ -31,7 +31,7 @@ import (
 // AttachDisk attaches a vhd to vm
 // the vhd must exist, can be identified by diskName, diskURI, and lun.
 func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
-	vm, err := as.getVirtualMachine(nodeName, cachedData)
+	vm, err := as.getVirtualMachine(nodeName, cacheReadTypeDefault)
 	if err != nil {
 		return err
 	}
@@ -102,7 +102,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
 // DetachDisk detaches a disk from host
 // the vhd can be identified by diskName or diskURI
 func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) {
-	vm, err := as.getVirtualMachine(nodeName, cachedData)
+	vm, err := as.getVirtualMachine(nodeName, cacheReadTypeDefault)
 	if err != nil {
 		// if host doesn't exist, no need to detach
 		klog.Warningf("azureDisk - cannot find node %s, skip detaching disk(%s, %s)", nodeName, diskName, diskURI)
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard_test.go
index f8d7c15381c..cd37631c623 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard_test.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard_test.go
@@ -107,7 +107,7 @@ func TestGetDataDisks(t *testing.T) {
 			nodeName:          "vm2",
 			expectedDataDisks: nil,
 			expectedError:     true,
-			crt:               cachedData,
+			crt:               cacheReadTypeDefault,
 		},
 		{
 			desc: "correct list of data disks shall be returned if everything is good",
@@ -119,7 +119,7 @@ func TestGetDataDisks(t *testing.T) {
 				},
 			},
 			expectedError: false,
-			crt:           cachedData,
+			crt:           cacheReadTypeDefault,
 		},
 		{
 			desc: "correct list of data disks shall be returned if everything is good",
@@ -131,7 +131,7 @@ func TestGetDataDisks(t *testing.T) {
 				},
 			},
 			expectedError: false,
-			crt:           allowUnsafeRead,
+			crt:           cacheReadTypeUnsafe,
 		},
 	}
 	for i, test := range testCases {
@@ -143,7 +143,7 @@ func TestGetDataDisks(t *testing.T) {
 		assert.Equal(t, test.expectedDataDisks, dataDisks, "TestCase[%d]: %s", i, test.desc)
 		assert.Equal(t, test.expectedError, err != nil, "TestCase[%d]: %s", i, test.desc)
 
-		if test.crt == allowUnsafeRead {
+		if test.crt == cacheReadTypeUnsafe {
 			time.Sleep(fakeCacheTTL)
 			dataDisks, err := vmSet.GetDataDisks(test.nodeName, test.crt)
 			assert.Equal(t, test.expectedDataDisks, dataDisks, "TestCase[%d]: %s", i, test.desc)
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go
index 4e899ef1c1b..c54e63fcd67 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go
@@ -32,7 +32,7 @@ import (
 // the vhd must exist, can be identified by diskName, diskURI, and lun.
 func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
 	vmName := mapNodeNameToVMName(nodeName)
-	ssName, instanceID, vm, err := ss.getVmssVM(vmName, cachedData)
+	ssName, instanceID, vm, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
 	if err != nil {
 		return err
 	}
@@ -109,7 +109,7 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
 // the vhd can be identified by diskName or diskURI
 func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) {
 	vmName := mapNodeNameToVMName(nodeName)
-	ssName, instanceID, vm, err := ss.getVmssVM(vmName, cachedData)
+	ssName, instanceID, vm, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
 	if err != nil {
 		return nil, err
 	}
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go
index dfbda140229..418be9e3f47 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go
@@ -73,7 +73,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
 	}
 
 	if az.UseInstanceMetadata {
-		metadata, err := az.metadata.GetMetadata(allowUnsafeRead)
+		metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe)
 		if err != nil {
 			return nil, err
 		}
@@ -259,7 +259,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
 	}
 
 	if az.UseInstanceMetadata {
-		metadata, err := az.metadata.GetMetadata(allowUnsafeRead)
+		metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe)
 		if err != nil {
 			return "", err
 		}
@@ -346,7 +346,7 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string,
 	}
 
 	if az.UseInstanceMetadata {
-		metadata, err := az.metadata.GetMetadata(allowUnsafeRead)
+		metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe)
 		if err != nil {
 			return "", err
 		}
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go
index 7486d7c6eca..3c52863e2a2 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go
@@ -962,7 +962,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 
 	if isInternal {
 		// Refresh updated lb which will be used later in other places.
-		newLB, exist, err := az.getAzureLoadBalancer(lbName, cachedData)
+		newLB, exist, err := az.getAzureLoadBalancer(lbName, cacheReadTypeDefault)
 		if err != nil {
 			klog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err)
 			return nil, err
@@ -1125,7 +1125,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
 		ports = []v1.ServicePort{}
 	}
 
-	sg, err := az.getSecurityGroup(cachedData)
+	sg, err := az.getSecurityGroup(cacheReadTypeDefault)
 	if err != nil {
 		return nil, err
 	}
@@ -1466,7 +1466,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lbNa
 	}
 
 	if lbName != "" {
-		loadBalancer, _, err := az.getAzureLoadBalancer(lbName, cachedData)
+		loadBalancer, _, err := az.getAzureLoadBalancer(lbName, cacheReadTypeDefault)
 		if err != nil {
 			return nil, err
 		}
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_routes.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_routes.go
index 9077eb5ad5b..aa0e7189b24 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_routes.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_routes.go
@@ -46,7 +46,7 @@ const (
 // ListRoutes lists all managed routes that belong to the specified clusterName
 func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
 	klog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName)
-	routeTable, existsRouteTable, err := az.getRouteTable(cachedData)
+	routeTable, existsRouteTable, err := az.getRouteTable(cacheReadTypeDefault)
 	routes, err := processRoutes(routeTable, existsRouteTable, err)
 	if err != nil {
 		return nil, err
@@ -102,7 +102,7 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl
 }
 
 func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error {
-	if _, existsRouteTable, err := az.getRouteTable(cachedData); err != nil {
+	if _, existsRouteTable, err := az.getRouteTable(cacheReadTypeDefault); err != nil {
 		klog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
 		return err
 	} else if existsRouteTable {
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard.go
index b4f716160ba..24b3ed0a82d 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard.go
@@ -375,14 +375,14 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)
 	var machine compute.VirtualMachine
 	var err error
 
-	machine, err = as.getVirtualMachine(types.NodeName(name), allowUnsafeRead)
+	machine, err = as.getVirtualMachine(types.NodeName(name), cacheReadTypeUnsafe)
 	if err == cloudprovider.InstanceNotFound {
 		return "", cloudprovider.InstanceNotFound
 	}
 	if err != nil {
 		if as.CloudProviderBackoff {
 			klog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name)
-			machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name), allowUnsafeRead)
+			machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name), cacheReadTypeUnsafe)
 			if err != nil {
 				klog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name)
 				return "", err
@@ -403,7 +403,7 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)
 
 // GetPowerStatusByNodeName returns the power state of the specified node.
 func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
-	vm, err := as.getVirtualMachine(types.NodeName(name), cachedData)
+	vm, err := as.getVirtualMachine(types.NodeName(name), cacheReadTypeDefault)
 	if err != nil {
 		return powerState, err
 	}
@@ -436,7 +436,7 @@ func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.Nod
 
 // GetInstanceTypeByNodeName gets the instance type by node name.
 func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) {
-	machine, err := as.getVirtualMachine(types.NodeName(name), allowUnsafeRead)
+	machine, err := as.getVirtualMachine(types.NodeName(name), cacheReadTypeUnsafe)
 	if err != nil {
 		klog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err)
 		return "", err
@@ -448,7 +448,7 @@ func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error
 
 // GetZoneByNodeName gets availability zone for the specified node. If the node is not running
 // with availability zone, then it returns fault domain.
 func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
-	vm, err := as.getVirtualMachine(types.NodeName(name), allowUnsafeRead)
+	vm, err := as.getVirtualMachine(types.NodeName(name), cacheReadTypeUnsafe)
 	if err != nil {
 		return cloudprovider.Zone{}, err
 	}
@@ -649,7 +649,7 @@ func extractResourceGroupByNicID(nicID string) (string, error) {
 func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName string) (network.Interface, error) {
 	var machine compute.VirtualMachine
 
-	machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName), cachedData)
+	machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName), cacheReadTypeDefault)
 	if err != nil {
 		klog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName)
 		return network.Interface{}, err
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go
index ffff81d8922..5a6ce9e4b79 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go
@@ -132,7 +132,7 @@ func (ss *scaleSet) getVmssVM(nodeName string, crt cacheReadType) (string, strin
 
 // GetPowerStatusByNodeName returns the power state of the specified node.
 func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
-	_, _, vm, err := ss.getVmssVM(name, cachedData)
+	_, _, vm, err := ss.getVmssVM(name, cacheReadTypeDefault)
 	if err != nil {
 		return powerState, err
 	}
@@ -203,7 +203,7 @@ func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceI
 // It must return ("", cloudprovider.InstanceNotFound) if the instance does
 // not exist or is no longer running.
 func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
-	managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, allowUnsafeRead)
+	managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, cacheReadTypeUnsafe)
 	if err != nil {
 		klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
 		return "", err
@@ -213,7 +213,7 @@ func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
 		return ss.availabilitySet.GetInstanceIDByNodeName(name)
 	}
 
-	_, _, vm, err := ss.getVmssVM(name, allowUnsafeRead)
+	_, _, vm, err := ss.getVmssVM(name, cacheReadTypeUnsafe)
 	if err != nil {
 		return "", err
 	}
@@ -247,7 +247,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName,
 		return ss.availabilitySet.GetNodeNameByProviderID(providerID)
 	}
 
-	vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, allowUnsafeRead)
+	vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, cacheReadTypeUnsafe)
 	if err != nil {
 		return "", err
 	}
@@ -262,7 +262,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName,
 
 // GetInstanceTypeByNodeName gets the instance type by node name.
 func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
-	managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, allowUnsafeRead)
+	managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, cacheReadTypeUnsafe)
 	if err != nil {
 		klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
 		return "", err
@@ -272,7 +272,7 @@ func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
 		return ss.availabilitySet.GetInstanceTypeByNodeName(name)
 	}
 
-	_, _, vm, err := ss.getVmssVM(name, allowUnsafeRead)
+	_, _, vm, err := ss.getVmssVM(name, cacheReadTypeUnsafe)
 	if err != nil {
 		return "", err
 	}
@@ -287,7 +287,7 @@ func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
 // GetZoneByNodeName gets availability zone for the specified node. If the node is not running
 // with availability zone, then it returns fault domain.
 func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
-	managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, allowUnsafeRead)
+	managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, cacheReadTypeUnsafe)
 	if err != nil {
 		klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
 		return cloudprovider.Zone{}, err
@@ -297,7 +297,7 @@ func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
 		return ss.availabilitySet.GetZoneByNodeName(name)
 	}
 
-	_, _, vm, err := ss.getVmssVM(name, allowUnsafeRead)
+	_, _, vm, err := ss.getVmssVM(name, cacheReadTypeUnsafe)
 	if err != nil {
 		return cloudprovider.Zone{}, err
 	}
@@ -536,7 +536,7 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
 		}
 
 		nodeName := nodes[nx].Name
-		ssName, _, _, err := ss.getVmssVM(nodeName, cachedData)
+		ssName, _, _, err := ss.getVmssVM(nodeName, cacheReadTypeDefault)
 		if err != nil {
 			return nil, err
 		}
@@ -614,7 +614,7 @@ func extractResourceGroupByVMSSNicID(nicID string) (string, error) {
 
 // GetPrimaryInterface gets machine primary network interface by node name and vmSet.
 func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
-	managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName, cachedData)
+	managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName, cacheReadTypeDefault)
 	if err != nil {
 		klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
 		return network.Interface{}, err
@@ -624,7 +624,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err
 		return ss.availabilitySet.GetPrimaryInterface(nodeName)
 	}
 
-	ssName, instanceID, vm, err := ss.getVmssVM(nodeName, cachedData)
+	ssName, instanceID, vm, err := ss.getVmssVM(nodeName, cacheReadTypeDefault)
 	if err != nil {
 		// VM is availability set, but not cached yet in availabilitySetNodesCache.
 		if err == ErrorNotVmssInstance {
@@ -747,7 +747,7 @@ func (ss *scaleSet) getConfigForScaleSetByIPFamily(config *compute.VirtualMachin
 func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error {
 	klog.V(3).Infof("ensuring node %q of scaleset %q in LB backendpool %q", nodeName, vmSetName, backendPoolID)
 	vmName := mapNodeNameToVMName(nodeName)
-	ssName, instanceID, vm, err := ss.getVmssVM(vmName, cachedData)
+	ssName, instanceID, vm, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
 	if err != nil {
 		return err
 	}
@@ -1027,7 +1027,7 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
 
 		f := func() error {
 			// Check whether the node is VMAS virtual machine.
-			managedByAS, err := ss.isNodeManagedByAvailabilitySet(localNodeName, cachedData)
+			managedByAS, err := ss.isNodeManagedByAvailabilitySet(localNodeName, cacheReadTypeDefault)
 			if err != nil {
 				klog.Errorf("Failed to check isNodeManagedByAvailabilitySet(%s): %v", localNodeName, err)
 				return err
@@ -1068,7 +1068,7 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
 
 // ensureBackendPoolDeletedFromNode ensures the loadBalancer backendAddressPools deleted from the specified node.
 func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeName, backendPoolID string) error {
-	ssName, instanceID, vm, err := ss.getVmssVM(nodeName, cachedData)
+	ssName, instanceID, vm, err := ss.getVmssVM(nodeName, cacheReadTypeDefault)
 	if err != nil {
 		return err
 	}
@@ -1167,7 +1167,7 @@ func (ss *scaleSet) getNodeNameByIPConfigurationID(ipConfigurationID string) (st
 	resourceGroup := matches[1]
 	scaleSetName := matches[2]
 	instanceID := matches[3]
-	vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, allowUnsafeRead)
+	vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, cacheReadTypeUnsafe)
 	if err != nil {
 		return "", err
 	}
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go
index 633a9106245..9abe6f6a1bc 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go
@@ -115,7 +115,7 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) {
 }
 
 func (ss *scaleSet) deleteCacheForNode(nodeName string) error {
-	cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, allowUnsafeRead)
+	cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, cacheReadTypeUnsafe)
 	if err != nil {
 		return err
 	}
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache_test.go
index a5998e53dcf..cfd71e4a555 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache_test.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache_test.go
@@ -85,7 +85,7 @@ func TestVMSSVMCache(t *testing.T) {
 	for i := range virtualMachines {
 		vm := virtualMachines[i]
 		vmName := to.String(vm.OsProfile.ComputerName)
-		ssName, instanceID, realVM, err := ss.getVmssVM(vmName, cachedData)
+		ssName, instanceID, realVM, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
 		assert.NoError(t, err)
 		assert.Equal(t, "vmss", ssName)
 		assert.Equal(t, to.String(vm.InstanceID), instanceID)
@@ -99,14 +99,14 @@ func TestVMSSVMCache(t *testing.T) {
 	assert.NoError(t, err)
 
 	// the VM should be removed from cache after deleteCacheForNode().
-	cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, cachedData)
+	cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, cacheReadTypeDefault)
 	assert.NoError(t, err)
 	cachedVirtualMachines := cached.(*sync.Map)
 	_, ok := cachedVirtualMachines.Load(vmName)
 	assert.Equal(t, false, ok)
 
 	// the VM should be get back after another cache refresh.
-	ssName, instanceID, realVM, err := ss.getVmssVM(vmName, cachedData)
+	ssName, instanceID, realVM, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
 	assert.NoError(t, err)
 	assert.Equal(t, "vmss", ssName)
 	assert.Equal(t, to.String(vm.InstanceID), instanceID)
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_zones.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_zones.go
index a52dba7e450..43ca994750d 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_zones.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_zones.go
@@ -53,7 +53,7 @@ func (az *Cloud) GetZoneID(zoneLabel string) string {
 // If the node is not running with availability zones, then it will fall back to fault domain.
 func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
 	if az.UseInstanceMetadata {
-		metadata, err := az.metadata.GetMetadata(allowUnsafeRead)
+		metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe)
 		if err != nil {
 			return cloudprovider.Zone{}, err
 		}
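
Note on the renamed read types: apart from the expiry check in timedCache.Get, which now refetches on any expired cacheReadTypeDefault read, the caller-visible semantics are as documented in the azure_cache.go comments above. The following is a minimal, self-contained Go sketch of that intended behaviour. It deliberately does not reuse the patch's timedCache implementation (the real one is backed by a client-go store plus per-entry locking); the miniCache type, its fields, and the getter below are invented for illustration only.

package main

import (
	"fmt"
	"sync"
	"time"
)

// cacheReadType mirrors the constants introduced in this patch.
type cacheReadType int

const (
	cacheReadTypeDefault cacheReadType = iota // refetch through the getter once the entry expires
	cacheReadTypeUnsafe                       // return whatever is cached, even if expired
)

type entry struct {
	data      interface{}
	createdOn time.Time
}

// miniCache is a stand-in for the patch's timedCache, reduced to a map guarded by a mutex.
type miniCache struct {
	mu     sync.Mutex
	ttl    time.Duration
	store  map[string]*entry
	getter func(key string) (interface{}, error)
}

func (c *miniCache) Get(key string, crt cacheReadType) (interface{}, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if e, ok := c.store[key]; ok && e.data != nil {
		// Unsafe reads accept stale data; default reads accept it only inside the TTL.
		if crt == cacheReadTypeUnsafe || time.Since(e.createdOn) < c.ttl {
			return e.data, nil
		}
	}

	// Cache miss, or an expired default read: refetch, cache, and return.
	data, err := c.getter(key)
	if err != nil {
		return nil, err
	}
	c.store[key] = &entry{data: data, createdOn: time.Now()}
	return data, nil
}

func main() {
	calls := 0
	c := &miniCache{
		ttl:   50 * time.Millisecond,
		store: map[string]*entry{},
		getter: func(key string) (interface{}, error) {
			calls++
			return fmt.Sprintf("%s-v%d", key, calls), nil
		},
	}

	v, _ := c.Get("vm1", cacheReadTypeDefault) // first read populates the cache
	fmt.Println(v, calls)                      // vm1-v1 1

	time.Sleep(60 * time.Millisecond) // let the entry expire

	v, _ = c.Get("vm1", cacheReadTypeUnsafe) // stale data is acceptable, no refetch
	fmt.Println(v, calls)                    // vm1-v1 1

	v, _ = c.Get("vm1", cacheReadTypeDefault) // a default read refetches after expiry
	fmt.Println(v, calls)                     // vm1-v2 2
}

This is why the node/VM lookups above that only feed periodic reconciliation (for example DisksAreAttached and GetInstanceIDByNodeName) can tolerate cacheReadTypeUnsafe, while paths that mutate state right afterwards (AttachDisk, DetachDisk, EnsureHostInPool) keep cacheReadTypeDefault so they never act on data older than the TTL.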