commit 6d496861c4: add cache read type prefix for const
parent: eeeb59e71a
mirror of https://github.com/k3s-io/kubernetes.git
@@ -30,14 +30,14 @@ import (
 type cacheReadType int
 
 const (
-    // cachedData returns data from cache if cache entry not expired
+    // cacheReadTypeDefault returns data from cache if cache entry not expired
     // if cache entry expired, then it will refetch the data using getter
     // save the entry in cache and then return
-    cachedData cacheReadType = iota
-    // allowUnsafeRead returns data from cache even if the cache entry is
+    cacheReadTypeDefault cacheReadType = iota
+    // cacheReadTypeUnsafe returns data from cache even if the cache entry is
     // active/expired. If entry doesn't exist in cache, then data is fetched
     // using getter, saved in cache and returned
-    allowUnsafeRead
+    cacheReadTypeUnsafe
 )
 
 // getFunc defines a getter function for timedCache.
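The renamed constants keep the behaviour described in the comments above: a default read honours the TTL, an unsafe read does not. Below is a minimal, self-contained sketch of that decision, not the provider code; the type and constant names come from this commit, everything else (useCachedEntry, the sample TTL) is made up for illustration.

package main

import (
    "fmt"
    "time"
)

type cacheReadType int

const (
    cacheReadTypeDefault cacheReadType = iota
    cacheReadTypeUnsafe
)

// useCachedEntry mirrors the decision documented above: an unsafe read accepts
// any cached entry, a default read accepts it only while it is younger than the
// TTL (otherwise the caller refetches through the getter).
func useCachedEntry(crt cacheReadType, age, ttl time.Duration) bool {
    if crt == cacheReadTypeUnsafe {
        return true
    }
    return age < ttl
}

func main() {
    ttl := time.Minute
    fmt.Println(useCachedEntry(cacheReadTypeDefault, 2*time.Minute, ttl)) // false: expired, caller refetches
    fmt.Println(useCachedEntry(cacheReadTypeUnsafe, 2*time.Minute, ttl))  // true: stale data is accepted
}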
@@ -90,16 +90,16 @@ func (t *timedCache) getInternal(key string) (*cacheEntry, error) {
     if err != nil {
         return nil, err
     }
-    // lock here to ensure if entry doesn't exist, we add a new entry
-    // avoiding overwrites
-    t.lock.Lock()
-    defer t.lock.Unlock()
 
     // if entry exists, return the entry
     if exists {
         return entry.(*cacheEntry), nil
     }
 
+    // lock here to ensure if entry doesn't exist, we add a new entry
+    // avoiding overwrites
+    t.lock.Lock()
+    defer t.lock.Unlock()
 
     // Still not found, add new entry with nil data.
     // Note the data will be filled later by getter.
     newEntry := &cacheEntry{
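Moving Lock/Unlock below the existence check means a cache hit no longer takes the write lock at all; the mutex only guards the slow path that inserts a fresh empty entry. A rough, self-contained sketch of that fast-path/slow-path shape follows; it uses a plain map plus an RWMutex purely for illustration (the provider reads from a thread-safe store instead), and the re-check under the lock is shown only as the usual defensive step for this pattern, not as something this diff adds.

package main

import (
    "fmt"
    "sync"
)

type lazyCache struct {
    lock  sync.RWMutex
    store map[string]*string
}

// getOrCreate checks for the entry without the write lock first, and only
// takes the write lock when a new entry has to be inserted, so readers of
// existing keys never serialize on the mutex.
func (c *lazyCache) getOrCreate(key string) *string {
    // fast path: shared read lock only
    c.lock.RLock()
    if v, ok := c.store[key]; ok {
        c.lock.RUnlock()
        return v
    }
    c.lock.RUnlock()

    // slow path: exclusive lock, then re-check so two callers racing on the
    // same missing key don't overwrite each other's entry
    c.lock.Lock()
    defer c.lock.Unlock()
    if v, ok := c.store[key]; ok {
        return v
    }
    v := new(string) // empty entry; the data is filled in later by the getter
    c.store[key] = v
    return v
}

func main() {
    c := &lazyCache{store: map[string]*string{}}
    fmt.Println(c.getOrCreate("vm-0") == c.getOrCreate("vm-0")) // true: same entry
}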
@@ -122,12 +122,12 @@ func (t *timedCache) Get(key string, crt cacheReadType) (interface{}, error) {
 
     // entry exists
     if entry.data != nil {
-        // allow dirty, so return data even if expired
-        if crt == allowUnsafeRead {
+        // allow unsafe read, so return data even if expired
+        if crt == cacheReadTypeUnsafe {
             return entry.data, nil
         }
         // if cached data is not expired, return cached data
-        if time.Since(entry.createdOn) < t.ttl && crt == cachedData {
+        if time.Since(entry.createdOn) < t.ttl {
             return entry.data, nil
         }
     }
@@ -99,7 +99,7 @@ func TestCacheGet(t *testing.T) {
     for _, c := range cases {
         dataSource, cache := newFakeCache(t)
         dataSource.set(c.data)
-        val, err := cache.Get(c.key, cachedData)
+        val, err := cache.Get(c.key, cacheReadTypeDefault)
         assert.NoError(t, err, c.name)
         assert.Equal(t, c.expected, val, c.name)
     }
@@ -113,7 +113,7 @@ func TestCacheGetError(t *testing.T) {
     cache, err := newTimedcache(fakeCacheTTL, getter)
     assert.NoError(t, err)
 
-    val, err := cache.Get("key", cachedData)
+    val, err := cache.Get("key", cacheReadTypeDefault)
     assert.Error(t, err)
     assert.Equal(t, getError, err)
     assert.Nil(t, val)
@@ -128,13 +128,13 @@ func TestCacheDelete(t *testing.T) {
     dataSource, cache := newFakeCache(t)
     dataSource.set(data)
 
-    v, err := cache.Get(key, cachedData)
+    v, err := cache.Get(key, cacheReadTypeDefault)
     assert.NoError(t, err)
     assert.Equal(t, val, v, "cache should get correct data")
 
     dataSource.set(nil)
     cache.Delete(key)
-    v, err = cache.Get(key, cachedData)
+    v, err = cache.Get(key, cacheReadTypeDefault)
     assert.NoError(t, err)
     assert.Equal(t, 1, dataSource.called)
     assert.Equal(t, nil, v, "cache should get nil after data is removed")
@@ -149,13 +149,13 @@ func TestCacheExpired(t *testing.T) {
     dataSource, cache := newFakeCache(t)
     dataSource.set(data)
 
-    v, err := cache.Get(key, cachedData)
+    v, err := cache.Get(key, cacheReadTypeDefault)
     assert.NoError(t, err)
     assert.Equal(t, 1, dataSource.called)
     assert.Equal(t, val, v, "cache should get correct data")
 
     time.Sleep(fakeCacheTTL)
-    v, err = cache.Get(key, cachedData)
+    v, err = cache.Get(key, cacheReadTypeDefault)
     assert.NoError(t, err)
     assert.Equal(t, 2, dataSource.called)
     assert.Equal(t, val, v, "cache should get correct data even after expired")
@@ -170,13 +170,13 @@ func TestCacheAllowUnsafeRead(t *testing.T) {
     dataSource, cache := newFakeCache(t)
     dataSource.set(data)
 
-    v, err := cache.Get(key, cachedData)
+    v, err := cache.Get(key, cacheReadTypeDefault)
     assert.NoError(t, err)
     assert.Equal(t, 1, dataSource.called)
     assert.Equal(t, val, v, "cache should get correct data")
 
     time.Sleep(fakeCacheTTL)
-    v, err = cache.Get(key, allowUnsafeRead)
+    v, err = cache.Get(key, cacheReadTypeUnsafe)
     assert.NoError(t, err)
     assert.Equal(t, 1, dataSource.called)
     assert.Equal(t, val, v, "cache should return expired as allow unsafe read is allowed")
@@ -195,10 +195,10 @@ func TestCacheNoConcurrentGet(t *testing.T) {
     var wg sync.WaitGroup
     for i := 0; i < 5; i++ {
         wg.Add(1)
-        go cache.Get(key, cachedData)
+        go cache.Get(key, cacheReadTypeDefault)
         wg.Done()
     }
-    v, err := cache.Get(key, cachedData)
+    v, err := cache.Get(key, cacheReadTypeDefault)
     wg.Wait()
     assert.NoError(t, err)
     assert.Equal(t, 1, dataSource.called)
@@ -124,7 +124,7 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri
         }
     }
 
-    vmset, err := c.getNodeVMSet(nodeName, allowUnsafeRead)
+    vmset, err := c.getNodeVMSet(nodeName, cacheReadTypeUnsafe)
     if err != nil {
         return -1, err
     }
@@ -162,7 +162,7 @@ func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.N
         return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
     }
 
-    vmset, err := c.getNodeVMSet(nodeName, allowUnsafeRead)
+    vmset, err := c.getNodeVMSet(nodeName, cacheReadTypeUnsafe)
     if err != nil {
         return err
     }
@@ -210,7 +210,7 @@ func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName, crt cacheRe
 func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
     // getNodeDataDisks need to fetch the cached data/fresh data if cache expired here
     // to ensure we get LUN based on latest entry.
-    disks, err := c.getNodeDataDisks(nodeName, cachedData)
+    disks, err := c.getNodeDataDisks(nodeName, cacheReadTypeDefault)
     if err != nil {
         klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
         return -1, err
@@ -230,7 +230,7 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N
 
 // GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
 func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
-    disks, err := c.getNodeDataDisks(nodeName, cachedData)
+    disks, err := c.getNodeDataDisks(nodeName, cacheReadTypeDefault)
     if err != nil {
         klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
         return -1, err
@@ -261,7 +261,7 @@ func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.N
     // for every reconcile call. The cache is invalidated after Attach/Detach
    // disk. So the new entry will be fetched and cached the first time reconcile
    // loop runs after the Attach/Disk OP which will reflect the latest model.
-    disks, err := c.getNodeDataDisks(nodeName, allowUnsafeRead)
+    disks, err := c.getNodeDataDisks(nodeName, cacheReadTypeUnsafe)
     if err != nil {
         if err == cloudprovider.InstanceNotFound {
             // if host doesn't exist, no need to detach
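The comments in GetDiskLun and DisksAreAttached capture the call-site policy: a lookup that must act on the latest attachment state passes cacheReadTypeDefault, while the reconcile path can pass cacheReadTypeUnsafe because the node's cache entry is invalidated after every Attach/Detach. A rough, self-contained sketch of that invalidation argument follows; the diskCache type, its methods, and the disk names are invented for illustration, only the policy comes from the diff.

package main

import (
    "fmt"
    "sync"
)

// diskCache stands in for a per-node cache entry of attached disks.
type diskCache struct {
    mu    sync.Mutex
    disks map[string][]string // nodeName -> attached disk names
}

func (c *diskCache) set(node string, disks []string) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.disks[node] = disks
}

// invalidate models the Delete the controller performs after Attach/Detach.
func (c *diskCache) invalidate(node string) {
    c.mu.Lock()
    defer c.mu.Unlock()
    delete(c.disks, node)
}

// unsafeRead returns whatever is cached with no TTL check, mirroring
// cacheReadTypeUnsafe; ok=false means the reconciler must refetch.
func (c *diskCache) unsafeRead(node string) ([]string, bool) {
    c.mu.Lock()
    defer c.mu.Unlock()
    d, ok := c.disks[node]
    return d, ok
}

func main() {
    c := &diskCache{disks: map[string][]string{}}
    c.set("node-1", []string{"disk-a"})

    // an Attach/Detach happened on the API side, so the entry is invalidated
    c.invalidate("node-1")

    // the next unsafe read cannot return the pre-mutation model: it misses
    // and forces a refetch that reflects the latest attachment state
    if _, ok := c.unsafeRead("node-1"); !ok {
        fmt.Println("cache miss: reconcile refetches and sees the latest disks")
    }
}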
@@ -31,7 +31,7 @@ import (
 // AttachDisk attaches a vhd to vm
 // the vhd must exist, can be identified by diskName, diskURI, and lun.
 func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
-    vm, err := as.getVirtualMachine(nodeName, cachedData)
+    vm, err := as.getVirtualMachine(nodeName, cacheReadTypeDefault)
     if err != nil {
         return err
     }
@@ -102,7 +102,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
 // DetachDisk detaches a disk from host
 // the vhd can be identified by diskName or diskURI
 func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) {
-    vm, err := as.getVirtualMachine(nodeName, cachedData)
+    vm, err := as.getVirtualMachine(nodeName, cacheReadTypeDefault)
     if err != nil {
         // if host doesn't exist, no need to detach
         klog.Warningf("azureDisk - cannot find node %s, skip detaching disk(%s, %s)", nodeName, diskName, diskURI)
@@ -107,7 +107,7 @@ func TestGetDataDisks(t *testing.T) {
             nodeName: "vm2",
             expectedDataDisks: nil,
             expectedError: true,
-            crt: cachedData,
+            crt: cacheReadTypeDefault,
         },
         {
             desc: "correct list of data disks shall be returned if everything is good",
@@ -119,7 +119,7 @@ func TestGetDataDisks(t *testing.T) {
                 },
             },
             expectedError: false,
-            crt: cachedData,
+            crt: cacheReadTypeDefault,
         },
         {
             desc: "correct list of data disks shall be returned if everything is good",
@@ -131,7 +131,7 @@ func TestGetDataDisks(t *testing.T) {
                 },
             },
             expectedError: false,
-            crt: allowUnsafeRead,
+            crt: cacheReadTypeUnsafe,
         },
     }
     for i, test := range testCases {
@@ -143,7 +143,7 @@ func TestGetDataDisks(t *testing.T) {
         assert.Equal(t, test.expectedDataDisks, dataDisks, "TestCase[%d]: %s", i, test.desc)
         assert.Equal(t, test.expectedError, err != nil, "TestCase[%d]: %s", i, test.desc)
 
-        if test.crt == allowUnsafeRead {
+        if test.crt == cacheReadTypeUnsafe {
            time.Sleep(fakeCacheTTL)
            dataDisks, err := vmSet.GetDataDisks(test.nodeName, test.crt)
            assert.Equal(t, test.expectedDataDisks, dataDisks, "TestCase[%d]: %s", i, test.desc)
@@ -32,7 +32,7 @@ import (
 // the vhd must exist, can be identified by diskName, diskURI, and lun.
 func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
     vmName := mapNodeNameToVMName(nodeName)
-    ssName, instanceID, vm, err := ss.getVmssVM(vmName, cachedData)
+    ssName, instanceID, vm, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
     if err != nil {
         return err
     }
@@ -109,7 +109,7 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
 // the vhd can be identified by diskName or diskURI
 func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) {
     vmName := mapNodeNameToVMName(nodeName)
-    ssName, instanceID, vm, err := ss.getVmssVM(vmName, cachedData)
+    ssName, instanceID, vm, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
     if err != nil {
         return nil, err
     }
@@ -73,7 +73,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
     }
 
     if az.UseInstanceMetadata {
-        metadata, err := az.metadata.GetMetadata(allowUnsafeRead)
+        metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe)
         if err != nil {
             return nil, err
         }
@@ -259,7 +259,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
     }
 
     if az.UseInstanceMetadata {
-        metadata, err := az.metadata.GetMetadata(allowUnsafeRead)
+        metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe)
         if err != nil {
             return "", err
         }
@@ -346,7 +346,7 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string,
     }
 
     if az.UseInstanceMetadata {
-        metadata, err := az.metadata.GetMetadata(allowUnsafeRead)
+        metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe)
         if err != nil {
             return "", err
         }
@@ -962,7 +962,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 
     if isInternal {
         // Refresh updated lb which will be used later in other places.
-        newLB, exist, err := az.getAzureLoadBalancer(lbName, cachedData)
+        newLB, exist, err := az.getAzureLoadBalancer(lbName, cacheReadTypeDefault)
         if err != nil {
             klog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err)
             return nil, err
@@ -1125,7 +1125,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
         ports = []v1.ServicePort{}
     }
 
-    sg, err := az.getSecurityGroup(cachedData)
+    sg, err := az.getSecurityGroup(cacheReadTypeDefault)
     if err != nil {
         return nil, err
     }
@@ -1466,7 +1466,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lbNa
     }
 
     if lbName != "" {
-        loadBalancer, _, err := az.getAzureLoadBalancer(lbName, cachedData)
+        loadBalancer, _, err := az.getAzureLoadBalancer(lbName, cacheReadTypeDefault)
         if err != nil {
             return nil, err
         }
@@ -46,7 +46,7 @@ const (
 // ListRoutes lists all managed routes that belong to the specified clusterName
 func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
     klog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName)
-    routeTable, existsRouteTable, err := az.getRouteTable(cachedData)
+    routeTable, existsRouteTable, err := az.getRouteTable(cacheReadTypeDefault)
     routes, err := processRoutes(routeTable, existsRouteTable, err)
     if err != nil {
         return nil, err
@@ -102,7 +102,7 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl
 }
 
 func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error {
-    if _, existsRouteTable, err := az.getRouteTable(cachedData); err != nil {
+    if _, existsRouteTable, err := az.getRouteTable(cacheReadTypeDefault); err != nil {
         klog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
         return err
     } else if existsRouteTable {
@@ -375,14 +375,14 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)
     var machine compute.VirtualMachine
     var err error
 
-    machine, err = as.getVirtualMachine(types.NodeName(name), allowUnsafeRead)
+    machine, err = as.getVirtualMachine(types.NodeName(name), cacheReadTypeUnsafe)
     if err == cloudprovider.InstanceNotFound {
         return "", cloudprovider.InstanceNotFound
     }
     if err != nil {
         if as.CloudProviderBackoff {
             klog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name)
-            machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name), allowUnsafeRead)
+            machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name), cacheReadTypeUnsafe)
             if err != nil {
                 klog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name)
                 return "", err
@@ -403,7 +403,7 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)
 
 // GetPowerStatusByNodeName returns the power state of the specified node.
 func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
-    vm, err := as.getVirtualMachine(types.NodeName(name), cachedData)
+    vm, err := as.getVirtualMachine(types.NodeName(name), cacheReadTypeDefault)
     if err != nil {
         return powerState, err
     }
@@ -436,7 +436,7 @@ func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.Nod
 
 // GetInstanceTypeByNodeName gets the instance type by node name.
 func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) {
-    machine, err := as.getVirtualMachine(types.NodeName(name), allowUnsafeRead)
+    machine, err := as.getVirtualMachine(types.NodeName(name), cacheReadTypeUnsafe)
     if err != nil {
         klog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err)
         return "", err
@@ -448,7 +448,7 @@ func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error
 // GetZoneByNodeName gets availability zone for the specified node. If the node is not running
 // with availability zone, then it returns fault domain.
 func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
-    vm, err := as.getVirtualMachine(types.NodeName(name), allowUnsafeRead)
+    vm, err := as.getVirtualMachine(types.NodeName(name), cacheReadTypeUnsafe)
     if err != nil {
         return cloudprovider.Zone{}, err
     }
@@ -649,7 +649,7 @@ func extractResourceGroupByNicID(nicID string) (string, error) {
 func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName string) (network.Interface, error) {
     var machine compute.VirtualMachine
 
-    machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName), cachedData)
+    machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName), cacheReadTypeDefault)
     if err != nil {
         klog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName)
         return network.Interface{}, err
@@ -132,7 +132,7 @@ func (ss *scaleSet) getVmssVM(nodeName string, crt cacheReadType) (string, strin
 
 // GetPowerStatusByNodeName returns the power state of the specified node.
 func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
-    _, _, vm, err := ss.getVmssVM(name, cachedData)
+    _, _, vm, err := ss.getVmssVM(name, cacheReadTypeDefault)
     if err != nil {
         return powerState, err
     }
@@ -203,7 +203,7 @@ func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceI
 // It must return ("", cloudprovider.InstanceNotFound) if the instance does
 // not exist or is no longer running.
 func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
-    managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, allowUnsafeRead)
+    managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, cacheReadTypeUnsafe)
     if err != nil {
         klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
         return "", err
@@ -213,7 +213,7 @@ func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
         return ss.availabilitySet.GetInstanceIDByNodeName(name)
     }
 
-    _, _, vm, err := ss.getVmssVM(name, allowUnsafeRead)
+    _, _, vm, err := ss.getVmssVM(name, cacheReadTypeUnsafe)
     if err != nil {
         return "", err
     }
@@ -247,7 +247,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName,
         return ss.availabilitySet.GetNodeNameByProviderID(providerID)
     }
 
-    vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, allowUnsafeRead)
+    vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, cacheReadTypeUnsafe)
     if err != nil {
         return "", err
     }
@@ -262,7 +262,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName,
 
 // GetInstanceTypeByNodeName gets the instance type by node name.
 func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
-    managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, allowUnsafeRead)
+    managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, cacheReadTypeUnsafe)
     if err != nil {
         klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
         return "", err
@@ -272,7 +272,7 @@ func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
         return ss.availabilitySet.GetInstanceTypeByNodeName(name)
     }
 
-    _, _, vm, err := ss.getVmssVM(name, allowUnsafeRead)
+    _, _, vm, err := ss.getVmssVM(name, cacheReadTypeUnsafe)
     if err != nil {
         return "", err
     }
@@ -287,7 +287,7 @@ func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
 // GetZoneByNodeName gets availability zone for the specified node. If the node is not running
 // with availability zone, then it returns fault domain.
 func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
-    managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, allowUnsafeRead)
+    managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, cacheReadTypeUnsafe)
     if err != nil {
         klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
         return cloudprovider.Zone{}, err
@@ -297,7 +297,7 @@ func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
         return ss.availabilitySet.GetZoneByNodeName(name)
     }
 
-    _, _, vm, err := ss.getVmssVM(name, allowUnsafeRead)
+    _, _, vm, err := ss.getVmssVM(name, cacheReadTypeUnsafe)
     if err != nil {
         return cloudprovider.Zone{}, err
     }
@@ -536,7 +536,7 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
         }
 
         nodeName := nodes[nx].Name
-        ssName, _, _, err := ss.getVmssVM(nodeName, cachedData)
+        ssName, _, _, err := ss.getVmssVM(nodeName, cacheReadTypeDefault)
         if err != nil {
             return nil, err
         }
@@ -614,7 +614,7 @@ func extractResourceGroupByVMSSNicID(nicID string) (string, error) {
 
 // GetPrimaryInterface gets machine primary network interface by node name and vmSet.
 func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
-    managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName, cachedData)
+    managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName, cacheReadTypeDefault)
     if err != nil {
         klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
         return network.Interface{}, err
@@ -624,7 +624,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err
         return ss.availabilitySet.GetPrimaryInterface(nodeName)
     }
 
-    ssName, instanceID, vm, err := ss.getVmssVM(nodeName, cachedData)
+    ssName, instanceID, vm, err := ss.getVmssVM(nodeName, cacheReadTypeDefault)
     if err != nil {
         // VM is availability set, but not cached yet in availabilitySetNodesCache.
         if err == ErrorNotVmssInstance {
@@ -747,7 +747,7 @@ func (ss *scaleSet) getConfigForScaleSetByIPFamily(config *compute.VirtualMachin
 func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error {
     klog.V(3).Infof("ensuring node %q of scaleset %q in LB backendpool %q", nodeName, vmSetName, backendPoolID)
     vmName := mapNodeNameToVMName(nodeName)
-    ssName, instanceID, vm, err := ss.getVmssVM(vmName, cachedData)
+    ssName, instanceID, vm, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
     if err != nil {
         return err
     }
@@ -1027,7 +1027,7 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
 
     f := func() error {
         // Check whether the node is VMAS virtual machine.
-        managedByAS, err := ss.isNodeManagedByAvailabilitySet(localNodeName, cachedData)
+        managedByAS, err := ss.isNodeManagedByAvailabilitySet(localNodeName, cacheReadTypeDefault)
         if err != nil {
             klog.Errorf("Failed to check isNodeManagedByAvailabilitySet(%s): %v", localNodeName, err)
             return err
@@ -1068,7 +1068,7 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
 
 // ensureBackendPoolDeletedFromNode ensures the loadBalancer backendAddressPools deleted from the specified node.
 func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeName, backendPoolID string) error {
-    ssName, instanceID, vm, err := ss.getVmssVM(nodeName, cachedData)
+    ssName, instanceID, vm, err := ss.getVmssVM(nodeName, cacheReadTypeDefault)
     if err != nil {
         return err
     }
@@ -1167,7 +1167,7 @@ func (ss *scaleSet) getNodeNameByIPConfigurationID(ipConfigurationID string) (st
     resourceGroup := matches[1]
     scaleSetName := matches[2]
     instanceID := matches[3]
-    vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, allowUnsafeRead)
+    vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, cacheReadTypeUnsafe)
     if err != nil {
         return "", err
     }
@@ -115,7 +115,7 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) {
 }
 
 func (ss *scaleSet) deleteCacheForNode(nodeName string) error {
-    cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, allowUnsafeRead)
+    cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, cacheReadTypeUnsafe)
     if err != nil {
         return err
     }
@@ -85,7 +85,7 @@ func TestVMSSVMCache(t *testing.T) {
     for i := range virtualMachines {
         vm := virtualMachines[i]
         vmName := to.String(vm.OsProfile.ComputerName)
-        ssName, instanceID, realVM, err := ss.getVmssVM(vmName, cachedData)
+        ssName, instanceID, realVM, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
         assert.NoError(t, err)
         assert.Equal(t, "vmss", ssName)
         assert.Equal(t, to.String(vm.InstanceID), instanceID)
@@ -99,14 +99,14 @@ func TestVMSSVMCache(t *testing.T) {
     assert.NoError(t, err)
 
     // the VM should be removed from cache after deleteCacheForNode().
-    cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, cachedData)
+    cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, cacheReadTypeDefault)
     assert.NoError(t, err)
     cachedVirtualMachines := cached.(*sync.Map)
     _, ok := cachedVirtualMachines.Load(vmName)
     assert.Equal(t, false, ok)
 
     // the VM should be get back after another cache refresh.
-    ssName, instanceID, realVM, err := ss.getVmssVM(vmName, cachedData)
+    ssName, instanceID, realVM, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
     assert.NoError(t, err)
     assert.Equal(t, "vmss", ssName)
     assert.Equal(t, to.String(vm.InstanceID), instanceID)
@@ -53,7 +53,7 @@ func (az *Cloud) GetZoneID(zoneLabel string) string {
 // If the node is not running with availability zones, then it will fall back to fault domain.
 func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
     if az.UseInstanceMetadata {
-        metadata, err := az.metadata.GetMetadata(allowUnsafeRead)
+        metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe)
         if err != nil {
             return cloudprovider.Zone{}, err
         }