Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-25 04:33:26 +00:00
Fix instance not found issues when an Azure Node is recreated in a short time
This commit is contained in:
parent 98814409ba
commit 358885660d
@@ -283,6 +283,13 @@ func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID
     if found && vm != nil {
         return vm, nil
     }
+    if found && vm == nil {
+        klog.V(2).Infof("Couldn't find VMSS VM with scaleSetName %q and instanceID %q, refreshing the cache if it is expired", scaleSetName, instanceID)
+        vm, found, err = getter(azcache.CacheReadTypeDefault)
+        if err != nil {
+            return nil, err
+        }
+    }
     if !found || vm == nil {
         return nil, cloudprovider.InstanceNotFound
     }
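The hunk above handles the case that triggered the bug: the VMSS VM cache still holds an entry for the node, but the entry's VM is nil (a placeholder left over from a node that was deleted and quickly recreated). Instead of immediately returning cloudprovider.InstanceNotFound, the getter is run once more with azcache.CacheReadTypeDefault, which refreshes the cache if it has expired. A minimal, self-contained sketch of that lookup pattern follows; the names (entry, lookup, errNotFound) are hypothetical stand-ins, not the provider's real types.

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("instance not found")

// entry mimics a cache slot whose value may be a nil placeholder.
type entry struct{ value *string }

// lookup mirrors the flow above: return a cached hit, retry once through the
// refresher when the entry exists but holds nil, then report not-found.
func lookup(cache map[string]*entry, key string, refresh func() *entry) (*string, error) {
	e, found := cache[key]
	if found && e.value != nil {
		return e.value, nil
	}
	if found && e.value == nil {
		// The entry is a stale nil placeholder: refresh it and re-read once.
		cache[key] = refresh()
		e = cache[key]
	}
	if !found || e == nil || e.value == nil {
		return nil, errNotFound
	}
	return e.value, nil
}

func main() {
	vm := "vmss-node-0"
	cache := map[string]*entry{"node-0": {value: nil}} // stale nil placeholder
	got, err := lookup(cache, "node-0", func() *entry { return &entry{value: &vm} })
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("lookup succeeded:", *got)
}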
@@ -154,6 +154,11 @@ func (ss *scaleSet) gcVMSSVMCache() error {
 
 // newVMSSVirtualMachinesCache instanciates a new VMs cache for VMs belonging to the provided VMSS.
 func (ss *scaleSet) newVMSSVirtualMachinesCache(resourceGroupName, vmssName, cacheKey string) (*azcache.TimedCache, error) {
+    if ss.Config.VmssVirtualMachinesCacheTTLInSeconds == 0 {
+        ss.Config.VmssVirtualMachinesCacheTTLInSeconds = vmssVirtualMachinesCacheTTLDefaultInSeconds
+    }
+    vmssVirtualMachinesCacheTTL := time.Duration(ss.Config.VmssVirtualMachinesCacheTTLInSeconds) * time.Second
+
     getter := func(key string) (interface{}, error) {
         localCache := &sync.Map{} // [nodeName]*vmssVirtualMachinesEntry
 
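The default-TTL handling and the derived vmssVirtualMachinesCacheTTL duration now sit at the top of newVMSSVirtualMachinesCache, before the getter closure is defined, so the same value can be reused inside the getter (next hunk) and in the constructor call at the bottom of the function (final hunk). A small sketch of that pattern, with illustrative names (config, cacheTTL, defaultTTLSeconds) standing in for the provider's own fields and constants:

package main

import (
	"fmt"
	"time"
)

// defaultTTLSeconds stands in for vmssVirtualMachinesCacheTTLDefaultInSeconds;
// the real default value is defined by the Azure provider, not here.
const defaultTTLSeconds = 600

type config struct {
	VMCacheTTLInSeconds int
}

// cacheTTL applies the default when nothing is configured and derives the
// time.Duration once, so every caller shares the same value.
func cacheTTL(c *config) time.Duration {
	if c.VMCacheTTLInSeconds == 0 {
		c.VMCacheTTLInSeconds = defaultTTLSeconds
	}
	return time.Duration(c.VMCacheTTLInSeconds) * time.Second
}

func main() {
	c := &config{}           // nothing configured: the default applies
	fmt.Println(cacheTTL(c)) // 10m0s
}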
@@ -212,9 +217,9 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache(resourceGroupName, vmssName, cacheKey string) (*azcache.TimedCache, error) {
         // add old missing cache data with nil entries to prevent aggressive
         // ARM calls during cache invalidation
         for name, vmEntry := range oldCache {
-            // if the nil cache entry has existed for 15 minutes in the cache
+            // if the nil cache entry has existed for vmssVirtualMachinesCacheTTL in the cache
             // then it should not be added back to the cache
-            if vmEntry.virtualMachine == nil && time.Since(vmEntry.lastUpdate) > 15*time.Minute {
+            if vmEntry.virtualMachine == nil && time.Since(vmEntry.lastUpdate) > vmssVirtualMachinesCacheTTL {
                 klog.V(5).Infof("ignoring expired entries from old cache for %s", name)
                 continue
             }
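With the TTL computed up front, the hard-coded 15-minute window for dropping stale nil placeholders is replaced by the configured cache TTL, so both expiries follow the same knob. Below is a self-contained sketch of that carry-forward rule; it keeps only the two fields the rule actually reads (virtualMachine, lastUpdate) and uses a plain map instead of the provider's sync.Map-based cache.

package main

import (
	"fmt"
	"time"
)

// vmEntry mirrors just the fields the expiry rule inspects.
type vmEntry struct {
	virtualMachine *string // nil means "known missing" placeholder
	lastUpdate     time.Time
}

// carryForward keeps old entries unless they are nil placeholders older than
// the configured TTL; dropping them forces the next read to query ARM again.
func carryForward(oldCache map[string]*vmEntry, ttl time.Duration) map[string]*vmEntry {
	newCache := make(map[string]*vmEntry, len(oldCache))
	for name, e := range oldCache {
		if e.virtualMachine == nil && time.Since(e.lastUpdate) > ttl {
			continue // expired placeholder: do not add it back
		}
		newCache[name] = e
	}
	return newCache
}

func main() {
	vm := "vmss-node-1"
	old := map[string]*vmEntry{
		"node-0": {virtualMachine: nil, lastUpdate: time.Now().Add(-time.Hour)}, // dropped
		"node-1": {virtualMachine: &vm, lastUpdate: time.Now()},                 // kept
	}
	fmt.Println(len(carryForward(old, 10*time.Minute))) // 1
}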
@@ -238,10 +243,7 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache(resourceGroupName, vmssName, cacheKey string) (*azcache.TimedCache, error) {
         return localCache, nil
     }
 
-    if ss.Config.VmssVirtualMachinesCacheTTLInSeconds == 0 {
-        ss.Config.VmssVirtualMachinesCacheTTLInSeconds = vmssVirtualMachinesCacheTTLDefaultInSeconds
-    }
-    return azcache.NewTimedcache(time.Duration(ss.Config.VmssVirtualMachinesCacheTTLInSeconds)*time.Second, getter)
+    return azcache.NewTimedcache(vmssVirtualMachinesCacheTTL, getter)
 }
 
 func (ss *scaleSet) deleteCacheForNode(nodeName string) error {
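The final hunk builds the cache from the TTL and getter prepared earlier via azcache.NewTimedcache(vmssVirtualMachinesCacheTTL, getter), dropping the now-duplicated default handling. As a rough illustration of what such a constructor produces, here is a toy TTL cache fronted by a getter; it is a self-contained sketch, not the azcache package, and it ignores the cache read types (such as CacheReadTypeDefault) that the real cache distinguishes.

package main

import (
	"fmt"
	"sync"
	"time"
)

type timedEntry struct {
	data      interface{}
	fetchedAt time.Time
}

type timedCache struct {
	mu     sync.Mutex
	ttl    time.Duration
	getter func(key string) (interface{}, error)
	store  map[string]timedEntry
}

func newTimedCache(ttl time.Duration, getter func(key string) (interface{}, error)) *timedCache {
	return &timedCache{ttl: ttl, getter: getter, store: map[string]timedEntry{}}
}

// Get returns a cached value, re-running the getter when the entry is missing
// or older than the TTL.
func (c *timedCache) Get(key string) (interface{}, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if e, ok := c.store[key]; ok && time.Since(e.fetchedAt) <= c.ttl {
		return e.data, nil
	}
	data, err := c.getter(key)
	if err != nil {
		return nil, err
	}
	c.store[key] = timedEntry{data: data, fetchedAt: time.Now()}
	return data, nil
}

func main() {
	calls := 0
	cache := newTimedCache(10*time.Minute, func(key string) (interface{}, error) {
		calls++
		return "vms-for-" + key, nil
	})
	v1, _ := cache.Get("vmss-1")
	v2, _ := cache.Get("vmss-1") // served from cache, getter not re-run
	fmt.Println(v1, v2, calls)   // vms-for-vmss-1 vms-for-vmss-1 1
}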