diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go
index 2ca83e28aa1..664993085a5 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go
@@ -88,9 +88,7 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
 	defer cancel()
 
 	// Invalidate the cache right after updating
-	if err = ss.deleteCacheForNode(vmName); err != nil {
-		return err
-	}
+	defer ss.deleteCacheForNode(vmName)
 
 	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s) with DiskEncryptionSetID(%s)", nodeResourceGroup, nodeName, diskName, diskURI, diskEncryptionSetID)
 	_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk")
@@ -160,9 +158,7 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName
 	defer cancel()
 
 	// Invalidate the cache right after updating
-	if err = ss.deleteCacheForNode(vmName); err != nil {
-		return nil, err
-	}
+	defer ss.deleteCacheForNode(vmName)
 
 	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI)
 	return ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk")
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go
index 5351a072c09..379977ac2b2 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go
@@ -850,10 +850,8 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam
 		return err
 	}
 
-	// Invalidate the cache since we would update it.
-	if err = ss.deleteCacheForNode(vmName); err != nil {
-		return err
-	}
+	// Invalidate the cache right after updating
+	defer ss.deleteCacheForNode(vmName)
 
 	// Update vmssVM with backoff.
 	ctx, cancel := getContextWithCancel()
@@ -1129,10 +1127,8 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeNa
 		return err
 	}
 
-	// Invalidate the cache since we would update it.
-	if err = ss.deleteCacheForNode(nodeName); err != nil {
-		return err
-	}
+	// Invalidate the cache right after updating
+	defer ss.deleteCacheForNode(nodeName)
 
 	// Update vmssVM with backoff.
 	ctx, cancel := getContextWithCancel()
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go
index 9abe6f6a1bc..614b16b93e6 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go
@@ -117,6 +117,7 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) {
 func (ss *scaleSet) deleteCacheForNode(nodeName string) error {
 	cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, cacheReadTypeUnsafe)
 	if err != nil {
+		klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err)
 		return err
 	}
 
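
The common thread in these hunks: instead of invalidating the VMSS VM cache up front and aborting the whole attach/detach/update when invalidation fails, the invalidation is now deferred so it runs after the VMSS API call, regardless of that call's outcome. Because the deferred call's error is no longer returned to the caller, the patch logs the failure inside deleteCacheForNode itself. Below is a minimal, self-contained sketch of that control flow; toyCache, attachDisk, and the node/disk names are hypothetical stand-ins, not the actual scaleSet types or the VirtualMachineScaleSetVMsClient API.

```go
package main

import (
	"fmt"
	"sync"
)

// toyCache stands in for the VMSS virtual-machines cache; the real cache in
// azure_vmss_cache.go is a timedCache keyed by vmssVirtualMachinesKey.
type toyCache struct {
	entries sync.Map
}

// deleteCacheForNode drops the cached entry for a node. In the patch, a
// lookup failure here is logged (klog.Errorf) and returned, but callers that
// invoke it via defer no longer abort their own operation on that error.
func (c *toyCache) deleteCacheForNode(nodeName string) error {
	c.entries.Delete(nodeName)
	fmt.Printf("cache invalidated for %s\n", nodeName)
	return nil
}

// attachDisk mimics the post-patch control flow: cache invalidation is
// deferred, so it runs after the (stand-in) API update completes, whether or
// not that update succeeds.
func attachDisk(cache *toyCache, nodeName, diskName string) error {
	// Invalidate the cache right after updating.
	defer cache.deleteCacheForNode(nodeName)

	// Stand-in for ss.VirtualMachineScaleSetVMsClient.Update(...).
	fmt.Printf("attaching disk %s to %s\n", diskName, nodeName)
	return nil
}

func main() {
	cache := &toyCache{}
	cache.entries.Store("vmss-node-0", "stale VM data")

	if err := attachDisk(cache, "vmss-node-0", "data-disk-1"); err != nil {
		fmt.Println("attach failed:", err)
	}
}
```

Note that `defer cache.deleteCacheForNode(nodeName)` discards the function's error return, mirroring the patched call sites; the added klog.Errorf in deleteCacheForNode is what keeps that failure visible.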