diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go
index b93341c7759..6920178be99 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go
@@ -49,6 +49,7 @@ func createRateLimitErr(isWrite bool, opName string) error {
 // VirtualMachinesClient defines needed functions for azure compute.VirtualMachinesClient
 type VirtualMachinesClient interface {
 	CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine, source string) (resp *http.Response, err error)
+	Update(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachineUpdate, source string) (resp *http.Response, err error)
 	Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error)
 	List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachine, err error)
 }
@@ -207,6 +208,29 @@ func (az *azVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceG
 	return future.Response(), err
 }
 
+func (az *azVirtualMachinesClient) Update(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachineUpdate, source string) (resp *http.Response, err error) {
+	// /* Write rate limiting */
+	if !az.rateLimiterWriter.TryAccept() {
+		err = createRateLimitErr(true, "VMUpdate")
+		return
+	}
+
+	klog.V(10).Infof("azVirtualMachinesClient.Update(%q, %q): start", resourceGroupName, VMName)
+	defer func() {
+		klog.V(10).Infof("azVirtualMachinesClient.Update(%q, %q): end", resourceGroupName, VMName)
+	}()
+
+	mc := newMetricContext("vm", "update", resourceGroupName, az.client.SubscriptionID, source)
+	future, err := az.client.Update(ctx, resourceGroupName, VMName, parameters)
+	if err != nil {
+		return future.Response(), err
+	}
+
+	err = future.WaitForCompletionRef(ctx, az.client.Client)
+	mc.Observe(err)
+	return future.Response(), err
+}
+
 func (az *azVirtualMachinesClient) Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) {
 	if !az.rateLimiterReader.TryAccept() {
 		err = createRateLimitErr(false, "VMGet")
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go
index ecb972b7379..858c6e322c2 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go
@@ -67,8 +67,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
 			})
 	}
 
-	newVM := compute.VirtualMachine{
-		Location: vm.Location,
+	newVM := compute.VirtualMachineUpdate{
 		VirtualMachineProperties: &compute.VirtualMachineProperties{
 			HardwareProfile: vm.HardwareProfile,
 			StorageProfile: &compute.StorageProfile{
@@ -83,7 +82,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
 
 	// Invalidate the cache right after updating
 	defer as.cloud.vmCache.Delete(vmName)
-	_, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM, "attach_disk")
+	_, err = as.VirtualMachinesClient.Update(ctx, nodeResourceGroup, vmName, newVM, "attach_disk")
 	if err != nil {
 		klog.Errorf("azureDisk - attach disk(%s, %s) failed, err: %v", diskName, diskURI, err)
 		detail := err.Error()
@@ -135,8 +134,7 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N
 		klog.Errorf("detach azure disk: disk %s not found, diskURI: %s", diskName, diskURI)
 	}
 
-	newVM := compute.VirtualMachine{
-		Location: vm.Location,
+	newVM := compute.VirtualMachineUpdate{
 		VirtualMachineProperties: &compute.VirtualMachineProperties{
 			HardwareProfile: vm.HardwareProfile,
 			StorageProfile: &compute.StorageProfile{
@@ -151,7 +149,7 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N
 
 	// Invalidate the cache right after updating
 	defer as.cloud.vmCache.Delete(vmName)
-	return as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM, "detach_disk")
+	return as.VirtualMachinesClient.Update(ctx, nodeResourceGroup, vmName, newVM, "detach_disk")
 }
 
 // GetDataDisks gets a list of data disks attached to the node.
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go
index 959ca1809e1..03ed11ff772 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go
@@ -315,6 +315,17 @@ func (fVMC *fakeAzureVirtualMachinesClient) CreateOrUpdate(ctx context.Context,
 	return nil, nil
 }
 
+func (fVMC *fakeAzureVirtualMachinesClient) Update(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachineUpdate, source string) (resp *http.Response, err error) {
+	fVMC.mutex.Lock()
+	defer fVMC.mutex.Unlock()
+
+	if _, ok := fVMC.FakeStore[resourceGroupName]; !ok {
+		fVMC.FakeStore[resourceGroupName] = make(map[string]compute.VirtualMachine)
+	}
+
+	return nil, nil
+}
+
 func (fVMC *fakeAzureVirtualMachinesClient) Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) {
 	fVMC.mutex.Lock()
 	defer fVMC.mutex.Unlock()
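
Note on the shape of the change (illustration, not part of the patch): compute.VirtualMachineUpdate is the payload of the Virtual Machines Update call, which is a PATCH, and unlike compute.VirtualMachine it carries no Location field, so the attach/detach paths above only submit the properties they actually mutate instead of round-tripping the full VM model. The sketch below shows that payload difference using local stand-in structs; the type and field names are simplified stand-ins for the SDK models, and the JSON is trimmed, not the exact wire format.

package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-ins for the SDK models (simplified; illustrative only).
type dataDisk struct {
	Lun  int32  `json:"lun"`
	Name string `json:"name"`
}

type storageProfile struct {
	DataDisks []dataDisk `json:"dataDisks"`
}

type vmProperties struct {
	StorageProfile *storageProfile `json:"storageProfile,omitempty"`
}

// virtualMachine stands in for compute.VirtualMachine: the full model the old
// CreateOrUpdate (PUT) path had to send, including Location.
type virtualMachine struct {
	Location   *string       `json:"location,omitempty"`
	Properties *vmProperties `json:"properties,omitempty"`
}

// virtualMachineUpdate stands in for compute.VirtualMachineUpdate: the PATCH
// payload used by the new Update path, carrying only the changed properties.
type virtualMachineUpdate struct {
	Properties *vmProperties `json:"properties,omitempty"`
}

func main() {
	location := "westus2"
	disks := &storageProfile{DataDisks: []dataDisk{{Lun: 0, Name: "pvc-disk-0"}}}

	putBody, _ := json.Marshal(virtualMachine{
		Location:   &location,
		Properties: &vmProperties{StorageProfile: disks},
	})
	patchBody, _ := json.Marshal(virtualMachineUpdate{
		Properties: &vmProperties{StorageProfile: disks},
	})

	fmt.Println("CreateOrUpdate (PUT) body:", string(putBody))
	fmt.Println("Update (PATCH) body:      ", string(patchBody))
}

Running it as an ordinary main package prints the two payloads side by side, making visible what the hunks above rely on: updating the data-disk list does not require resending location or other unchanged VM fields.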