Merge pull request #71992 from andyzhangx/vmss-update-issue

fix race condition when attaching azure disk in vmss
Kubernetes Prow Robot 2018-12-16 22:27:45 -08:00 committed by GitHub
commit 2b96a60742

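The race: AttachDisk and DetachDiskByName read the VMSS VM from vmssVMCache, mutated vm.StorageProfile.DataDisks in place, and then PUT the entire cached object back through VirtualMachineScaleSetVMsClient.Update. If the cached object was stale, that full-object write could overwrite concurrent changes to the same instance. The fix sends a minimal payload instead: a fresh compute.VirtualMachineScaleSetVM carrying only Sku, Location, HardwareProfile, and a StorageProfile restricted to OsDisk and the updated DataDisks. A minimal standalone sketch of that pattern follows; the package name, helper name, and SDK import path/API version are illustrative, not taken from the PR:

package azure

import (
	// Illustrative import; the real file pins a specific Azure SDK
	// compute API version via Kubernetes' vendored dependencies.
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
)

// buildDiskUpdateVM (hypothetical helper) assembles the minimal update
// payload used in both hunks below: only the fields the disk operation
// needs are echoed back, so a stale cached vm cannot clobber anything else.
func buildDiskUpdateVM(vm compute.VirtualMachineScaleSetVM, disks []compute.DataDisk) compute.VirtualMachineScaleSetVM {
	return compute.VirtualMachineScaleSetVM{
		Sku:      vm.Sku,
		Location: vm.Location,
		VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
			HardwareProfile: vm.HardwareProfile,
			StorageProfile: &compute.StorageProfile{
				OsDisk:    vm.StorageProfile.OsDisk,
				DataDisks: &disks,
			},
		},
	}
}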

@@ -67,7 +67,17 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
 			CreateOption: "attach",
 		})
 	}
-	vm.StorageProfile.DataDisks = &disks
+	newVM := compute.VirtualMachineScaleSetVM{
+		Sku:      vm.Sku,
+		Location: vm.Location,
+		VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
+			HardwareProfile: vm.HardwareProfile,
+			StorageProfile: &compute.StorageProfile{
+				OsDisk:    vm.StorageProfile.OsDisk,
+				DataDisks: &disks,
+			},
+		},
+	}

 	ctx, cancel := getContextWithCancel()
 	defer cancel()
@@ -77,7 +87,7 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
 	defer ss.vmssVMCache.Delete(key)

 	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, nodeName, diskName)
-	_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm)
+	_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
 	if err != nil {
 		detail := err.Error()
 		if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
@@ -126,7 +136,18 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
 		return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI)
 	}
-	vm.StorageProfile.DataDisks = &disks
+	newVM := compute.VirtualMachineScaleSetVM{
+		Sku:      vm.Sku,
+		Location: vm.Location,
+		VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
+			HardwareProfile: vm.HardwareProfile,
+			StorageProfile: &compute.StorageProfile{
+				OsDisk:    vm.StorageProfile.OsDisk,
+				DataDisks: &disks,
+			},
+		},
+	}

 	ctx, cancel := getContextWithCancel()
 	defer cancel()
@@ -135,7 +156,7 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
 	defer ss.vmssVMCache.Delete(key)

 	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, nodeName, diskName)
-	_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm)
+	_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
 	if err != nil {
 		klog.Errorf("azureDisk - detach disk(%s) from %s failed, err: %v", diskName, nodeName, err)
 	} else {
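Note on the surrounding flow, as visible in the context lines of both hunks: each call site pairs the write with defer ss.vmssVMCache.Delete(key), dropping the cached VMSS VM regardless of whether the Update succeeds. The next attach or detach therefore re-reads the instance from the API instead of reusing the object that the PUT just superseded, which keeps the minimal-payload fix above from reintroducing stale reads on the following operation.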