Merge pull request #91948 from andyzhangx/ToBeDetached

fix: use force detach for azure disk
This commit is contained in:
Kubernetes Prow Robot 2020-06-18 19:05:21 -07:00 committed by GitHub
commit bef57a7edb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 25 additions and 14 deletions

View File

@ -44,7 +44,8 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
return err
}
-disks := filterDetachingDisks(*vm.StorageProfile.DataDisks)
+disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
+copy(disks, *vm.StorageProfile.DataDisks)
if isManagedDisk {
managedDisk := &compute.ManagedDiskParameters{ID: &diskURI}
@ -131,7 +132,8 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N
return err
}
-disks := filterDetachingDisks(*vm.StorageProfile.DataDisks)
+disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
+copy(disks, *vm.StorageProfile.DataDisks)
bFoundDisk := false
for i, disk := range disks {
@ -140,7 +142,7 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N
(disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
// found the disk
klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
-disks = append(disks[:i], disks[i+1:]...)
+disks[i].ToBeDetached = to.BoolPtr(true)
bFoundDisk = true
break
}

View File

@ -163,6 +163,10 @@ func TestStandardDetachDisk(t *testing.T) {
err := vmSet.DetachDisk(test.diskName, "", test.nodeName)
assert.Equal(t, test.expectedError, err != nil, "TestCase[%d]: %s", i, test.desc)
+if !test.expectedError && test.diskName != "" {
+dataDisks, err := vmSet.GetDataDisks(test.nodeName, azcache.CacheReadTypeDefault)
+assert.Equal(t, true, len(dataDisks) == 1, "TestCase[%d]: %s, err: %v", i, test.desc, err)
+}
}
}

View File

@ -46,7 +46,8 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
disks := []compute.DataDisk{}
if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil {
-disks = filterDetachingDisks(*vm.StorageProfile.DataDisks)
+disks = make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
+copy(disks, *vm.StorageProfile.DataDisks)
}
if isManagedDisk {
managedDisk := &compute.ManagedDiskParameters{ID: &diskURI}
@ -136,7 +137,8 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName
disks := []compute.DataDisk{}
if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil {
-disks = filterDetachingDisks(*vm.StorageProfile.DataDisks)
+disks = make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
+copy(disks, *vm.StorageProfile.DataDisks)
}
bFoundDisk := false
for i, disk := range disks {
@ -145,7 +147,7 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName
(disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
// found the disk
klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
-disks = append(disks[:i], disks[i+1:]...)
+disks[i].ToBeDetached = to.BoolPtr(true)
bFoundDisk = true
break
}

View File

@ -141,6 +141,7 @@ func TestDetachDiskWithVMSS(t *testing.T) {
defer ctrl.Finish()
fakeStatusNotFoundVMSSName := types.NodeName("FakeStatusNotFoundVMSSName")
+diskName := "disk-name"
testCases := []struct {
desc string
vmList map[string]string
@ -156,7 +157,7 @@ func TestDetachDiskWithVMSS(t *testing.T) {
vmssVMList: []string{"vmss-vm-000001"},
vmssName: "vm1",
vmssvmName: "vm1",
-existedDisk: compute.Disk{Name: to.StringPtr("disk-name")},
+existedDisk: compute.Disk{Name: to.StringPtr(diskName)},
expectedErr: true,
expectedErrMsg: fmt.Errorf("not a vmss instance"),
},
@ -165,7 +166,7 @@ func TestDetachDiskWithVMSS(t *testing.T) {
vmssVMList: []string{"vmss00-vm-000000", "vmss00-vm-000001", "vmss00-vm-000002"},
vmssName: "vmss00",
vmssvmName: "vmss00-vm-000000",
-existedDisk: compute.Disk{Name: to.StringPtr("disk-name")},
+existedDisk: compute.Disk{Name: to.StringPtr(diskName)},
expectedErr: false,
},
{
@ -173,7 +174,7 @@ func TestDetachDiskWithVMSS(t *testing.T) {
vmssVMList: []string{"vmss00-vm-000000", "vmss00-vm-000001", "vmss00-vm-000002"},
vmssName: fakeStatusNotFoundVMSSName,
vmssvmName: "vmss00-vm-000000",
-existedDisk: compute.Disk{Name: to.StringPtr("disk-name")},
+existedDisk: compute.Disk{Name: to.StringPtr(diskName)},
expectedErr: true,
expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 404, RawError: instance not found"),
},
@ -213,7 +214,7 @@ func TestDetachDiskWithVMSS(t *testing.T) {
},
DataDisks: &[]compute.DataDisk{{
Lun: to.Int32Ptr(0),
-Name: to.StringPtr("disk-name"),
+Name: to.StringPtr(diskName),
}},
}
}
@ -225,12 +226,14 @@ func TestDetachDiskWithVMSS(t *testing.T) {
mockVMSSVMClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, scaleSetName, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
}
-diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
-testCloud.SubscriptionID, testCloud.ResourceGroup, *test.existedDisk.Name)
-err = ss.DetachDisk(*test.existedDisk.Name, diskURI, test.vmssvmName)
+err = ss.DetachDisk(*test.existedDisk.Name, diskName, test.vmssvmName)
assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, err: %v", i, test.desc, err)
assert.Equal(t, test.expectedErrMsg, err, "TestCase[%d]: %s, expected error: %v, return error: %v", i, test.desc, test.expectedErrMsg, err)
+if !test.expectedErr {
+dataDisks, err := ss.GetDataDisks(test.vmssvmName, azcache.CacheReadTypeDefault)
+assert.Equal(t, true, len(dataDisks) == 1, "TestCase[%d]: %s, actual data disk num: %d, err: %v", i, test.desc, len(dataDisks), err)
+}
}
}