Merge pull request #93011 from andyzhangx/list-disks

fix: azure disk resize error if source does not exist
Commit 4093df78ea, authored by Kubernetes Prow Robot on 2020-08-27 23:42:53 -07:00, committed via GitHub.
21 changed files with 331 additions and 224 deletions

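Summary of the change: ResizeDisk previously fetched the disk and PUT the whole object back through DisksClient.CreateOrUpdate; that payload includes the disk's original CreationData, which Azure re-validates, so the resize fails when the creation source (for example, a snapshot deleted after the disk was created) no longer exists. The fix switches resize to a PATCH that carries only the changed property, adds Update and ListByResourceGroup to the disk client, and exports the Cloud.vmSet field as VMSet. A minimal sketch of the new call shape, assuming the diskclient package in this diff under its staging import path (resizeGiB and its arguments are illustrative names, not part of the commit):

    import (
        "context"

        "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
        "k8s.io/legacy-cloud-providers/azure/clients/diskclient"
    )

    // resizeGiB PATCHes only DiskSizeGB, so the disk's CreationData is
    // never re-validated against a possibly deleted source.
    func resizeGiB(ctx context.Context, disks diskclient.Interface, rg, name string, gib int32) error {
        update := compute.DiskUpdate{
            DiskUpdateProperties: &compute.DiskUpdateProperties{
                DiskSizeGB: &gib,
            },
        }
        if rerr := disks.Update(ctx, rg, name, update); rerr != nil {
            return rerr.Error()
        }
        return nil
    }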
View File

@@ -248,7 +248,7 @@ type Cloud struct {
     ResourceRequestBackoff wait.Backoff
     metadata               *InstanceMetadataService
-    vmSet                  VMSet
+    VMSet                  VMSet

     // ipv6DualStack allows overriding for unit testing. It's normally initialized from featuregates
     ipv6DualStackEnabled bool
@@ -491,12 +491,12 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret bool) erro
     }

     if strings.EqualFold(vmTypeVMSS, az.Config.VMType) {
-        az.vmSet, err = newScaleSet(az)
+        az.VMSet, err = newScaleSet(az)
         if err != nil {
             return err
         }
     } else {
-        az.vmSet = newAvailabilitySet(az)
+        az.VMSet = newAvailabilitySet(az)
     }

     az.vmCache, err = az.newVMCache()

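Exporting vmSet as VMSet is what lets code outside this package supply its own VMSet implementation. A sketch of the injection this enables, assuming the package's existing gomock-generated VMSet mock (the route tests later in this diff construct theirs the same way; the mockvmsets package name is an assumption):

    // With the exported field, a mock VMSet can now be injected from any
    // package, not just from inside the azure cloud provider itself.
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    mockVMSet := mockvmsets.NewMockVMSet(ctrl)
    cloud := &Cloud{VMSet: mockVMSet}
    mockVMSet.EXPECT().GetPrimaryVMSetName().Return("agentpool1").AnyTimes()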
View File

@@ -111,7 +111,7 @@ func (az *Cloud) getPrivateIPsForMachineWithRetry(nodeName types.NodeName) ([]st
     var privateIPs []string
     err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
         var retryErr error
-        privateIPs, retryErr = az.vmSet.GetPrivateIPsByNodeName(string(nodeName))
+        privateIPs, retryErr = az.VMSet.GetPrivateIPsByNodeName(string(nodeName))
         if retryErr != nil {
             // won't retry since the instance doesn't exist on Azure.
             if retryErr == cloudprovider.InstanceNotFound {
@@ -135,7 +135,7 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string,
     var ip, publicIP string
     err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
         var retryErr error
-        ip, publicIP, retryErr = az.vmSet.GetIPByNodeName(string(name))
+        ip, publicIP, retryErr = az.VMSet.GetIPByNodeName(string(name))
         if retryErr != nil {
             klog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
             return false, nil

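Both helpers lean on the same retry idiom: wait.ExponentialBackoff invokes the condition function repeatedly, treating (false, nil) as "not done, retry after the next backoff step" and a non-nil error as terminal. A self-contained sketch of those semantics:

    package main

    import (
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
        backoff := wait.Backoff{Duration: 100 * time.Millisecond, Factor: 2.0, Steps: 4}

        attempts := 0
        err := wait.ExponentialBackoff(backoff, func() (bool, error) {
            attempts++
            if attempts < 3 {
                return false, nil // transient failure: retry with a longer delay
            }
            return true, nil // done: stop retrying
        })
        // Prints "3 <nil>"; err would be wait.ErrWaitTimeout had Steps run out.
        fmt.Println(attempts, err)
    }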
View File

@@ -90,15 +90,15 @@ type controllerCommon struct {
 // getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type.
 func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt azcache.AzureCacheReadType) (VMSet, error) {
-    // 1. vmType is standard, return cloud.vmSet directly.
+    // 1. vmType is standard, return cloud.VMSet directly.
     if c.cloud.VMType == vmTypeStandard {
-        return c.cloud.vmSet, nil
+        return c.cloud.VMSet, nil
     }

     // 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
-    ss, ok := c.cloud.vmSet.(*scaleSet)
+    ss, ok := c.cloud.VMSet.(*scaleSet)
     if !ok {
-        return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
+        return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.VMSet, c.cloud.VMType)
     }

     // 3. If the node is managed by availability set, then return ss.availabilitySet.

View File

@@ -222,7 +222,7 @@ func TestCommonAttachDiskWithVMSS(t *testing.T) {
         }
         ss, err := newScaleSet(testCloud)
         assert.Nil(t, err)
-        testCloud.vmSet = ss
+        testCloud.VMSet = ss
     }

     common := &controllerCommon{

View File

@@ -79,7 +79,7 @@ func TestStandardAttachDisk(t *testing.T) {
     for i, test := range testCases {
         testCloud := GetTestCloud(ctrl)
-        vmSet := testCloud.vmSet
+        vmSet := testCloud.VMSet
         expectedVMs := setTestVirtualMachines(testCloud, map[string]string{"vm1": "PowerState/Running"}, false)
         mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
         for _, vm := range expectedVMs {
@@ -148,7 +148,7 @@ func TestStandardDetachDisk(t *testing.T) {
     for i, test := range testCases {
         testCloud := GetTestCloud(ctrl)
-        vmSet := testCloud.vmSet
+        vmSet := testCloud.VMSet
         expectedVMs := setTestVirtualMachines(testCloud, map[string]string{"vm1": "PowerState/Running"}, false)
         mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
         for _, vm := range expectedVMs {
@@ -224,7 +224,7 @@ func TestGetDataDisks(t *testing.T) {
     }
     for i, test := range testCases {
         testCloud := GetTestCloud(ctrl)
-        vmSet := testCloud.vmSet
+        vmSet := testCloud.VMSet
         expectedVMs := setTestVirtualMachines(testCloud, map[string]string{"vm1": "PowerState/Running"}, false)
         mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
         for _, vm := range expectedVMs {

View File

@@ -83,7 +83,7 @@ func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) {
     az.VirtualMachineScaleSetsClient = mockvmssclient.NewMockInterface(ctrl)
     az.VirtualMachineScaleSetVMsClient = mockvmssvmclient.NewMockInterface(ctrl)
     az.VirtualMachinesClient = mockvmclient.NewMockInterface(ctrl)
-    az.vmSet = newAvailabilitySet(az)
+    az.VMSet = newAvailabilitySet(az)
     az.vmCache, _ = az.newVMCache()
     az.lbCache, _ = az.newLBCache()
     az.nsgCache, _ = az.newNSGCache()

View File

@@ -95,7 +95,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
         // Not local instance, get addresses from Azure ARM API.
         if !isLocalInstance {
-            if az.vmSet != nil {
+            if az.VMSet != nil {
                 return az.addressGetter(name)
             }
@@ -168,7 +168,7 @@ func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID strin
         return nil, nil
     }

-    name, err := az.vmSet.GetNodeNameByProviderID(providerID)
+    name, err := az.VMSet.GetNodeNameByProviderID(providerID)
     if err != nil {
         return nil, err
     }
@@ -189,7 +189,7 @@ func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID stri
         return true, nil
     }

-    name, err := az.vmSet.GetNodeNameByProviderID(providerID)
+    name, err := az.VMSet.GetNodeNameByProviderID(providerID)
     if err != nil {
         if err == cloudprovider.InstanceNotFound {
             return false, nil
@@ -214,7 +214,7 @@ func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID st
         return false, nil
     }

-    nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID)
+    nodeName, err := az.VMSet.GetNodeNameByProviderID(providerID)
     if err != nil {
         // Returns false, so the controller manager will continue to check InstanceExistsByProviderID().
         if err == cloudprovider.InstanceNotFound {
@@ -224,7 +224,7 @@ func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID st
         return false, err
     }

-    powerStatus, err := az.vmSet.GetPowerStatusByNodeName(string(nodeName))
+    powerStatus, err := az.VMSet.GetPowerStatusByNodeName(string(nodeName))
     if err != nil {
         // Returns false, so the controller manager will continue to check InstanceExistsByProviderID().
         if err == cloudprovider.InstanceNotFound {
@@ -292,8 +292,8 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
         // Not local instance, get instanceID from Azure ARM API.
         if !isLocalInstance {
-            if az.vmSet != nil {
-                return az.vmSet.GetInstanceIDByNodeName(nodeName)
+            if az.VMSet != nil {
+                return az.VMSet.GetInstanceIDByNodeName(nodeName)
             }

             // vmSet == nil indicates credentials are not provided.
@@ -302,7 +302,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
         return az.getLocalInstanceProviderID(metadata, nodeName)
     }

-    return az.vmSet.GetInstanceIDByNodeName(nodeName)
+    return az.VMSet.GetInstanceIDByNodeName(nodeName)
 }

 func (az *Cloud) getLocalInstanceProviderID(metadata *InstanceMetadata, nodeName string) (string, error) {
@@ -342,7 +342,7 @@ func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string
         return "", nil
     }

-    name, err := az.vmSet.GetNodeNameByProviderID(providerID)
+    name, err := az.VMSet.GetNodeNameByProviderID(providerID)
     if err != nil {
         return "", err
     }
@@ -380,8 +380,8 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string,
             return "", err
         }
         if !isLocalInstance {
-            if az.vmSet != nil {
-                return az.vmSet.GetInstanceTypeByNodeName(string(name))
+            if az.VMSet != nil {
+                return az.VMSet.GetInstanceTypeByNodeName(string(name))
             }

             // vmSet == nil indicates credentials are not provided.
@@ -393,7 +393,7 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string,
         }
     }

-    return az.vmSet.GetInstanceTypeByNodeName(string(name))
+    return az.VMSet.GetInstanceTypeByNodeName(string(name))
 }

 // AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances

View File

@@ -174,7 +174,7 @@ func TestInstanceID(t *testing.T) {
             expectedErrMsg: fmt.Errorf("failure of getting instance metadata"),
         },
         {
-            name:                "NodeAddresses should report error if cloud.vmSet is nil",
+            name:                "NodeAddresses should report error if cloud.VMSet is nil",
             nodeName:            "vm1",
             vmType:              vmTypeStandard,
             useInstanceMetadata: true,
@@ -194,9 +194,9 @@
     for _, test := range testcases {
         if test.nilVMSet {
-            cloud.vmSet = nil
+            cloud.VMSet = nil
         } else {
-            cloud.vmSet = newAvailabilitySet(cloud)
+            cloud.VMSet = newAvailabilitySet(cloud)
         }
         cloud.Config.VMType = test.vmType
         cloud.Config.UseInstanceMetadata = test.useInstanceMetadata
@@ -445,7 +445,7 @@ func TestNodeAddresses(t *testing.T) {
             expectedErrMsg: fmt.Errorf("getError"),
         },
         {
-            name:                "NodeAddresses should report error if cloud.vmSet is nil",
+            name:                "NodeAddresses should report error if cloud.VMSet is nil",
             nodeName:            "vm1",
             vmType:              vmTypeStandard,
             useInstanceMetadata: true,
@@ -518,9 +518,9 @@
     for _, test := range testcases {
         if test.nilVMSet {
-            cloud.vmSet = nil
+            cloud.VMSet = nil
         } else {
-            cloud.vmSet = newAvailabilitySet(cloud)
+            cloud.VMSet = newAvailabilitySet(cloud)
         }
         cloud.Config.VMType = test.vmType
         cloud.Config.UseInstanceMetadata = test.useInstanceMetadata
@@ -571,175 +571,6 @@
     }
 }

-func TestIsCurrentInstance(t *testing.T) {
-    cloud := &Cloud{
-        Config: Config{
-            VMType: vmTypeStandard,
-        },
-    }
-    testcases := []struct {
-        nodeName       string
-        metadataVMName string
-        expected       bool
-        expectedErrMsg error
-    }{
-        {
-            nodeName:       "node1",
-            metadataVMName: "node1",
-            expected:       true,
-        },
-        {
-            nodeName:       "node1",
-            metadataVMName: "node2",
-            expected:       false,
-        },
-    }
-
-    for _, test := range testcases {
-        real, err := cloud.isCurrentInstance(types.NodeName(test.nodeName), test.metadataVMName)
-        assert.Equal(t, test.expectedErrMsg, err)
-        assert.Equal(t, test.expected, real)
-    }
-}
-
-func TestInstanceTypeByProviderID(t *testing.T) {
-    ctrl := gomock.NewController(t)
-    defer ctrl.Finish()
-
-    cloud := GetTestCloud(ctrl)
-    cloud.Config.UseInstanceMetadata = true
-    testcases := []struct {
-        name              string
-        vmList            []string
-        vmSize            string
-        nodeName          string
-        vmType            string
-        providerID        string
-        metadataName      string
-        metadataTemplate  string
-        useCustomImsCache bool
-        expectedVMsize    string
-        expectedErrMsg    error
-    }{
-        {
-            name:           "InstanceTypeByProviderID should get InstanceType from Azure API if metadata.Compute.VMSize is nil",
-            vmList:         []string{"vm1"},
-            nodeName:       "vm1",
-            metadataName:   "vm1",
-            vmType:         vmTypeStandard,
-            providerID:     "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1",
-            expectedVMsize: "Standard_A0",
-        },
-        {
-            name:           "InstanceTypeByProviderID should get InstanceType from metedata if node's name are equal to metadataName and metadata.Compute.VMSize is not nil",
-            vmList:         []string{"vm1"},
-            vmSize:         "Standard_A0",
-            nodeName:       "vm1",
-            metadataName:   "vm1",
-            vmType:         vmTypeStandard,
-            providerID:     "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1",
-            expectedVMsize: "Standard_A0",
-        },
-        {
-            name:           "InstanceTypeByProviderID should get InstanceType from Azure API if node is not local instance",
-            vmList:         []string{"vm2"},
-            nodeName:       "vm2",
-            metadataName:   "vm1",
-            vmType:         vmTypeStandard,
-            providerID:     "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm2",
-            expectedVMsize: "Standard_A0",
-        },
-        {
-            name:       "InstanceTypeByProviderID should return nil if node is unmanaged",
-            providerID: "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachine/vm1",
-        },
-        {
-            name:           "InstanceTypeByProviderID should report error if node doesn't exist",
-            vmList:         []string{"vm1"},
-            nodeName:       "vm3",
-            providerID:     "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm3",
-            expectedErrMsg: fmt.Errorf("instance not found"),
-        },
-        {
-            name:           "InstanceTypeByProviderID should report error if providerID is invalid",
-            providerID:     "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachine/vm3",
-            expectedErrMsg: fmt.Errorf("error splitting providerID"),
-        },
-        {
-            name:           "InstanceTypeByProviderID should report error if providerID is null",
-            expectedErrMsg: fmt.Errorf("providerID is empty, the node is not initialized yet"),
-        },
-        {
-            name:             "InstanceTypeByProviderID should report error if metadata.Compute is nil",
-            nodeName:         "vm1",
-            metadataName:     "vm1",
-            providerID:       "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1",
-            metadataTemplate: `{"network":{"interface":[]}}`,
-            expectedErrMsg:   fmt.Errorf("failure of getting instance metadata"),
-        },
-        {
-            name:              "NodeAddresses should report error when invoke GetMetadata",
-            nodeName:          "vm1",
-            metadataName:      "vm1",
-            providerID:        "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1",
-            vmType:            vmTypeStandard,
-            useCustomImsCache: true,
-            expectedErrMsg:    fmt.Errorf("getError"),
-        },
-    }
-
-    for _, test := range testcases {
-        cloud.Config.VMType = test.vmType
-        listener, err := net.Listen("tcp", "127.0.0.1:0")
-        if err != nil {
-            t.Errorf("Test [%s] unexpected error: %v", test.name, err)
-        }
-
-        mux := http.NewServeMux()
-        mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-            if test.metadataTemplate != "" {
-                fmt.Fprintf(w, test.metadataTemplate)
-            } else {
-                fmt.Fprintf(w, "{\"compute\":{\"name\":\"%s\",\"vmsize\":\"%s\",\"subscriptionId\":\"subscription\",\"resourceGroupName\":\"rg\"}}", test.metadataName, test.vmSize)
-            }
-        }))
-        go func() {
-            http.Serve(listener, mux)
-        }()
-        defer listener.Close()
-
-        cloud.metadata, err = NewInstanceMetadataService("http://" + listener.Addr().String() + "/")
-        if err != nil {
-            t.Errorf("Test [%s] unexpected error: %v", test.name, err)
-        }
-
-        if test.useCustomImsCache {
-            cloud.metadata.imsCache, err = azcache.NewTimedcache(metadataCacheTTL, func(key string) (interface{}, error) {
-                return nil, fmt.Errorf("getError")
-            })
-            if err != nil {
-                t.Errorf("Test [%s] unexpected error: %v", test.name, err)
-            }
-        }
-
-        vmListWithPowerState := make(map[string]string)
-        for _, vm := range test.vmList {
-            vmListWithPowerState[vm] = ""
-        }
-        expectedVMs := setTestVirtualMachines(cloud, vmListWithPowerState, false)
-        mockVMsClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
-        for _, vm := range expectedVMs {
-            mockVMsClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes()
-        }
-        mockVMsClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, "vm3", gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
-        mockVMsClient.EXPECT().Update(gomock.Any(), cloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
-
-        instanceType, err := cloud.InstanceTypeByProviderID(context.Background(), test.providerID)
-        assert.Equal(t, test.expectedErrMsg, err, test.name)
-        assert.Equal(t, test.expectedVMsize, instanceType, test.name)
-    }
-}

 func TestInstanceExistsByProviderID(t *testing.T) {
     ctrl := gomock.NewController(t)
     defer ctrl.Finish()

View File

@@ -258,7 +258,7 @@ func (az *Cloud) getLoadBalancerResourceGroup() string {
 func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb *network.LoadBalancer, status *v1.LoadBalancerStatus, exists bool, err error) {
     isInternal := requiresInternalLoadBalancer(service)
     var defaultLB *network.LoadBalancer
-    primaryVMSetName := az.vmSet.GetPrimaryVMSetName()
+    primaryVMSetName := az.VMSet.GetPrimaryVMSetName()
     defaultLBName := az.getAzureLoadBalancerName(clusterName, primaryVMSetName, isInternal)

     existingLBs, err := az.ListLB(service)
@@ -329,7 +329,7 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi
     isInternal := requiresInternalLoadBalancer(service)
     serviceName := getServiceName(service)
     klog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal)
-    vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes)
+    vmSetNames, err := az.VMSet.GetVMSetNames(service, nodes)
     if err != nil {
         klog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
         return nil, false, err
@@ -935,7 +935,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
             // Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB.
             vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
             klog.V(10).Infof("EnsureBackendPoolDeleted(%s,%s) for service %s: start", lbBackendPoolID, vmSetName, serviceName)
-            err := az.vmSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools)
+            err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools)
             if err != nil {
                 klog.Errorf("EnsureBackendPoolDeleted(%s) for service %s failed: %v", lbBackendPoolID, serviceName, err)
                 return nil, err
@@ -979,7 +979,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
         vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
         // Etag would be changed when updating backend pools, so invalidate lbCache after it.
         defer az.lbCache.Delete(lbName)
-        err := az.vmSet.EnsureHostsInPool(service, nodes, lbBackendPoolID, vmSetName, isInternal)
+        err := az.VMSet.EnsureHostsInPool(service, nodes, lbBackendPoolID, vmSetName, isInternal)
         if err != nil {
             return nil, err
         }

View File

@@ -297,11 +297,15 @@ func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quan
         return newSizeQuant, nil
     }

-    result.DiskProperties.DiskSizeGB = &requestGiB
+    diskParameter := compute.DiskUpdate{
+        DiskUpdateProperties: &compute.DiskUpdateProperties{
+            DiskSizeGB: &requestGiB,
+        },
+    }

     ctx, cancel = getContextWithCancel()
     defer cancel()
-    if rerr := c.common.cloud.DisksClient.CreateOrUpdate(ctx, resourceGroup, diskName, result); rerr != nil {
+    if rerr := c.common.cloud.DisksClient.Update(ctx, resourceGroup, diskName, diskParameter); rerr != nil {
         return oldSize, rerr.Error()
     }

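For reference, a hedged sketch of driving this entry point (the managedDiskController value and disk URI are placeholders; sizes are resource.Quantity values and the method computes a whole-GiB request size before issuing the PATCH):

    diskURI := "/subscriptions/<sub>/resourceGroups/rg/providers/Microsoft.Compute/disks/disk1"
    oldSize := resource.MustParse("20Gi")
    newSize := resource.MustParse("30Gi")

    // With this change the PATCH carries only DiskSizeGB, so a deleted
    // source snapshot or image no longer fails the resize.
    resized, err := managedDiskController.ResizeDisk(diskURI, oldSize, newSize)
    if err != nil {
        klog.Errorf("resize failed: %v", err)
    }
    klog.V(2).Infof("disk resized to %s", resized.String())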
View File

@@ -369,9 +369,9 @@ func TestResizeDisk(t *testing.T) {
             mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, nil).AnyTimes()
         }
         if test.diskName == fakeCreateDiskFailed {
-            mockDisksClient.EXPECT().CreateOrUpdate(gomock.Any(), testCloud.ResourceGroup, test.diskName, gomock.Any()).Return(&retry.Error{RawError: fmt.Errorf("Create Disk failed")}).AnyTimes()
+            mockDisksClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, test.diskName, gomock.Any()).Return(&retry.Error{RawError: fmt.Errorf("Create Disk failed")}).AnyTimes()
         } else {
-            mockDisksClient.EXPECT().CreateOrUpdate(gomock.Any(), testCloud.ResourceGroup, test.diskName, gomock.Any()).Return(nil).AnyTimes()
+            mockDisksClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, test.diskName, gomock.Any()).Return(nil).AnyTimes()
         }
         result, err := managedDiskController.ResizeDisk(diskURI, test.oldSize, test.newSize)

View File

@@ -119,7 +119,7 @@ func TestCreateRoute(t *testing.T) {
     cloud := &Cloud{
         RouteTablesClient: routeTableClient,
-        vmSet:             mockVMSet,
+        VMSet:             mockVMSet,
         Config: Config{
             RouteTableResourceGroup: "foo",
             RouteTableName:          "bar",
@@ -526,7 +526,7 @@ func TestListRoutes(t *testing.T) {
     cloud := &Cloud{
         RouteTablesClient: routeTableClient,
-        vmSet:             mockVMSet,
+        VMSet:             mockVMSet,
         Config: Config{
             RouteTableResourceGroup: "foo",
             RouteTableName:          "bar",

View File

@@ -135,7 +135,7 @@ func (az *Cloud) getNetworkResourceSubscriptionID() string {
 func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (vmSetName string) {
     vmSetName = strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix)
     if strings.EqualFold(clusterName, vmSetName) {
-        vmSetName = az.vmSet.GetPrimaryVMSetName()
+        vmSetName = az.VMSet.GetPrimaryVMSetName()
     }

     return vmSetName
@@ -150,7 +150,7 @@ func (az *Cloud) getAzureLoadBalancerName(clusterName string, vmSetName string,
         clusterName = az.LoadBalancerName
     }
     lbNamePrefix := vmSetName
-    if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() {
+    if strings.EqualFold(vmSetName, az.VMSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() {
         lbNamePrefix = clusterName
     }
     if isInternal {
@@ -732,7 +732,7 @@ func (as *availabilitySet) EnsureHostInPool(service *v1.Service, nodeName types.
             return "", "", "", nil, nil
         }

-        klog.Errorf("error: az.EnsureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
+        klog.Errorf("error: az.EnsureHostInPool(%s), az.VMSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
         return "", "", "", nil, err
     }

View File

@@ -908,7 +908,7 @@ func TestGetStandardInstanceIDByNodeName(t *testing.T) {
             ID: to.StringPtr(invalidResouceID),
         }, nil).AnyTimes()

-        instanceID, err := cloud.vmSet.GetInstanceIDByNodeName(test.nodeName)
+        instanceID, err := cloud.VMSet.GetInstanceIDByNodeName(test.nodeName)
         assert.Equal(t, test.expectedErrMsg, err, test.name)
         assert.Equal(t, test.expectedID, instanceID, test.name)
     }
@@ -979,7 +979,7 @@ func TestGetStandardVMPowerStatusByNodeName(t *testing.T) {
         mockVMClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
         mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, test.nodeName, gomock.Any()).Return(test.vm, test.getErr).AnyTimes()

-        powerState, err := cloud.vmSet.GetPowerStatusByNodeName(test.nodeName)
+        powerState, err := cloud.VMSet.GetPowerStatusByNodeName(test.nodeName)
         assert.Equal(t, test.expectedErrMsg, err, test.name)
         assert.Equal(t, test.expectedStatus, powerState, test.name)
     }
@@ -1064,7 +1064,7 @@ func TestGetStandardVMZoneByNodeName(t *testing.T) {
         mockVMClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
        mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, test.nodeName, gomock.Any()).Return(test.vm, test.getErr).AnyTimes()

-        zone, err := cloud.vmSet.GetZoneByNodeName(test.nodeName)
+        zone, err := cloud.VMSet.GetZoneByNodeName(test.nodeName)
         assert.Equal(t, test.expectedErrMsg, err, test.name)
         assert.Equal(t, test.expectedZone, zone, test.name)
     }
@@ -1166,7 +1166,7 @@ func TestGetStandardVMSetNames(t *testing.T) {
         mockVMClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
         mockVMClient.EXPECT().List(gomock.Any(), cloud.ResourceGroup).Return(test.vm, nil).AnyTimes()

-        vmSetNames, err := cloud.vmSet.GetVMSetNames(test.service, test.nodes)
+        vmSetNames, err := cloud.VMSet.GetVMSetNames(test.service, test.nodes)
         assert.Equal(t, test.expectedErrMsg, err, test.name)
         assert.Equal(t, test.expectedVMSetNames, vmSetNames, test.name)
     }
@@ -1339,7 +1339,7 @@ func TestStandardEnsureHostInPool(t *testing.T) {
         mockInterfaceClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, test.nicName, gomock.Any()).Return(testNIC, nil).AnyTimes()
         mockInterfaceClient.EXPECT().CreateOrUpdate(gomock.Any(), cloud.ResourceGroup, gomock.Any(), gomock.Any()).Return(nil).AnyTimes()

-        _, _, _, vm, err := cloud.vmSet.EnsureHostInPool(test.service, test.nodeName, test.backendPoolID, test.vmSetName, false)
+        _, _, _, vm, err := cloud.VMSet.EnsureHostInPool(test.service, test.nodeName, test.backendPoolID, test.vmSetName, false)
         assert.Equal(t, test.expectedErrMsg, err, test.name)
         assert.Nil(t, vm, test.name)
     }
@@ -1501,7 +1501,7 @@ func TestStandardEnsureHostsInPool(t *testing.T) {
         mockInterfaceClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, test.nicName, gomock.Any()).Return(testNIC, nil).AnyTimes()
         mockInterfaceClient.EXPECT().CreateOrUpdate(gomock.Any(), cloud.ResourceGroup, gomock.Any(), gomock.Any()).Return(nil).AnyTimes()

-        err := cloud.vmSet.EnsureHostsInPool(test.service, test.nodes, test.backendPoolID, test.vmSetName, false)
+        err := cloud.VMSet.EnsureHostsInPool(test.service, test.nodes, test.backendPoolID, test.vmSetName, false)
         if test.expectedErr {
             assert.Equal(t, test.expectedErrMsg, err.Error(), test.name)
         } else {

View File

@@ -2180,7 +2180,7 @@ func TestGetNodeNameByProviderID(t *testing.T) {
     }

     for _, test := range providers {
-        name, err := az.vmSet.GetNodeNameByProviderID(test.providerID)
+        name, err := az.VMSet.GetNodeNameByProviderID(test.providerID)
         if (err != nil) != test.fail {
             t.Errorf("Expected to fail=%t, with pattern %v", test.fail, test)
         }

View File

@@ -87,7 +87,7 @@ func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
         if err != nil {
             return cloudprovider.Zone{}, fmt.Errorf("failure getting hostname from kernel")
         }
-        return az.vmSet.GetZoneByNodeName(strings.ToLower(hostname))
+        return az.VMSet.GetZoneByNodeName(strings.ToLower(hostname))
     }

 // GetZoneByProviderID implements Zones.GetZoneByProviderID
@@ -104,7 +104,7 @@ func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cl
         return cloudprovider.Zone{}, nil
     }

-    nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID)
+    nodeName, err := az.VMSet.GetNodeNameByProviderID(providerID)
     if err != nil {
         return cloudprovider.Zone{}, err
     }
@@ -126,5 +126,5 @@ func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName)
         return cloudprovider.Zone{}, nil
     }

-    return az.vmSet.GetZoneByNodeName(string(nodeName))
+    return az.VMSet.GetZoneByNodeName(string(nodeName))
 }

View File

@@ -19,6 +19,7 @@ go_library(
         "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute:go_default_library",
         "//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
         "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
+        "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
         "//vendor/k8s.io/klog/v2:go_default_library",
     ],
 )

View File

@@ -20,12 +20,14 @@ package diskclient

 import (
     "context"
+    "fmt"
     "net/http"
     "time"

     "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
     "github.com/Azure/go-autorest/autorest"
     "github.com/Azure/go-autorest/autorest/azure"
+    "github.com/Azure/go-autorest/autorest/to"

     "k8s.io/client-go/util/flowcontrol"
     "k8s.io/klog/v2"
@@ -204,6 +206,74 @@ func (c *Client) createOrUpdateResponder(resp *http.Response) (*compute.Disk, *r
     return result, retry.GetError(resp, err)
 }

+// Update updates a Disk.
+func (c *Client) Update(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error {
+    mc := metrics.NewMetricContext("disks", "update", resourceGroupName, c.subscriptionID, "")
+
+    // Report errors if the client is rate limited.
+    if !c.rateLimiterWriter.TryAccept() {
+        mc.RateLimitedCount()
+        return retry.GetRateLimitError(true, "DiskUpdate")
+    }
+
+    // Report errors if the client is throttled.
+    if c.RetryAfterWriter.After(time.Now()) {
+        mc.ThrottledCount()
+        rerr := retry.GetThrottlingError("DiskUpdate", "client throttled", c.RetryAfterWriter)
+        return rerr
+    }
+
+    rerr := c.updateDisk(ctx, resourceGroupName, diskName, diskParameter)
+    mc.Observe(rerr.Error())
+    if rerr != nil {
+        if rerr.IsThrottled() {
+            // Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+            c.RetryAfterWriter = rerr.RetryAfter
+        }
+
+        return rerr
+    }
+
+    return nil
+}
+
+// updateDisk updates a Disk.
+func (c *Client) updateDisk(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error {
+    resourceID := armclient.GetResourceID(
+        c.subscriptionID,
+        resourceGroupName,
+        "Microsoft.Compute/disks",
+        diskName,
+    )
+
+    response, rerr := c.armClient.PatchResource(ctx, resourceID, diskParameter)
+    defer c.armClient.CloseResponse(ctx, response)
+    if rerr != nil {
+        klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.put.request", resourceID, rerr.Error())
+        return rerr
+    }
+
+    if response != nil && response.StatusCode != http.StatusNoContent {
+        _, rerr = c.updateResponder(response)
+        if rerr != nil {
+            klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.put.respond", resourceID, rerr.Error())
+            return rerr
+        }
+    }
+
+    return nil
+}
+
+func (c *Client) updateResponder(resp *http.Response) (*compute.Disk, *retry.Error) {
+    result := &compute.Disk{}
+    err := autorest.Respond(
+        resp,
+        azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+        autorest.ByUnmarshallingJSON(&result))
+    result.Response = autorest.Response{Response: resp}
+    return result, retry.GetError(resp, err)
+}
+
 // Delete deletes a Disk by name.
 func (c *Client) Delete(ctx context.Context, resourceGroupName string, diskName string) *retry.Error {
     mc := metrics.NewMetricContext("disks", "delete", resourceGroupName, c.subscriptionID, "")
@@ -246,3 +316,126 @@ func (c *Client) deleteDisk(ctx context.Context, resourceGroupName string, diskN
     return c.armClient.DeleteResource(ctx, resourceID, "")
 }

+// ListByResourceGroup lists all the disks under a resource group.
+func (c *Client) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) {
+    resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks",
+        autorest.Encode("path", c.subscriptionID),
+        autorest.Encode("path", resourceGroupName))
+
+    result := make([]compute.Disk, 0)
+    page := &DiskListPage{}
+    page.fn = c.listNextResults
+
+    resp, rerr := c.armClient.GetResource(ctx, resourceID, "")
+    defer c.armClient.CloseResponse(ctx, resp)
+    if rerr != nil {
+        klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.request", resourceID, rerr.Error())
+        return result, rerr
+    }
+
+    var err error
+    page.dl, err = c.listResponder(resp)
+    if err != nil {
+        klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.respond", resourceID, err)
+        return result, retry.GetError(resp, err)
+    }
+
+    for page.NotDone() {
+        result = append(result, *page.Response().Value...)
+        if err = page.NextWithContext(ctx); err != nil {
+            klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.next", resourceID, err)
+            return result, retry.GetError(page.Response().Response.Response, err)
+        }
+    }
+
+    return result, nil
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (c *Client) listNextResults(ctx context.Context, lastResults compute.DiskList) (result compute.DiskList, err error) {
+    req, err := c.diskListPreparer(ctx, lastResults)
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "diskclient", "listNextResults", nil, "Failure preparing next results request")
+    }
+    if req == nil {
+        return
+    }
+
+    resp, rerr := c.armClient.Send(ctx, req)
+    defer c.armClient.CloseResponse(ctx, resp)
+    if rerr != nil {
+        result.Response = autorest.Response{Response: resp}
+        return result, autorest.NewErrorWithError(rerr.Error(), "diskclient", "listNextResults", resp, "Failure sending next results request")
+    }
+
+    result, err = c.listResponder(resp)
+    if err != nil {
+        err = autorest.NewErrorWithError(err, "diskclient", "listNextResults", resp, "Failure responding to next results request")
+    }
+
+    return
+}
+
+// listResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (c *Client) listResponder(resp *http.Response) (result compute.DiskList, err error) {
+    err = autorest.Respond(
+        resp,
+        azure.WithErrorUnlessStatusCode(http.StatusOK),
+        autorest.ByUnmarshallingJSON(&result),
+        autorest.ByClosing())
+    result.Response = autorest.Response{Response: resp}
+    return
+}
+
+func (c *Client) diskListPreparer(ctx context.Context, lr compute.DiskList) (*http.Request, error) {
+    if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 {
+        return nil, nil
+    }
+
+    return autorest.Prepare((&http.Request{}).WithContext(ctx),
+        autorest.AsJSON(),
+        autorest.AsGet(),
+        autorest.WithBaseURL(to.String(lr.NextLink)))
+}
+
+// DiskListPage contains a page of Disk values.
+type DiskListPage struct {
+    fn func(context.Context, compute.DiskList) (compute.DiskList, error)
+    dl compute.DiskList
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *DiskListPage) NextWithContext(ctx context.Context) (err error) {
+    next, err := page.fn(ctx, page.dl)
+    if err != nil {
+        return err
+    }
+    page.dl = next
+    return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *DiskListPage) Next() error {
+    return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page DiskListPage) NotDone() bool {
+    return !page.dl.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page DiskListPage) Response() compute.DiskList {
+    return page.dl
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page DiskListPage) Values() []compute.Disk {
+    if page.dl.IsEmpty() {
+        return nil
+    }
+    return *page.dl.Value
+}

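Putting the two new client methods together (a sketch, assuming a configured *Client named diskClient; error handling abbreviated):

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    // PATCH a single property of one disk; nothing else is re-submitted.
    update := compute.DiskUpdate{
        DiskUpdateProperties: &compute.DiskUpdateProperties{
            DiskSizeGB: to.Int32Ptr(100),
        },
    }
    if rerr := diskClient.Update(ctx, "rg", "disk1", update); rerr != nil {
        klog.Errorf("disk update failed: %v", rerr.Error())
    }

    // Enumerate all disks in a resource group; DiskListPage follows the
    // server's nextLink internally, so callers see one flat slice.
    disks, rerr := diskClient.ListByResourceGroup(ctx, "rg")
    if rerr != nil {
        klog.Errorf("disk list failed: %v", rerr.Error())
    }
    for _, d := range disks {
        klog.V(4).Infof("found disk %s", to.String(d.Name))
    }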
View File

@@ -161,6 +161,49 @@ func TestCreateOrUpdate(t *testing.T) {
     assert.Equal(t, throttleErr, rerr)
 }

+func TestUpdate(t *testing.T) {
+    ctrl := gomock.NewController(t)
+    defer ctrl.Finish()
+
+    resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Compute/disks/disk1"
+    diskUpdate := getTestDiskUpdate()
+    armClient := mockarmclient.NewMockInterface(ctrl)
+    response := &http.Response{
+        StatusCode: http.StatusOK,
+        Body:       ioutil.NopCloser(bytes.NewReader([]byte(""))),
+    }
+    armClient.EXPECT().PatchResource(gomock.Any(), resourceID, diskUpdate).Return(response, nil).Times(1)
+    armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
+
+    diskClient := getTestDiskClient(armClient)
+    rerr := diskClient.Update(context.TODO(), "rg", "disk1", diskUpdate)
+    assert.Nil(t, rerr)
+
+    response = &http.Response{
+        StatusCode: http.StatusTooManyRequests,
+        Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
+    }
+    throttleErr := &retry.Error{
+        HTTPStatusCode: http.StatusTooManyRequests,
+        RawError:       fmt.Errorf("error"),
+        Retriable:      true,
+        RetryAfter:     time.Unix(100, 0),
+    }
+    armClient.EXPECT().PatchResource(gomock.Any(), resourceID, diskUpdate).Return(response, throttleErr).Times(1)
+    armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
+    rerr = diskClient.Update(context.TODO(), "rg", "disk1", diskUpdate)
+    assert.Equal(t, throttleErr, rerr)
+}
+
+func getTestDiskUpdate() compute.DiskUpdate {
+    return compute.DiskUpdate{
+        DiskUpdateProperties: &compute.DiskUpdateProperties{
+            DiskSizeGB: to.Int32Ptr(100),
+        },
+    }
+}
+
 func TestDelete(t *testing.T) {
     ctrl := gomock.NewController(t)
     defer ctrl.Finish()

View File

@@ -40,6 +40,12 @@
     // CreateOrUpdate creates or updates a Disk.
     CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.Disk) *retry.Error

+    // Update updates a Disk.
+    Update(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error
+
     // Delete deletes a Disk by name.
     Delete(ctx context.Context, resourceGroupName string, diskName string) *retry.Error
+
+    // ListByResourceGroup lists all the disks under a resource group.
+    ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error)
 }

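Not part of this diff, but a useful companion idiom: a compile-time assertion keeps the concrete client and this growing interface from drifting apart as methods like Update and ListByResourceGroup are added.

    // In the diskclient package; fails to compile if *Client stops
    // satisfying Interface (e.g. a signature typo in a new method).
    var _ Interface = &Client{}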
View File

@@ -79,6 +79,20 @@ func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, disk
     return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, diskName, diskParameter)
 }

+// Update mocks base method
+func (m *MockInterface) Update(ctx context.Context, resourceGroupName, diskName string, diskParameter compute.DiskUpdate) *retry.Error {
+    m.ctrl.T.Helper()
+    ret := m.ctrl.Call(m, "Update", ctx, resourceGroupName, diskName, diskParameter)
+    ret0, _ := ret[0].(*retry.Error)
+    return ret0
+}
+
+// Update indicates an expected call of Update
+func (mr *MockInterfaceMockRecorder) Update(ctx, resourceGroupName, diskName, diskParameter interface{}) *gomock.Call {
+    mr.mock.ctrl.T.Helper()
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), ctx, resourceGroupName, diskName, diskParameter)
+}
+
 // Delete mocks base method
 func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, diskName string) *retry.Error {
     m.ctrl.T.Helper()
@@ -92,3 +106,18 @@ func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, diskName int
     mr.mock.ctrl.T.Helper()
     return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, diskName)
 }

+// ListByResourceGroup mocks base method
+func (m *MockInterface) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) {
+    m.ctrl.T.Helper()
+    ret := m.ctrl.Call(m, "ListByResourceGroup", ctx, resourceGroupName)
+    ret0, _ := ret[0].([]compute.Disk)
+    ret1, _ := ret[1].(*retry.Error)
+    return ret0, ret1
+}
+
+// ListByResourceGroup indicates an expected call of ListByResourceGroup
+func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, resourceGroupName interface{}) *gomock.Call {
+    mr.mock.ctrl.T.Helper()
+    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResourceGroup", reflect.TypeOf((*MockInterface)(nil).ListByResourceGroup), ctx, resourceGroupName)
+}