diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/BUILD b/staging/src/k8s.io/legacy-cloud-providers/azure/BUILD index 4ac822a5a4b..3a04df8febf 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/BUILD +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/BUILD @@ -24,7 +24,6 @@ go_library( "azure_instances.go", "azure_loadbalancer.go", "azure_managedDiskController.go", - "azure_metrics.go", "azure_ratelimit.go", "azure_routes.go", "azure_standard.go", @@ -65,10 +64,11 @@ go_library( "//staging/src/k8s.io/cloud-provider/volume/errors:go_default_library", "//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library", "//staging/src/k8s.io/component-base/featuregate:go_default_library", - "//staging/src/k8s.io/component-base/metrics:go_default_library", - "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/azure/auth:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/azure/clients:go_default_library", + "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient:go_default_library", + "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient:go_default_library", + "//staging/src/k8s.io/legacy-cloud-providers/azure/metrics:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/azure/retry:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network:go_default_library", @@ -93,7 +93,6 @@ go_test( "azure_controller_standard_test.go", "azure_instances_test.go", "azure_loadbalancer_test.go", - "azure_metrics_test.go", "azure_ratelimit_test.go", "azure_routes_test.go", "azure_standard_test.go", @@ -143,6 +142,7 @@ filegroup( ":package-srcs", "//staging/src/k8s.io/legacy-cloud-providers/azure/auth:all-srcs", 
"//staging/src/k8s.io/legacy-cloud-providers/azure/clients:all-srcs", + "//staging/src/k8s.io/legacy-cloud-providers/azure/metrics:all-srcs", "//staging/src/k8s.io/legacy-cloud-providers/azure/retry:all-srcs", ], tags = ["automanaged"], diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure.go index cb66e9fc64c..a04bb231727 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure.go @@ -46,6 +46,8 @@ import ( "k8s.io/klog" "k8s.io/legacy-cloud-providers/azure/auth" azclients "k8s.io/legacy-cloud-providers/azure/clients" + "k8s.io/legacy-cloud-providers/azure/clients/vmssclient" + "k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient" "k8s.io/legacy-cloud-providers/azure/retry" "sigs.k8s.io/yaml" ) @@ -480,8 +482,12 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret bool) erro az.VirtualMachinesClient = newAzVirtualMachinesClient(azClientConfig.WithRateLimiter(config.VirtualMachineRateLimit)) az.PublicIPAddressesClient = newAzPublicIPAddressesClient(azClientConfig.WithRateLimiter(config.PublicIPAddressRateLimit)) az.VirtualMachineSizesClient = newAzVirtualMachineSizesClient(azClientConfig.WithRateLimiter(config.VirtualMachineSizeRateLimit)) - az.VirtualMachineScaleSetsClient = newAzVirtualMachineScaleSetsClient(azClientConfig.WithRateLimiter(config.VirtualMachineScaleSetRateLimit)) - az.VirtualMachineScaleSetVMsClient = newAzVirtualMachineScaleSetVMsClient(azClientConfig.WithRateLimiter(config.VirtualMachineScaleSetRateLimit)) + + az.VirtualMachineScaleSetsClient = vmssclient.New(azClientConfig.WithRateLimiter(config.VirtualMachineScaleSetRateLimit)) + vmssVMClientConfig := azClientConfig.WithRateLimiter(config.VirtualMachineScaleSetRateLimit) + vmssVMClientConfig.Backoff = vmssVMClientConfig.Backoff.WithNonRetriableErrors([]string{vmssVMNotActiveErrorMessage}) + az.VirtualMachineScaleSetVMsClient = 
vmssvmclient.New(vmssVMClientConfig) + // TODO(feiskyer): refactor azureFileClient to Interface. az.FileClient = &azureFileClient{env: *env} diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go index 480a71517b4..569c36a1b71 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go @@ -693,77 +693,32 @@ func (az *Cloud) deleteRouteWithRetry(routeName string) error { }) } -// UpdateVmssVMWithRetry invokes az.VirtualMachineScaleSetVMsClient.Update with exponential backoff retry -func (az *Cloud) UpdateVmssVMWithRetry(resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) error { - return wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) { - ctx, cancel := getContextWithCancel() - defer cancel() +// CreateOrUpdateVMSS invokes az.VirtualMachineScaleSetsClient.CreateOrUpdate(). +func (az *Cloud) CreateOrUpdateVMSS(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) *retry.Error { + ctx, cancel := getContextWithCancel() + defer cancel() - rerr := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source) - klog.V(10).Infof("UpdateVmssVMWithRetry: VirtualMachineScaleSetVMsClient.Update(%s,%s): end", VMScaleSetName, instanceID) - if rerr == nil { - return true, nil - } + // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. + // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. 
+ klog.V(3).Infof("CreateOrUpdateVMSS: verify the status of the vmss being created or updated") + vmss, rerr := az.VirtualMachineScaleSetsClient.Get(ctx, resourceGroupName, VMScaleSetName) + if rerr != nil { + klog.Errorf("CreateOrUpdateVMSS: error getting vmss(%s): %v", VMScaleSetName, rerr) + return rerr + } + if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, virtualMachineScaleSetsDeallocating) { + klog.V(3).Infof("CreateOrUpdateVMSS: found vmss %s being deleted, skipping", VMScaleSetName) + return nil + } - if strings.Contains(rerr.Error().Error(), vmssVMNotActiveErrorMessage) { - // When instances are under deleting, updating API would report "not an active Virtual Machine Scale Set VM instanceId" error. - // Since they're under deleting, we shouldn't send more update requests for it. - klog.V(3).Infof("UpdateVmssVMWithRetry: VirtualMachineScaleSetVMsClient.Update(%s,%s) gets error message %q, abort backoff because it's probably under deleting", VMScaleSetName, instanceID, vmssVMNotActiveErrorMessage) - return true, nil - } + rerr = az.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, resourceGroupName, VMScaleSetName, parameters) + klog.V(10).Infof("CreateOrUpdateVMSS: VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", VMScaleSetName) + if rerr != nil { + klog.Errorf("CreateOrUpdateVMSS: error CreateOrUpdate vmss(%s): %v", VMScaleSetName, rerr) + return rerr + } - return !rerr.Retriable, rerr.Error() - }) -} - -// CreateOrUpdateVmssWithRetry invokes az.VirtualMachineScaleSetsClient.Update with exponential backoff retry -func (az *Cloud) CreateOrUpdateVmssWithRetry(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) error { - return wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) { - ctx, cancel := getContextWithCancel() - defer cancel() - - // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. 
- // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. - klog.V(3).Infof("CreateOrUpdateVmssWithRetry: verify the status of the vmss being created or updated") - vmss, rerr := az.VirtualMachineScaleSetsClient.Get(ctx, resourceGroupName, VMScaleSetName) - if rerr != nil { - klog.Warningf("CreateOrUpdateVmssWithRetry: error getting vmss: %v", rerr) - } - if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, virtualMachineScaleSetsDeallocating) { - klog.V(3).Infof("CreateOrUpdateVmssWithRetry: found vmss %s being deleted, skipping", VMScaleSetName) - return true, nil - } - - rerr = az.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, resourceGroupName, VMScaleSetName, parameters) - klog.V(10).Infof("UpdateVmssVMWithRetry: VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", VMScaleSetName) - if rerr == nil { - return true, nil - } - - return !rerr.Retriable, rerr.Error() - }) -} - -// GetScaleSetWithRetry gets scale set with exponential backoff retry -func (az *Cloud) GetScaleSetWithRetry(service *v1.Service, resourceGroupName, vmssName string) (compute.VirtualMachineScaleSet, error) { - var result compute.VirtualMachineScaleSet - var retryErr *retry.Error - - err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) { - ctx, cancel := getContextWithCancel() - defer cancel() - - result, retryErr = az.VirtualMachineScaleSetsClient.Get(ctx, resourceGroupName, vmssName) - if retryErr != nil { - az.Event(service, v1.EventTypeWarning, "GetVirtualMachineScaleSet", retryErr.Error().Error()) - klog.Errorf("backoff: failure for scale set %q, will retry,err=%v", vmssName, retryErr) - return false, nil - } - klog.V(4).Infof("backoff: success for scale set %q", vmssName) - return true, nil - }) - - return result, err + return nil } func (cfg *Config) shouldOmitCloudProviderBackoff() bool { diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go 
b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go index 9caf2247869..ca196afde6f 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go @@ -32,6 +32,7 @@ import ( "k8s.io/client-go/util/flowcontrol" "k8s.io/klog" azclients "k8s.io/legacy-cloud-providers/azure/clients" + "k8s.io/legacy-cloud-providers/azure/metrics" "k8s.io/legacy-cloud-providers/azure/retry" ) @@ -108,7 +109,7 @@ type VirtualMachineScaleSetsClient interface { // VirtualMachineScaleSetVMsClient defines needed functions for azure compute.VirtualMachineScaleSetVMsClient type VirtualMachineScaleSetVMsClient interface { Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand compute.InstanceViewTypes) (result compute.VirtualMachineScaleSetVM, rerr *retry.Error) - List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result []compute.VirtualMachineScaleSetVM, rerr *retry.Error) + List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, expand string) (result []compute.VirtualMachineScaleSetVM, rerr *retry.Error) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) *retry.Error } @@ -183,7 +184,7 @@ func newAzVirtualMachinesClient(config *azclients.ClientConfig) *azVirtualMachin func (az *azVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine, source string) *retry.Error { // /* Write rate limiting */ - mc := newMetricContext("vm", "create_or_update", resourceGroupName, az.client.SubscriptionID, source) + mc := metrics.NewMetricContext("vm", "create_or_update", resourceGroupName, az.client.SubscriptionID, source) if !az.rateLimiterWriter.TryAccept() { 
mc.RateLimitedCount() return createRateLimitErr(true, "VMCreateOrUpdate") @@ -205,7 +206,7 @@ func (az *azVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceG } func (az *azVirtualMachinesClient) Update(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachineUpdate, source string) *retry.Error { - mc := newMetricContext("vm", "update", resourceGroupName, az.client.SubscriptionID, source) + mc := metrics.NewMetricContext("vm", "update", resourceGroupName, az.client.SubscriptionID, source) // /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { mc.RateLimitedCount() @@ -228,7 +229,7 @@ func (az *azVirtualMachinesClient) Update(ctx context.Context, resourceGroupName } func (az *azVirtualMachinesClient) Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, rerr *retry.Error) { - mc := newMetricContext("vm", "get", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("vm", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() rerr = createRateLimitErr(false, "VMGet") @@ -247,7 +248,7 @@ func (az *azVirtualMachinesClient) Get(ctx context.Context, resourceGroupName st } func (az *azVirtualMachinesClient) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachine, rerr *retry.Error) { - mc := newMetricContext("vm", "list", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("vm", "list", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() rerr = createRateLimitErr(false, "VMList") @@ -310,7 +311,7 @@ func newAzInterfacesClient(config *azclients.ClientConfig) *azInterfacesClient { } func (az *azInterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkInterfaceName string, parameters 
network.Interface) *retry.Error { - mc := newMetricContext("interfaces", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("interfaces", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { mc.RateLimitedCount() @@ -332,7 +333,7 @@ func (az *azInterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupN } func (az *azInterfacesClient) Get(ctx context.Context, resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, rerr *retry.Error) { - mc := newMetricContext("interfaces", "get", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("interfaces", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() rerr = createRateLimitErr(false, "NicGet") @@ -351,7 +352,7 @@ func (az *azInterfacesClient) Get(ctx context.Context, resourceGroupName string, } func (az *azInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, rerr *retry.Error) { - mc := newMetricContext("interfaces", "get_vmss_ni", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("interfaces", "get_vmss_ni", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() rerr = createRateLimitErr(false, "NicGetVirtualMachineScaleSetNetworkInterface") @@ -402,7 +403,7 @@ func newAzLoadBalancersClient(config *azclients.ClientConfig) *azLoadBalancersCl } func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, etag string) *retry.Error { - mc := newMetricContext("load_balancers", 
"create_or_update", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("load_balancers", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { mc.RateLimitedCount() @@ -456,7 +457,7 @@ func (az *azLoadBalancersClient) createOrUpdatePreparer(ctx context.Context, res } func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName string, loadBalancerName string) *retry.Error { - mc := newMetricContext("load_balancers", "delete", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("load_balancers", "delete", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { mc.RateLimitedCount() @@ -478,7 +479,7 @@ func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName s } func (az *azLoadBalancersClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, rerr *retry.Error) { - mc := newMetricContext("load_balancers", "get", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("load_balancers", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() rerr = createRateLimitErr(false, "LBGet") @@ -497,7 +498,7 @@ func (az *azLoadBalancersClient) Get(ctx context.Context, resourceGroupName stri } func (az *azLoadBalancersClient) List(ctx context.Context, resourceGroupName string) ([]network.LoadBalancer, *retry.Error) { - mc := newMetricContext("load_balancers", "list", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("load_balancers", "list", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() rerr := createRateLimitErr(false, "LBList") @@ -560,7 +561,7 @@ func newAzPublicIPAddressesClient(config 
*azclients.ClientConfig) *azPublicIPAdd } func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress) *retry.Error { - mc := newMetricContext("public_ip_addresses", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("public_ip_addresses", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { mc.RateLimitedCount() @@ -582,7 +583,7 @@ func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourc } func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupName string, publicIPAddressName string) *retry.Error { - mc := newMetricContext("public_ip_addresses", "delete", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("public_ip_addresses", "delete", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { mc.RateLimitedCount() @@ -604,7 +605,7 @@ func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupNa } func (az *azPublicIPAddressesClient) Get(ctx context.Context, resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, rerr *retry.Error) { - mc := newMetricContext("public_ip_addresses", "get", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("public_ip_addresses", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() rerr = createRateLimitErr(false, "PublicIPGet") @@ -623,7 +624,7 @@ func (az *azPublicIPAddressesClient) Get(ctx context.Context, resourceGroupName } func (az *azPublicIPAddressesClient) GetVirtualMachineScaleSetPublicIPAddress(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, 
networkInterfaceName string, IPConfigurationName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, rerr *retry.Error) { - mc := newMetricContext("vmss_public_ip_addresses", "get", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("vmss_public_ip_addresses", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() rerr = createRateLimitErr(false, "VMSSPublicIPGet") @@ -642,7 +643,7 @@ func (az *azPublicIPAddressesClient) GetVirtualMachineScaleSetPublicIPAddress(ct } func (az *azPublicIPAddressesClient) List(ctx context.Context, resourceGroupName string) ([]network.PublicIPAddress, *retry.Error) { - mc := newMetricContext("public_ip_addresses", "list", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("public_ip_addresses", "list", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() return nil, createRateLimitErr(false, "PublicIPList") @@ -704,7 +705,7 @@ func newAzSubnetsClient(config *azclients.ClientConfig) *azSubnetsClient { } func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet) *retry.Error { - mc := newMetricContext("subnets", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("subnets", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { mc.RateLimitedCount() @@ -726,7 +727,7 @@ func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName } func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string) *retry.Error { - mc := newMetricContext("subnets", "delete", resourceGroupName, az.client.SubscriptionID, "") + mc := 
metrics.NewMetricContext("subnets", "delete", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { mc.RateLimitedCount() @@ -748,7 +749,7 @@ func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName string, } func (az *azSubnetsClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, rerr *retry.Error) { - mc := newMetricContext("subnets", "get", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("subnets", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() rerr = createRateLimitErr(false, "SubnetGet") @@ -767,7 +768,7 @@ func (az *azSubnetsClient) Get(ctx context.Context, resourceGroupName string, vi } func (az *azSubnetsClient) List(ctx context.Context, resourceGroupName string, virtualNetworkName string) ([]network.Subnet, *retry.Error) { - mc := newMetricContext("subnets", "list", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("subnets", "list", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() return nil, createRateLimitErr(false, "SubnetList") @@ -829,7 +830,7 @@ func newAzSecurityGroupsClient(config *azclients.ClientConfig) *azSecurityGroups } func (az *azSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, etag string) *retry.Error { - mc := newMetricContext("security_groups", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("security_groups", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { mc.RateLimitedCount() @@ -883,7 +884,7 @@ func (az *azSecurityGroupsClient) 
createOrUpdatePreparer(ctx context.Context, re } func (az *azSecurityGroupsClient) Delete(ctx context.Context, resourceGroupName string, networkSecurityGroupName string) *retry.Error { - mc := newMetricContext("security_groups", "delete", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("security_groups", "delete", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { mc.RateLimitedCount() @@ -905,7 +906,7 @@ func (az *azSecurityGroupsClient) Delete(ctx context.Context, resourceGroupName } func (az *azSecurityGroupsClient) Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, rerr *retry.Error) { - mc := newMetricContext("security_groups", "get", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("security_groups", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() rerr = createRateLimitErr(false, "NSGGet") @@ -924,7 +925,7 @@ func (az *azSecurityGroupsClient) Get(ctx context.Context, resourceGroupName str } func (az *azSecurityGroupsClient) List(ctx context.Context, resourceGroupName string) ([]network.SecurityGroup, *retry.Error) { - mc := newMetricContext("security_groups", "list", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("security_groups", "list", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() return nil, createRateLimitErr(false, "NSGList") @@ -953,213 +954,6 @@ func (az *azSecurityGroupsClient) List(ctx context.Context, resourceGroupName st return result, nil } -// azVirtualMachineScaleSetsClient implements VirtualMachineScaleSetsClient. 
-type azVirtualMachineScaleSetsClient struct { - client compute.VirtualMachineScaleSetsClient - rateLimiterReader flowcontrol.RateLimiter - rateLimiterWriter flowcontrol.RateLimiter -} - -func newAzVirtualMachineScaleSetsClient(config *azclients.ClientConfig) *azVirtualMachineScaleSetsClient { - virtualMachineScaleSetsClient := compute.NewVirtualMachineScaleSetsClient(config.SubscriptionID) - virtualMachineScaleSetsClient.BaseURI = config.ResourceManagerEndpoint - virtualMachineScaleSetsClient.Authorizer = autorest.NewBearerAuthorizer(config.ServicePrincipalToken) - virtualMachineScaleSetsClient.PollingDelay = 5 * time.Second - if config.ShouldOmitCloudProviderBackoff { - virtualMachineScaleSetsClient.RetryAttempts = config.CloudProviderBackoffRetries - virtualMachineScaleSetsClient.RetryDuration = time.Duration(config.CloudProviderBackoffDuration) * time.Second - } - configureUserAgent(&virtualMachineScaleSetsClient.Client) - - klog.V(2).Infof("Azure VirtualMachineScaleSetsClient (read ops) using rate limit config: QPS=%g, bucket=%d", - config.RateLimitConfig.CloudProviderRateLimitQPS, - config.RateLimitConfig.CloudProviderRateLimitBucket) - klog.V(2).Infof("Azure VirtualMachineScaleSetsClient (write ops) using rate limit config: QPS=%g, bucket=%d", - config.RateLimitConfig.CloudProviderRateLimitQPSWrite, - config.RateLimitConfig.CloudProviderRateLimitBucketWrite) - rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig) - return &azVirtualMachineScaleSetsClient{ - client: virtualMachineScaleSetsClient, - rateLimiterReader: rateLimiterReader, - rateLimiterWriter: rateLimiterWriter, - } -} - -func (az *azVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, rerr *retry.Error) { - mc := newMetricContext("vmss", "get", resourceGroupName, az.client.SubscriptionID, "") - if !az.rateLimiterReader.TryAccept() { - mc.RateLimitedCount() - rerr = 
createRateLimitErr(false, "VMSSGet") - return - } - - klog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): start", resourceGroupName, VMScaleSetName) - defer func() { - klog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): end", resourceGroupName, VMScaleSetName) - }() - - var err error - result, err = az.client.Get(ctx, resourceGroupName, VMScaleSetName) - mc.Observe(err) - return result, retry.GetError(result.Response.Response, err) -} - -func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachineScaleSet, rerr *retry.Error) { - mc := newMetricContext("vmss", "list", resourceGroupName, az.client.SubscriptionID, "") - if !az.rateLimiterReader.TryAccept() { - mc.RateLimitedCount() - rerr = createRateLimitErr(false, "VMSSList") - return - } - - klog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): start", resourceGroupName) - defer func() { - klog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): end", resourceGroupName) - }() - - iterator, err := az.client.ListComplete(ctx, resourceGroupName) - mc.Observe(err) - if err != nil { - return nil, retry.GetRetriableError(err) - } - - result = make([]compute.VirtualMachineScaleSet, 0) - for ; iterator.NotDone(); err = iterator.Next() { - if err != nil { - return nil, retry.GetRetriableError(err) - } - - result = append(result, iterator.Value()) - } - - return result, nil -} - -func (az *azVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters compute.VirtualMachineScaleSet) *retry.Error { - mc := newMetricContext("vmss", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") - /* Write rate limiting */ - if !az.rateLimiterWriter.TryAccept() { - mc.RateLimitedCount() - return createRateLimitErr(true, "NiCreateOrUpdate") - } - - klog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, vmScaleSetName) - 
defer func() { - klog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, vmScaleSetName) - }() - - future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, vmScaleSetName, parameters) - if err != nil { - return retry.GetError(future.Response(), mc.Observe(err)) - } - - err = future.WaitForCompletionRef(ctx, az.client.Client) - return retry.GetError(future.Response(), mc.Observe(err)) -} - -// azVirtualMachineScaleSetVMsClient implements VirtualMachineScaleSetVMsClient. -type azVirtualMachineScaleSetVMsClient struct { - client compute.VirtualMachineScaleSetVMsClient - rateLimiterReader flowcontrol.RateLimiter - rateLimiterWriter flowcontrol.RateLimiter -} - -func newAzVirtualMachineScaleSetVMsClient(config *azclients.ClientConfig) *azVirtualMachineScaleSetVMsClient { - virtualMachineScaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClient(config.SubscriptionID) - virtualMachineScaleSetVMsClient.BaseURI = config.ResourceManagerEndpoint - virtualMachineScaleSetVMsClient.Authorizer = autorest.NewBearerAuthorizer(config.ServicePrincipalToken) - virtualMachineScaleSetVMsClient.PollingDelay = 5 * time.Second - if config.ShouldOmitCloudProviderBackoff { - virtualMachineScaleSetVMsClient.RetryAttempts = config.CloudProviderBackoffRetries - virtualMachineScaleSetVMsClient.RetryDuration = time.Duration(config.CloudProviderBackoffDuration) * time.Second - } - configureUserAgent(&virtualMachineScaleSetVMsClient.Client) - - klog.V(2).Infof("Azure VirtualMachineScaleSetVMsClient (read ops) using rate limit config: QPS=%g, bucket=%d", - config.RateLimitConfig.CloudProviderRateLimitQPS, - config.RateLimitConfig.CloudProviderRateLimitBucket) - klog.V(2).Infof("Azure VirtualMachineScaleSetVMsClient (write ops) using rate limit config: QPS=%g, bucket=%d", - config.RateLimitConfig.CloudProviderRateLimitQPSWrite, - config.RateLimitConfig.CloudProviderRateLimitBucketWrite) - rateLimiterReader, rateLimiterWriter := 
azclients.NewRateLimiter(config.RateLimitConfig) - return &azVirtualMachineScaleSetVMsClient{ - client: virtualMachineScaleSetVMsClient, - rateLimiterReader: rateLimiterReader, - rateLimiterWriter: rateLimiterWriter, - } -} - -func (az *azVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand compute.InstanceViewTypes) (result compute.VirtualMachineScaleSetVM, rerr *retry.Error) { - mc := newMetricContext("vmssvm", "get", resourceGroupName, az.client.SubscriptionID, "") - if !az.rateLimiterReader.TryAccept() { - mc.RateLimitedCount() - rerr = createRateLimitErr(false, "VMSSGet") - return - } - - klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) - defer func() { - klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) - }() - - var err error - result, err = az.client.Get(ctx, resourceGroupName, VMScaleSetName, instanceID, expand) - mc.Observe(err) - return result, retry.GetError(result.Response.Response, err) -} - -func (az *azVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result []compute.VirtualMachineScaleSetVM, rerr *retry.Error) { - mc := newMetricContext("vmssvm", "list", resourceGroupName, az.client.SubscriptionID, "") - if !az.rateLimiterReader.TryAccept() { - mc.RateLimitedCount() - rerr = createRateLimitErr(false, "VMSSList") - return - } - - klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, filter) - defer func() { - klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, filter) - }() - - iterator, err := az.client.ListComplete(ctx, resourceGroupName, virtualMachineScaleSetName, 
filter, selectParameter, expand) - mc.Observe(err) - if err != nil { - return nil, retry.GetRetriableError(err) - } - - result = make([]compute.VirtualMachineScaleSetVM, 0) - for ; iterator.NotDone(); err = iterator.Next() { - if err != nil { - return nil, retry.GetRetriableError(err) - } - - result = append(result, iterator.Value()) - } - - return result, nil -} - -func (az *azVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) *retry.Error { - mc := newMetricContext("vmssvm", "create_or_update", resourceGroupName, az.client.SubscriptionID, source) - if !az.rateLimiterWriter.TryAccept() { - mc.RateLimitedCount() - return createRateLimitErr(true, "VMSSVMUpdate") - } - - klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) - defer func() { - klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) - }() - - future, err := az.client.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters) - if err != nil { - return retry.GetError(future.Response(), mc.Observe(err)) - } - - err = future.WaitForCompletionRef(ctx, az.client.Client) - return retry.GetError(future.Response(), mc.Observe(err)) -} - // azRoutesClient implements RoutesClient. 
type azRoutesClient struct { client network.RoutesClient @@ -1193,7 +987,7 @@ func newAzRoutesClient(config *azclients.ClientConfig) *azRoutesClient { } func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, etag string) *retry.Error { - mc := newMetricContext("routes", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("routes", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { mc.RateLimitedCount() @@ -1249,7 +1043,7 @@ func (az *azRoutesClient) createOrUpdatePreparer(ctx context.Context, resourceGr } func (az *azRoutesClient) Delete(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) *retry.Error { - mc := newMetricContext("routes", "delete", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("routes", "delete", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { mc.RateLimitedCount() @@ -1303,7 +1097,7 @@ func newAzRouteTablesClient(config *azclients.ClientConfig) *azRouteTablesClient } func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, parameters network.RouteTable, etag string) *retry.Error { - mc := newMetricContext("route_tables", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("route_tables", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { mc.RateLimitedCount() @@ -1357,7 +1151,7 @@ func (az *azRouteTablesClient) createOrUpdatePreparer(ctx context.Context, resou } func (az *azRouteTablesClient) Get(ctx context.Context, resourceGroupName string, routeTableName string, expand string) (result 
network.RouteTable, rerr *retry.Error) { - mc := newMetricContext("route_tables", "get", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("route_tables", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() rerr = createRateLimitErr(false, "GetRouteTable") @@ -1407,7 +1201,7 @@ func newAzStorageAccountClient(config *azclients.ClientConfig) *azStorageAccount } func (az *azStorageAccountClient) Create(ctx context.Context, resourceGroupName string, accountName string, parameters storage.AccountCreateParameters) *retry.Error { - mc := newMetricContext("storage_account", "create", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("storage_account", "create", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { mc.RateLimitedCount() @@ -1430,7 +1224,7 @@ func (az *azStorageAccountClient) Create(ctx context.Context, resourceGroupName } func (az *azStorageAccountClient) Delete(ctx context.Context, resourceGroupName string, accountName string) *retry.Error { - mc := newMetricContext("storage_account", "delete", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("storage_account", "delete", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() return createRateLimitErr(false, "DeleteStorageAccount") @@ -1447,7 +1241,7 @@ func (az *azStorageAccountClient) Delete(ctx context.Context, resourceGroupName } func (az *azStorageAccountClient) ListKeys(ctx context.Context, resourceGroupName string, accountName string) (result storage.AccountListKeysResult, rerr *retry.Error) { - mc := newMetricContext("storage_account", "list_keys", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("storage_account", "list_keys", resourceGroupName, az.client.SubscriptionID, "") if 
!az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() rerr = createRateLimitErr(false, "ListStorageAccountKeys") @@ -1466,7 +1260,7 @@ func (az *azStorageAccountClient) ListKeys(ctx context.Context, resourceGroupNam } func (az *azStorageAccountClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result storage.AccountListResult, rerr *retry.Error) { - mc := newMetricContext("storage_account", "list_by_resource_group", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("storage_account", "list_by_resource_group", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() rerr = createRateLimitErr(false, "ListStorageAccountsByResourceGroup") @@ -1485,7 +1279,7 @@ func (az *azStorageAccountClient) ListByResourceGroup(ctx context.Context, resou } func (az *azStorageAccountClient) GetProperties(ctx context.Context, resourceGroupName string, accountName string) (result storage.Account, rerr *retry.Error) { - mc := newMetricContext("storage_account", "get_properties", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("storage_account", "get_properties", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() rerr = createRateLimitErr(false, "GetStorageAccount/Properties") @@ -1535,7 +1329,7 @@ func newAzDisksClient(config *azclients.ClientConfig) *azDisksClient { } func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.Disk) *retry.Error { - mc := newMetricContext("disks", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("disks", "create_or_update", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { mc.RateLimitedCount() @@ -1557,7 +1351,7 @@ func (az *azDisksClient) CreateOrUpdate(ctx 
context.Context, resourceGroupName s } func (az *azDisksClient) Delete(ctx context.Context, resourceGroupName string, diskName string) *retry.Error { - mc := newMetricContext("disks", "delete", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("disks", "delete", resourceGroupName, az.client.SubscriptionID, "") /* Write rate limiting */ if !az.rateLimiterWriter.TryAccept() { mc.RateLimitedCount() @@ -1578,7 +1372,7 @@ func (az *azDisksClient) Delete(ctx context.Context, resourceGroupName string, d } func (az *azDisksClient) Get(ctx context.Context, resourceGroupName string, diskName string) (result compute.Disk, rerr *retry.Error) { - mc := newMetricContext("disks", "get", resourceGroupName, az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("disks", "get", resourceGroupName, az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() rerr = createRateLimitErr(false, "GetDisk") @@ -1642,7 +1436,7 @@ func newAzVirtualMachineSizesClient(config *azclients.ClientConfig) *azVirtualMa } func (az *azVirtualMachineSizesClient) List(ctx context.Context, location string) (result compute.VirtualMachineSizeListResult, rerr *retry.Error) { - mc := newMetricContext("vmsizes", "list", "", az.client.SubscriptionID, "") + mc := metrics.NewMetricContext("vmsizes", "list", "", az.client.SubscriptionID, "") if !az.rateLimiterReader.TryAccept() { mc.RateLimitedCount() rerr = createRateLimitErr(false, "VMSizesList") diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go index 71f82cebc49..bd1d7839279 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go @@ -548,7 +548,7 @@ func (fVMC *fakeVirtualMachineScaleSetVMsClient) setFakeStore(store map[string]m fVMC.FakeStore = store } -func (fVMC *fakeVirtualMachineScaleSetVMsClient) List(ctx 
context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result []compute.VirtualMachineScaleSetVM, err *retry.Error) { +func (fVMC *fakeVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, expand string) (result []compute.VirtualMachineScaleSetVM, err *retry.Error) { fVMC.mutex.Lock() defer fVMC.mutex.Unlock() diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go index 154d683e73b..b7950a40d84 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go @@ -551,7 +551,7 @@ func (ss *scaleSet) listScaleSetVMs(scaleSetName, resourceGroup string) ([]compu ctx, cancel := getContextWithCancel() defer cancel() - allVMs, rerr := ss.VirtualMachineScaleSetVMsClient.List(ctx, resourceGroup, scaleSetName, "", "", string(compute.InstanceView)) + allVMs, rerr := ss.VirtualMachineScaleSetVMsClient.List(ctx, resourceGroup, scaleSetName, string(compute.InstanceView)) if rerr != nil { klog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", rerr) return nil, rerr.Error() @@ -897,16 +897,12 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam defer cancel() klog.V(2).Infof("EnsureHostInPool begins to update vmssVM(%s) with new backendPoolID %s", vmName, backendPoolID) rerr := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "network_update") - if rerr != nil && rerr.Retriable && ss.CloudProviderBackoff { - klog.V(2).Infof("EnsureHostInPool update backing off vmssVM(%s) with new backendPoolID %s, err: %v", vmName, backendPoolID, err) - retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM, "network_update") - if retryErr != nil { - klog.Errorf("EnsureHostInPool update 
abort backoff vmssVM(%s) with new backendPoolID %s, err: %v", vmName, backendPoolID, err) - } - return retryErr + if rerr != nil { + klog.Errorf("EnsureHostInPool VirtualMachineScaleSetVMsClient.Update(%s) with new backendPoolID %s, err: %v", vmName, backendPoolID, err) + return rerr.Error() } - return rerr.Error() + return nil } func getVmssAndResourceGroupNameByVMProviderID(providerID string) (string, string, error) { @@ -1022,21 +1018,10 @@ func (ss *scaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back }, } - // Update vmssVM with backoff. - ctx, cancel := getContextWithCancel() - defer cancel() - klog.V(2).Infof("ensureVMSSInPool begins to update vmss(%s) with new backendPoolID %s", vmssName, backendPoolID) - rerr := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmssName, newVMSS) - if rerr != nil && rerr.Retriable && ss.CloudProviderBackoff { - klog.V(2).Infof("ensureVMSSInPool update backing off vmss(%s) with new backendPoolID %s, err: %v", vmssName, backendPoolID, err) - retryErr := ss.CreateOrUpdateVmssWithRetry(ss.ResourceGroup, vmssName, newVMSS) - if retryErr != nil { - klog.Errorf("ensureVMSSInPool update abort backoff vmssVM(%s) with new backendPoolID %s, err: %v", vmssName, backendPoolID, err) - return retryErr - } - } + rerr := ss.CreateOrUpdateVMSS(ss.ResourceGroup, vmssName, newVMSS) if rerr != nil { + klog.Errorf("ensureVMSSInPool CreateOrUpdateVMSS(%s) with new backendPoolID %s, err: %v", vmssName, backendPoolID, err) return rerr.Error() } } @@ -1173,14 +1158,6 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeNa defer cancel() klog.V(2).Infof("ensureBackendPoolDeletedFromNode begins to update vmssVM(%s) with backendPoolID %s", nodeName, backendPoolID) rerr := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "network_update") - if rerr != nil && rerr.Retriable && ss.CloudProviderBackoff { - 
klog.V(2).Infof("ensureBackendPoolDeletedFromNode update backing off vmssVM(%s) with backendPoolID %s, err: %v", nodeName, backendPoolID, err) - retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM, "network_update") - if retryErr != nil { - err = retryErr - klog.Errorf("ensureBackendPoolDeletedFromNode update abort backoff vmssVM(%s) with backendPoolID %s, err: %v", nodeName, backendPoolID, err) - } - } if rerr != nil { klog.Errorf("ensureBackendPoolDeletedFromNode failed to update vmssVM(%s) with backendPoolID %s: %v", nodeName, backendPoolID, err) } else { @@ -1304,21 +1281,10 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen }, } - // Update vmssVM with backoff. - ctx, cancel := getContextWithCancel() - defer cancel() - klog.V(2).Infof("ensureBackendPoolDeletedFromVMSS begins to update vmss(%s) with backendPoolID %s", vmssName, backendPoolID) - rerr := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmssName, newVMSS) - if rerr != nil && rerr.Retriable && ss.CloudProviderBackoff { - klog.V(2).Infof("ensureBackendPoolDeletedFromVMSS update backing off vmss(%s) with backendPoolID %s, err: %v", vmssName, backendPoolID, err) - retryErr := ss.CreateOrUpdateVmssWithRetry(ss.ResourceGroup, vmssName, newVMSS) - if retryErr != nil { - klog.Errorf("ensureBackendPoolDeletedFromVMSS update abort backoff vmssVM(%s) with backendPoolID %s, err: %v", vmssName, backendPoolID, retryErr) - return retryErr - } - } + rerr := ss.CreateOrUpdateVMSS(ss.ResourceGroup, vmssName, newVMSS) if rerr != nil { + klog.Errorf("ensureBackendPoolDeletedFromVMSS CreateOrUpdateVMSS(%s) with new backendPoolID %s, err: %v", vmssName, backendPoolID, err) return rerr.Error() } } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache_test.go index 35ee33c8cf9..84ff972a1ad 100644 --- 
a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache_test.go @@ -79,7 +79,7 @@ func TestVMSSVMCache(t *testing.T) { // validate getting VMSS VM via cache. virtualMachines, rerr := ss.VirtualMachineScaleSetVMsClient.List( - context.Background(), "rg", "vmss", "", "", "") + context.Background(), "rg", "vmss", "") assert.Nil(t, rerr) assert.Equal(t, 3, len(virtualMachines)) for i := range virtualMachines { diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/BUILD b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/BUILD index 7d742947cb1..18aa96c0fac 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/BUILD +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/BUILD @@ -25,7 +25,12 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient:all-srcs", + "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient:all-srcs", + "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient:all-srcs", + ], tags = ["automanaged"], visibility = ["//visibility:public"], ) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/BUILD b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/BUILD new file mode 100644 index 00000000000..6fed3c8cf7f --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/BUILD @@ -0,0 +1,48 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "azure_armclient.go", + "doc.go", + "interface.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/legacy-cloud-providers/azure/clients/armclient", + importpath = "k8s.io/legacy-cloud-providers/azure/clients/armclient", + visibility = ["//visibility:public"], + deps = [ + 
"//staging/src/k8s.io/client-go/pkg/version:go_default_library", + "//staging/src/k8s.io/legacy-cloud-providers/azure/retry:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["azure_armclient_test.go"], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/legacy-cloud-providers/azure/retry:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/azure_armclient.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/azure_armclient.go new file mode 100644 index 00000000000..0a422946c9b --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/azure_armclient.go @@ -0,0 +1,543 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package armclient + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "time" + "unicode" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + + "k8s.io/client-go/pkg/version" + "k8s.io/klog" + "k8s.io/legacy-cloud-providers/azure/retry" +) + +var _ Interface = &Client{} + +// Client implements ARM client Interface. +type Client struct { + client autorest.Client + backoff *retry.Backoff + + baseURI string + apiVersion string + clientRegion string +} + +// New creates an ARM client +func New(authorizer autorest.Authorizer, baseURI, userAgent, apiVersion, clientRegion string, clientBackoff *retry.Backoff) *Client { + restClient := autorest.NewClientWithUserAgent(userAgent) + restClient.PollingDelay = 5 * time.Second + restClient.RetryAttempts = 3 + restClient.RetryDuration = time.Second * 1 + restClient.Authorizer = authorizer + + if userAgent == "" { + restClient.UserAgent = GetUserAgent(restClient) + } + + backoff := clientBackoff + if backoff == nil { + // 1 step means no retry.
+ backoff = &retry.Backoff{ + Steps: 1, + } + } + + return &Client{ + client: restClient, + baseURI: baseURI, + backoff: backoff, + apiVersion: apiVersion, + clientRegion: NormalizeAzureRegion(clientRegion), + } +} + +// GetUserAgent gets the autorest client with a user agent that +// includes "kubernetes" and the full kubernetes git version string +// example: +// Azure-SDK-for-Go/7.0.1 arm-network/2016-09-01; kubernetes-cloudprovider/v1.17.0; +func GetUserAgent(client autorest.Client) string { + k8sVersion := version.Get().GitVersion + return fmt.Sprintf("%s; kubernetes-cloudprovider/%s", client.UserAgent, k8sVersion) +} + +// NormalizeAzureRegion returns a normalized Azure region with white spaces removed and converted to lower case +func NormalizeAzureRegion(name string) string { + region := "" + for _, runeValue := range name { + if !unicode.IsSpace(runeValue) { + region += string(runeValue) + } + } + return strings.ToLower(region) +} + +// sendRequest sends a http request to ARM service. +// Although Azure SDK supports retries per https://github.com/azure/azure-sdk-for-go#request-retry-policy, we +// disable it since we want to fully control the retry policies. +func (c *Client) sendRequest(ctx context.Context, request *http.Request) (*http.Response, *retry.Error) { + sendBackoff := *c.backoff + response, err := autorest.SendWithSender( + c.client, + request, + retry.DoExponentialBackoffRetry(&sendBackoff), + ) + return response, retry.GetError(response, err) +} + +// Send sends a http request to ARM service with possible retry to regional ARM endpoint. 
+func (c *Client) Send(ctx context.Context, request *http.Request) (*http.Response, *retry.Error) { + response, rerr := c.sendRequest(ctx, request) + if rerr != nil { + return response, rerr + } + + if response.StatusCode != http.StatusNotFound || c.clientRegion == "" { + return response, rerr + } + + bodyBytes, _ := ioutil.ReadAll(response.Body) + defer func() { + response.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes)) + }() + + bodyString := string(bodyBytes) + klog.V(5).Infof("Send.sendRequest original error message: %s", bodyString) + + // Hack: retry the regional ARM endpoint in case of ARM traffic split and arm resource group replication is too slow + var body map[string]interface{} + if e := json.Unmarshal(bodyBytes, &body); e != nil { + klog.V(5).Infof("Send.sendRequest: error in parsing response body string: %s, Skip retrying regional host", e) + return response, rerr + } + + if err, ok := body["error"].(map[string]interface{}); !ok || + err["code"] == nil || + !strings.EqualFold(err["code"].(string), "ResourceGroupNotFound") { + klog.V(5).Infof("Send.sendRequest: response body does not contain ResourceGroupNotFound error code. Skip retrying regional host") + return response, rerr + } + + currentHost := request.URL.Host + if request.Host != "" { + currentHost = request.Host + } + + if strings.HasPrefix(strings.ToLower(currentHost), c.clientRegion) { + klog.V(5).Infof("Send.sendRequest: current host %s is regional host. Skip retrying regional host.", currentHost) + return response, rerr + } + + request.Host = fmt.Sprintf("%s.%s", c.clientRegion, strings.ToLower(currentHost)) + klog.V(5).Infof("Send.sendRegionalRequest on ResourceGroupNotFound error. Retrying regional host: %s", request.Host) + regionalResponse, regionalError := c.sendRequest(ctx, request) + + // only use the result if the regional request actually goes through and returns 2xx status code, for two reasons: + // 1. the retry on regional ARM host approach is a hack. + // 2. 
the concatenated regional uri could be wrong as the rule is not officially declared by ARM. + if regionalResponse == nil || regionalResponse.StatusCode > 299 { + regionalErrStr := "" + if regionalError != nil { + regionalErrStr = regionalError.Error().Error() + } + + klog.V(5).Infof("Send.sendRegionalRequest failed to get response from regional host, error: '%s'. Ignoring the result.", regionalErrStr) + return response, rerr + } + + return regionalResponse, regionalError +} + +// PreparePutRequest prepares put request +func (c *Client) PreparePutRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) { + decorators = append( + []autorest.PrepareDecorator{ + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(c.baseURI)}, + decorators...) + return c.prepareRequest(ctx, decorators...) +} + +// PreparePostRequest prepares post request +func (c *Client) PreparePostRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) { + decorators = append( + []autorest.PrepareDecorator{ + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(c.baseURI)}, + decorators...) + return c.prepareRequest(ctx, decorators...) +} + +// PrepareGetRequest prepares get request +func (c *Client) PrepareGetRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) { + decorators = append( + []autorest.PrepareDecorator{ + autorest.AsGet(), + autorest.WithBaseURL(c.baseURI)}, + decorators...) + return c.prepareRequest(ctx, decorators...) +} + +// PrepareDeleteRequest prepares delete request +func (c *Client) PrepareDeleteRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) { + decorators = append( + []autorest.PrepareDecorator{ + autorest.AsDelete(), + autorest.WithBaseURL(c.baseURI)}, + decorators...) + return c.prepareRequest(ctx, decorators...)
+} + +// PrepareHeadRequest prepares head request +func (c *Client) PrepareHeadRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) { + decorators = append( + []autorest.PrepareDecorator{ + autorest.AsHead(), + autorest.WithBaseURL(c.baseURI)}, + decorators...) + return c.prepareRequest(ctx, decorators...) +} + +// WaitForAsyncOperationCompletion waits for an operation completion +func (c *Client) WaitForAsyncOperationCompletion(ctx context.Context, future *azure.Future, asyncOperationName string) error { + err := future.WaitForCompletionRef(ctx, c.client) + if err != nil { + klog.V(5).Infof("Received error in WaitForCompletionRef: '%v'", err) + return err + } + + var done bool + done, err = future.DoneWithContext(ctx, c.client) + if err != nil { + klog.V(5).Infof("Received error in DoneWithContext: '%v'", err) + return autorest.NewErrorWithError(err, asyncOperationName, "Result", future.Response(), "Polling failure") + } + if !done { + return azure.NewAsyncOpIncompleteError(asyncOperationName) + } + + return nil +} + +// WaitForAsyncOperationResult waits for an operation result. 
+func (c *Client) WaitForAsyncOperationResult(ctx context.Context, future *azure.Future, asyncOperationName string) (*http.Response, error) { + err := c.WaitForAsyncOperationCompletion(ctx, future, asyncOperationName) + if err != nil { + klog.V(5).Infof("Received error in WaitForAsyncOperationCompletion: '%v'", err) + return nil, err + } + + sendBackoff := *c.backoff + sender := autorest.DecorateSender( + c.client, + retry.DoExponentialBackoffRetry(&sendBackoff), + ) + return future.GetResult(sender) +} + +// SendAsync send a request and return a future object representing the async result as well as the origin http response +func (c *Client) SendAsync(ctx context.Context, request *http.Request) (*azure.Future, *http.Response, *retry.Error) { + asyncResponse, rerr := c.Send(ctx, request) + if rerr != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "sendAsync.send", request.URL.String(), rerr.Error()) + return nil, nil, rerr + } + + future, err := azure.NewFutureFromResponse(asyncResponse) + if err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "sendAsync.responed", request.URL.String(), err) + return nil, asyncResponse, retry.GetError(asyncResponse, err) + } + + return &future, asyncResponse, nil +} + +// GetResource get a resource by resource ID +func (c *Client) GetResource(ctx context.Context, resourceID, expand string) (*http.Response, *retry.Error) { + decorators := []autorest.PrepareDecorator{ + autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}), + } + if expand != "" { + queryParameters := map[string]interface{}{ + "$expand": autorest.Encode("query", expand), + } + decorators = append(decorators, autorest.WithQueryParameters(queryParameters)) + } + request, err := c.PrepareGetRequest(ctx, decorators...) 
+ if err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "get.prepare", resourceID, err) + return nil, retry.NewError(false, err) + } + + return c.Send(ctx, request) +} + +// PutResource puts a resource by resource ID +func (c *Client) PutResource(ctx context.Context, resourceID string, parameters interface{}) (*http.Response, *retry.Error) { + decorators := []autorest.PrepareDecorator{ + autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}), + autorest.WithJSON(parameters), + } + + request, err := c.PreparePutRequest(ctx, decorators...) + if err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "put.prepare", resourceID, err) + return nil, retry.NewError(false, err) + } + + future, resp, clientErr := c.SendAsync(ctx, request) + defer c.CloseResponse(ctx, resp) + if clientErr != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "put.send", resourceID, clientErr.Error()) + return nil, clientErr + } + + response, err := c.WaitForAsyncOperationResult(ctx, future, "armclient.PutResource") + if err != nil { + if response != nil { + klog.V(5).Infof("Received error in WaitForAsyncOperationResult: '%s', response code %d", err.Error(), response.StatusCode) + } else { + klog.V(5).Infof("Received error in WaitForAsyncOperationResult: '%s', no response", err.Error()) + } + + retriableErr := retry.GetError(response, err) + if !retriableErr.Retriable && + strings.Contains(strings.ToUpper(err.Error()), strings.ToUpper("InternalServerError")) { + klog.V(5).Infof("Received InternalServerError in WaitForAsyncOperationResult: '%s', setting error retriable", err.Error()) + retriableErr.Retriable = true + } + return nil, retriableErr + } + + return response, nil +} + +// PutResourceAsync puts a resource by resource ID in async mode +func (c *Client) PutResourceAsync(ctx context.Context, resourceID string, parameters interface{}) (*azure.Future, *retry.Error) { + 
decorators := []autorest.PrepareDecorator{ + autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}), + autorest.WithJSON(parameters), + } + + request, err := c.PreparePutRequest(ctx, decorators...) + if err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "put.prepare", resourceID, err) + return nil, retry.NewError(false, err) + } + + future, resp, rErr := c.SendAsync(ctx, request) + defer c.CloseResponse(ctx, resp) + if rErr != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "put.send", resourceID, err) + return nil, rErr + } + + return future, nil +} + +// PostResource posts a resource by resource ID +func (c *Client) PostResource(ctx context.Context, resourceID, action string, parameters interface{}) (*http.Response, *retry.Error) { + pathParameters := map[string]interface{}{ + "resourceID": resourceID, + "action": action, + } + + decorators := []autorest.PrepareDecorator{ + autorest.WithPathParameters("{resourceID}/{action}", pathParameters), + autorest.WithJSON(parameters), + } + request, err := c.PreparePostRequest(ctx, decorators...) 
+ if err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "post.prepare", resourceID, err) + return nil, retry.NewError(false, err) + } + + return c.sendRequest(ctx, request) +} + +// DeleteResource deletes a resource by resource ID +func (c *Client) DeleteResource(ctx context.Context, resourceID, ifMatch string) *retry.Error { + future, clientErr := c.DeleteResourceAsync(ctx, resourceID, ifMatch) + if clientErr != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "delete.request", resourceID, clientErr.Error()) + return clientErr + } + + if future == nil { + return nil + } + + if err := c.WaitForAsyncOperationCompletion(ctx, future, "armclient.DeleteResource"); err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "delete.wait", resourceID, clientErr.Error()) + return retry.NewError(true, err) + } + + return nil +} + +// HeadResource heads a resource by resource ID +func (c *Client) HeadResource(ctx context.Context, resourceID string) (*http.Response, *retry.Error) { + decorators := []autorest.PrepareDecorator{ + autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}), + } + request, err := c.PrepareHeadRequest(ctx, decorators...) 
+ if err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "head.prepare", resourceID, err) + return nil, retry.NewError(false, err) + } + + return c.sendRequest(ctx, request) +} + +// DeleteResourceAsync delete a resource by resource ID and returns a future representing the async result +func (c *Client) DeleteResourceAsync(ctx context.Context, resourceID, ifMatch string) (*azure.Future, *retry.Error) { + decorators := []autorest.PrepareDecorator{ + autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}), + } + if len(ifMatch) > 0 { + decorators = append(decorators, autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + + deleteRequest, err := c.PrepareDeleteRequest(ctx, decorators...) + if err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deleteAsync.prepare", resourceID, err) + return nil, retry.NewError(false, err) + } + + resp, rerr := c.sendRequest(ctx, deleteRequest) + defer c.CloseResponse(ctx, resp) + if rerr != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deleteAsync.send", resourceID, rerr.Error()) + return nil, rerr + } + + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent, http.StatusNotFound)) + if err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deleteAsync.respond", resourceID, err) + return nil, retry.GetError(resp, err) + } + + if resp.StatusCode == http.StatusNotFound { + return nil, nil + } + + future, err := azure.NewFutureFromResponse(resp) + if err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deleteAsync.future", resourceID, err) + return nil, retry.GetError(resp, err) + } + + return &future, nil +} + +// CloseResponse closes a response +func (c *Client) CloseResponse(ctx context.Context, response *http.Response) { + if response != nil && response.Body != nil { + if 
err := response.Body.Close(); err != nil { + klog.Errorf("Error closing the response body: %v", err) + } + } +} + +func (c *Client) prepareRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) { + decorators = append( + decorators, + withAPIVersion(c.apiVersion)) + preparer := autorest.CreatePreparer(decorators...) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +func withAPIVersion(apiVersion string) autorest.PrepareDecorator { + const apiVersionKey = "api-version" + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, fmt.Errorf("Error in withAPIVersion: Invoked with a nil URL") + } + + v := r.URL.Query() + if len(v.Get(apiVersionKey)) > 0 { + return r, nil + } + + v.Add(apiVersionKey, apiVersion) + r.URL.RawQuery = v.Encode() + } + return r, err + }) + } +} + +// GetResourceID gets Azure resource ID +func GetResourceID(subscriptionID, resourceGroupName, resourceType, resourceName string) string { + return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s", + autorest.Encode("path", subscriptionID), + autorest.Encode("path", resourceGroupName), + resourceType, + autorest.Encode("path", resourceName)) +} + +// GetChildResourceID gets Azure child resource ID +func GetChildResourceID(subscriptionID, resourceGroupName, resourceType, resourceName, childResourceType, childResourceName string) string { + return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s/%s", + autorest.Encode("path", subscriptionID), + autorest.Encode("path", resourceGroupName), + resourceType, + autorest.Encode("path", resourceName), + childResourceType, + autorest.Encode("path", childResourceName)) +} + +// GetChildResourcesListID gets Azure child resources list ID +func GetChildResourcesListID(subscriptionID, resourceGroupName, resourceType, 
resourceName, childResourceType string) string { + return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s", + autorest.Encode("path", subscriptionID), + autorest.Encode("path", resourceGroupName), + resourceType, + autorest.Encode("path", resourceName), + childResourceType) +} + +// GetProviderResourceID gets Azure RP resource ID +func GetProviderResourceID(subscriptionID, providerNamespace string) string { + return fmt.Sprintf("/subscriptions/%s/providers/%s", + autorest.Encode("path", subscriptionID), + providerNamespace) +} + +// GetProviderResourcesListID gets Azure RP resources list ID +func GetProviderResourcesListID(subscriptionID string) string { + return fmt.Sprintf("/subscriptions/%s/providers", autorest.Encode("path", subscriptionID)) +} diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/azure_armclient_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/azure_armclient_test.go new file mode 100644 index 00000000000..3554e3eeaf4 --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/azure_armclient_test.go @@ -0,0 +1,253 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package armclient + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/stretchr/testify/assert" + "k8s.io/legacy-cloud-providers/azure/retry" +) + +func TestSend(t *testing.T) { + count := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if count <= 1 { + http.Error(w, "failed", http.StatusInternalServerError) + count++ + } + })) + + backoff := &retry.Backoff{Steps: 3} + armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff) + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", "testgroup"), + "subscriptionId": autorest.Encode("path", "testid"), + "resourceName": autorest.Encode("path", "testname"), + } + + decorators := []autorest.PrepareDecorator{ + autorest.WithPathParameters( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vNets/{resourceName}", pathParameters), + } + + ctx := context.Background() + request, err := armClient.PrepareGetRequest(ctx, decorators...) 
+ assert.NoError(t, err) + + response, rerr := armClient.Send(ctx, request) + assert.Nil(t, rerr) + assert.Equal(t, 2, count) + assert.Equal(t, http.StatusOK, response.StatusCode) +} + +func TestSendFailure(t *testing.T) { + count := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "failed", http.StatusInternalServerError) + count++ + })) + + backoff := &retry.Backoff{Steps: 3} + armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff) + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", "testgroup"), + "subscriptionId": autorest.Encode("path", "testid"), + "resourceName": autorest.Encode("path", "testname"), + } + + decorators := []autorest.PrepareDecorator{ + autorest.WithPathParameters( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vNets/{resourceName}", pathParameters), + } + + ctx := context.Background() + request, err := armClient.PrepareGetRequest(ctx, decorators...) 
+ assert.NoError(t, err) + + response, rerr := armClient.Send(ctx, request) + assert.NotNil(t, rerr) + assert.Equal(t, 3, count) + assert.Equal(t, http.StatusInternalServerError, response.StatusCode) +} + +func TestSendThrottled(t *testing.T) { + count := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set(retry.RetryAfterHeaderKey, "30") + http.Error(w, "failed", http.StatusTooManyRequests) + count++ + })) + + backoff := &retry.Backoff{Steps: 3} + armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff) + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", "testgroup"), + "subscriptionId": autorest.Encode("path", "testid"), + "resourceName": autorest.Encode("path", "testname"), + } + decorators := []autorest.PrepareDecorator{ + autorest.WithPathParameters( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vNets/{resourceName}", pathParameters), + } + + ctx := context.Background() + request, err := armClient.PrepareGetRequest(ctx, decorators...) 
+ assert.NoError(t, err) + + response, rerr := armClient.Send(ctx, request) + assert.NotNil(t, rerr) + assert.Equal(t, 1, count) + assert.Equal(t, http.StatusTooManyRequests, response.StatusCode) +} + +func TestSendAsync(t *testing.T) { + count := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count++ + http.Error(w, "failed", http.StatusForbidden) + + })) + + backoff := &retry.Backoff{Steps: 1} + armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff) + armClient.client.RetryDuration = time.Millisecond * 1 + + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", "testgroup"), + "subscriptionId": autorest.Encode("path", "testid"), + "resourceName": autorest.Encode("path", "testname"), + } + decorators := []autorest.PrepareDecorator{ + autorest.WithPathParameters( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vNets/{resourceName}", pathParameters), + } + + ctx := context.Background() + request, err := armClient.PreparePutRequest(ctx, decorators...) 
+ assert.NoError(t, err) + + future, response, rerr := armClient.SendAsync(ctx, request) + assert.Nil(t, future) + assert.Nil(t, response) + assert.Equal(t, 1, count) + assert.NotNil(t, rerr) + assert.Equal(t, true, rerr.Retriable) +} + +func TestNormalizeAzureRegion(t *testing.T) { + tests := []struct { + region string + expected string + }{ + { + region: "eastus", + expected: "eastus", + }, + { + region: " eastus ", + expected: "eastus", + }, + { + region: " eastus\t", + expected: "eastus", + }, + { + region: " eastus\v", + expected: "eastus", + }, + { + region: " eastus\v\r\f\n", + expected: "eastus", + }, + } + + for i, test := range tests { + real := NormalizeAzureRegion(test.region) + assert.Equal(t, test.expected, real, "test[%d]: NormalizeAzureRegion(%q) != %q", i, test.region, test.expected) + } +} + +func TestPutResource(t *testing.T) { + expectedURI := "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/publicIPAddresses/testPIP?api-version=2019-01-01" + operationURI := "/subscriptions/subscription/providers/Microsoft.Network/locations/eastus/operations/op?api-version=2019-01-01" + handlers := []func(http.ResponseWriter, *http.Request){ + func(rw http.ResponseWriter, req *http.Request) { + assert.Equal(t, "PUT", req.Method) + assert.Equal(t, expectedURI, req.URL.String()) + rw.Header().Set(http.CanonicalHeaderKey("Azure-AsyncOperation"), + fmt.Sprintf("http://%s%s", req.Host, operationURI)) + rw.WriteHeader(http.StatusCreated) + }, + + func(rw http.ResponseWriter, req *http.Request) { + assert.Equal(t, "GET", req.Method) + assert.Equal(t, operationURI, req.URL.String()) + + rw.WriteHeader(http.StatusOK) + rw.Write([]byte(`{"error":{"code":"InternalServerError"},"status":"Failed"}`)) + }, + } + + count := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handlers[count](w, r) + count++ + if count > 1 { + count = 1 + } + })) + + backoff := &retry.Backoff{Steps: 1} + armClient := 
New(nil, server.URL, "test", "2019-01-01", "eastus", backoff) + armClient.client.RetryDuration = time.Millisecond * 1 + + ctx := context.Background() + response, rerr := armClient.PutResource(ctx, "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/publicIPAddresses/testPIP", nil) + assert.Equal(t, 1, count) + assert.Nil(t, response) + assert.NotNil(t, rerr) + assert.Equal(t, true, rerr.Retriable) +} + +func TestDeleteResourceAsync(t *testing.T) { + count := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count++ + http.Error(w, "failed", http.StatusForbidden) + })) + + backoff := &retry.Backoff{Steps: 3} + armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff) + armClient.client.RetryDuration = time.Millisecond * 1 + + ctx := context.Background() + resourceID := "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/publicIPAddresses/testPIP" + future, rerr := armClient.DeleteResourceAsync(ctx, resourceID, "") + assert.Equal(t, 3, count) + assert.Nil(t, future) + assert.NotNil(t, rerr) + assert.Equal(t, true, rerr.Retriable) +} diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/doc.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/doc.go new file mode 100644 index 00000000000..02ee927c97c --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/doc.go @@ -0,0 +1,20 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package armclient implements the client for ARM. +package armclient // import "k8s.io/legacy-cloud-providers/azure/clients/armclient" diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/interface.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/interface.go new file mode 100644 index 00000000000..7ffeef4a89d --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/interface.go @@ -0,0 +1,84 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package armclient + +import ( + "context" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "k8s.io/legacy-cloud-providers/azure/retry" +) + +// Interface is the client interface for ARM. +// Don't forget to run the following command to generate the mock client: +// mockgen -source=$GOPATH/src/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/interface.go -package=mockarmclient Interface > $GOPATH/src/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient/interface.go +type Interface interface { + // Send sends a http request to ARM service with possible retry to regional ARM endpoint. 
+ Send(ctx context.Context, request *http.Request) (*http.Response, *retry.Error) + + // PreparePutRequest prepares put request + PreparePutRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) + + // PreparePostRequest prepares post request + PreparePostRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) + + // PrepareGetRequest prepares get request + PrepareGetRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) + + // PrepareDeleteRequest prepares delete request + PrepareDeleteRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) + + // PrepareHeadRequest prepares head request + PrepareHeadRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) + + // WaitForAsyncOperationCompletion waits for an operation completion + WaitForAsyncOperationCompletion(ctx context.Context, future *azure.Future, asyncOperationName string) error + + // WaitForAsyncOperationResult waits for an operation result. 
+ WaitForAsyncOperationResult(ctx context.Context, future *azure.Future, asyncOperationName string) (*http.Response, error) + + // SendAsync sends a request and returns a future object representing the async result as well as the origin http response + SendAsync(ctx context.Context, request *http.Request) (*azure.Future, *http.Response, *retry.Error) + + // PutResource puts a resource by resource ID + PutResource(ctx context.Context, resourceID string, parameters interface{}) (*http.Response, *retry.Error) + + // PutResourceAsync puts a resource by resource ID in async mode + PutResourceAsync(ctx context.Context, resourceID string, parameters interface{}) (*azure.Future, *retry.Error) + + // HeadResource heads a resource by resource ID + HeadResource(ctx context.Context, resourceID string) (*http.Response, *retry.Error) + + // GetResource gets a resource by resource ID + GetResource(ctx context.Context, resourceID, expand string) (*http.Response, *retry.Error) + + // PostResource posts a resource by resource ID + PostResource(ctx context.Context, resourceID, action string, parameters interface{}) (*http.Response, *retry.Error) + + // DeleteResource deletes a resource by resource ID + DeleteResource(ctx context.Context, resourceID, ifMatch string) *retry.Error + + // DeleteResourceAsync deletes a resource by resource ID and returns a future representing the async result + DeleteResourceAsync(ctx context.Context, resourceID, ifMatch string) (*azure.Future, *retry.Error) + + // CloseResponse closes a response + CloseResponse(ctx context.Context, response *http.Response) +} diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient/BUILD b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient/BUILD new file mode 100644 index 00000000000..d81422796ae --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient/BUILD @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", 
"go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "interface.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient", + importpath = "k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/legacy-cloud-providers/azure/retry:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", + "//vendor/github.com/golang/mock/gomock:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient/doc.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient/doc.go new file mode 100644 index 00000000000..109f68e1dd2 --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient/doc.go @@ -0,0 +1,20 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package mockarmclient implements the mock client for ARM. 
+package mockarmclient // import "k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient" diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient/interface.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient/interface.go new file mode 100644 index 00000000000..724dcd6be21 --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient/interface.go @@ -0,0 +1,329 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mockarmclient + +import ( + context "context" + http "net/http" + reflect "reflect" + + autorest "github.com/Azure/go-autorest/autorest" + azure "github.com/Azure/go-autorest/autorest/azure" + gomock "github.com/golang/mock/gomock" + retry "k8s.io/legacy-cloud-providers/azure/retry" +) + +// MockInterface is a mock of Interface interface +type MockInterface struct { + ctrl *gomock.Controller + recorder *MockInterfaceMockRecorder +} + +// MockInterfaceMockRecorder is the mock recorder for MockInterface +type MockInterfaceMockRecorder struct { + mock *MockInterface +} + +// NewMockInterface creates a new mock instance +func NewMockInterface(ctrl *gomock.Controller) *MockInterface { + mock := &MockInterface{ctrl: ctrl} + mock.recorder = &MockInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { + return m.recorder +} + +// Send mocks base method +func (m *MockInterface) Send(ctx context.Context, request *http.Request) (*http.Response, *retry.Error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", ctx, request) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(*retry.Error) + return ret0, ret1 +} + +// Send indicates an expected call of Send +func (mr *MockInterfaceMockRecorder) Send(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockInterface)(nil).Send), ctx, request) +} + +// PreparePutRequest mocks base method +func (m *MockInterface) PreparePutRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx} + for _, a := range decorators { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PreparePutRequest", varargs...) 
+ ret0, _ := ret[0].(*http.Request) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PreparePutRequest indicates an expected call of PreparePutRequest +func (mr *MockInterfaceMockRecorder) PreparePutRequest(ctx interface{}, decorators ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx}, decorators...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PreparePutRequest", reflect.TypeOf((*MockInterface)(nil).PreparePutRequest), varargs...) +} + +// PreparePostRequest mocks base method +func (m *MockInterface) PreparePostRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx} + for _, a := range decorators { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PreparePostRequest", varargs...) + ret0, _ := ret[0].(*http.Request) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PreparePostRequest indicates an expected call of PreparePostRequest +func (mr *MockInterfaceMockRecorder) PreparePostRequest(ctx interface{}, decorators ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx}, decorators...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PreparePostRequest", reflect.TypeOf((*MockInterface)(nil).PreparePostRequest), varargs...) +} + +// PrepareGetRequest mocks base method +func (m *MockInterface) PrepareGetRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx} + for _, a := range decorators { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PrepareGetRequest", varargs...) 
+ ret0, _ := ret[0].(*http.Request) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PrepareGetRequest indicates an expected call of PrepareGetRequest +func (mr *MockInterfaceMockRecorder) PrepareGetRequest(ctx interface{}, decorators ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx}, decorators...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareGetRequest", reflect.TypeOf((*MockInterface)(nil).PrepareGetRequest), varargs...) +} + +// PrepareDeleteRequest mocks base method +func (m *MockInterface) PrepareDeleteRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx} + for _, a := range decorators { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PrepareDeleteRequest", varargs...) + ret0, _ := ret[0].(*http.Request) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PrepareDeleteRequest indicates an expected call of PrepareDeleteRequest +func (mr *MockInterfaceMockRecorder) PrepareDeleteRequest(ctx interface{}, decorators ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx}, decorators...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareDeleteRequest", reflect.TypeOf((*MockInterface)(nil).PrepareDeleteRequest), varargs...) +} + +// PrepareHeadRequest mocks base method +func (m *MockInterface) PrepareHeadRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx} + for _, a := range decorators { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PrepareHeadRequest", varargs...) 
+ ret0, _ := ret[0].(*http.Request) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PrepareHeadRequest indicates an expected call of PrepareHeadRequest +func (mr *MockInterfaceMockRecorder) PrepareHeadRequest(ctx interface{}, decorators ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx}, decorators...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareHeadRequest", reflect.TypeOf((*MockInterface)(nil).PrepareHeadRequest), varargs...) +} + +// WaitForAsyncOperationCompletion mocks base method +func (m *MockInterface) WaitForAsyncOperationCompletion(ctx context.Context, future *azure.Future, asyncOperationName string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitForAsyncOperationCompletion", ctx, future, asyncOperationName) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitForAsyncOperationCompletion indicates an expected call of WaitForAsyncOperationCompletion +func (mr *MockInterfaceMockRecorder) WaitForAsyncOperationCompletion(ctx, future, asyncOperationName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForAsyncOperationCompletion", reflect.TypeOf((*MockInterface)(nil).WaitForAsyncOperationCompletion), ctx, future, asyncOperationName) +} + +// WaitForAsyncOperationResult mocks base method +func (m *MockInterface) WaitForAsyncOperationResult(ctx context.Context, future *azure.Future, asyncOperationName string) (*http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitForAsyncOperationResult", ctx, future, asyncOperationName) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WaitForAsyncOperationResult indicates an expected call of WaitForAsyncOperationResult +func (mr *MockInterfaceMockRecorder) WaitForAsyncOperationResult(ctx, future, asyncOperationName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForAsyncOperationResult", reflect.TypeOf((*MockInterface)(nil).WaitForAsyncOperationResult), ctx, future, asyncOperationName) +} + +// SendAsync mocks base method +func (m *MockInterface) SendAsync(ctx context.Context, request *http.Request) (*azure.Future, *http.Response, *retry.Error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendAsync", ctx, request) + ret0, _ := ret[0].(*azure.Future) + ret1, _ := ret[1].(*http.Response) + ret2, _ := ret[2].(*retry.Error) + return ret0, ret1, ret2 +} + +// SendAsync indicates an expected call of SendAsync +func (mr *MockInterfaceMockRecorder) SendAsync(ctx, request interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAsync", reflect.TypeOf((*MockInterface)(nil).SendAsync), ctx, request) +} + +// PutResource mocks base method +func (m *MockInterface) PutResource(ctx context.Context, resourceID string, parameters interface{}) (*http.Response, *retry.Error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutResource", ctx, resourceID, parameters) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(*retry.Error) + return ret0, ret1 +} + +// PutResource indicates an expected call of PutResource +func (mr *MockInterfaceMockRecorder) PutResource(ctx, resourceID, parameters interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutResource", reflect.TypeOf((*MockInterface)(nil).PutResource), ctx, resourceID, parameters) +} + +// PutResourceAsync mocks base method +func (m *MockInterface) PutResourceAsync(ctx context.Context, resourceID string, parameters interface{}) (*azure.Future, *retry.Error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PutResourceAsync", ctx, resourceID, parameters) + ret0, _ := ret[0].(*azure.Future) + ret1, _ := ret[1].(*retry.Error) + return ret0, ret1 +} + +// PutResourceAsync indicates an expected call of PutResourceAsync +func 
(mr *MockInterfaceMockRecorder) PutResourceAsync(ctx, resourceID, parameters interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutResourceAsync", reflect.TypeOf((*MockInterface)(nil).PutResourceAsync), ctx, resourceID, parameters) +} + +// HeadResource mocks base method +func (m *MockInterface) HeadResource(ctx context.Context, resourceID string) (*http.Response, *retry.Error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeadResource", ctx, resourceID) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(*retry.Error) + return ret0, ret1 +} + +// HeadResource indicates an expected call of HeadResource +func (mr *MockInterfaceMockRecorder) HeadResource(ctx, resourceID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadResource", reflect.TypeOf((*MockInterface)(nil).HeadResource), ctx, resourceID) +} + +// GetResource mocks base method +func (m *MockInterface) GetResource(ctx context.Context, resourceID, expand string) (*http.Response, *retry.Error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetResource", ctx, resourceID, expand) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(*retry.Error) + return ret0, ret1 +} + +// GetResource indicates an expected call of GetResource +func (mr *MockInterfaceMockRecorder) GetResource(ctx, resourceID, expand interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResource", reflect.TypeOf((*MockInterface)(nil).GetResource), ctx, resourceID, expand) +} + +// PostResource mocks base method +func (m *MockInterface) PostResource(ctx context.Context, resourceID, action string, parameters interface{}) (*http.Response, *retry.Error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PostResource", ctx, resourceID, action, parameters) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(*retry.Error) + return ret0, ret1 +} + +// 
PostResource indicates an expected call of PostResource +func (mr *MockInterfaceMockRecorder) PostResource(ctx, resourceID, action, parameters interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PostResource", reflect.TypeOf((*MockInterface)(nil).PostResource), ctx, resourceID, action, parameters) +} + +// DeleteResource mocks base method +func (m *MockInterface) DeleteResource(ctx context.Context, resourceID, ifMatch string) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteResource", ctx, resourceID, ifMatch) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// DeleteResource indicates an expected call of DeleteResource +func (mr *MockInterfaceMockRecorder) DeleteResource(ctx, resourceID, ifMatch interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteResource", reflect.TypeOf((*MockInterface)(nil).DeleteResource), ctx, resourceID, ifMatch) +} + +// DeleteResourceAsync mocks base method +func (m *MockInterface) DeleteResourceAsync(ctx context.Context, resourceID, ifMatch string) (*azure.Future, *retry.Error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteResourceAsync", ctx, resourceID, ifMatch) + ret0, _ := ret[0].(*azure.Future) + ret1, _ := ret[1].(*retry.Error) + return ret0, ret1 +} + +// DeleteResourceAsync indicates an expected call of DeleteResourceAsync +func (mr *MockInterfaceMockRecorder) DeleteResourceAsync(ctx, resourceID, ifMatch interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteResourceAsync", reflect.TypeOf((*MockInterface)(nil).DeleteResourceAsync), ctx, resourceID, ifMatch) +} + +// CloseResponse mocks base method +func (m *MockInterface) CloseResponse(ctx context.Context, response *http.Response) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "CloseResponse", ctx, response) +} + +// CloseResponse indicates an expected call of CloseResponse 
+func (mr *MockInterfaceMockRecorder) CloseResponse(ctx, response interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseResponse", reflect.TypeOf((*MockInterface)(nil).CloseResponse), ctx, response) +} diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/azure_client_config.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/azure_client_config.go index 88aef868e00..a0be77309c3 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/azure_client_config.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/azure_client_config.go @@ -40,10 +40,11 @@ type ClientConfig struct { ShouldOmitCloudProviderBackoff bool } -// WithRateLimiter returns ClientConfig with rateLimitConfig set. +// WithRateLimiter returns a new ClientConfig with rateLimitConfig set. func (cfg *ClientConfig) WithRateLimiter(rl *RateLimitConfig) *ClientConfig { - cfg.RateLimitConfig = rl - return cfg + newClientConfig := *cfg + newClientConfig.RateLimitConfig = rl + return &newClientConfig } // RateLimitConfig indicates the rate limit config options. 
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/azure_client_config_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/azure_client_config_test.go index f06db05d988..b1155d0038b 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/azure_client_config_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/azure_client_config_test.go @@ -28,8 +28,8 @@ import ( func TestWithRateLimiter(t *testing.T) { config := &ClientConfig{} assert.Nil(t, config.RateLimitConfig) - config.WithRateLimiter(&RateLimitConfig{CloudProviderRateLimit: true}) - assert.Equal(t, &RateLimitConfig{CloudProviderRateLimit: true}, config.RateLimitConfig) + c := config.WithRateLimiter(&RateLimitConfig{CloudProviderRateLimit: true}) + assert.Equal(t, &RateLimitConfig{CloudProviderRateLimit: true}, c.RateLimitConfig) config.WithRateLimiter(nil) assert.Nil(t, config.RateLimitConfig) } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/BUILD b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/BUILD new file mode 100644 index 00000000000..76f19faf8c5 --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/BUILD @@ -0,0 +1,58 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "azure_vmssclient.go", + "doc.go", + "interface.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/legacy-cloud-providers/azure/clients/vmssclient", + importpath = "k8s.io/legacy-cloud-providers/azure/clients/vmssclient", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", + "//staging/src/k8s.io/legacy-cloud-providers/azure/clients:go_default_library", + "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient:go_default_library", + "//staging/src/k8s.io/legacy-cloud-providers/azure/metrics:go_default_library", + 
"//staging/src/k8s.io/legacy-cloud-providers/azure/retry:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["azure_vmssclient_test.go"], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/legacy-cloud-providers/azure/clients:go_default_library", + "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient:go_default_library", + "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", + "//vendor/github.com/golang/mock/gomock:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/azure_vmssclient.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/azure_vmssclient.go new file mode 100644 index 00000000000..ae2fe62b8a7 --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/azure_vmssclient.go @@ -0,0 +1,361 @@ +// 
+build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vmssclient + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/to" + + "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" + azclients "k8s.io/legacy-cloud-providers/azure/clients" + "k8s.io/legacy-cloud-providers/azure/clients/armclient" + "k8s.io/legacy-cloud-providers/azure/metrics" + "k8s.io/legacy-cloud-providers/azure/retry" +) + +var _ Interface = &Client{} + +// Client implements VMSS client Interface. +type Client struct { + armClient armclient.Interface + subscriptionID string + + // Rate limiting configures. + rateLimiterReader flowcontrol.RateLimiter + rateLimiterWriter flowcontrol.RateLimiter + + // ARM throttling configures. + RetryAfterReader time.Time + RetryAfterWriter time.Time +} + +// New creates a new VMSS client with ratelimiting. 
+func New(config *azclients.ClientConfig) *Client { + baseURI := config.ResourceManagerEndpoint + authorizer := autorest.NewBearerAuthorizer(config.ServicePrincipalToken) + armClient := armclient.New(authorizer, baseURI, "", APIVersion, config.Location, config.Backoff) + rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig) + + klog.V(2).Infof("Azure VirtualMachineScaleSetClient (read ops) using rate limit config: QPS=%g, bucket=%d", + config.RateLimitConfig.CloudProviderRateLimitQPS, + config.RateLimitConfig.CloudProviderRateLimitBucket) + klog.V(2).Infof("Azure VirtualMachineScaleSetClient (write ops) using rate limit config: QPS=%g, bucket=%d", + config.RateLimitConfig.CloudProviderRateLimitQPSWrite, + config.RateLimitConfig.CloudProviderRateLimitBucketWrite) + + client := &Client{ + armClient: armClient, + rateLimiterReader: rateLimiterReader, + rateLimiterWriter: rateLimiterWriter, + subscriptionID: config.SubscriptionID, + } + + return client +} + +// Get gets a VirtualMachineScaleSet. +func (c *Client) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (compute.VirtualMachineScaleSet, *retry.Error) { + mc := metrics.NewMetricContext("vmss", "get", resourceGroupName, c.subscriptionID, "") + + // Report errors if the client is rate limited. + if !c.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() + return compute.VirtualMachineScaleSet{}, retry.GetRateLimitError(false, "VMSSGet") + } + + // Report errors if the client is throttled. + if c.RetryAfterReader.After(time.Now()) { + mc.ThrottledCount() + rerr := retry.GetThrottlingError("VMSSGet", "client throttled", c.RetryAfterReader) + return compute.VirtualMachineScaleSet{}, rerr + } + + result, rerr := c.getVMSS(ctx, resourceGroupName, VMScaleSetName) + mc.Observe(rerr.Error()) + if rerr != nil { + if rerr.IsThrottled() { + // Update RetryAfterReader so that no more requests would be sent until RetryAfter expires. 
+ c.RetryAfterReader = rerr.RetryAfter + } + + return result, rerr + } + + return result, nil +} + +// getVMSS gets a VirtualMachineScaleSet. +func (c *Client) getVMSS(ctx context.Context, resourceGroupName string, VMScaleSetName string) (compute.VirtualMachineScaleSet, *retry.Error) { + resourceID := armclient.GetResourceID( + c.subscriptionID, + resourceGroupName, + "Microsoft.Compute/virtualMachineScaleSets", + VMScaleSetName, + ) + result := compute.VirtualMachineScaleSet{} + + response, rerr := c.armClient.GetResource(ctx, resourceID, "") + defer c.armClient.CloseResponse(ctx, response) + if rerr != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmss.get.request", resourceID, rerr.Error()) + return result, rerr + } + + err := autorest.Respond( + response, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result)) + if err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmss.get.respond", resourceID, err) + return result, retry.GetError(response, err) + } + + result.Response = autorest.Response{Response: response} + return result, nil +} + +// List gets a list of VirtualMachineScaleSets in the resource group. +func (c *Client) List(ctx context.Context, resourceGroupName string) ([]compute.VirtualMachineScaleSet, *retry.Error) { + mc := metrics.NewMetricContext("vmss", "list", resourceGroupName, c.subscriptionID, "") + + // Report errors if the client is rate limited. + if !c.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() + return nil, retry.GetRateLimitError(false, "VMSSList") + } + + // Report errors if the client is throttled. 
+ if c.RetryAfterReader.After(time.Now()) { + mc.ThrottledCount() + rerr := retry.GetThrottlingError("VMSSList", "client throttled", c.RetryAfterReader) + return nil, rerr + } + + result, rerr := c.listVMSS(ctx, resourceGroupName) + mc.Observe(rerr.Error()) + if rerr != nil { + if rerr.IsThrottled() { + // Update RetryAfterReader so that no more requests would be sent until RetryAfter expires. + c.RetryAfterReader = rerr.RetryAfter + } + + return result, rerr + } + + return result, nil +} + +// listVMSS gets a list of VirtualMachineScaleSets in the resource group. +func (c *Client) listVMSS(ctx context.Context, resourceGroupName string) ([]compute.VirtualMachineScaleSet, *retry.Error) { + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets", + autorest.Encode("path", c.subscriptionID), + autorest.Encode("path", resourceGroupName)) + result := make([]compute.VirtualMachineScaleSet, 0) + page := &VirtualMachineScaleSetListResultPage{} + page.fn = c.listNextResults + + resp, rerr := c.armClient.GetResource(ctx, resourceID, "") + defer c.armClient.CloseResponse(ctx, resp) + if rerr != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmss.list.request", resourceID, rerr.Error()) + return result, rerr + } + + var err error + page.vmsslr, err = c.listResponder(resp) + if err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmss.list.respond", resourceID, err) + return result, retry.GetError(resp, err) + } + + for page.NotDone() { + result = append(result, *page.Response().Value...) + if err = page.NextWithContext(ctx); err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmss.list.next", resourceID, err) + return result, retry.GetError(page.Response().Response.Response, err) + } + } + + return result, nil +} + +// CreateOrUpdate creates or updates a VirtualMachineScaleSet. 
+func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) *retry.Error { + mc := metrics.NewMetricContext("vmss", "create_or_update", resourceGroupName, c.subscriptionID, "") + + // Report errors if the client is rate limited. + if !c.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() + return retry.GetRateLimitError(true, "VMSSCreateOrUpdate") + } + + // Report errors if the client is throttled. + if c.RetryAfterWriter.After(time.Now()) { + mc.ThrottledCount() + rerr := retry.GetThrottlingError("VMSSCreateOrUpdate", "client throttled", c.RetryAfterWriter) + return rerr + } + + rerr := c.createOrUpdateVMSS(ctx, resourceGroupName, VMScaleSetName, parameters) + mc.Observe(rerr.Error()) + if rerr != nil { + if rerr.IsThrottled() { + // Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires. + c.RetryAfterWriter = rerr.RetryAfter + } + + return rerr + } + + return nil + } + + // createOrUpdateVMSS creates or updates a VirtualMachineScaleSet. 
+func (c *Client) createOrUpdateVMSS(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) *retry.Error { + resourceID := armclient.GetResourceID( + c.subscriptionID, + resourceGroupName, + "Microsoft.Compute/virtualMachineScaleSets", + VMScaleSetName, + ) + response, rerr := c.armClient.PutResource(ctx, resourceID, parameters) + defer c.armClient.CloseResponse(ctx, response) + if rerr != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmss.put.request", resourceID, rerr.Error()) + return rerr + } + + if response != nil && response.StatusCode != http.StatusNoContent { + _, rerr = c.createOrUpdateResponder(response) + if rerr != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmss.put.respond", resourceID, rerr.Error()) + return rerr + } + } + + return nil +} + +func (c *Client) createOrUpdateResponder(resp *http.Response) (*compute.VirtualMachineScaleSet, *retry.Error) { + result := &compute.VirtualMachineScaleSet{} + err := autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result)) + result.Response = autorest.Response{Response: resp} + return result, retry.GetError(resp, err) +} + +func (c *Client) listResponder(resp *http.Response) (result compute.VirtualMachineScaleSetListResult, err error) { + err = autorest.Respond( + resp, + autorest.ByIgnoring(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result)) + result.Response = autorest.Response{Response: resp} + return +} + +// virtualMachineScaleSetListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. 
+func (c *Client) virtualMachineScaleSetListResultPreparer(ctx context.Context, vmsslr compute.VirtualMachineScaleSetListResult) (*http.Request, error) { + if vmsslr.NextLink == nil || len(to.String(vmsslr.NextLink)) < 1 { + return nil, nil + } + + decorators := []autorest.PrepareDecorator{ + autorest.WithBaseURL(to.String(vmsslr.NextLink)), + } + return c.armClient.PrepareGetRequest(ctx, decorators...) +} + +// listNextResults retrieves the next set of results, if any. +func (c *Client) listNextResults(ctx context.Context, lastResults compute.VirtualMachineScaleSetListResult) (result compute.VirtualMachineScaleSetListResult, err error) { + req, err := c.virtualMachineScaleSetListResultPreparer(ctx, lastResults) + if err != nil { + return result, autorest.NewErrorWithError(err, "vmssclient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, rerr := c.armClient.Send(ctx, req) + defer c.armClient.CloseResponse(ctx, resp) + if rerr != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(rerr.Error(), "vmssclient", "listNextResults", resp, "Failure sending next results request") + } + + result, err = c.listResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "vmssclient", "listNextResults", resp, "Failure responding to next results request") + } + + return +} + +// VirtualMachineScaleSetListResultPage contains a page of VirtualMachineScaleSet values. +type VirtualMachineScaleSetListResultPage struct { + fn func(context.Context, compute.VirtualMachineScaleSetListResult) (compute.VirtualMachineScaleSetListResult, error) + vmsslr compute.VirtualMachineScaleSetListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *VirtualMachineScaleSetListResultPage) NextWithContext(ctx context.Context) (err error) { + next, err := page.fn(ctx, page.vmsslr) + if err != nil { + return err + } + page.vmsslr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *VirtualMachineScaleSetListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualMachineScaleSetListResultPage) NotDone() bool { + return !page.vmsslr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page VirtualMachineScaleSetListResultPage) Response() compute.VirtualMachineScaleSetListResult { + return page.vmsslr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page VirtualMachineScaleSetListResultPage) Values() []compute.VirtualMachineScaleSet { + if page.vmsslr.IsEmpty() { + return nil + } + return *page.vmsslr.Value +} diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/azure_vmssclient_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/azure_vmssclient_test.go new file mode 100644 index 00000000000..3ee19cdb85d --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/azure_vmssclient_test.go @@ -0,0 +1,142 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vmssclient + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + "net/http" + "testing" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + azclients "k8s.io/legacy-cloud-providers/azure/clients" + "k8s.io/legacy-cloud-providers/azure/clients/armclient" + "k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient" +) + +func TestGetNotFound(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/vmss1" + response := &http.Response{ + StatusCode: http.StatusNotFound, + Body: ioutil.NopCloser(bytes.NewReader([]byte("{}"))), + } + armClient := mockarmclient.NewMockInterface(ctrl) + armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(response, nil).Times(1) + armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1) + + vmssClient := getTestVMSSClient(armClient) + expectedVMSS := compute.VirtualMachineScaleSet{Response: autorest.Response{}} + result, rerr := vmssClient.Get(context.TODO(), "rg", "vmss1") + assert.Equal(t, expectedVMSS, result) + assert.NotNil(t, rerr) + assert.Equal(t, http.StatusNotFound, rerr.HTTPStatusCode) +} + +func TestGetInternalError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + resourceID := 
"/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/vmss1" + response := &http.Response{ + StatusCode: http.StatusInternalServerError, + Body: ioutil.NopCloser(bytes.NewReader([]byte("{}"))), + } + armClient := mockarmclient.NewMockInterface(ctrl) + armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(response, nil).Times(1) + armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1) + + vmssClient := getTestVMSSClient(armClient) + expectedVMSS := compute.VirtualMachineScaleSet{Response: autorest.Response{}} + result, rerr := vmssClient.Get(context.TODO(), "rg", "vmss1") + assert.Equal(t, expectedVMSS, result) + assert.NotNil(t, rerr) + assert.Equal(t, http.StatusInternalServerError, rerr.HTTPStatusCode) +} + +func TestList(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets" + armClient := mockarmclient.NewMockInterface(ctrl) + vmssList := []compute.VirtualMachineScaleSet{getTestVMSS("vmss1"), getTestVMSS("vmss2"), getTestVMSS("vmss3")} + responseBody, err := json.Marshal(compute.VirtualMachineScaleSetListResult{Value: &vmssList}) + assert.Nil(t, err) + armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return( + &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(responseBody)), + }, nil).Times(1) + armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1) + + vmssClient := getTestVMSSClient(armClient) + result, rerr := vmssClient.List(context.TODO(), "rg") + assert.Nil(t, rerr) + assert.Equal(t, 3, len(result)) +} + +func TestCreateOrUpdate(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + vmss := getTestVMSS("vmss1") + armClient := mockarmclient.NewMockInterface(ctrl) + response := &http.Response{ + StatusCode: http.StatusOK, + Body: 
ioutil.NopCloser(bytes.NewReader([]byte(""))), + } + armClient.EXPECT().PutResource(gomock.Any(), to.String(vmss.ID), vmss).Return(response, nil).Times(1) + armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1) + + vmssClient := getTestVMSSClient(armClient) + rerr := vmssClient.CreateOrUpdate(context.TODO(), "rg", "vmss1", vmss) + assert.Nil(t, rerr) +} + +func getTestVMSS(name string) compute.VirtualMachineScaleSet { + return compute.VirtualMachineScaleSet{ + ID: to.StringPtr("/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/vmss1"), + Name: to.StringPtr(name), + Location: to.StringPtr("eastus"), + Sku: &compute.Sku{ + Name: to.StringPtr("Standard"), + Capacity: to.Int64Ptr(3), + }, + } +} + +func getTestVMSSClient(armClient armclient.Interface) *Client { + rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(&azclients.RateLimitConfig{}) + return &Client{ + armClient: armClient, + subscriptionID: "subscriptionID", + rateLimiterReader: rateLimiterReader, + rateLimiterWriter: rateLimiterWriter, + } +} diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/doc.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/doc.go new file mode 100644 index 00000000000..c108f0bf799 --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/doc.go @@ -0,0 +1,20 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package vmssclient implements the client for VMSS. +package vmssclient // import "k8s.io/legacy-cloud-providers/azure/clients/vmssclient" diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/interface.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/interface.go new file mode 100644 index 00000000000..91a7b2a3fd4 --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/interface.go @@ -0,0 +1,45 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vmssclient + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" + "k8s.io/legacy-cloud-providers/azure/retry" +) + +const ( + // APIVersion is the API version for VMSS. + APIVersion = "2019-07-01" +) + +// Interface is the client interface for VirtualMachineScaleSet. +// Don't forget to run the following command to generate the mock client: +// mockgen -source=$GOPATH/src/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/interface.go -package=mockvmssclient Interface > $GOPATH/src/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient/interface.go +type Interface interface { + // Get gets a VirtualMachineScaleSet. 
+ Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, rerr *retry.Error) + + // List gets a list of VirtualMachineScaleSets in the resource group. + List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachineScaleSet, rerr *retry.Error) + + // CreateOrUpdate creates or updates a VirtualMachineScaleSet. + CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) *retry.Error +} diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient/BUILD b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient/BUILD new file mode 100644 index 00000000000..b8bf624a6f5 --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "interface.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient", + importpath = "k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/legacy-cloud-providers/azure/retry:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute:go_default_library", + "//vendor/github.com/golang/mock/gomock:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient/doc.go 
b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient/doc.go new file mode 100644 index 00000000000..7b691179e5d --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient/doc.go @@ -0,0 +1,20 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package mockvmssclient implements the mock client for VMSS. +package mockvmssclient // import "k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient" diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient/interface.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient/interface.go new file mode 100644 index 00000000000..c80827e1eba --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient/interface.go @@ -0,0 +1,95 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mockvmssclient + +import ( + context "context" + reflect "reflect" + + compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" + gomock "github.com/golang/mock/gomock" + retry "k8s.io/legacy-cloud-providers/azure/retry" +) + +// MockInterface is a mock of Interface interface +type MockInterface struct { + ctrl *gomock.Controller + recorder *MockInterfaceMockRecorder +} + +// MockInterfaceMockRecorder is the mock recorder for MockInterface +type MockInterfaceMockRecorder struct { + mock *MockInterface +} + +// NewMockInterface creates a new mock instance +func NewMockInterface(ctrl *gomock.Controller) *MockInterface { + mock := &MockInterface{ctrl: ctrl} + mock.recorder = &MockInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { + return m.recorder +} + +// Get mocks base method +func (m *MockInterface) Get(ctx context.Context, resourceGroupName, VMScaleSetName string) (compute.VirtualMachineScaleSet, *retry.Error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, VMScaleSetName) + ret0, _ := ret[0].(compute.VirtualMachineScaleSet) + ret1, _ := ret[1].(*retry.Error) + return ret0, ret1 +} + +// Get indicates an expected call of Get +func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, VMScaleSetName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, VMScaleSetName) +} + +// List mocks base method +func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]compute.VirtualMachineScaleSet, *retry.Error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List", ctx, resourceGroupName) + ret0, _ := 
ret[0].([]compute.VirtualMachineScaleSet) + ret1, _ := ret[1].(*retry.Error) + return ret0, ret1 +} + +// List indicates an expected call of List +func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName) +} + +// CreateOrUpdate mocks base method +func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, VMScaleSetName, parameters) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// CreateOrUpdate indicates an expected call of CreateOrUpdate +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, VMScaleSetName, parameters interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, VMScaleSetName, parameters) +} diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/BUILD b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/BUILD new file mode 100644 index 00000000000..4fd46f2c9ad --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/BUILD @@ -0,0 +1,58 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "azure_vmssclientvm.go", + "doc.go", + "interface.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient", + importpath = "k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", + 
"//staging/src/k8s.io/legacy-cloud-providers/azure/clients:go_default_library", + "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient:go_default_library", + "//staging/src/k8s.io/legacy-cloud-providers/azure/metrics:go_default_library", + "//staging/src/k8s.io/legacy-cloud-providers/azure/retry:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["azure_vmssvmclient_test.go"], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/legacy-cloud-providers/azure/clients:go_default_library", + "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient:go_default_library", + "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", + "//vendor/github.com/golang/mock/gomock:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/azure_vmssclientvm.go 
b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/azure_vmssclientvm.go new file mode 100644 index 00000000000..e2ce97c6590 --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/azure_vmssclientvm.go @@ -0,0 +1,369 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vmssvmclient + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/to" + + "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" + azclients "k8s.io/legacy-cloud-providers/azure/clients" + "k8s.io/legacy-cloud-providers/azure/clients/armclient" + "k8s.io/legacy-cloud-providers/azure/metrics" + "k8s.io/legacy-cloud-providers/azure/retry" +) + +var _ Interface = &Client{} + +// Client implements VMSS client Interface. +type Client struct { + armClient armclient.Interface + subscriptionID string + + // Rate limiting configures. + rateLimiterReader flowcontrol.RateLimiter + rateLimiterWriter flowcontrol.RateLimiter + + // ARM throttling configures. + RetryAfterReader time.Time + RetryAfterWriter time.Time +} + +// New creates a new vmssVM client with ratelimiting. 
+func New(config *azclients.ClientConfig) *Client { + baseURI := config.ResourceManagerEndpoint + authorizer := autorest.NewBearerAuthorizer(config.ServicePrincipalToken) + armClient := armclient.New(authorizer, baseURI, "", APIVersion, config.Location, config.Backoff) + rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig) + + klog.V(2).Infof("Azure vmssVM client (read ops) using rate limit config: QPS=%g, bucket=%d", + config.RateLimitConfig.CloudProviderRateLimitQPS, + config.RateLimitConfig.CloudProviderRateLimitBucket) + klog.V(2).Infof("Azure vmssVM client (write ops) using rate limit config: QPS=%g, bucket=%d", + config.RateLimitConfig.CloudProviderRateLimitQPSWrite, + config.RateLimitConfig.CloudProviderRateLimitBucketWrite) + + client := &Client{ + armClient: armClient, + rateLimiterReader: rateLimiterReader, + rateLimiterWriter: rateLimiterWriter, + subscriptionID: config.SubscriptionID, + } + + return client +} + +// Get gets a VirtualMachineScaleSetVM. +func (c *Client) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand compute.InstanceViewTypes) (compute.VirtualMachineScaleSetVM, *retry.Error) { + mc := metrics.NewMetricContext("vmssvm", "get", resourceGroupName, c.subscriptionID, "") + + // Report errors if the client is rate limited. + if !c.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() + return compute.VirtualMachineScaleSetVM{}, retry.GetRateLimitError(false, "VMSSVMGet") + } + + // Report errors if the client is throttled. 
+ if c.RetryAfterReader.After(time.Now()) { + mc.ThrottledCount() + rerr := retry.GetThrottlingError("VMSSVMGet", "client throttled", c.RetryAfterReader) + return compute.VirtualMachineScaleSetVM{}, rerr + } + + result, rerr := c.getVMSSVM(ctx, resourceGroupName, VMScaleSetName, instanceID, expand) + mc.Observe(rerr.Error()) + if rerr != nil { + if rerr.IsThrottled() { + // Update RetryAfterReader so that no more requests would be sent until RetryAfter expires. + c.RetryAfterReader = rerr.RetryAfter + } + + return result, rerr + } + + return result, nil +} + +// getVMSSVM gets a VirtualMachineScaleSetVM. +func (c *Client) getVMSSVM(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand compute.InstanceViewTypes) (compute.VirtualMachineScaleSetVM, *retry.Error) { + resourceID := armclient.GetChildResourceID( + c.subscriptionID, + resourceGroupName, + "Microsoft.Compute/virtualMachineScaleSets", + VMScaleSetName, + "virtualMachines", + instanceID, + ) + result := compute.VirtualMachineScaleSetVM{} + + response, rerr := c.armClient.GetResource(ctx, resourceID, string(expand)) + defer c.armClient.CloseResponse(ctx, response) + if rerr != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmssvm.get.request", resourceID, rerr.Error()) + return result, rerr + } + + err := autorest.Respond( + response, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result)) + if err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmssvm.get.respond", resourceID, err) + return result, retry.GetError(response, err) + } + + result.Response = autorest.Response{Response: response} + return result, nil +} + +// List gets a list of VirtualMachineScaleSetVMs in the virtualMachineScaleSet. 
+func (c *Client) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, expand string) ([]compute.VirtualMachineScaleSetVM, *retry.Error) { + mc := metrics.NewMetricContext("vmssvm", "list", resourceGroupName, c.subscriptionID, "") + + // Report errors if the client is rate limited. + if !c.rateLimiterReader.TryAccept() { + mc.RateLimitedCount() + return nil, retry.GetRateLimitError(false, "VMSSVMList") + } + + // Report errors if the client is throttled. + if c.RetryAfterReader.After(time.Now()) { + mc.ThrottledCount() + rerr := retry.GetThrottlingError("VMSSVMList", "client throttled", c.RetryAfterReader) + return nil, rerr + } + + result, rerr := c.listVMSSVM(ctx, resourceGroupName, virtualMachineScaleSetName, expand) + mc.Observe(rerr.Error()) + if rerr != nil { + if rerr.IsThrottled() { + // Update RetryAfterReader so that no more requests would be sent until RetryAfter expires. + c.RetryAfterReader = rerr.RetryAfter + } + + return result, rerr + } + + return result, nil +} + +// listVMSSVM gets a list of VirtualMachineScaleSetVMs in the virtualMachineScaleSet. 
+func (c *Client) listVMSSVM(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, expand string) ([]compute.VirtualMachineScaleSetVM, *retry.Error) { + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines", + autorest.Encode("path", c.subscriptionID), + autorest.Encode("path", resourceGroupName), + autorest.Encode("path", virtualMachineScaleSetName), + ) + + result := make([]compute.VirtualMachineScaleSetVM, 0) + page := &VirtualMachineScaleSetVMListResultPage{} + page.fn = c.listNextResults + + resp, rerr := c.armClient.GetResource(ctx, resourceID, expand) + defer c.armClient.CloseResponse(ctx, resp) + if rerr != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmssvm.list.request", resourceID, rerr.Error()) + return result, rerr + } + + var err error + page.vmssvlr, err = c.listResponder(resp) + if err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmssvm.list.respond", resourceID, err) + return result, retry.GetError(resp, err) + } + + for page.NotDone() { + result = append(result, *page.Response().Value...) + if err = page.NextWithContext(ctx); err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmssvm.list.next", resourceID, err) + return result, retry.GetError(page.Response().Response.Response, err) + } + } + + return result, nil +} + +// Update updates a VirtualMachineScaleSetVM. +func (c *Client) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) *retry.Error { + mc := metrics.NewMetricContext("vmssvm", "update", resourceGroupName, c.subscriptionID, source) + + // Report errors if the client is rate limited. 
+ // Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
+// virtualMachineScaleSetVMListResultPreparer prepares a request to retrieve the next set of results.
+func (c *Client) listNextResults(ctx context.Context, lastResults compute.VirtualMachineScaleSetVMListResult) (result compute.VirtualMachineScaleSetVMListResult, err error) { + req, err := c.virtualMachineScaleSetVMListResultPreparer(ctx, lastResults) + if err != nil { + return result, autorest.NewErrorWithError(err, "vmssvmclient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, rerr := c.armClient.Send(ctx, req) + defer c.armClient.CloseResponse(ctx, resp) + if rerr != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(rerr.Error(), "vmssvmclient", "listNextResults", resp, "Failure sending next results request") + } + + result, err = c.listResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "vmssvmclient", "listNextResults", resp, "Failure responding to next results request") + } + + return +} + +// VirtualMachineScaleSetVMListResultPage contains a page of VirtualMachineScaleSetVM values. +type VirtualMachineScaleSetVMListResultPage struct { + fn func(context.Context, compute.VirtualMachineScaleSetVMListResult) (compute.VirtualMachineScaleSetVMListResult, error) + vmssvlr compute.VirtualMachineScaleSetVMListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *VirtualMachineScaleSetVMListResultPage) NextWithContext(ctx context.Context) (err error) { + next, err := page.fn(ctx, page.vmssvlr) + if err != nil { + return err + } + page.vmssvlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. 
+func (page *VirtualMachineScaleSetVMListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualMachineScaleSetVMListResultPage) NotDone() bool { + return !page.vmssvlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page VirtualMachineScaleSetVMListResultPage) Response() compute.VirtualMachineScaleSetVMListResult { + return page.vmssvlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page VirtualMachineScaleSetVMListResultPage) Values() []compute.VirtualMachineScaleSetVM { + if page.vmssvlr.IsEmpty() { + return nil + } + return *page.vmssvlr.Value +} diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/azure_vmssvmclient_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/azure_vmssvmclient_test.go new file mode 100644 index 00000000000..23e10c0b412 --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/azure_vmssvmclient_test.go @@ -0,0 +1,140 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vmssvmclient + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "testing" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + azclients "k8s.io/legacy-cloud-providers/azure/clients" + "k8s.io/legacy-cloud-providers/azure/clients/armclient" + "k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient" +) + +func TestGetNotFound(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/vmss1/virtualMachines/0" + response := &http.Response{ + StatusCode: http.StatusNotFound, + Body: ioutil.NopCloser(bytes.NewReader([]byte("{}"))), + } + armClient := mockarmclient.NewMockInterface(ctrl) + armClient.EXPECT().GetResource(gomock.Any(), resourceID, "InstanceView").Return(response, nil).Times(1) + armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1) + + vmssClient := getTestVMSSVMClient(armClient) + expectedVM := compute.VirtualMachineScaleSetVM{Response: autorest.Response{}} + result, rerr := vmssClient.Get(context.TODO(), "rg", "vmss1", "0", "InstanceView") + assert.Equal(t, expectedVM, result) + assert.NotNil(t, rerr) + assert.Equal(t, http.StatusNotFound, rerr.HTTPStatusCode) +} + +func TestGetInternalError(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/vmss1/virtualMachines/1" + response := &http.Response{ + StatusCode: http.StatusInternalServerError, + Body: ioutil.NopCloser(bytes.NewReader([]byte("{}"))), + } + armClient := mockarmclient.NewMockInterface(ctrl) + armClient.EXPECT().GetResource(gomock.Any(), resourceID, 
"InstanceView").Return(response, nil).Times(1) + armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1) + + vmssClient := getTestVMSSVMClient(armClient) + expectedVM := compute.VirtualMachineScaleSetVM{Response: autorest.Response{}} + result, rerr := vmssClient.Get(context.TODO(), "rg", "vmss1", "1", "InstanceView") + assert.Equal(t, expectedVM, result) + assert.NotNil(t, rerr) + assert.Equal(t, http.StatusInternalServerError, rerr.HTTPStatusCode) +} + +func TestList(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/vmss1/virtualMachines" + armClient := mockarmclient.NewMockInterface(ctrl) + vmssList := []compute.VirtualMachineScaleSetVM{getTestVMSSVM("vmss1", "1"), getTestVMSSVM("vmss1", "2"), getTestVMSSVM("vmss1", "3")} + responseBody, err := json.Marshal(compute.VirtualMachineScaleSetVMListResult{Value: &vmssList}) + assert.Nil(t, err) + armClient.EXPECT().GetResource(gomock.Any(), resourceID, "InstanceView").Return( + &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(responseBody)), + }, nil).Times(1) + armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1) + + vmssClient := getTestVMSSVMClient(armClient) + result, rerr := vmssClient.List(context.TODO(), "rg", "vmss1", "InstanceView") + assert.Nil(t, rerr) + assert.Equal(t, 3, len(result)) +} + +func TestUpdate(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + vmssVM := getTestVMSSVM("vmss1", "0") + armClient := mockarmclient.NewMockInterface(ctrl) + response := &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + } + armClient.EXPECT().PutResource(gomock.Any(), to.String(vmssVM.ID), vmssVM).Return(response, nil).Times(1) + armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1) + + vmssClient := 
getTestVMSSVMClient(armClient) + rerr := vmssClient.Update(context.TODO(), "rg", "vmss1", "0", vmssVM, "test") + assert.Nil(t, rerr) +} + +func getTestVMSSVM(vmssName, instanceID string) compute.VirtualMachineScaleSetVM { + resourceID := fmt.Sprintf("/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s", vmssName, instanceID) + return compute.VirtualMachineScaleSetVM{ + ID: to.StringPtr(resourceID), + InstanceID: to.StringPtr(instanceID), + Location: to.StringPtr("eastus"), + } +} + +func getTestVMSSVMClient(armClient armclient.Interface) *Client { + rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(&azclients.RateLimitConfig{}) + return &Client{ + armClient: armClient, + subscriptionID: "subscriptionID", + rateLimiterReader: rateLimiterReader, + rateLimiterWriter: rateLimiterWriter, + } +} diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/doc.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/doc.go new file mode 100644 index 00000000000..aba6c0a2a8f --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/doc.go @@ -0,0 +1,20 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package vmssvmclient implements the client for VirtualMachineScaleSetVM. 
+package vmssvmclient // import "k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient" diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/interface.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/interface.go new file mode 100644 index 00000000000..e92c0fd3c78 --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/interface.go @@ -0,0 +1,45 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vmssvmclient + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" + "k8s.io/legacy-cloud-providers/azure/retry" +) + +const ( + // APIVersion is the API version for VMSS. + APIVersion = "2019-07-01" +) + +// Interface is the client interface for VirtualMachineScaleSetVM. +// Don't forget to run the following command to generate the mock client: +// mockgen -source=$GOPATH/src/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/interface.go -package=mockvmssvmclient Interface > $GOPATH/src/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient/interface.go +type Interface interface { + // Get gets a VirtualMachineScaleSetVM. 
+ Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, expand compute.InstanceViewTypes) (compute.VirtualMachineScaleSetVM, *retry.Error) + + // List gets a list of VirtualMachineScaleSetVMs in the virtualMachineScaleSet. + List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, expand string) ([]compute.VirtualMachineScaleSetVM, *retry.Error) + + // Update updates a VirtualMachineScaleSetVM. + Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) *retry.Error +} diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient/BUILD b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient/BUILD new file mode 100644 index 00000000000..342b84bd252 --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "interface.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient", + importpath = "k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/legacy-cloud-providers/azure/retry:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute:go_default_library", + "//vendor/github.com/golang/mock/gomock:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git 
a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient/doc.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient/doc.go new file mode 100644 index 00000000000..0910283933d --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient/doc.go @@ -0,0 +1,20 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package mockvmssvmclient implements the mock client for VirtualMachineScaleSetVM. +package mockvmssvmclient // import "k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient" diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient/interface.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient/interface.go new file mode 100644 index 00000000000..8258f462fda --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient/interface.go @@ -0,0 +1,95 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mockvmssvmclient + +import ( + context "context" + reflect "reflect" + + compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" + gomock "github.com/golang/mock/gomock" + retry "k8s.io/legacy-cloud-providers/azure/retry" +) + +// MockInterface is a mock of Interface interface +type MockInterface struct { + ctrl *gomock.Controller + recorder *MockInterfaceMockRecorder +} + +// MockInterfaceMockRecorder is the mock recorder for MockInterface +type MockInterfaceMockRecorder struct { + mock *MockInterface +} + +// NewMockInterface creates a new mock instance +func NewMockInterface(ctrl *gomock.Controller) *MockInterface { + mock := &MockInterface{ctrl: ctrl} + mock.recorder = &MockInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { + return m.recorder +} + +// Get mocks base method +func (m *MockInterface) Get(ctx context.Context, resourceGroupName, VMScaleSetName, instanceID string, expand compute.InstanceViewTypes) (compute.VirtualMachineScaleSetVM, *retry.Error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, VMScaleSetName, instanceID, expand) + ret0, _ := ret[0].(compute.VirtualMachineScaleSetVM) + ret1, _ := ret[1].(*retry.Error) + return ret0, ret1 +} + +// Get indicates an expected call of Get +func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, VMScaleSetName, instanceID, expand interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, VMScaleSetName, instanceID, expand) +} + +// List mocks base method +func (m *MockInterface) List(ctx context.Context, resourceGroupName, virtualMachineScaleSetName, expand string) ([]compute.VirtualMachineScaleSetVM, *retry.Error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List", ctx, resourceGroupName, virtualMachineScaleSetName, expand) + ret0, _ := ret[0].([]compute.VirtualMachineScaleSetVM) + ret1, _ := ret[1].(*retry.Error) + return ret0, ret1 +} + +// List indicates an expected call of List +func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName, virtualMachineScaleSetName, expand interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName, virtualMachineScaleSetName, expand) +} + +// Update mocks base method +func (m *MockInterface) Update(ctx context.Context, resourceGroupName, VMScaleSetName, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Update", ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// Update indicates an expected call of Update +func (mr *MockInterfaceMockRecorder) Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source) +} diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/metrics/BUILD b/staging/src/k8s.io/legacy-cloud-providers/azure/metrics/BUILD new file mode 100644 index 00000000000..46ce36be896 --- /dev/null +++ 
b/staging/src/k8s.io/legacy-cloud-providers/azure/metrics/BUILD @@ -0,0 +1,37 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "azure_metrics.go", + "doc.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/legacy-cloud-providers/azure/metrics", + importpath = "k8s.io/legacy-cloud-providers/azure/metrics", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/component-base/metrics:go_default_library", + "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["azure_metrics_test.go"], + embed = [":go_default_library"], + deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics.go b/staging/src/k8s.io/legacy-cloud-providers/azure/metrics/azure_metrics.go similarity index 70% rename from staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics.go rename to staging/src/k8s.io/legacy-cloud-providers/azure/metrics/azure_metrics.go index 999a4a2b639..93c51daa87c 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/metrics/azure_metrics.go @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package azure +package metrics import ( "strings" @@ -30,6 +30,7 @@ type apiCallMetrics struct { latency *metrics.HistogramVec errors *metrics.CounterVec rateLimitedCount *metrics.CounterVec + throttledCount *metrics.CounterVec } var ( @@ -43,22 +44,32 @@ var ( apiMetrics = registerAPIMetrics(metricLabels...) ) -type metricContext struct { +// MetricContext indicates the context for Azure client metrics. +type MetricContext struct { start time.Time attributes []string } -func newMetricContext(prefix, request, resourceGroup, subscriptionID, source string) *metricContext { - return &metricContext{ +// NewMetricContext creates a new MetricContext. +func NewMetricContext(prefix, request, resourceGroup, subscriptionID, source string) *MetricContext { + return &MetricContext{ start: time.Now(), attributes: []string{prefix + "_" + request, strings.ToLower(resourceGroup), subscriptionID, source}, } } -func (mc *metricContext) RateLimitedCount() { + +// RateLimitedCount records the metrics for rate limited request count. +func (mc *MetricContext) RateLimitedCount() { apiMetrics.rateLimitedCount.WithLabelValues(mc.attributes...).Inc() } -func (mc *metricContext) Observe(err error) error { +// ThrottledCount records the metrics for throttled request count. +func (mc *MetricContext) ThrottledCount() { + apiMetrics.throttledCount.WithLabelValues(mc.attributes...).Inc() +} + +// Observe observes the request latency and failed requests. +func (mc *MetricContext) Observe(err error) error { apiMetrics.latency.WithLabelValues(mc.attributes...).Observe( time.Since(mc.start).Seconds()) if err != nil { @@ -68,6 +79,7 @@ func (mc *metricContext) Observe(err error) error { return err } +// registerAPIMetrics registers the API metrics. 
func registerAPIMetrics(attributes ...string) *apiCallMetrics { metrics := &apiCallMetrics{ latency: metrics.NewHistogramVec( @@ -94,11 +106,20 @@ func registerAPIMetrics(attributes ...string) *apiCallMetrics { }, attributes, ), + throttledCount: metrics.NewCounterVec( + &metrics.CounterOpts{ + Name: "cloudprovider_azure_api_request_throttled_count", + Help: "Number of throttled Azure API calls", + StabilityLevel: metrics.ALPHA, + }, + attributes, + ), } legacyregistry.MustRegister(metrics.latency) legacyregistry.MustRegister(metrics.errors) legacyregistry.MustRegister(metrics.rateLimitedCount) + legacyregistry.MustRegister(metrics.throttledCount) return metrics } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/metrics/azure_metrics_test.go similarity index 88% rename from staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics_test.go rename to staging/src/k8s.io/legacy-cloud-providers/azure/metrics/azure_metrics_test.go index 286208e5e46..9af8b52c5af 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_metrics_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/metrics/azure_metrics_test.go @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package azure +package metrics import ( "testing" @@ -25,12 +25,12 @@ import ( ) func TestAzureMetricLabelCardinality(t *testing.T) { - mc := newMetricContext("test", "create", "resource_group", "subscription_id", "source") + mc := NewMetricContext("test", "create", "resource_group", "subscription_id", "source") assert.Len(t, mc.attributes, len(metricLabels), "cardinalities of labels and values must match") } func TestAzureMetricLabelPrefix(t *testing.T) { - mc := newMetricContext("prefix", "request", "resource_group", "subscription_id", "source") + mc := NewMetricContext("prefix", "request", "resource_group", "subscription_id", "source") found := false for _, attribute := range mc.attributes { if attribute == "prefix_request" { diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/metrics/doc.go b/staging/src/k8s.io/legacy-cloud-providers/azure/metrics/doc.go new file mode 100644 index 00000000000..5f9f4a096ff --- /dev/null +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/metrics/doc.go @@ -0,0 +1,20 @@ +// +build !providerless + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package metrics is an implementation of Azure CloudProvider metrics. 
+package metrics // import "k8s.io/legacy-cloud-providers/azure/metrics" diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go b/staging/src/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go index 9e604e973de..bf0ec8df851 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go @@ -60,8 +60,15 @@ func (err *Error) Error() error { return nil } - return fmt.Errorf("Retriable: %v, RetryAfter: %s, HTTPStatusCode: %d, RawError: %v", - err.Retriable, err.RetryAfter.String(), err.HTTPStatusCode, err.RawError) + // Convert time to seconds for better logging. + retryAfterSeconds := 0 + curTime := now() + if err.RetryAfter.After(curTime) { + retryAfterSeconds = int(err.RetryAfter.Sub(curTime) / time.Second) + } + + return fmt.Errorf("Retriable: %v, RetryAfter: %ds, HTTPStatusCode: %d, RawError: %v", + err.Retriable, retryAfterSeconds, err.HTTPStatusCode, err.RawError) } // IsThrottled returns true the if the request is being throttled. @@ -99,8 +106,13 @@ func GetRateLimitError(isWrite bool, opName string) *Error { } // GetThrottlingError creates a new error for throttling. -func GetThrottlingError(operation, reason string) *Error { - return GetRetriableError(fmt.Errorf("azure cloud provider throttled for operation %s with reason %q", operation, reason)) +func GetThrottlingError(operation, reason string, retryAfter time.Time) *Error { + rawError := fmt.Errorf("azure cloud provider throttled for operation %s with reason %q", operation, reason) + return &Error{ + Retriable: true, + RawError: rawError, + RetryAfter: retryAfter, + } } // GetError gets a new Error based on resp and error. 
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/retry/azure_retry.go b/staging/src/k8s.io/legacy-cloud-providers/azure/retry/azure_retry.go index bc8a4778541..120b7dffaa1 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/retry/azure_retry.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/retry/azure_retry.go @@ -21,6 +21,7 @@ package retry import ( "math/rand" "net/http" + "strings" "time" "github.com/Azure/go-autorest/autorest" @@ -55,6 +56,8 @@ type Backoff struct { // exceed the cap then the duration is set to the cap and the // steps parameter is set to zero. Cap time.Duration + // The errors indicate that the request shouldn't do more retrying. + NonRetriableErrors []string } // NewBackoff creates a new Backoff. @@ -68,6 +71,28 @@ func NewBackoff(duration time.Duration, factor float64, jitter float64, steps in } } +// WithNonRetriableErrors returns a new *Backoff with NonRetriableErrors assigned. +func (b *Backoff) WithNonRetriableErrors(errs []string) *Backoff { + newBackoff := *b + newBackoff.NonRetriableErrors = errs + return &newBackoff +} + +// isNonRetriableError returns true if the Error is one of NonRetriableErrors. +func (b *Backoff) isNonRetriableError(rerr *Error) bool { + if rerr == nil { + return false + } + + for _, err := range b.NonRetriableErrors { + if strings.Contains(rerr.RawError.Error(), err) { + return true + } + } + + return false +} + // Step (1) returns an amount of time to sleep determined by the // original Duration and Jitter and (2) mutates the provided Backoff // to update its Steps and Duration. 
@@ -134,8 +159,9 @@ func doBackoffRetry(s autorest.Sender, r *http.Request, backoff *Backoff) (resp // 1) request succeed // 2) request is not retriable // 3) request has been throttled - // 4) request has completed all the retry steps - if rerr == nil || !rerr.Retriable || rerr.IsThrottled() || backoff.Steps == 1 { + // 4) request contains non-retriable errors + // 5) request has completed all the retry steps + if rerr == nil || !rerr.Retriable || rerr.IsThrottled() || backoff.isNonRetriableError(rerr) || backoff.Steps == 1 { return resp, rerr.Error() } diff --git a/staging/src/k8s.io/legacy-cloud-providers/go.mod b/staging/src/k8s.io/legacy-cloud-providers/go.mod index bcf8585b806..631dbe985dd 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/go.mod +++ b/staging/src/k8s.io/legacy-cloud-providers/go.mod @@ -15,6 +15,7 @@ require ( github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534 github.com/aws/aws-sdk-go v1.16.26 github.com/dnaeon/go-vcr v1.0.1 // indirect + github.com/golang/mock v1.2.0 github.com/gophercloud/gophercloud v0.1.0 github.com/mitchellh/mapstructure v1.1.2 github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c diff --git a/vendor/modules.txt b/vendor/modules.txt index 20610a2a98f..67af2bba312 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1839,6 +1839,10 @@ k8s.io/legacy-cloud-providers/aws k8s.io/legacy-cloud-providers/azure k8s.io/legacy-cloud-providers/azure/auth k8s.io/legacy-cloud-providers/azure/clients +k8s.io/legacy-cloud-providers/azure/clients/armclient +k8s.io/legacy-cloud-providers/azure/clients/vmssclient +k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient +k8s.io/legacy-cloud-providers/azure/metrics k8s.io/legacy-cloud-providers/azure/retry k8s.io/legacy-cloud-providers/gce k8s.io/legacy-cloud-providers/openstack