Commit f83fb14452: Get VirtualMachines with cached resource groups
Parent: d499855ba2
Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-31 23:37:01 +00:00)
@@ -281,12 +281,12 @@ func (az *Cloud) DeleteRouteWithRetry(routeName string) error {
 }
 
 // CreateOrUpdateVMWithRetry invokes az.VirtualMachinesClient.CreateOrUpdate with exponential backoff retry
-func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualMachine) error {
+func (az *Cloud) CreateOrUpdateVMWithRetry(resourceGroup, vmName string, newVM compute.VirtualMachine) error {
     return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
         ctx, cancel := getContextWithCancel()
         defer cancel()
 
-        resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, az.ResourceGroup, vmName, newVM)
+        resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, resourceGroup, vmName, newVM)
         glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName)
         return processHTTPRetryResponse(resp, err)
     })
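Note: with the new resourceGroup parameter, callers are expected to resolve the node's own resource group first and pass it through. A minimal sketch of the intended call pattern (updateVMInNodeGroup is a hypothetical wrapper, not part of this commit):

```go
// Hypothetical wrapper, not part of this commit: resolve the node's resource
// group via the cached lookup this change introduces, then update the VM
// there instead of always using the cluster's default az.ResourceGroup.
func (az *Cloud) updateVMInNodeGroup(vmName string, newVM compute.VirtualMachine) error {
	nodeResourceGroup, err := az.GetNodeResourceGroup(vmName)
	if err != nil {
		return err
	}
	return az.CreateOrUpdateVMWithRetry(nodeResourceGroup, vmName, newVM)
}
```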
@@ -34,6 +34,12 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
         return err
     }
 
+    vmName := mapNodeNameToVMName(nodeName)
+    nodeResourceGroup, err := as.GetNodeResourceGroup(vmName)
+    if err != nil {
+        return err
+    }
+
     disks := *vm.StorageProfile.DataDisks
     if isManagedDisk {
         disks = append(disks,
@@ -67,17 +73,16 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
             },
         },
     }
-    vmName := mapNodeNameToVMName(nodeName)
-    glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", as.resourceGroup, vmName)
+    glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", nodeResourceGroup, vmName)
     ctx, cancel := getContextWithCancel()
     defer cancel()
-    resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, as.resourceGroup, vmName, newVM)
+    resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM)
     if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
-        glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
-        retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
+        glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", nodeResourceGroup, vmName)
+        retryErr := as.CreateOrUpdateVMWithRetry(nodeResourceGroup, vmName, newVM)
         if retryErr != nil {
             err = retryErr
-            glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", as.resourceGroup, vmName)
+            glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", nodeResourceGroup, vmName)
         }
     }
     if err != nil {
@@ -106,6 +111,12 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
         return nil
     }
 
+    vmName := mapNodeNameToVMName(nodeName)
+    nodeResourceGroup, err := as.GetNodeResourceGroup(vmName)
+    if err != nil {
+        return err
+    }
+
     disks := *vm.StorageProfile.DataDisks
     bFoundDisk := false
     for i, disk := range disks {
@@ -132,17 +143,16 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
             },
         },
     }
-    vmName := mapNodeNameToVMName(nodeName)
-    glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", as.resourceGroup, vmName)
+    glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", nodeResourceGroup, vmName)
     ctx, cancel := getContextWithCancel()
     defer cancel()
-    resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, as.resourceGroup, vmName, newVM)
+    resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM)
     if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
-        glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
-        retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
+        glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", nodeResourceGroup, vmName)
+        retryErr := as.CreateOrUpdateVMWithRetry(nodeResourceGroup, vmName, newVM)
         if retryErr != nil {
             err = retryErr
-            glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", as.ResourceGroup, vmName)
+            glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", nodeResourceGroup, vmName)
         }
     }
     if err != nil {
@@ -154,9 +154,15 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
         return az.vmSet.GetInstanceIDByNodeName(nodeName)
     }
 
+    // Get resource group name.
+    resourceGroup, err := az.metadata.Text("instance/compute/resourceGroupName")
+    if err != nil {
+        return "", err
+    }
+
     // Compose instanceID based on nodeName for standard instance.
     if az.VMType == vmTypeStandard {
-        return az.getStandardMachineID(nodeName), nil
+        return az.getStandardMachineID(resourceGroup, nodeName), nil
     }
 
     // Get scale set name and instanceID from vmName for vmss.
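Note: az.metadata.Text presumably wraps the Azure Instance Metadata Service (IMDS). For reference, a standalone query for the same value could look like the sketch below; the 169.254.169.254 endpoint and the "Metadata: true" header are standard IMDS, while the api-version is illustrative only.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// Minimal IMDS query for the VM's resource group name (sketch; the
// api-version query parameter is an assumption).
func resourceGroupFromIMDS() (string, error) {
	req, err := http.NewRequest("GET",
		"http://169.254.169.254/metadata/instance/compute/resourceGroupName?api-version=2017-08-01&format=text", nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("Metadata", "true") // required by IMDS

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}

func main() {
	rg, err := resourceGroupFromIMDS()
	fmt.Println(rg, err)
}
```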
@@ -168,12 +174,12 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
         if err != nil {
             if err == ErrorNotVmssInstance {
                 // Compose machineID for standard Node.
-                return az.getStandardMachineID(nodeName), nil
+                return az.getStandardMachineID(resourceGroup, nodeName), nil
             }
             return "", err
         }
         // Compose instanceID based on ssName and instanceID for vmss instance.
-        return az.getVmssMachineID(ssName, instanceID), nil
+        return az.getVmssMachineID(resourceGroup, ssName, instanceID), nil
     }
 
     return az.vmSet.GetInstanceIDByNodeName(nodeName)
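Note: the value these helpers compose is a plain ARM resource ID. A quick illustration, assuming the conventional template shape (the real machineIDTemplate constant is defined elsewhere in azure_standard.go):

```go
package main

import "fmt"

// Conventional ARM ID shape for a VM; shown here only to illustrate why the
// resource group must now be a parameter.
const machineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s"

func main() {
	// Two nodes in different resource groups now get distinct, correct
	// provider IDs under the same subscription.
	fmt.Printf(machineIDTemplate+"\n", "sub-id", "rg-a", "node-1")
	fmt.Printf(machineIDTemplate+"\n", "sub-id", "rg-b", "node-2")
}
```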
@@ -62,22 +62,24 @@ const (
 var errNotInVMSet = errors.New("vm is not in the vmset")
 var providerIDRE = regexp.MustCompile(`^` + CloudProviderName + `://(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`)
 var backendPoolIDRE = regexp.MustCompile(`^/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Network/loadBalancers/(.+)/backendAddressPools/(?:.*)`)
+var nicResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/networkInterfaces/(?:.*)`)
+var publicIPResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/publicIPAddresses/(?:.*)`)
 
 // getStandardMachineID returns the full identifier of a virtual machine.
-func (az *Cloud) getStandardMachineID(machineName string) string {
+func (az *Cloud) getStandardMachineID(resourceGroup, machineName string) string {
     return fmt.Sprintf(
         machineIDTemplate,
         az.SubscriptionID,
-        az.ResourceGroup,
+        resourceGroup,
         machineName)
 }
 
 // returns the full identifier of an availabilitySet
-func (az *Cloud) getAvailabilitySetID(availabilitySetName string) string {
+func (az *Cloud) getAvailabilitySetID(resourceGroup, availabilitySetName string) string {
     return fmt.Sprintf(
         availabilitySetIDTemplate,
         az.SubscriptionID,
-        az.ResourceGroup,
+        resourceGroup,
         availabilitySetName)
 }
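Note: both new regexes capture the resource group segment of an ARM ID as submatch 1. A quick standalone check (the sample NIC ID is invented):

```go
package main

import (
	"fmt"
	"regexp"
)

// Copy of the regex added above, exercised against a sample NIC ID to show
// which submatch carries the resource group.
var nicResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/networkInterfaces/(?:.*)`)

func main() {
	nicID := "/subscriptions/sub-id/resourceGroups/node-rg/providers/Microsoft.Network/networkInterfaces/node-1-nic"
	matches := nicResourceGroupRE.FindStringSubmatch(nicID)
	fmt.Println(len(matches), matches[1]) // 2 node-rg
}
```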
@@ -306,51 +308,6 @@ func MakeCRC32(str string) string {
     return strconv.FormatUint(uint64(hash), 10)
 }
 
-//ExtractVMData : extract dataDisks, storageProfile from a map struct
-func ExtractVMData(vmData map[string]interface{}) (dataDisks []interface{},
-    storageProfile map[string]interface{},
-    hardwareProfile map[string]interface{}, err error) {
-    props, ok := vmData["properties"].(map[string]interface{})
-    if !ok {
-        return nil, nil, nil, fmt.Errorf("convert vmData(properties) to map error")
-    }
-
-    storageProfile, ok = props["storageProfile"].(map[string]interface{})
-    if !ok {
-        return nil, nil, nil, fmt.Errorf("convert vmData(storageProfile) to map error")
-    }
-
-    hardwareProfile, ok = props["hardwareProfile"].(map[string]interface{})
-    if !ok {
-        return nil, nil, nil, fmt.Errorf("convert vmData(hardwareProfile) to map error")
-    }
-
-    dataDisks, ok = storageProfile["dataDisks"].([]interface{})
-    if !ok {
-        return nil, nil, nil, fmt.Errorf("convert vmData(dataDisks) to map error")
-    }
-    return dataDisks, storageProfile, hardwareProfile, nil
-}
-
-//ExtractDiskData : extract provisioningState, diskState from a map struct
-func ExtractDiskData(diskData interface{}) (provisioningState string, diskState string, err error) {
-    fragment, ok := diskData.(map[string]interface{})
-    if !ok {
-        return "", "", fmt.Errorf("convert diskData to map error")
-    }
-
-    properties, ok := fragment["properties"].(map[string]interface{})
-    if !ok {
-        return "", "", fmt.Errorf("convert diskData(properties) to map error")
-    }
-
-    provisioningState, ok = properties["provisioningState"].(string) // if there is a disk, provisioningState property will be there
-    if ref, ok := properties["diskState"]; ok {
-        diskState = ref.(string)
-    }
-    return provisioningState, diskState, nil
-}
-
 // availabilitySet implements VMSet interface for Azure availability sets.
 type availabilitySet struct {
     *Cloud
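Note: the deleted ExtractVMData/ExtractDiskData helpers walked untyped map[string]interface{} payloads. The provider reads the same fields through the typed compute SDK structs, as the AttachDisk/DetachDisk hunks above already do, so the unstructured plumbing can go. A minimal sketch of the typed equivalent (the vendored import path/version is an assumption):

```go
package azure

import (
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute" // version assumed
)

// dataDisks shows the typed access that makes the deleted map-based helpers
// unnecessary: the SDK already models the storage profile.
func dataDisks(vm compute.VirtualMachine) []compute.DataDisk {
	if vm.StorageProfile == nil || vm.StorageProfile.DataDisks == nil {
		return nil
	}
	return *vm.StorageProfile.DataDisks
}
```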
@@ -578,6 +535,26 @@ func (as *availabilitySet) GetPrimaryInterface(nodeName string) (network.Interfa
     return as.getPrimaryInterfaceWithVMSet(nodeName, "")
 }
 
+// extractResourceGroupByNicID extracts the resource group name by nicID.
+func extractResourceGroupByNicID(nicID string) (string, error) {
+    matches := nicResourceGroupRE.FindStringSubmatch(nicID)
+    if len(matches) != 2 {
+        return "", fmt.Errorf("error of extracting resourceGroup from nicID %q", nicID)
+    }
+
+    return matches[1], nil
+}
+
+// extractResourceGroupByPipID extracts the resource group name by publicIP ID.
+func extractResourceGroupByPipID(pipID string) (string, error) {
+    matches := publicIPResourceGroupRE.FindStringSubmatch(pipID)
+    if len(matches) != 2 {
+        return "", fmt.Errorf("error of extracting resourceGroup from pipID %q", pipID)
+    }
+
+    return matches[1], nil
+}
+
 // getPrimaryInterfaceWithVMSet gets machine primary network interface by node name and vmSet.
 func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName string) (network.Interface, error) {
     var machine compute.VirtualMachine
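Note: a test-style exercise of the new helper, covering both the happy path and a malformed ID (not part of this commit; the sample IDs are invented):

```go
package azure

import "testing"

// Quick check of extractResourceGroupByNicID added above.
func TestExtractResourceGroupByNicID(t *testing.T) {
	rg, err := extractResourceGroupByNicID("/subscriptions/s/resourceGroups/rg1/providers/Microsoft.Network/networkInterfaces/nic1")
	if err != nil || rg != "rg1" {
		t.Errorf("got (%q, %v), want (%q, nil)", rg, err, "rg1")
	}

	// A string that doesn't match the regex should surface an error.
	if _, err := extractResourceGroupByNicID("not-an-arm-id"); err == nil {
		t.Error("expected an error for a malformed NIC ID")
	}
}
```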
@@ -596,6 +573,10 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri
     if err != nil {
         return network.Interface{}, err
     }
+    nodeResourceGroup, err := as.GetNodeResourceGroup(nodeName)
+    if err != nil {
+        return network.Interface{}, err
+    }
 
     // Check availability set name. Note that vmSetName is empty string when getting
     // the Node's IP address. While vmSetName is not empty, it should be checked with
@@ -605,7 +586,7 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri
     // - For standard SKU load balancer, backend could belong to multiple VMAS, so we
     //   don't check vmSet for it.
     if vmSetName != "" && !as.useStandardLoadBalancer() {
-        expectedAvailabilitySetName := as.getAvailabilitySetID(vmSetName)
+        expectedAvailabilitySetName := as.getAvailabilitySetID(nodeResourceGroup, vmSetName)
         if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) {
             glog.V(3).Infof(
                 "GetPrimaryInterface: nic (%s) is not in the availabilitySet(%s)", nicName, vmSetName)
@@ -613,9 +594,14 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri
         }
     }
 
+    nicResourceGroup, err := extractResourceGroupByNicID(primaryNicID)
+    if err != nil {
+        return network.Interface{}, err
+    }
+
     ctx, cancel := getContextWithCancel()
     defer cancel()
-    nic, err := as.InterfacesClient.Get(ctx, as.ResourceGroup, nicName, "")
+    nic, err := as.InterfacesClient.Get(ctx, nicResourceGroup, nicName, "")
     if err != nil {
         return network.Interface{}, err
     }
@@ -718,6 +704,11 @@ func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Nod
             continue
         }
 
+        if as.ShouldNodeExcludedFromLoadBalancer(node) {
+            glog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
+            continue
+        }
+
         f := func() error {
             err := as.ensureHostInPool(serviceName, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal)
             if err != nil {
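Note: ShouldNodeExcludedFromLoadBalancer is defined elsewhere in this change. A sketch of the intended check, which skips nodes that are unmanaged or that live in a resource group other than the cluster's (the label keys below are assumptions for illustration):

```go
package azure

import (
	"strings"

	"k8s.io/api/core/v1"
)

// Sketch, not this commit's code: a node is excluded from the load balancer
// backend pool if it is labeled as externally managed or as belonging to a
// different resource group. Label keys are assumed for illustration.
func (az *Cloud) shouldExcludeNodeSketch(node *v1.Node) bool {
	labels := node.ObjectMeta.Labels
	if rg, ok := labels["kubernetes.azure.com/resource-group"]; ok && !strings.EqualFold(rg, az.ResourceGroup) {
		return true // node sits in an external resource group
	}
	if managed, ok := labels["kubernetes.azure.com/managed"]; ok && managed == "false" {
		return true // node is not managed by this cloud provider
	}
	return false
}
```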
@@ -956,6 +956,8 @@ func getTestCloud() (az *Cloud) {
         },
         nodeZones:          map[string]sets.String{},
         nodeInformerSynced: func() bool { return true },
+        nodeResourceGroups: map[string]string{},
+        unmanagedNodes:     sets.NewString(),
     }
     az.DisksClient = newFakeDisksClient()
     az.InterfacesClient = newFakeAzureInterfacesClient()
@@ -189,7 +189,13 @@ func (az *Cloud) newVMCache() (*timedCache, error) {
         // Consider adding separate parameter for controlling 'InstanceView' once node update issue #56276 is fixed
         ctx, cancel := getContextWithCancel()
         defer cancel()
-        vm, err := az.VirtualMachinesClient.Get(ctx, az.ResourceGroup, key, compute.InstanceView)
+
+        resourceGroup, err := az.GetNodeResourceGroup(key)
+        if err != nil {
+            return nil, err
+        }
+
+        vm, err := az.VirtualMachinesClient.Get(ctx, resourceGroup, key, compute.InstanceView)
         exists, message, realErr := checkResourceExistsFromError(err)
         if realErr != nil {
             return nil, realErr
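Note: GetNodeResourceGroup is the cached lookup the commit title refers to; it is defined elsewhere in this change. A sketch of the expected behavior: nodeResourceGroups and nodeInformerSynced appear in the test hunk above, while nodeCachesLock and the error text are assumed names.

```go
package azure

import "fmt"

// Sketch of the cached node -> resource group lookup this diff depends on.
// The map is assumed to be maintained by node informer handlers; nodes
// without an entry fall back to the configured default resource group.
func (az *Cloud) getNodeResourceGroupSketch(nodeName string) (string, error) {
	// Kubelet doesn't run the node informer; use the configured group.
	if az.nodeInformerSynced == nil {
		return az.ResourceGroup, nil
	}

	az.nodeCachesLock.Lock() // assumed field guarding the caches
	defer az.nodeCachesLock.Unlock()
	if !az.nodeInformerSynced() {
		return "", fmt.Errorf("node informer is not synced yet")
	}

	// Nodes in external resource groups are cached with their own group.
	if rg, ok := az.nodeResourceGroups[nodeName]; ok {
		return rg, nil
	}
	return az.ResourceGroup, nil
}
```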