Merge pull request #43510 from karataliu/azurelb

Automatic merge from submit-queue (batch tested with PRs 44645, 44639, 43510)

Add support for Azure internal load balancer

**Which issue this PR fixes**
Fixes https://github.com/kubernetes/kubernetes/issues/38901

**What this PR does / why we need it**:
This PR adds support for the Azure internal load balancer.

Currently, when exposing a service of type LoadBalancer, the Azure provider assumes that a public load balancer is required.
It therefore requests a public IP address resource and exposes the service via that public IP.
As a result, we cannot assign private IP addresses (within the cluster virtual network) to the service.

**Special notes for your reviewer**:
1. Clarification:
a. 'LoadBalancer' refers to an option for the 'type' field under ServiceSpec. See https://kubernetes.io/docs/resources-reference/v1.5/#servicespec-v1
b. 'Azure LoadBalancer' refers to a type of Azure resource. See https://docs.microsoft.com/en-us/azure/load-balancer/load-balancer-overview

2. For a single Azure LoadBalancer, all frontend IP configurations must reference either a subnet or a publicIpAddress, which means a given load balancer is either Internet-facing or internal, but never both.
The current provider creates an Azure LoadBalancer with the generated name '${loadBalancerName}' for all services of type 'LoadBalancer'.
This PR introduces the name '${loadBalancerName}-internal' for a separate Azure LoadBalancer resource, used by all services that require an internal load balancer.

3. This PR introduces a new annotation for the internal load balancer behaviour (see the sketch after this list):
a. When the annotation value is set to 'false' or not set, it falls back to the original behaviour, assuming that the user is requesting a public load balancer;
b. When the annotation value is set to 'true', the following rules apply, depending on the 'loadBalancerIP' field on ServiceSpec:
   - If 'loadBalancerIP' is not set, a load balancer rule is created with a dynamically assigned frontend IP under the cluster subnet;
   - If 'loadBalancerIP' is set, a load balancer rule is created with the frontend IP set to the given value. If the given value is not valid, that is, it does not fall within the cluster subnet range, creation fails.

4. Users may change the load balancer type by applying the annotation to the service at runtime.
In this case, the load balancer rules need to be 'switched' between the internal one and the external one.
For example, if we have a service with an internal load balancer and the user then removes the annotation, making it a public one, we need to clean up the rules in the internal Azure LoadBalancer before creating rules in the public Azure LoadBalancer.
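
As an illustration, here is a minimal sketch (not part of this PR) of a client requesting an internal load balancer via the new annotation. The annotation key matches the constant added below; the service name, namespace, address, and import paths are hypothetical and may differ by release:

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1" // older releases vendor this as k8s.io/kubernetes/pkg/api/v1
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical Service that opts into the Azure internal load balancer.
	svc := v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-service",
			Namespace: "default",
			Annotations: map[string]string{
				// Annotation key added by this PR (ServiceAnnotationLoadBalancerInternal).
				"service.beta.kubernetes.io/azure-load-balancer-internal": "true",
			},
		},
		Spec: v1.ServiceSpec{
			Type: v1.ServiceTypeLoadBalancer,
			// Optional: pin a static private IP. It must fall within the cluster
			// subnet range, otherwise creation fails. If omitted, a frontend IP
			// is dynamically allocated from the subnet.
			LoadBalancerIP: "10.240.0.25", // hypothetical address
			Ports:          []v1.ServicePort{{Port: 80}},
		},
	}
	fmt.Printf("%s/%s requests internal LB: %v\n", svc.Namespace, svc.Name,
		svc.Annotations["service.beta.kubernetes.io/azure-load-balancer-internal"] == "true")
}
```

Removing the annotation (or setting it to 'false') on a live service triggers the switch described in note 4.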

**Release note**:
Committed by Kubernetes Submit Queue on 2017-04-18 23:22:04 -07:00 (via GitHub)
commit d2060ade08
4 changed files with 327 additions and 91 deletions


@@ -32,37 +32,57 @@ import (
"k8s.io/apimachinery/pkg/types"
)
// ServiceAnnotationLoadBalancerInternal is the annotation used on the service to request an Azure internal load balancer.
const ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/azure-load-balancer-internal"
// GetLoadBalancer returns whether the specified load balancer exists, and
// if so, what its status is.
func (az *Cloud) GetLoadBalancer(clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {
lbName := getLoadBalancerName(clusterName)
isInternal := requiresInternalLoadBalancer(service)
lbName := getLoadBalancerName(clusterName, isInternal)
serviceName := getServiceName(service)
pipName, err := az.getPublicIPName(clusterName, service)
if err != nil {
return nil, false, err
}
_, existsLb, err := az.getAzureLoadBalancer(lbName)
lb, existsLb, err := az.getAzureLoadBalancer(lbName)
if err != nil {
return nil, false, err
}
if !existsLb {
glog.V(5).Infof("get(%s): lb(%s) - doesn't exist", serviceName, pipName)
glog.V(5).Infof("get(%s): lb(%s) - doesn't exist", serviceName, lbName)
return nil, false, nil
}
pip, existsPip, err := az.getPublicIPAddress(pipName)
if err != nil {
return nil, false, err
var lbIP *string
if isInternal {
lbFrontendIPConfigName := getFrontendIPConfigName(service)
for _, ipConfiguration := range *lb.FrontendIPConfigurations {
if lbFrontendIPConfigName == *ipConfiguration.Name {
lbIP = ipConfiguration.PrivateIPAddress
break
}
}
} else {
// TODO: Consider also reading the address from the LB's FrontendIPConfigurations
pipName, err := az.getPublicIPName(clusterName, service)
if err != nil {
return nil, false, err
}
pip, existsPip, err := az.getPublicIPAddress(pipName)
if err != nil {
return nil, false, err
}
if existsPip {
lbIP = pip.IPAddress
}
}
if !existsPip {
glog.V(5).Infof("get(%s): pip(%s) - doesn't exist", serviceName, pipName)
if lbIP == nil {
glog.V(5).Infof("get(%s): lb(%s) - IP doesn't exist", serviceName, lbName)
return nil, false, nil
}
return &v1.LoadBalancerStatus{
Ingress: []v1.LoadBalancerIngress{{IP: *pip.IPAddress}},
Ingress: []v1.LoadBalancerIngress{{IP: *lbIP}},
}, true, nil
}
@@ -93,24 +113,33 @@ func (az *Cloud) getPublicIPName(clusterName string, service *v1.Service) (strin
// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
lbName := getLoadBalancerName(clusterName)
pipName, err := az.getPublicIPName(clusterName, service)
if err != nil {
return nil, err
}
serviceName := getServiceName(service)
glog.V(2).Infof("ensure(%s): START clusterName=%q lbName=%q", serviceName, clusterName, lbName)
isInternal := requiresInternalLoadBalancer(service)
lbName := getLoadBalancerName(clusterName, isInternal)
pip, err := az.ensurePublicIPExists(serviceName, pipName)
// When a client updates the internal load balancer annotation,
// the service may be switched from an internal LB to a public one, or vice versa.
// Here we first ensure the service is not present in the opposite LB.
err := az.cleanupLoadBalancer(clusterName, service, !isInternal)
if err != nil {
return nil, err
}
// Also clean up the public IP resource, since the service might have been switched from the public load balancer type.
if isInternal {
err = az.cleanupPublicIP(clusterName, service)
if err != nil {
return nil, err
}
}
serviceName := getServiceName(service)
glog.V(5).Infof("ensure(%s): START clusterName=%q lbName=%q", serviceName, clusterName, lbName)
sg, err := az.SecurityGroupsClient.Get(az.ResourceGroup, az.SecurityGroupName, "")
if err != nil {
return nil, err
}
sg, sgNeedsUpdate, err := az.reconcileSecurityGroup(sg, clusterName, service)
sg, sgNeedsUpdate, err := az.reconcileSecurityGroup(sg, clusterName, service, true /* wantLb */)
if err != nil {
return nil, err
}
@@ -138,7 +167,53 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nod
}
}
lb, lbNeedsUpdate, err := az.reconcileLoadBalancer(lb, pip, clusterName, service, nodes)
var lbIP *string
var fipConfigurationProperties *network.FrontendIPConfigurationPropertiesFormat
if isInternal {
subnet, existsSubnet, err := az.getSubnet(az.VnetName, az.SubnetName)
if err != nil {
return nil, err
}
if !existsSubnet {
return nil, fmt.Errorf("ensure(%s): lb(%s) - failed to get subnet: %s/%s", serviceName, lbName, az.VnetName, az.SubnetName)
}
configProperties := network.FrontendIPConfigurationPropertiesFormat{
Subnet: &network.Subnet{
ID: subnet.ID,
},
}
loadBalancerIP := service.Spec.LoadBalancerIP
if loadBalancerIP != "" {
configProperties.PrivateIPAllocationMethod = network.Static
configProperties.PrivateIPAddress = &loadBalancerIP
lbIP = &loadBalancerIP
} else {
// We'll need to call GetLoadBalancer later to retrieve the allocated IP.
configProperties.PrivateIPAllocationMethod = network.Dynamic
}
fipConfigurationProperties = &configProperties
} else {
pipName, err := az.getPublicIPName(clusterName, service)
if err != nil {
return nil, err
}
pip, err := az.ensurePublicIPExists(serviceName, pipName)
if err != nil {
return nil, err
}
lbIP = pip.IPAddress
fipConfigurationProperties = &network.FrontendIPConfigurationPropertiesFormat{
PublicIPAddress: &network.PublicIPAddress{ID: pip.ID},
}
}
lb, lbNeedsUpdate, err := az.reconcileLoadBalancer(lb, fipConfigurationProperties, clusterName, service, nodes)
if err != nil {
return nil, err
}
@@ -171,9 +246,21 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nod
return nil, utilerrors.Flatten(errs)
}
glog.V(2).Infof("ensure(%s): FINISH - %s", serviceName, *pip.IPAddress)
glog.V(2).Infof("ensure(%s): lb(%s) finished", serviceName, lbName)
if lbIP == nil {
lbStatus, exists, err := az.GetLoadBalancer(clusterName, service)
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("ensure(%s): lb(%s) - failed to get back load balancer", serviceName, lbName)
}
return lbStatus, nil
}
return &v1.LoadBalancerStatus{
Ingress: []v1.LoadBalancerIngress{{IP: *pip.IPAddress}},
Ingress: []v1.LoadBalancerIngress{{IP: *lbIP}},
}, nil
}
@@ -190,18 +277,55 @@ func (az *Cloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nod
// have multiple underlying components, meaning a Get could say that the LB
// doesn't exist even if some part of it is still lying around.
func (az *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error {
lbName := getLoadBalancerName(clusterName)
isInternal := requiresInternalLoadBalancer(service)
lbName := getLoadBalancerName(clusterName, isInternal)
serviceName := getServiceName(service)
pipName, err := az.getPublicIPName(clusterName, service)
glog.V(5).Infof("delete(%s): START clusterName=%q lbName=%q", serviceName, clusterName, lbName)
err := az.cleanupLoadBalancer(clusterName, service, isInternal)
if err != nil {
return err
}
glog.V(2).Infof("delete(%s): START clusterName=%q lbName=%q", serviceName, clusterName, lbName)
if !isInternal {
err = az.cleanupPublicIP(clusterName, service)
if err != nil {
return err
}
}
// The reconcile logic is capable of a full reconcile, so we can use it to delete.
service.Spec.Ports = []v1.ServicePort{}
sg, existsSg, err := az.getSecurityGroup()
if err != nil {
return err
}
if existsSg {
reconciledSg, sgNeedsUpdate, reconcileErr := az.reconcileSecurityGroup(sg, clusterName, service, false /* wantLb */)
if reconcileErr != nil {
return reconcileErr
}
if sgNeedsUpdate {
glog.V(3).Infof("delete(%s): sg(%s) - updating", serviceName, az.SecurityGroupName)
// azure-sdk-for-go introduced constraint validation which breaks the update here if we don't set these
// to nil. This is a workaround until https://github.com/Azure/go-autorest/issues/112 is fixed
sg.SecurityGroupPropertiesFormat.NetworkInterfaces = nil
sg.SecurityGroupPropertiesFormat.Subnets = nil
_, err := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *reconciledSg.Name, reconciledSg, nil)
if err != nil {
return err
}
}
}
glog.V(2).Infof("delete(%s): FINISH", serviceName)
return nil
}
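// cleanupLoadBalancer ensures the service's rules are removed from the Azure LoadBalancer of the given type (internal or public).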
func (az *Cloud) cleanupLoadBalancer(clusterName string, service *v1.Service, isInternalLb bool) error {
lbName := getLoadBalancerName(clusterName, isInternalLb)
serviceName := getServiceName(service)
glog.V(10).Infof("ensure lb deleted: clusterName=%q, serviceName=%s, lbName=%q", clusterName, serviceName, lbName)
lb, existsLb, err := az.getAzureLoadBalancer(lbName)
if err != nil {
@@ -230,36 +354,24 @@ func (az *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Servi
}
}
sg, existsSg, err := az.getSecurityGroup()
if err != nil {
return err
}
if existsSg {
reconciledSg, sgNeedsUpdate, reconcileErr := az.reconcileSecurityGroup(sg, clusterName, service)
if reconcileErr != nil {
return reconcileErr
}
if sgNeedsUpdate {
glog.V(3).Infof("delete(%s): sg(%s) - updating", serviceName, az.SecurityGroupName)
// azure-sdk-for-go introduced constraint validation which breaks the update here if we don't set these
// to nil. This is a workaround until https://github.com/Azure/go-autorest/issues/112 is fixed
sg.SecurityGroupPropertiesFormat.NetworkInterfaces = nil
sg.SecurityGroupPropertiesFormat.Subnets = nil
_, err := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *reconciledSg.Name, reconciledSg, nil)
if err != nil {
return err
}
}
}
return nil
}
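// cleanupPublicIP deletes the public IP resource allocated for the service, but only if the provider created it (i.e. no user-specified LoadBalancerIP).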
func (az *Cloud) cleanupPublicIP(clusterName string, service *v1.Service) error {
serviceName := getServiceName(service)
// Only delete an IP address if we created it.
if service.Spec.LoadBalancerIP == "" {
pipName, err := az.getPublicIPName(clusterName, service)
if err != nil {
return err
}
err = az.ensurePublicIPDeleted(serviceName, pipName)
if err != nil {
return err
}
}
glog.V(2).Infof("delete(%s): FINISH", serviceName)
return nil
}
@@ -306,15 +418,16 @@ func (az *Cloud) ensurePublicIPDeleted(serviceName, pipName string) error {
// This ensures the load balancer exists and the frontend IP config is set up.
// This also reconciles the Service's Ports with the LoadBalancer config.
// This entails adding rules/probes for expected Ports and removing stale rules/ports.
func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, pip *network.PublicIPAddress, clusterName string, service *v1.Service, nodes []*v1.Node) (network.LoadBalancer, bool, error) {
lbName := getLoadBalancerName(clusterName)
func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, fipConfigurationProperties *network.FrontendIPConfigurationPropertiesFormat, clusterName string, service *v1.Service, nodes []*v1.Node) (network.LoadBalancer, bool, error) {
isInternal := requiresInternalLoadBalancer(service)
lbName := getLoadBalancerName(clusterName, isInternal)
serviceName := getServiceName(service)
lbFrontendIPConfigName := getFrontendIPConfigName(service)
lbFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbFrontendIPConfigName)
lbBackendPoolName := getBackendPoolName(clusterName)
lbBackendPoolID := az.getBackendPoolID(lbName, lbBackendPoolName)
wantLb := len(service.Spec.Ports) > 0
wantLb := fipConfigurationProperties != nil
dirtyLb := false
// Ensure LoadBalancer's Backend Pool Configuration
@@ -372,11 +485,7 @@ func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, pip *network.Pub
newConfigs = append(newConfigs,
network.FrontendIPConfiguration{
Name: to.StringPtr(lbFrontendIPConfigName),
FrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{
PublicIPAddress: &network.PublicIPAddress{
ID: pip.ID,
},
},
FrontendIPConfigurationPropertiesFormat: fipConfigurationProperties,
})
glog.V(10).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName)
dirtyConfigs = true
@@ -388,9 +497,15 @@ func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, pip *network.Pub
}
// update probes/rules
expectedProbes := make([]network.Probe, len(service.Spec.Ports))
expectedRules := make([]network.LoadBalancingRule, len(service.Spec.Ports))
for i, port := range service.Spec.Ports {
var ports []v1.ServicePort
if wantLb {
ports = service.Spec.Ports
} else {
ports = []v1.ServicePort{}
}
expectedProbes := make([]network.Probe, len(ports))
expectedRules := make([]network.LoadBalancingRule, len(ports))
for i, port := range ports {
lbRuleName := getRuleName(service, port)
transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol)
@@ -529,9 +644,14 @@ func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, pip *network.Pub
// This reconciles the Network Security Group similar to how the LB is reconciled.
// This entails adding required, missing SecurityRules and removing stale rules.
func (az *Cloud) reconcileSecurityGroup(sg network.SecurityGroup, clusterName string, service *v1.Service) (network.SecurityGroup, bool, error) {
func (az *Cloud) reconcileSecurityGroup(sg network.SecurityGroup, clusterName string, service *v1.Service, wantLb bool) (network.SecurityGroup, bool, error) {
serviceName := getServiceName(service)
wantLb := len(service.Spec.Ports) > 0
var ports []v1.ServicePort
if wantLb {
ports = service.Spec.Ports
} else {
ports = []v1.ServicePort{}
}
sourceRanges, err := serviceapi.GetLoadBalancerSourceRanges(service)
if err != nil {
@@ -539,15 +659,17 @@ func (az *Cloud) reconcileSecurityGroup(sg network.SecurityGroup, clusterName st
}
var sourceAddressPrefixes []string
if sourceRanges == nil || serviceapi.IsAllowAll(sourceRanges) {
sourceAddressPrefixes = []string{"Internet"}
if !requiresInternalLoadBalancer(service) {
sourceAddressPrefixes = []string{"Internet"}
}
} else {
for _, ip := range sourceRanges {
sourceAddressPrefixes = append(sourceAddressPrefixes, ip.String())
}
}
expectedSecurityRules := make([]network.SecurityRule, len(service.Spec.Ports)*len(sourceAddressPrefixes))
expectedSecurityRules := make([]network.SecurityRule, len(ports)*len(sourceAddressPrefixes))
for i, port := range service.Spec.Ports {
for i, port := range ports {
securityRuleName := getRuleName(service, port)
_, securityProto, _, err := getProtocolsFromKubernetesProtocol(port.Protocol)
if err != nil {
@@ -713,3 +835,12 @@ func (az *Cloud) ensureHostInPool(serviceName string, nodeName types.NodeName, b
}
return nil
}
// requiresInternalLoadBalancer checks whether the service requires an internal load balancer.
func requiresInternalLoadBalancer(service *v1.Service) bool {
if l, ok := service.Annotations[ServiceAnnotationLoadBalancerInternal]; ok {
return l == "true"
}
return false
}


@@ -36,11 +36,11 @@ var testClusterName = "testCluster"
func TestReconcileLoadBalancerAddPort(t *testing.T) {
az := getTestCloud()
svc := getTestService("servicea", 80)
pip := getTestPublicIP()
configProperties := getTestPublicFipConfigurationProperties()
lb := getTestLoadBalancer()
nodes := []*v1.Node{}
lb, updated, err := az.reconcileLoadBalancer(lb, &pip, testClusterName, &svc, nodes)
lb, updated, err := az.reconcileLoadBalancer(lb, &configProperties, testClusterName, &svc, nodes)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
@@ -64,12 +64,12 @@ func TestReconcileLoadBalancerNodeHealth(t *testing.T) {
serviceapi.BetaAnnotationExternalTraffic: serviceapi.AnnotationValueExternalTrafficLocal,
serviceapi.BetaAnnotationHealthCheckNodePort: "32456",
}
pip := getTestPublicIP()
configProperties := getTestPublicFipConfigurationProperties()
lb := getTestLoadBalancer()
nodes := []*v1.Node{}
lb, updated, err := az.reconcileLoadBalancer(lb, &pip, testClusterName, &svc, nodes)
lb, updated, err := az.reconcileLoadBalancer(lb, &configProperties, testClusterName, &svc, nodes)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
@@ -87,17 +87,49 @@
}
// Test removing all services results in removing the frontend ip configuration
func TestReconcileLoadBalancerRemoveService(t *testing.T) {
az := getTestCloud()
svc := getTestService("servicea", 80, 443)
lb := getTestLoadBalancer()
configProperties := getTestPublicFipConfigurationProperties()
nodes := []*v1.Node{}
lb, updated, err := az.reconcileLoadBalancer(lb, &configProperties, testClusterName, &svc, nodes)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
validateLoadBalancer(t, lb, svc)
lb, updated, err = az.reconcileLoadBalancer(lb, nil, testClusterName, &svc, nodes)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
if !updated {
t.Error("Expected the loadbalancer to need an update")
}
// ensure we abandoned the frontend ip configuration
if len(*lb.FrontendIPConfigurations) != 0 {
t.Error("Expected the loadbalancer to have no frontend ip configuration")
}
validateLoadBalancer(t, lb)
}
// Test removing all service ports results in removing the frontend ip configuration
func TestReconcileLoadBalancerRemoveAllPortsRemovesFrontendConfig(t *testing.T) {
az := getTestCloud()
svc := getTestService("servicea", 80)
lb := getTestLoadBalancer()
pip := getTestPublicIP()
configProperties := getTestPublicFipConfigurationProperties()
nodes := []*v1.Node{}
lb, updated, err := az.reconcileLoadBalancer(lb, &pip, testClusterName, &svc, nodes)
lb, updated, err := az.reconcileLoadBalancer(lb, &configProperties, testClusterName, &svc, nodes)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
validateLoadBalancer(t, lb, svc)
svcUpdated := getTestService("servicea")
lb, updated, err = az.reconcileLoadBalancer(lb, nil, testClusterName, &svcUpdated, nodes)
@@ -121,13 +153,13 @@ func TestReconcileLoadBalancerRemoveAllPortsRemovesFrontendConfig(t *testing.T)
func TestReconcileLoadBalancerRemovesPort(t *testing.T) {
az := getTestCloud()
svc := getTestService("servicea", 80, 443)
pip := getTestPublicIP()
configProperties := getTestPublicFipConfigurationProperties()
nodes := []*v1.Node{}
existingLoadBalancer := getTestLoadBalancer(svc)
svcUpdated := getTestService("servicea", 80)
updatedLoadBalancer, _, err := az.reconcileLoadBalancer(existingLoadBalancer, &pip, testClusterName, &svcUpdated, nodes)
updatedLoadBalancer, _, err := az.reconcileLoadBalancer(existingLoadBalancer, &configProperties, testClusterName, &svcUpdated, nodes)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
@@ -140,17 +172,17 @@ func TestReconcileLoadBalancerMultipleServices(t *testing.T) {
az := getTestCloud()
svc1 := getTestService("servicea", 80, 443)
svc2 := getTestService("serviceb", 80)
pip := getTestPublicIP()
configProperties := getTestPublicFipConfigurationProperties()
nodes := []*v1.Node{}
existingLoadBalancer := getTestLoadBalancer()
updatedLoadBalancer, _, err := az.reconcileLoadBalancer(existingLoadBalancer, &pip, testClusterName, &svc1, nodes)
updatedLoadBalancer, _, err := az.reconcileLoadBalancer(existingLoadBalancer, &configProperties, testClusterName, &svc1, nodes)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
updatedLoadBalancer, _, err = az.reconcileLoadBalancer(updatedLoadBalancer, &pip, testClusterName, &svc2, nodes)
updatedLoadBalancer, _, err = az.reconcileLoadBalancer(updatedLoadBalancer, &configProperties, testClusterName, &svc2, nodes)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
@@ -164,7 +196,7 @@ func TestReconcileSecurityGroupNewServiceAddsPort(t *testing.T) {
sg := getTestSecurityGroup()
sg, _, err := az.reconcileSecurityGroup(sg, testClusterName, &svc1)
sg, _, err := az.reconcileSecurityGroup(sg, testClusterName, &svc1, true)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
@@ -172,6 +204,36 @@
validateSecurityGroup(t, sg, svc1)
}
func TestReconcileSecurityGroupNewInternalServiceAddsPort(t *testing.T) {
az := getTestCloud()
svc1 := getInternalTestService("serviceea", 80)
sg := getTestSecurityGroup()
sg, _, err := az.reconcileSecurityGroup(sg, testClusterName, &svc1, true)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
validateSecurityGroup(t, sg, svc1)
}
func TestReconcileSecurityGroupRemoveService(t *testing.T) {
service1 := getTestService("servicea", 81)
service2 := getTestService("serviceb", 82)
sg := getTestSecurityGroup(service1, service2)
validateSecurityGroup(t, sg, service1, service2)
az := getTestCloud()
sg, _, err := az.reconcileSecurityGroup(sg, testClusterName, &service1, false)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
validateSecurityGroup(t, sg, service2)
}
func TestReconcileSecurityGroupRemoveServiceRemovesPort(t *testing.T) {
az := getTestCloud()
svc := getTestService("servicea", 80, 443)
@@ -179,7 +241,7 @@ func TestReconcileSecurityGroupRemoveServiceRemovesPort(t *testing.T)
sg := getTestSecurityGroup(svc)
svcUpdated := getTestService("servicea", 80)
sg, _, err := az.reconcileSecurityGroup(sg, testClusterName, &svcUpdated)
sg, _, err := az.reconcileSecurityGroup(sg, testClusterName, &svcUpdated, true)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
@@ -196,7 +258,7 @@ func TestReconcileSecurityWithSourceRanges(t *testing.T) {
}
sg := getTestSecurityGroup(svc)
sg, _, err := az.reconcileSecurityGroup(sg, testClusterName, &svc)
sg, _, err := az.reconcileSecurityGroup(sg, testClusterName, &svc, true)
if err != nil {
t.Errorf("Unexpected error: %q", err)
}
@@ -223,10 +285,10 @@ func getBackendPort(port int32) int32 {
return port + 10000
}
func getTestPublicIP() network.PublicIPAddress {
pip := network.PublicIPAddress{}
pip.ID = to.StringPtr("/this/is/a/public/ip/address/id")
return pip
func getTestPublicFipConfigurationProperties() network.FrontendIPConfigurationPropertiesFormat {
return network.FrontendIPConfigurationPropertiesFormat{
PublicIPAddress: &network.PublicIPAddress{ID: to.StringPtr("/this/is/a/public/ip/address/id")},
}
}
func getTestService(identifier string, requestedPorts ...int32) v1.Service {
@@ -249,6 +311,14 @@ func getTestService(identifier string, requestedPorts ...int32) v1.Service {
svc.Name = identifier
svc.Namespace = "default"
svc.UID = types.UID(identifier)
svc.Annotations = make(map[string]string)
return svc
}
func getInternalTestService(identifier string, requestedPorts ...int32) v1.Service {
svc := getTestService(identifier, requestedPorts...)
svc.Annotations[ServiceAnnotationLoadBalancerInternal] = "true"
return svc
}
@@ -288,8 +358,11 @@ func getTestLoadBalancer(services ...v1.Service) network.LoadBalancer {
func getServiceSourceRanges(service *v1.Service) []string {
if len(service.Spec.LoadBalancerSourceRanges) == 0 {
return []string{"Internet"}
if !requiresInternalLoadBalancer(service) {
return []string{"Internet"}
}
}
return service.Spec.LoadBalancerSourceRanges
}
@@ -324,7 +397,11 @@ func getTestSecurityGroup(services ...v1.Service) network.SecurityGroup {
func validateLoadBalancer(t *testing.T, loadBalancer network.LoadBalancer, services ...v1.Service) {
expectedRuleCount := 0
expectedFrontendIPCount := 0
for _, svc := range services {
if len(svc.Spec.Ports) > 0 {
expectedFrontendIPCount++
}
for _, wantedRule := range svc.Spec.Ports {
expectedRuleCount++
wantedRuleName := getRuleName(&svc, wantedRule)
@@ -371,6 +448,11 @@
}
}
frontendIPCount := len(*loadBalancer.FrontendIPConfigurations)
if frontendIPCount != expectedFrontendIPCount {
t.Errorf("Expected the loadbalancer to have %d frontend IPs. Found %d.\n%v", expectedFrontendIPCount, frontendIPCount, loadBalancer.FrontendIPConfigurations)
}
lenRules := len(*loadBalancer.LoadBalancingRules)
if lenRules != expectedRuleCount {
t.Errorf("Expected the loadbalancer to have %d rules. Found %d.\n%v", expectedRuleCount, lenRules, loadBalancer.LoadBalancingRules)
@@ -386,10 +468,9 @@ func validateSecurityGroup(t *testing.T, securityGroup network.SecurityGroup, se
for _, svc := range services {
for _, wantedRule := range svc.Spec.Ports {
sources := getServiceSourceRanges(&svc)
wantedRuleName := getRuleName(&svc, wantedRule)
for _, source := range sources {
expectedRuleCount++
wantedRuleName := getRuleName(&svc, wantedRule)
foundRule := false
for _, actualRule := range *securityGroup.SecurityRules {
if strings.EqualFold(*actualRule.Name, wantedRuleName) &&


@@ -160,7 +160,14 @@ func getPrimaryIPConfig(nic network.Interface) (*network.InterfaceIPConfiguratio
return nil, fmt.Errorf("failed to determine the determine primary ipconfig. nicname=%q", *nic.Name)
}
func getLoadBalancerName(clusterName string) string {
// For a load balancer, all frontend IP configurations must reference either a subnet or a publicIpAddress.
// Thus Azure does not allow a mixed-type (public and internal) load balancer.
// So we use a separate name for the internal load balancer.
func getLoadBalancerName(clusterName string, isInternal bool) string {
if isInternal {
return fmt.Sprintf("%s-internal", clusterName)
}
return clusterName
}


@@ -124,3 +124,20 @@ func (az *Cloud) getPublicIPAddress(name string) (pip network.PublicIPAddress, e
return pip, exists, err
}
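// getSubnet returns the named subnet in the given virtual network, and whether it exists.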
func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet network.Subnet, exists bool, err error) {
var realErr error
subnet, err = az.SubnetsClient.Get(az.ResourceGroup, virtualNetworkName, subnetName, "")
exists, realErr = checkResourceExistsFromError(err)
if realErr != nil {
return subnet, false, realErr
}
if !exists {
return subnet, false, nil
}
return subnet, exists, err
}