From 4e4684662d1735bcd5416e3f29de648ff276dd7f Mon Sep 17 00:00:00 2001
From: andyzhangx
Date: Mon, 18 Feb 2019 08:29:37 +0000
Subject: [PATCH] add mixed protocol support for azure load balancer

---
 .../providers/azure/azure_loadbalancer.go          | 141 ++++++++++--------
 .../providers/azure/azure_standard.go              |   6 +-
 .../providers/azure/azure_test.go                  |   2 +-
 3 files changed, 80 insertions(+), 69 deletions(-)

diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
index 0121e8e7dcf..9a5f8398684 100644
--- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
+++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
@@ -879,74 +879,85 @@ func (az *Cloud) reconcileLoadBalancerRule(
     var expectedProbes []network.Probe
     var expectedRules []network.LoadBalancingRule
     for _, port := range ports {
-        lbRuleName := az.getLoadBalancerRuleName(service, port, subnet(service))
-
-        klog.V(2).Infof("reconcileLoadBalancerRule lb name (%s) rule name (%s)", lbName, lbRuleName)
-
-        transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol)
-        if err != nil {
-            return expectedProbes, expectedRules, err
-        }
-
-        if servicehelpers.NeedsHealthCheck(service) {
-            podPresencePath, podPresencePort := servicehelpers.GetServiceHealthCheckPathPort(service)
-
-            expectedProbes = append(expectedProbes, network.Probe{
-                Name: &lbRuleName,
-                ProbePropertiesFormat: &network.ProbePropertiesFormat{
-                    RequestPath:       to.StringPtr(podPresencePath),
-                    Protocol:          network.ProbeProtocolHTTP,
-                    Port:              to.Int32Ptr(podPresencePort),
-                    IntervalInSeconds: to.Int32Ptr(5),
-                    NumberOfProbes:    to.Int32Ptr(2),
-                },
-            })
-        } else if port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP {
-            // we only add the expected probe if we're doing TCP
-            expectedProbes = append(expectedProbes, network.Probe{
-                Name: &lbRuleName,
-                ProbePropertiesFormat: &network.ProbePropertiesFormat{
-                    Protocol:          *probeProto,
-                    Port:              to.Int32Ptr(port.NodePort),
-                    IntervalInSeconds: to.Int32Ptr(5),
-                    NumberOfProbes:    to.Int32Ptr(2),
-                },
-            })
-        }
-
-        loadDistribution := network.Default
-        if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
-            loadDistribution = network.SourceIP
-        }
-
-        expectedRule := network.LoadBalancingRule{
-            Name: &lbRuleName,
-            LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
-                Protocol: *transportProto,
-                FrontendIPConfiguration: &network.SubResource{
-                    ID: to.StringPtr(lbFrontendIPConfigID),
-                },
-                BackendAddressPool: &network.SubResource{
-                    ID: to.StringPtr(lbBackendPoolID),
-                },
-                LoadDistribution: loadDistribution,
-                FrontendPort:     to.Int32Ptr(port.Port),
-                BackendPort:      to.Int32Ptr(port.Port),
-                EnableFloatingIP: to.BoolPtr(true),
-            },
-        }
-        if port.Protocol == v1.ProtocolTCP {
-            expectedRule.LoadBalancingRulePropertiesFormat.IdleTimeoutInMinutes = lbIdleTimeout
-        }
-
-        // we didn't construct the probe objects for UDP or SCTP because they're not used/needed/allowed
-        if port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP {
-            expectedRule.Probe = &network.SubResource{
-                ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, lbRuleName)),
+        protocols := []v1.Protocol{port.Protocol}
+        if v, ok := service.Annotations[ServiceAnnotationLoadBalancerMixedProtocols]; ok && v == "true" {
+            klog.V(2).Infof("reconcileLoadBalancerRule lb name (%s) flag(%s) is set", lbName, ServiceAnnotationLoadBalancerMixedProtocols)
+            if port.Protocol == v1.ProtocolTCP {
+                protocols = append(protocols, v1.ProtocolUDP)
+            } else if port.Protocol == v1.ProtocolUDP {
+                protocols = append(protocols, v1.ProtocolTCP)
             }
         }
-        expectedRules = append(expectedRules, expectedRule)
+        for _, protocol := range protocols {
+            lbRuleName := az.getLoadBalancerRuleName(service, protocol, port.Port, subnet(service))
+            klog.V(2).Infof("reconcileLoadBalancerRule lb name (%s) rule name (%s)", lbName, lbRuleName)
+
+            transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(protocol)
+            if err != nil {
+                return expectedProbes, expectedRules, err
+            }
+
+            if servicehelpers.NeedsHealthCheck(service) {
+                podPresencePath, podPresencePort := servicehelpers.GetServiceHealthCheckPathPort(service)
+
+                expectedProbes = append(expectedProbes, network.Probe{
+                    Name: &lbRuleName,
+                    ProbePropertiesFormat: &network.ProbePropertiesFormat{
+                        RequestPath:       to.StringPtr(podPresencePath),
+                        Protocol:          network.ProbeProtocolHTTP,
+                        Port:              to.Int32Ptr(podPresencePort),
+                        IntervalInSeconds: to.Int32Ptr(5),
+                        NumberOfProbes:    to.Int32Ptr(2),
+                    },
+                })
+            } else if protocol != v1.ProtocolUDP && protocol != v1.ProtocolSCTP {
+                // we only add the expected probe if we're doing TCP
+                expectedProbes = append(expectedProbes, network.Probe{
+                    Name: &lbRuleName,
+                    ProbePropertiesFormat: &network.ProbePropertiesFormat{
+                        Protocol:          *probeProto,
+                        Port:              to.Int32Ptr(port.NodePort),
+                        IntervalInSeconds: to.Int32Ptr(5),
+                        NumberOfProbes:    to.Int32Ptr(2),
+                    },
+                })
+            }
+
+            loadDistribution := network.Default
+            if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
+                loadDistribution = network.SourceIP
+            }
+
+            expectedRule := network.LoadBalancingRule{
+                Name: &lbRuleName,
+                LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
+                    Protocol: *transportProto,
+                    FrontendIPConfiguration: &network.SubResource{
+                        ID: to.StringPtr(lbFrontendIPConfigID),
+                    },
+                    BackendAddressPool: &network.SubResource{
+                        ID: to.StringPtr(lbBackendPoolID),
+                    },
+                    LoadDistribution: loadDistribution,
+                    FrontendPort:     to.Int32Ptr(port.Port),
+                    BackendPort:      to.Int32Ptr(port.Port),
+                    EnableFloatingIP: to.BoolPtr(true),
+                },
+            }
+            if protocol == v1.ProtocolTCP {
+                expectedRule.LoadBalancingRulePropertiesFormat.IdleTimeoutInMinutes = lbIdleTimeout
+            }
+
+            // we didn't construct the probe objects for UDP or SCTP because they're not used/needed/allowed
+            if protocol != v1.ProtocolUDP && protocol != v1.ProtocolSCTP {
+                expectedRule.Probe = &network.SubResource{
+                    ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, lbRuleName)),
+                }
+            }
+
+            expectedRules = append(expectedRules, expectedRule)
+        }
     }
 
     return expectedProbes, expectedRules, nil
 }
diff --git a/pkg/cloudprovider/providers/azure/azure_standard.go b/pkg/cloudprovider/providers/azure/azure_standard.go
index baad60fd639..29568b2e2a3 100644
--- a/pkg/cloudprovider/providers/azure/azure_standard.go
+++ b/pkg/cloudprovider/providers/azure/azure_standard.go
@@ -223,12 +223,12 @@ func getBackendPoolName(clusterName string) string {
     return clusterName
 }
 
-func (az *Cloud) getLoadBalancerRuleName(service *v1.Service, port v1.ServicePort, subnetName *string) string {
+func (az *Cloud) getLoadBalancerRuleName(service *v1.Service, protocol v1.Protocol, port int32, subnetName *string) string {
     prefix := az.getRulePrefix(service)
     if subnetName == nil {
-        return fmt.Sprintf("%s-%s-%d", prefix, port.Protocol, port.Port)
+        return fmt.Sprintf("%s-%s-%d", prefix, protocol, port)
     }
-    return fmt.Sprintf("%s-%s-%s-%d", prefix, *subnetName, port.Protocol, port.Port)
+    return fmt.Sprintf("%s-%s-%s-%d", prefix, *subnetName, protocol, port)
 }
 
 func (az *Cloud) getSecurityRuleName(service *v1.Service, port v1.ServicePort, sourceAddrPrefix string) string {
diff --git a/pkg/cloudprovider/providers/azure/azure_test.go b/pkg/cloudprovider/providers/azure/azure_test.go
index 4cfb3a1d0fa..7400a9fd47b 100644
--- a/pkg/cloudprovider/providers/azure/azure_test.go
+++ b/pkg/cloudprovider/providers/azure/azure_test.go
@@ -1209,7 +1209,7 @@ func validateLoadBalancer(t *testing.T, loadBalancer *network.LoadBalancer, serv
     }
     for _, wantedRule := range svc.Spec.Ports {
         expectedRuleCount++
-        wantedRuleName := az.getLoadBalancerRuleName(&svc, wantedRule, subnet(&svc))
+        wantedRuleName := az.getLoadBalancerRuleName(&svc, wantedRule.Protocol, wantedRule.Port, subnet(&svc))
         foundRule := false
         for _, actualRule := range *loadBalancer.LoadBalancingRules {
             if strings.EqualFold(*actualRule.Name, wantedRuleName) &&
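Illustrative sketch (not part of the patch): a minimal Service definition that opts in to the mixed-protocol behaviour added above, written in Go against the core/v1 API. The annotation string, the service name, and the rule-name pattern shown in the comments are assumptions; the patch only references the constant ServiceAnnotationLoadBalancerMixedProtocols and does not show its literal value.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	svc := v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "example-dns", // hypothetical service name
			Annotations: map[string]string{
				// Assumed value of ServiceAnnotationLoadBalancerMixedProtocols;
				// the constant itself is defined elsewhere in the azure provider.
				"service.beta.kubernetes.io/azure-load-balancer-mixed-protocols": "true",
			},
		},
		Spec: v1.ServiceSpec{
			Type: v1.ServiceTypeLoadBalancer,
			Ports: []v1.ServicePort{
				{Name: "dns", Port: 53, Protocol: v1.ProtocolTCP},
			},
		},
	}

	// With the annotation set to "true", reconcileLoadBalancerRule expands the
	// single TCP port into both protocols, so two load-balancing rules are
	// generated for port 53 (TCP and UDP), each named by
	// getLoadBalancerRuleName(service, protocol, port, subnet), e.g.
	// <rulePrefix>-TCP-53 and <rulePrefix>-UDP-53; only the TCP rule gets a probe.
	fmt.Printf("service %s requests mixed-protocol rules on port %d\n",
		svc.Name, svc.Spec.Ports[0].Port)
}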