Fix health check from Google's Load Balancer

This change adds two options for Windows:
--forward-healthcheck-vip: If true, forward the service VIP for the health
check port.
--root-hnsendpoint-name: The name of the HNS endpoint for the root network
namespace attached to the l2bridge; defaults to cbr0.

When --forward-healthcheck-vip is set to true and the winkernel proxy mode
is used, kube-proxy adds an HNS load balancer that forwards health-check
requests sent to lb_vip:healthcheck_port on to node_ip:healthcheck_port.
Without this forwarding, the health check from Google's load balancer fails,
and the load balancer stops forwarding traffic to the Windows node.

This change fixes the following two cases for a Service:
- `externalTrafficPolicy: Cluster` (the default): the healthcheck_port is
10256 for all services. Without this fix, traffic is never forwarded
directly to a Windows node; it always goes through a Linux node and is
forwarded to Windows from there.
- `externalTrafficPolicy: Local`: each Local service gets its own
healthcheck_port. Without this fix, this feature does not work on Windows
nodes at all. The feature preserves the client IP of connections to
applications running in Windows pods. (A port-selection sketch follows
below.)
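
For reference, the port that gets forwarded per service is picked roughly as follows. This is a minimal standalone sketch, not the proxier's actual code; the function and parameter names are made up for illustration:

```go
package main

import "fmt"

// healthCheckPort sketches the per-service selection described above:
// a Local service carries its own HealthCheckNodePort, while every other
// service is probed on kube-proxy's shared healthz port (10256 by default).
func healthCheckPort(healthCheckNodePort, healthzPort int) int {
	if healthCheckNodePort != 0 {
		return healthCheckNodePort // externalTrafficPolicy: Local
	}
	return healthzPort // externalTrafficPolicy: Cluster
}

func main() {
	fmt.Println(healthCheckPort(0, 10256))     // Cluster: shared port 10256
	fmt.Println(healthCheckPort(32123, 10256)) // Local: per-service port
}
```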

Change-Id: If4513e72900101ef70d86b91155e56a1f8c79719
Yongkun Gui 2021-03-29 23:27:39 +00:00
parent 45f2c63d6a
commit 78a507b256
14 changed files with 183 additions and 32 deletions

View File

@@ -892,8 +892,12 @@ function Configure-HostNetworkingService {
         -Verbose
     $created_hns_network = $true
   }
+  # This name of endpoint is referred in pkg/proxy/winkernel/proxier.go as part of
+  # kube-proxy as well. A health check port for every service that is specified as
+  # "externalTrafficPolicy: local" will be added on the endpoint.
+  # PLEASE KEEP THEM CONSISTENT!!!
   $endpoint_name = "cbr0"
   $vnic_name = "vEthernet (${endpoint_name})"
   $hns_endpoint = Get-HnsEndpoint | Where-Object Name -eq $endpoint_name

View File

@@ -41,4 +41,6 @@ func (o *Options) addOSFlags(fs *pflag.FlagSet) {
     fs.StringVar(&o.config.Winkernel.SourceVip, "source-vip", o.config.Winkernel.SourceVip, "The IP address of the source VIP for non-DSR.")
     fs.StringVar(&o.config.Winkernel.NetworkName, "network-name", o.config.Winkernel.NetworkName, "The name of the cluster network.")
     fs.BoolVar(&o.config.Winkernel.EnableDSR, "enable-dsr", o.config.Winkernel.EnableDSR, "If true make kube-proxy apply DSR policies for service VIP")
+    fs.StringVar(&o.config.Winkernel.RootHnsEndpointName, "root-hnsendpoint-name", "cbr0", "The name of the hns endpoint name for root namespace attached to l2bridge")
+    fs.BoolVar(&o.config.Winkernel.ForwardHealthCheckVip, "forward-healthcheck-vip", o.config.Winkernel.ForwardHealthCheckVip, "If true forward service VIP for health check port")
 }
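
As a side note on the flag wiring above, here is a minimal, self-contained sketch of how the two new flags parse with spf13/pflag (the flag library kube-proxy uses). The flag-set name and the argument list are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Register the two flags the same way addOSFlags does, but into a
	// throwaway flag set instead of kube-proxy's options struct.
	fs := pflag.NewFlagSet("kube-proxy-sketch", pflag.ExitOnError)
	rootHnsEndpointName := fs.String("root-hnsendpoint-name", "cbr0",
		"The name of the hns endpoint name for root namespace attached to l2bridge")
	forwardHealthCheckVip := fs.Bool("forward-healthcheck-vip", false,
		"If true forward service VIP for health check port")

	// Example invocation: only the boolean flag is set, so the endpoint
	// name keeps its "cbr0" default.
	_ = fs.Parse([]string{"--forward-healthcheck-vip=true"})
	fmt.Println(*rootHnsEndpointName, *forwardHealthCheckVip) // cbr0 true
}
```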

View File

@@ -24,7 +24,9 @@ package app
 import (
     "errors"
     "fmt"
+    "net"
     goruntime "runtime"
+    "strconv"

     // Enable pprof HTTP handlers.
     _ "net/http/pprof"

@@ -97,8 +99,11 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi
     }

     var healthzServer healthcheck.ProxierHealthUpdater
+    var healthzPort int
     if len(config.HealthzBindAddress) > 0 {
         healthzServer = healthcheck.NewProxierHealthServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration, recorder, nodeRef)
+        _, port, _ := net.SplitHostPort(config.HealthzBindAddress)
+        healthzPort, _ = strconv.Atoi(port)
     }

     var proxier proxy.Provider

@@ -120,6 +125,7 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi
             recorder,
             healthzServer,
             config.Winkernel,
+            healthzPort,
         )
     } else {

@@ -134,6 +140,7 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi
             recorder,
             healthzServer,
             config.Winkernel,
+            healthzPort,
         )
     }
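
The healthzPort value threaded through above is simply the port half of --healthz-bind-address. A standalone sketch of that parsing, using the kube-proxy default address purely as example input:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	// Mirrors the parsing added to newProxyServer: split the healthz bind
	// address and keep only the port, which Cluster-policy health checks use.
	bindAddress := "0.0.0.0:10256" // kube-proxy's default --healthz-bind-address
	_, port, err := net.SplitHostPort(bindAddress)
	if err != nil {
		fmt.Println("invalid bind address:", err)
		return
	}
	healthzPort, _ := strconv.Atoi(port)
	fmt.Println(healthzPort) // 10256
}
```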

View File

@@ -49802,8 +49802,24 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyWinkernelConfiguration(ref
                         Format: "",
                     },
                 },
+                "rootHnsEndpointName": {
+                    SchemaProps: spec.SchemaProps{
+                        Description: "RootHnsEndpointName is the name of hnsendpoint that is attached to l2bridge for root network namespace",
+                        Default:     "",
+                        Type:        []string{"string"},
+                        Format:      "",
+                    },
+                },
+                "forwardHealthCheckVip": {
+                    SchemaProps: spec.SchemaProps{
+                        Description: "ForwardHealthCheckVip forwards service VIP for health check port on Windows",
+                        Default:     false,
+                        Type:        []string{"boolean"},
+                        Format:      "",
+                    },
+                },
             },
-            Required: []string{"networkName", "sourceVip", "enableDSR"},
+            Required: []string{"networkName", "sourceVip", "enableDSR", "rootHnsEndpointName", "forwardHealthCheckVip"},
         },
     },
 }

View File

@@ -42,5 +42,7 @@ showHiddenMetricsForVersion: ""
 udpIdleTimeout: 250ms
 winkernel:
   enableDSR: false
+  forwardHealthCheckVip: false
   networkName: ""
+  rootHnsEndpointName: ""
   sourceVip: ""

View File

@@ -42,5 +42,7 @@ showHiddenMetricsForVersion: ""
 udpIdleTimeout: 250ms
 winkernel:
   enableDSR: false
+  forwardHealthCheckVip: false
   networkName: ""
+  rootHnsEndpointName: ""
   sourceVip: ""

View File

@@ -99,6 +99,12 @@ type KubeProxyWinkernelConfiguration struct {
     // enableDSR tells kube-proxy whether HNS policies should be created
     // with DSR
     EnableDSR bool
+    // RootHnsEndpointName is the name of hnsendpoint that is attached to
+    // l2bridge for root network namespace
+    RootHnsEndpointName string
+    // ForwardHealthCheckVip forwards service VIP for health check port on
+    // Windows
+    ForwardHealthCheckVip bool
 }

 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
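
A minimal sketch of how the two new fields sit alongside the existing winkernel settings; the struct below is a local stand-in for the real KubeProxyWinkernelConfiguration type, and the values are only an example:

```go
package main

import "fmt"

// winkernelConfig mirrors the shape of KubeProxyWinkernelConfiguration after
// this change; it is a stand-in for illustration, not the real k8s.io type.
type winkernelConfig struct {
	NetworkName           string
	SourceVip             string
	EnableDSR             bool
	RootHnsEndpointName   string // must stay consistent with the endpoint created by node setup ("cbr0")
	ForwardHealthCheckVip bool
}

func main() {
	cfg := winkernelConfig{
		RootHnsEndpointName:   "cbr0",
		ForwardHealthCheckVip: true,
	}
	fmt.Printf("%+v\n", cfg)
}
```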

View File

@@ -262,6 +262,8 @@ func autoConvert_v1alpha1_KubeProxyWinkernelConfiguration_To_config_KubeProxyWin
     out.NetworkName = in.NetworkName
     out.SourceVip = in.SourceVip
     out.EnableDSR = in.EnableDSR
+    out.RootHnsEndpointName = in.RootHnsEndpointName
+    out.ForwardHealthCheckVip = in.ForwardHealthCheckVip
     return nil
 }

@@ -274,6 +276,8 @@ func autoConvert_config_KubeProxyWinkernelConfiguration_To_v1alpha1_KubeProxyWin
     out.NetworkName = in.NetworkName
     out.SourceVip = in.SourceVip
     out.EnableDSR = in.EnableDSR
+    out.RootHnsEndpointName = in.RootHnsEndpointName
+    out.ForwardHealthCheckVip = in.ForwardHealthCheckVip
     return nil
 }

View File

@@ -33,6 +33,7 @@ type HostNetworkService interface {
     getNetworkByName(name string) (*hnsNetworkInfo, error)
     getEndpointByID(id string) (*endpointsInfo, error)
     getEndpointByIpAddress(ip string, networkName string) (*endpointsInfo, error)
+    getEndpointByName(id string) (*endpointsInfo, error)
     createEndpoint(ep *endpointsInfo, networkName string) (*endpointsInfo, error)
     deleteEndpoint(hnsID string) error
     getLoadBalancer(endpoints []endpointsInfo, flags loadBalancerFlags, sourceVip string, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*loadBalancerInfo, error)

@@ -62,8 +63,9 @@ func (hns hnsV1) getEndpointByID(id string) (*endpointsInfo, error) {
         return nil, err
     }
     return &endpointsInfo{
         ip: hnsendpoint.IPAddress.String(),
-        isLocal:    !hnsendpoint.IsRemoteEndpoint, //TODO: Change isLocal to isRemote
+        //TODO: Change isLocal to isRemote
+        isLocal:    !hnsendpoint.IsRemoteEndpoint,
         macAddress: hnsendpoint.MacAddress,
         hnsID:      hnsendpoint.Id,
         hns:        hns,

@@ -108,6 +110,23 @@ func (hns hnsV1) getEndpointByIpAddress(ip string, networkName string) (*endpoin
     return nil, fmt.Errorf("Endpoint %v not found on network %s", ip, networkName)
 }

+func (hns hnsV1) getEndpointByName(name string) (*endpointsInfo, error) {
+    hnsendpoint, err := hcsshim.GetHNSEndpointByName(name)
+    if err != nil {
+        klog.ErrorS(err, "failed to get HNS endpoint by name", "name", name)
+        return nil, err
+    }
+    return &endpointsInfo{
+        ip: hnsendpoint.IPAddress.String(),
+        //TODO: Change isLocal to isRemote
+        isLocal:    !hnsendpoint.IsRemoteEndpoint,
+        macAddress: hnsendpoint.MacAddress,
+        hnsID:      hnsendpoint.Id,
+        hns:        hns,
+    }, nil
+}
+
 func (hns hnsV1) createEndpoint(ep *endpointsInfo, networkName string) (*endpointsInfo, error) {
     hnsNetwork, err := hcsshim.GetHNSNetworkByName(networkName)
     if err != nil {

View File

@@ -114,6 +114,19 @@ func (hns hnsV2) getEndpointByIpAddress(ip string, networkName string) (*endpoin
     return nil, fmt.Errorf("Endpoint %v not found on network %s", ip, networkName)
 }

+func (hns hnsV2) getEndpointByName(name string) (*endpointsInfo, error) {
+    hnsendpoint, err := hcn.GetEndpointByName(name)
+    if err != nil {
+        return nil, err
+    }
+    return &endpointsInfo{ //TODO: fill out PA
+        ip:         hnsendpoint.IpConfigurations[0].IpAddress,
+        isLocal:    uint32(hnsendpoint.Flags&hcn.EndpointFlagsRemoteEndpoint) == 0, //TODO: Change isLocal to isRemote
+        macAddress: hnsendpoint.MacAddress,
+        hnsID:      hnsendpoint.Id,
+        hns:        hns,
+    }, nil
+}
+
 func (hns hnsV2) createEndpoint(ep *endpointsInfo, networkName string) (*endpointsInfo, error) {
     hnsNetwork, err := hcn.GetNetworkByName(networkName)
     if err != nil {

View File

@@ -26,6 +26,8 @@ import (
     "strings"
     "testing"

+    "github.com/google/go-cmp/cmp"
 )

@@ -56,12 +58,12 @@ func TestGetEndpointByID(t *testing.T) {
     testGetEndpointByID(t, hnsV1)
     testGetEndpointByID(t, hnsV2)
 }

-func TestGetEndpointByIpAddress(t *testing.T) {
+func TestGetEndpointByIpAddressAndName(t *testing.T) {
     hnsV1 := hnsV1{}
     hnsV2 := hnsV2{}

-    testGetEndpointByIpAddress(t, hnsV1)
-    testGetEndpointByIpAddress(t, hnsV2)
+    testGetEndpointByIpAddressAndName(t, hnsV1)
+    testGetEndpointByIpAddressAndName(t, hnsV2)
 }

 func TestCreateEndpointLocal(t *testing.T) {
     hnsV1 := hnsV1{}

@@ -165,7 +167,7 @@ func testGetEndpointByID(t *testing.T, hns HostNetworkService) {
         t.Error(err)
     }
 }

-func testGetEndpointByIpAddress(t *testing.T, hns HostNetworkService) {
+func testGetEndpointByIpAddressAndName(t *testing.T, hns HostNetworkService) {
     Network := mustTestNetwork(t)

     ipConfig := &hcn.IpConfig{

@@ -195,6 +197,15 @@ func testGetEndpointByIpAddress(t *testing.T, hns HostNetworkService) {
         t.Errorf("%v does not match %v", endpoint.ip, Endpoint.IpConfigurations[0].IpAddress)
     }

+    endpoint2, err := hns.getEndpointByName(Endpoint.Name)
+    if err != nil {
+        t.Error(err)
+    }
+    diff := cmp.Diff(endpoint, endpoint2)
+    if diff != "" {
+        t.Errorf("getEndpointByName(%s) returned a different endpoint. Diff: %s ", Endpoint.Name, diff)
+    }
+
     err = Endpoint.Delete()
     if err != nil {
         t.Error(err)
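
The test now leans on github.com/google/go-cmp: an empty cmp.Diff result means the two values are equal. A tiny standalone example of that behavior (the struct and values here are made up, with exported fields so cmp needs no extra options):

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type endpoint struct {
	IP  string
	MAC string
}

func main() {
	a := endpoint{IP: "192.168.1.4", MAC: "00-11-22-33-44-55"}
	b := a
	// Equal values produce "", which is why the test treats any non-empty
	// diff as a failure.
	fmt.Printf("equal: %q\n", cmp.Diff(a, b))

	b.IP = "192.168.1.5"
	fmt.Println("different:\n" + cmp.Diff(a, b))
}
```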

View File

@@ -87,8 +87,9 @@ type externalIPInfo struct {
 }

 type loadBalancerIngressInfo struct {
     ip               string
     hnsID            string
+    healthCheckHnsID string
 }

 type loadBalancerInfo struct {

@@ -548,6 +549,10 @@
     hostMac           string
     isDSR             bool
     supportedFeatures hcn.SupportedFeatures
+    healthzPort       int
+
+    forwardHealthCheckVip bool
+    rootHnsEndpointName   string
 }

 type localPort struct {

@@ -593,6 +598,7 @@ func NewProxier(
     recorder events.EventRecorder,
     healthzServer healthcheck.ProxierHealthUpdater,
     config config.KubeProxyWinkernelConfiguration,
+    healthzPort int,
 ) (*Proxier, error) {
     masqueradeValue := 1 << uint(masqueradeBit)
     masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue)

@@ -684,24 +690,27 @@
     isIPv6 := netutils.IsIPv6(nodeIP)
     proxier := &Proxier{
         endPointsRefCount:     make(endPointsReferenceCountMap),
         serviceMap:            make(proxy.ServiceMap),
         endpointsMap:          make(proxy.EndpointsMap),
         masqueradeAll:         masqueradeAll,
         masqueradeMark:        masqueradeMark,
         clusterCIDR:           clusterCIDR,
         hostname:              hostname,
         nodeIP:                nodeIP,
         recorder:              recorder,
         serviceHealthServer:   serviceHealthServer,
         healthzServer:         healthzServer,
         hns:                   hns,
         network:               *hnsNetworkInfo,
         sourceVip:             sourceVip,
         hostMac:               hostMac,
         isDSR:                 isDSR,
         supportedFeatures:     supportedFeatures,
         isIPv6Mode:            isIPv6,
+        healthzPort:           healthzPort,
+        rootHnsEndpointName:   config.RootHnsEndpointName,
+        forwardHealthCheckVip: config.ForwardHealthCheckVip,
     }

     ipFamily := v1.IPv4Protocol

@@ -730,18 +739,19 @@ func NewDualStackProxier(
     recorder events.EventRecorder,
     healthzServer healthcheck.ProxierHealthUpdater,
     config config.KubeProxyWinkernelConfiguration,
+    healthzPort int,
 ) (proxy.Provider, error) {

     // Create an ipv4 instance of the single-stack proxier
     ipv4Proxier, err := NewProxier(syncPeriod, minSyncPeriod, masqueradeAll, masqueradeBit,
-        clusterCIDR, hostname, nodeIP[0], recorder, healthzServer, config)
+        clusterCIDR, hostname, nodeIP[0], recorder, healthzServer, config, healthzPort)
     if err != nil {
         return nil, fmt.Errorf("unable to create ipv4 proxier: %v, hostname: %s, clusterCIDR : %s, nodeIP:%v", err, hostname, clusterCIDR, nodeIP[0])
     }

     ipv6Proxier, err := NewProxier(syncPeriod, minSyncPeriod, masqueradeAll, masqueradeBit,
-        clusterCIDR, hostname, nodeIP[1], recorder, healthzServer, config)
+        clusterCIDR, hostname, nodeIP[1], recorder, healthzServer, config, healthzPort)
     if err != nil {
         return nil, fmt.Errorf("unable to create ipv6 proxier: %v, hostname: %s, clusterCIDR : %s, nodeIP:%v", err, hostname, clusterCIDR, nodeIP[1])
     }

@@ -796,6 +806,10 @@ func (svcInfo *serviceInfo) deleteAllHnsLoadBalancerPolicy() {
     for _, lbIngressIP := range svcInfo.loadBalancerIngressIPs {
         hns.deleteLoadBalancer(lbIngressIP.hnsID)
         lbIngressIP.hnsID = ""
+        if lbIngressIP.healthCheckHnsID != "" {
+            hns.deleteLoadBalancer(lbIngressIP.healthCheckHnsID)
+            lbIngressIP.healthCheckHnsID = ""
+        }
     }
 }

@@ -988,6 +1002,11 @@ func (proxier *Proxier) syncProxyRules() {
     hnsNetworkName := proxier.network.name
     hns := proxier.hns

+    var gatewayHnsendpoint *endpointsInfo
+    if proxier.forwardHealthCheckVip {
+        gatewayHnsendpoint, _ = hns.getEndpointByName(proxier.rootHnsEndpointName)
+    }
+
     prevNetworkID := proxier.network.id
     updatedNetwork, err := hns.getNetworkByName(hnsNetworkName)
     if updatedNetwork == nil || updatedNetwork.id != prevNetworkID || isNetworkNotFoundError(err) {

@@ -1319,7 +1338,30 @@
             } else {
                 klog.V(3).InfoS("Skipped creating Hns LoadBalancer for loadBalancer Ingress resources", "lbIngressIP", lbIngressIP)
             }
+            lbIngressIP.hnsID = hnsLoadBalancer.hnsID
+            klog.V(3).InfoS("Hns LoadBalancer resource created for loadBalancer Ingress resources", "lbIngressIP", lbIngressIP)
+
+            if proxier.forwardHealthCheckVip && gatewayHnsendpoint != nil {
+                nodeport := proxier.healthzPort
+                if svcInfo.HealthCheckNodePort() != 0 {
+                    nodeport = svcInfo.HealthCheckNodePort()
+                }
+                hnsHealthCheckLoadBalancer, err := hns.getLoadBalancer(
+                    []endpointsInfo{*gatewayHnsendpoint},
+                    loadBalancerFlags{isDSR: false, useMUX: svcInfo.preserveDIP, preserveDIP: svcInfo.preserveDIP},
+                    sourceVip,
+                    lbIngressIP.ip,
+                    Enum(svcInfo.Protocol()),
+                    uint16(nodeport),
+                    uint16(nodeport),
+                )
+                if err != nil {
+                    klog.ErrorS(err, "Policy creation failed")
+                    continue
+                }
+                lbIngressIP.healthCheckHnsID = hnsHealthCheckLoadBalancer.hnsID
+                klog.V(3).InfoS("Hns Health Check LoadBalancer resource created for loadBalancer Ingress resources", "ip", lbIngressIP)
+            }
         }
         svcInfo.policyApplied = true
         klog.V(2).InfoS("Policy successfully applied for service", "serviceInfo", svcInfo)
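
Putting the proxier changes together: when --forward-healthcheck-vip is on and the root HNS endpoint is found, each load-balancer ingress IP gets one extra HNS load balancer that redirects lb_vip:healthcheck_port to the host (root-namespace) endpoint on the same port. A standalone sketch of that per-VIP fan-out, using hypothetical stand-in types instead of the winkernel ones, with the port assumed to be resolved as in the earlier sketch:

```go
package main

import "fmt"

// hostEndpoint and service are illustrative stand-ins for the proxier's
// endpointsInfo and serviceInfo types.
type hostEndpoint struct{ name, ip string }

type service struct {
	ingressVIPs     []string
	healthCheckPort int // HealthCheckNodePort for Local services, else the shared healthz port
}

// healthCheckForwardingRules sketches what syncProxyRules now installs:
// one forwarding policy per ingress VIP, all pointing at the root-namespace
// endpoint so the node answers the load balancer's probes.
func healthCheckForwardingRules(svc service, gateway *hostEndpoint) []string {
	var rules []string
	if gateway == nil {
		// --forward-healthcheck-vip disabled, or the root hns endpoint
		// (e.g. "cbr0") was not found: nothing to install.
		return rules
	}
	for _, vip := range svc.ingressVIPs {
		rules = append(rules, fmt.Sprintf("%s:%d -> %s:%d",
			vip, svc.healthCheckPort, gateway.ip, svc.healthCheckPort))
	}
	return rules
}

func main() {
	gw := &hostEndpoint{name: "cbr0", ip: "10.128.0.5"} // example node address
	svc := service{ingressVIPs: []string{"34.72.10.20"}, healthCheckPort: 10256}
	fmt.Println(healthCheckForwardingRules(svc, gw))
}
```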

View File

@@ -2,7 +2,7 @@
 // +build windows

 /*
-Copyright 2018 The Kubernetes Authors.
+Copyright 2021 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

@@ -73,6 +73,15 @@ func (hns fakeHNS) getEndpointByID(id string) (*endpointsInfo, error) {
     return nil, nil
 }

+func (hns fakeHNS) getEndpointByName(name string) (*endpointsInfo, error) {
+    return &endpointsInfo{
+        isLocal:    true,
+        macAddress: macAddress,
+        hnsID:      guid,
+        hns:        hns,
+    }, nil
+}
+
 func (hns fakeHNS) getEndpointByIpAddress(ip string, networkName string) (*endpointsInfo, error) {
     _, ipNet, _ := netutils.ParseCIDRSloppy(destinationPrefix)

@@ -699,7 +708,6 @@ func TestCreateLoadBalancer(t *testing.T) {
             t.Errorf("%v does not match %v", svcInfo.hnsID, guid)
         }
     }
-
 }

 func TestCreateDsrLoadBalancer(t *testing.T) {

@@ -717,6 +725,7 @@ func TestCreateDsrLoadBalancer(t *testing.T) {
         Port:     "p80",
         Protocol: v1.ProtocolTCP,
     }
+    lbIP := "11.21.31.41"

     makeServiceMap(proxier,
         makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {

@@ -729,6 +738,9 @@ func TestCreateDsrLoadBalancer(t *testing.T) {
                 Protocol: v1.ProtocolTCP,
                 NodePort: int32(svcNodePort),
             }}
+            svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
+                IP: lbIP,
+            }}
         }),
     )

     tcpProtocol := v1.ProtocolTCP

@@ -761,6 +773,11 @@ func TestCreateDsrLoadBalancer(t *testing.T) {
         if svcInfo.localTrafficDSR != true {
             t.Errorf("Failed to create DSR loadbalancer with local traffic policy")
         }
+        if len(svcInfo.loadBalancerIngressIPs) == 0 {
+            t.Errorf("svcInfo does not have any loadBalancerIngressIPs, %+v", svcInfo)
+        } else if svcInfo.loadBalancerIngressIPs[0].healthCheckHnsID != guid {
+            t.Errorf("The Hns Loadbalancer HealthCheck Id %v does not match %v. ServicePortName %q", svcInfo.loadBalancerIngressIPs[0].healthCheckHnsID, guid, svcPortName.String())
+        }
     }
 }

View File

@@ -95,6 +95,12 @@ type KubeProxyWinkernelConfiguration struct {
     // enableDSR tells kube-proxy whether HNS policies should be created
     // with DSR
     EnableDSR bool `json:"enableDSR"`
+    // RootHnsEndpointName is the name of hnsendpoint that is attached to
+    // l2bridge for root network namespace
+    RootHnsEndpointName string `json:"rootHnsEndpointName"`
+    // ForwardHealthCheckVip forwards service VIP for health check port on
+    // Windows
+    ForwardHealthCheckVip bool `json:"forwardHealthCheckVip"`
 }

 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object