Fix health check from Google's Load Balancer
This change adds two options for Windows:

--forward-healthcheck-vip: if true, forward the service VIP for the health check port.
--root-hnsendpoint-name: the name of the HNS endpoint attached to the l2bridge for the root network namespace; the default is cbr0.

When --forward-healthcheck-vip is true and the winkernel proxier is used, kube-proxy adds an HNS load balancer that forwards health check requests sent to lb_vip:healthcheck_port to node_ip:healthcheck_port. Without this forwarding, the health check from the Google load balancer fails and it stops forwarding traffic to the Windows node.

This change fixes the following two cases for a service:

- `externalTrafficPolicy: Cluster` (the default): healthcheck_port is 10256 for all services. Without this fix, traffic is never forwarded directly to a Windows node; it always goes through a Linux node and is forwarded to Windows from there.
- `externalTrafficPolicy: Local`: each service configured as Local gets its own healthcheck_port. Without this fix, this feature does not work on Windows nodes at all. The feature preserves the IP of clients connecting to applications running in Windows pods.

Change-Id: If4513e72900101ef70d86b91155e56a1f8c79719
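For reference, a minimal, runnable Go sketch of the port-selection and forwarding rule described above. The svc type, the service names, and the 32123 port are illustrative stand-ins, not the proxier's real types; the actual logic lives in the syncProxyRules hunk further down.

package main

import "fmt"

// proxierHealthzPort stands in for the port kube-proxy's healthz server listens
// on (10256 by default), which backs externalTrafficPolicy: Cluster services.
const proxierHealthzPort = 10256

// svc is a hypothetical stand-in for the proxier's serviceInfo; only the field
// needed for the port-selection rule is modeled here.
type svc struct {
    name                string
    healthCheckNodePort int // non-zero only for externalTrafficPolicy: Local
}

// healthCheckPort mirrors the rule in the syncProxyRules hunk below: prefer the
// per-service HealthCheckNodePort, otherwise fall back to the healthz port.
func healthCheckPort(s svc) int {
    if s.healthCheckNodePort != 0 {
        return s.healthCheckNodePort
    }
    return proxierHealthzPort
}

func main() {
    services := []svc{
        {name: "web (externalTrafficPolicy: Local)", healthCheckNodePort: 32123},
        {name: "api (externalTrafficPolicy: Cluster)"},
    }
    for _, s := range services {
        p := healthCheckPort(s)
        // With forwardHealthCheckVip enabled, kube-proxy creates an HNS load
        // balancer so that lb_vip:p is answered by node_ip:p on the Windows node.
        fmt.Printf("%-42s forward lb_vip:%d -> node_ip:%d\n", s.name, p, p)
    }
}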
@@ -42,5 +42,7 @@ showHiddenMetricsForVersion: ""
 udpIdleTimeout: 250ms
 winkernel:
   enableDSR: false
+  forwardHealthCheckVip: false
   networkName: ""
+  rootHnsEndpointName: ""
   sourceVip: ""
@@ -99,6 +99,12 @@ type KubeProxyWinkernelConfiguration struct {
     // enableDSR tells kube-proxy whether HNS policies should be created
     // with DSR
     EnableDSR bool
+    // RootHnsEndpointName is the name of hnsendpoint that is attached to
+    // l2bridge for root network namespace
+    RootHnsEndpointName string
+    // ForwardHealthCheckVip forwards service VIP for health check port on
+    // Windows
+    ForwardHealthCheckVip bool
 }
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -262,6 +262,8 @@ func autoConvert_v1alpha1_KubeProxyWinkernelConfiguration_To_config_KubeProxyWin
     out.NetworkName = in.NetworkName
     out.SourceVip = in.SourceVip
     out.EnableDSR = in.EnableDSR
+    out.RootHnsEndpointName = in.RootHnsEndpointName
+    out.ForwardHealthCheckVip = in.ForwardHealthCheckVip
     return nil
 }
 
@@ -274,6 +276,8 @@ func autoConvert_config_KubeProxyWinkernelConfiguration_To_v1alpha1_KubeProxyWin
     out.NetworkName = in.NetworkName
     out.SourceVip = in.SourceVip
     out.EnableDSR = in.EnableDSR
+    out.RootHnsEndpointName = in.RootHnsEndpointName
+    out.ForwardHealthCheckVip = in.ForwardHealthCheckVip
     return nil
 }
 
@@ -33,6 +33,7 @@ type HostNetworkService interface {
     getNetworkByName(name string) (*hnsNetworkInfo, error)
     getEndpointByID(id string) (*endpointsInfo, error)
     getEndpointByIpAddress(ip string, networkName string) (*endpointsInfo, error)
+    getEndpointByName(id string) (*endpointsInfo, error)
     createEndpoint(ep *endpointsInfo, networkName string) (*endpointsInfo, error)
     deleteEndpoint(hnsID string) error
     getLoadBalancer(endpoints []endpointsInfo, flags loadBalancerFlags, sourceVip string, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*loadBalancerInfo, error)
@@ -62,8 +63,9 @@ func (hns hnsV1) getEndpointByID(id string) (*endpointsInfo, error) {
         return nil, err
     }
     return &endpointsInfo{
-        ip:         hnsendpoint.IPAddress.String(),
-        isLocal:    !hnsendpoint.IsRemoteEndpoint, //TODO: Change isLocal to isRemote
+        ip: hnsendpoint.IPAddress.String(),
+        //TODO: Change isLocal to isRemote
+        isLocal:    !hnsendpoint.IsRemoteEndpoint,
         macAddress: hnsendpoint.MacAddress,
         hnsID:      hnsendpoint.Id,
         hns:        hns,
@@ -108,6 +110,23 @@ func (hns hnsV1) getEndpointByIpAddress(ip string, networkName string) (*endpoin
 
     return nil, fmt.Errorf("Endpoint %v not found on network %s", ip, networkName)
 }
 
+func (hns hnsV1) getEndpointByName(name string) (*endpointsInfo, error) {
+    hnsendpoint, err := hcsshim.GetHNSEndpointByName(name)
+    if err != nil {
+        klog.ErrorS(err, "failed to get HNS endpoint by name", "name", name)
+        return nil, err
+    }
+    return &endpointsInfo{
+        ip: hnsendpoint.IPAddress.String(),
+        //TODO: Change isLocal to isRemote
+        isLocal:    !hnsendpoint.IsRemoteEndpoint,
+        macAddress: hnsendpoint.MacAddress,
+        hnsID:      hnsendpoint.Id,
+        hns:        hns,
+    }, nil
+}
+
 func (hns hnsV1) createEndpoint(ep *endpointsInfo, networkName string) (*endpointsInfo, error) {
     hnsNetwork, err := hcsshim.GetHNSNetworkByName(networkName)
     if err != nil {
@@ -114,6 +114,19 @@ func (hns hnsV2) getEndpointByIpAddress(ip string, networkName string) (*endpoin
 
     return nil, fmt.Errorf("Endpoint %v not found on network %s", ip, networkName)
 }
+func (hns hnsV2) getEndpointByName(name string) (*endpointsInfo, error) {
+    hnsendpoint, err := hcn.GetEndpointByName(name)
+    if err != nil {
+        return nil, err
+    }
+    return &endpointsInfo{ //TODO: fill out PA
+        ip:         hnsendpoint.IpConfigurations[0].IpAddress,
+        isLocal:    uint32(hnsendpoint.Flags&hcn.EndpointFlagsRemoteEndpoint) == 0, //TODO: Change isLocal to isRemote
+        macAddress: hnsendpoint.MacAddress,
+        hnsID:      hnsendpoint.Id,
+        hns:        hns,
+    }, nil
+}
 func (hns hnsV2) createEndpoint(ep *endpointsInfo, networkName string) (*endpointsInfo, error) {
     hnsNetwork, err := hcn.GetNetworkByName(networkName)
     if err != nil {
@@ -26,6 +26,8 @@ import (
 
     "strings"
     "testing"
+
+    "github.com/google/go-cmp/cmp"
 )
 
 const (
@@ -56,12 +58,12 @@ func TestGetEndpointByID(t *testing.T) {
     testGetEndpointByID(t, hnsV1)
     testGetEndpointByID(t, hnsV2)
 }
-func TestGetEndpointByIpAddress(t *testing.T) {
+func TestGetEndpointByIpAddressAndName(t *testing.T) {
     hnsV1 := hnsV1{}
     hnsV2 := hnsV2{}
 
-    testGetEndpointByIpAddress(t, hnsV1)
-    testGetEndpointByIpAddress(t, hnsV2)
+    testGetEndpointByIpAddressAndName(t, hnsV1)
+    testGetEndpointByIpAddressAndName(t, hnsV2)
 }
 func TestCreateEndpointLocal(t *testing.T) {
     hnsV1 := hnsV1{}
@@ -165,7 +167,7 @@ func testGetEndpointByID(t *testing.T, hns HostNetworkService) {
         t.Error(err)
     }
 }
-func testGetEndpointByIpAddress(t *testing.T, hns HostNetworkService) {
+func testGetEndpointByIpAddressAndName(t *testing.T, hns HostNetworkService) {
     Network := mustTestNetwork(t)
 
     ipConfig := &hcn.IpConfig{
@@ -195,6 +197,15 @@ func testGetEndpointByIpAddress(t *testing.T, hns HostNetworkService) {
         t.Errorf("%v does not match %v", endpoint.ip, Endpoint.IpConfigurations[0].IpAddress)
     }
 
+    endpoint, err = hns.getEndpointByName(Endpoint.Name)
+    if err != nil {
+        t.Error(err)
+    }
+    diff := cmp.Diff(endpoint, Endpoint)
+    if diff != "" {
+        t.Errorf("getEndpointByName(%s) returned a different endpoint. Diff: %s ", Endpoint.Name, diff)
+    }
+
     err = Endpoint.Delete()
     if err != nil {
         t.Error(err)
@@ -87,8 +87,9 @@ type externalIPInfo struct {
 }
 
 type loadBalancerIngressInfo struct {
-    ip    string
-    hnsID string
+    ip               string
+    hnsID            string
+    healthCheckHnsID string
 }
 
 type loadBalancerInfo struct {
@@ -548,6 +549,10 @@ type Proxier struct {
     hostMac           string
     isDSR             bool
     supportedFeatures hcn.SupportedFeatures
+    healthzPort       int
+
+    forwardHealthCheckVip bool
+    rootHnsEndpointName   string
 }
 
 type localPort struct {
@@ -593,6 +598,7 @@ func NewProxier(
     recorder events.EventRecorder,
     healthzServer healthcheck.ProxierHealthUpdater,
     config config.KubeProxyWinkernelConfiguration,
+    healthzPort int,
 ) (*Proxier, error) {
     masqueradeValue := 1 << uint(masqueradeBit)
     masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue)
@@ -684,24 +690,27 @@ func NewProxier(
 
     isIPv6 := netutils.IsIPv6(nodeIP)
     proxier := &Proxier{
-        endPointsRefCount:   make(endPointsReferenceCountMap),
-        serviceMap:          make(proxy.ServiceMap),
-        endpointsMap:        make(proxy.EndpointsMap),
-        masqueradeAll:       masqueradeAll,
-        masqueradeMark:      masqueradeMark,
-        clusterCIDR:         clusterCIDR,
-        hostname:            hostname,
-        nodeIP:              nodeIP,
-        recorder:            recorder,
-        serviceHealthServer: serviceHealthServer,
-        healthzServer:       healthzServer,
-        hns:                 hns,
-        network:             *hnsNetworkInfo,
-        sourceVip:           sourceVip,
-        hostMac:             hostMac,
-        isDSR:               isDSR,
-        supportedFeatures:   supportedFeatures,
-        isIPv6Mode:          isIPv6,
+        endPointsRefCount:     make(endPointsReferenceCountMap),
+        serviceMap:            make(proxy.ServiceMap),
+        endpointsMap:          make(proxy.EndpointsMap),
+        masqueradeAll:         masqueradeAll,
+        masqueradeMark:        masqueradeMark,
+        clusterCIDR:           clusterCIDR,
+        hostname:              hostname,
+        nodeIP:                nodeIP,
+        recorder:              recorder,
+        serviceHealthServer:   serviceHealthServer,
+        healthzServer:         healthzServer,
+        hns:                   hns,
+        network:               *hnsNetworkInfo,
+        sourceVip:             sourceVip,
+        hostMac:               hostMac,
+        isDSR:                 isDSR,
+        supportedFeatures:     supportedFeatures,
+        isIPv6Mode:            isIPv6,
+        healthzPort:           healthzPort,
+        rootHnsEndpointName:   config.RootHnsEndpointName,
+        forwardHealthCheckVip: config.ForwardHealthCheckVip,
     }
 
     ipFamily := v1.IPv4Protocol
@@ -730,18 +739,19 @@ func NewDualStackProxier(
     recorder events.EventRecorder,
     healthzServer healthcheck.ProxierHealthUpdater,
     config config.KubeProxyWinkernelConfiguration,
+    healthzPort int,
 ) (proxy.Provider, error) {
 
     // Create an ipv4 instance of the single-stack proxier
     ipv4Proxier, err := NewProxier(syncPeriod, minSyncPeriod, masqueradeAll, masqueradeBit,
-        clusterCIDR, hostname, nodeIP[0], recorder, healthzServer, config)
+        clusterCIDR, hostname, nodeIP[0], recorder, healthzServer, config, healthzPort)
 
     if err != nil {
         return nil, fmt.Errorf("unable to create ipv4 proxier: %v, hostname: %s, clusterCIDR : %s, nodeIP:%v", err, hostname, clusterCIDR, nodeIP[0])
     }
 
     ipv6Proxier, err := NewProxier(syncPeriod, minSyncPeriod, masqueradeAll, masqueradeBit,
-        clusterCIDR, hostname, nodeIP[1], recorder, healthzServer, config)
+        clusterCIDR, hostname, nodeIP[1], recorder, healthzServer, config, healthzPort)
     if err != nil {
         return nil, fmt.Errorf("unable to create ipv6 proxier: %v, hostname: %s, clusterCIDR : %s, nodeIP:%v", err, hostname, clusterCIDR, nodeIP[1])
     }
@@ -796,6 +806,10 @@ func (svcInfo *serviceInfo) deleteAllHnsLoadBalancerPolicy() {
     for _, lbIngressIP := range svcInfo.loadBalancerIngressIPs {
         hns.deleteLoadBalancer(lbIngressIP.hnsID)
         lbIngressIP.hnsID = ""
+        if lbIngressIP.healthCheckHnsID != "" {
+            hns.deleteLoadBalancer(lbIngressIP.healthCheckHnsID)
+            lbIngressIP.healthCheckHnsID = ""
+        }
     }
 }
 
@@ -988,6 +1002,11 @@ func (proxier *Proxier) syncProxyRules() {
     hnsNetworkName := proxier.network.name
     hns := proxier.hns
 
+    var gatewayHnsendpoint *endpointsInfo
+    if proxier.forwardHealthCheckVip {
+        gatewayHnsendpoint, _ = hns.getEndpointByName(proxier.rootHnsEndpointName)
+    }
+
     prevNetworkID := proxier.network.id
     updatedNetwork, err := hns.getNetworkByName(hnsNetworkName)
     if updatedNetwork == nil || updatedNetwork.id != prevNetworkID || isNetworkNotFoundError(err) {
@@ -1319,7 +1338,30 @@ func (proxier *Proxier) syncProxyRules() {
             } else {
                 klog.V(3).InfoS("Skipped creating Hns LoadBalancer for loadBalancer Ingress resources", "lbIngressIP", lbIngressIP)
             }
             lbIngressIP.hnsID = hnsLoadBalancer.hnsID
             klog.V(3).InfoS("Hns LoadBalancer resource created for loadBalancer Ingress resources", "lbIngressIP", lbIngressIP)
+
+            if proxier.forwardHealthCheckVip && gatewayHnsendpoint != nil {
+                nodeport := proxier.healthzPort
+                if svcInfo.HealthCheckNodePort() != 0 {
+                    nodeport = svcInfo.HealthCheckNodePort()
+                }
+                hnsHealthCheckLoadBalancer, err := hns.getLoadBalancer(
+                    []endpointsInfo{*gatewayHnsendpoint},
+                    loadBalancerFlags{isDSR: false, useMUX: svcInfo.preserveDIP, preserveDIP: svcInfo.preserveDIP},
+                    sourceVip,
+                    lbIngressIP.ip,
+                    Enum(svcInfo.Protocol()),
+                    uint16(nodeport),
+                    uint16(nodeport),
+                )
+                if err != nil {
+                    klog.ErrorS(err, "Policy creation failed")
+                    continue
+                }
+                lbIngressIP.healthCheckHnsID = hnsHealthCheckLoadBalancer.hnsID
+                klog.V(3).InfoS("Hns Health Check LoadBalancer resource created for loadBalancer Ingress resources", "ip", lbIngressIP)
+            }
         }
         svcInfo.policyApplied = true
         klog.V(2).InfoS("Policy successfully applied for service", "serviceInfo", svcInfo)
@@ -2,7 +2,7 @@
 // +build windows
 
 /*
-Copyright 2018 The Kubernetes Authors.
+Copyright 2021 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -73,6 +73,15 @@ func (hns fakeHNS) getEndpointByID(id string) (*endpointsInfo, error) {
     return nil, nil
 }
 
+func (hns fakeHNS) getEndpointByName(name string) (*endpointsInfo, error) {
+    return &endpointsInfo{
+        isLocal:    true,
+        macAddress: macAddress,
+        hnsID:      guid,
+        hns:        hns,
+    }, nil
+}
+
 func (hns fakeHNS) getEndpointByIpAddress(ip string, networkName string) (*endpointsInfo, error) {
     _, ipNet, _ := netutils.ParseCIDRSloppy(destinationPrefix)
 
@@ -699,7 +708,6 @@ func TestCreateLoadBalancer(t *testing.T) {
             t.Errorf("%v does not match %v", svcInfo.hnsID, guid)
         }
     }
-
 }
 
 func TestCreateDsrLoadBalancer(t *testing.T) {
@@ -717,6 +725,7 @@ func TestCreateDsrLoadBalancer(t *testing.T) {
        Port:     "p80",
        Protocol: v1.ProtocolTCP,
    }
+   lbIP := "11.21.31.41"
 
    makeServiceMap(proxier,
        makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
@@ -729,6 +738,9 @@ func TestCreateDsrLoadBalancer(t *testing.T) {
                Protocol: v1.ProtocolTCP,
                NodePort: int32(svcNodePort),
            }}
+           svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
+               IP: lbIP,
+           }}
        }),
    )
    tcpProtocol := v1.ProtocolTCP
@@ -761,6 +773,11 @@ func TestCreateDsrLoadBalancer(t *testing.T) {
        if svcInfo.localTrafficDSR != true {
            t.Errorf("Failed to create DSR loadbalancer with local traffic policy")
        }
+       if len(svcInfo.loadBalancerIngressIPs) == 0 {
+           t.Errorf("svcInfo does not have any loadBalancerIngressIPs, %+v", svcInfo)
+       } else if svcInfo.loadBalancerIngressIPs[0].healthCheckHnsID != guid {
+           t.Errorf("The Hns Loadbalancer HealthCheck Id %v does not match %v. ServicePortName %q", svcInfo.loadBalancerIngressIPs[0].healthCheckHnsID, guid, svcPortName.String())
+       }
    }
 }
 