Remove Linux and Windows Kube-proxy Userspace mode

parent 44a0b4e145
commit 7df6c02288
@@ -160,7 +160,7 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) {
 	fs.Var(&utilflag.IPPortVar{Val: &o.config.MetricsBindAddress}, "metrics-bind-address", "The IP address with port for the metrics server to serve on (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. This parameter is ignored if a config file is specified by --config.")
 	fs.BoolVar(&o.config.BindAddressHardFail, "bind-address-hard-fail", o.config.BindAddressHardFail, "If true kube-proxy will treat failure to bind to a port as fatal and exit")
 	fs.Var(utilflag.PortRangeVar{Val: &o.config.PortRange}, "proxy-port-range", "Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) that may be consumed in order to proxy service traffic. If (unspecified, 0, or 0-0) then ports will be randomly chosen.")
-	fs.Var(&o.config.Mode, "proxy-mode", "Which proxy mode to use: 'iptables' (Linux-only), 'ipvs' (Linux-only), 'kernelspace' (Windows-only), or 'userspace' (Linux/Windows, deprecated). The default value is 'iptables' on Linux and 'userspace' on Windows(will be 'kernelspace' in a future release). "+
+	fs.Var(&o.config.Mode, "proxy-mode", "Which proxy mode to use: on Linux this can be 'iptables' (default) or 'ipvs'. On Windows the only supported value is 'kernelspace'."+
 		"This parameter is ignored if a config file is specified by --config.")
 	fs.Var(cliflag.NewMapStringBool(&o.config.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features. "+
 		"Options are:\n"+strings.Join(utilfeature.DefaultFeatureGate.KnownFeatures(), "\n")+"\n"+
@@ -191,7 +191,6 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) {
 		o.config.Conntrack.TCPCloseWaitTimeout.Duration,
 		"NAT timeout for TCP connections in the CLOSE_WAIT state")
 	fs.DurationVar(&o.config.ConfigSyncPeriod.Duration, "config-sync-period", o.config.ConfigSyncPeriod.Duration, "How often configuration from the apiserver is refreshed. Must be greater than 0.")
-	fs.DurationVar(&o.config.UDPIdleTimeout.Duration, "udp-timeout", o.config.UDPIdleTimeout.Duration, "How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace")
 
 	fs.BoolVar(&o.config.IPVS.StrictARP, "ipvs-strict-arp", o.config.IPVS.StrictARP, "Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2")
 	fs.BoolVar(&o.config.IPTables.MasqueradeAll, "masquerade-all", o.config.IPTables.MasqueradeAll, "If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed)")
@@ -539,7 +538,6 @@ type ProxyServer struct {
 	MetricsBindAddress  string
 	BindAddressHardFail bool
 	EnableProfiling     bool
-	UseEndpointSlices   bool
 	OOMScoreAdj         *int32
 	ConfigSyncPeriod    time.Duration
 	HealthzServer       healthcheck.ProxierHealthUpdater
@@ -738,7 +736,7 @@ func (s *ProxyServer) Run() error {
 		options.LabelSelector = labelSelector.String()
 	}))
 
-	// Create configs (i.e. Watches for Services and Endpoints or EndpointSlices)
+	// Create configs (i.e. Watches for Services and EndpointSlices)
 	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
 	// only notify on changes, and the initial update (on process start) may be lost if no handlers
 	// are registered yet.
@@ -746,18 +744,12 @@ func (s *ProxyServer) Run() error {
 	serviceConfig.RegisterEventHandler(s.Proxier)
 	go serviceConfig.Run(wait.NeverStop)
 
-	if endpointsHandler, ok := s.Proxier.(config.EndpointsHandler); ok && !s.UseEndpointSlices {
-		endpointsConfig := config.NewEndpointsConfig(informerFactory.Core().V1().Endpoints(), s.ConfigSyncPeriod)
-		endpointsConfig.RegisterEventHandler(endpointsHandler)
-		go endpointsConfig.Run(wait.NeverStop)
-	} else {
 	endpointSliceConfig := config.NewEndpointSliceConfig(informerFactory.Discovery().V1().EndpointSlices(), s.ConfigSyncPeriod)
 	endpointSliceConfig.RegisterEventHandler(s.Proxier)
 	go endpointSliceConfig.Run(wait.NeverStop)
-	}
 
-	// This has to start after the calls to NewServiceConfig and NewEndpointsConfig because those
-	// functions must configure their shared informer event handlers first.
+	// This has to start after the calls to NewServiceConfig because that
+	// function must configure its shared informer event handlers first.
 	informerFactory.Start(wait.NeverStop)
 
 	if utilfeature.DefaultFeatureGate.Enabled(features.TopologyAwareHints) {
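Aside (not part of the diff): the comment block above encodes a real ordering constraint — event handlers must be registered before the informers are started, or the initial sync (delivered exactly once, at start) is silently lost. A minimal, self-contained Go sketch of that register-then-start pattern; every name here is illustrative, not the kube-proxy or client-go API:

```go
package main

import "fmt"

// eventSource mimics a shared informer: it only notifies handlers that
// were registered before Start() replays the initial state.
type eventSource struct {
	handlers []func(string)
	started  bool
}

func (s *eventSource) Register(h func(string)) {
	if s.started {
		return // too late: the initial sync has already been delivered
	}
	s.handlers = append(s.handlers, h)
}

func (s *eventSource) Start(initial []string) {
	s.started = true
	for _, obj := range initial {
		for _, h := range s.handlers {
			h(obj) // the initial update is delivered exactly once
		}
	}
}

func main() {
	src := &eventSource{}
	src.Register(func(obj string) { fmt.Println("synced:", obj) }) // register first,
	src.Start([]string{"svc/a", "svc/b"})                          // then start
}
```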
@@ -42,7 +42,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	utilnet "k8s.io/apimachinery/pkg/util/net"
 	clientset "k8s.io/client-go/kubernetes"
 	toolswatch "k8s.io/client-go/tools/watch"
 	"k8s.io/component-base/configz"
@@ -55,7 +54,6 @@ import (
 	"k8s.io/kubernetes/pkg/proxy/iptables"
 	"k8s.io/kubernetes/pkg/proxy/ipvs"
 	proxymetrics "k8s.io/kubernetes/pkg/proxy/metrics"
-	"k8s.io/kubernetes/pkg/proxy/userspace"
 	proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
 	utilipset "k8s.io/kubernetes/pkg/util/ipset"
 	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
@@ -157,7 +155,6 @@ func newProxyServer(
 	var ipt [2]utiliptables.Interface
 	dualStack := true // While we assume that node supports, we do further checks below
 
-	if proxyMode != proxyconfigapi.ProxyModeUserspace {
 	// Create iptables handlers for both families, one is already created
 	// Always ordered as IPv4, IPv6
 	if primaryProtocol == utiliptables.ProtocolIPv4 {
@@ -170,11 +167,10 @@ func newProxyServer(
 
 	for _, perFamilyIpt := range ipt {
 		if !perFamilyIpt.Present() {
-			klog.InfoS("kube-proxy running in single-stack mode, this ipFamily is not supported", "ipFamily", perFamilyIpt.Protocol())
+			klog.V(0).InfoS("kube-proxy running in single-stack mode, this ipFamily is not supported", "ipFamily", perFamilyIpt.Protocol())
 			dualStack = false
 		}
 	}
-	}
 
 	if proxyMode == proxyconfigapi.ProxyModeIPTables {
 		klog.InfoS("Using iptables Proxier")
@@ -320,31 +316,6 @@ func newProxyServer(
 			return nil, fmt.Errorf("unable to create proxier: %v", err)
 		}
 		proxymetrics.RegisterMetrics()
-	} else {
-		klog.InfoS("Using userspace Proxier")
-		klog.InfoS("The userspace proxier is now deprecated and will be removed in a future release, please use 'iptables' or 'ipvs' instead")
-
-		// TODO this has side effects that should only happen when Run() is invoked.
-		proxier, err = userspace.NewProxier(
-			userspace.NewLoadBalancerRR(),
-			netutils.ParseIPSloppy(config.BindAddress),
-			iptInterface,
-			execer,
-			*utilnet.ParsePortRangeOrDie(config.PortRange),
-			config.IPTables.SyncPeriod.Duration,
-			config.IPTables.MinSyncPeriod.Duration,
-			config.UDPIdleTimeout.Duration,
-			config.NodePortAddresses,
-		)
-		if err != nil {
-			return nil, fmt.Errorf("unable to create proxier: %v", err)
-		}
-	}
-
-	useEndpointSlices := true
-	if proxyMode == proxyconfigapi.ProxyModeUserspace {
-		// userspace mode doesn't support endpointslice.
-		useEndpointSlices = false
 	}
 
 	return &ProxyServer{
@@ -367,7 +338,6 @@ func newProxyServer(
 		OOMScoreAdj:       config.OOMScoreAdj,
 		ConfigSyncPeriod:  config.ConfigSyncPeriod.Duration,
 		HealthzServer:     healthzServer,
-		UseEndpointSlices: useEndpointSlices,
 	}, nil
 }
 
@@ -571,7 +541,6 @@ func cleanupAndExit() error {
 
 	var encounteredError bool
 	for _, ipt := range ipts {
-		encounteredError = userspace.CleanupLeftovers(ipt) || encounteredError
 		encounteredError = iptables.CleanupLeftovers(ipt) || encounteredError
 		encounteredError = ipvs.CleanupLeftovers(ipvsInterface, ipt, ipsetInterface) || encounteredError
 	}
@@ -120,7 +120,6 @@ metricsBindAddress: "%s"
 mode: "%s"
 oomScoreAdj: 17
 portRange: "2-7"
-udpIdleTimeout: 123ms
 detectLocalMode: "ClusterCIDR"
 detectLocal:
   bridgeInterface: "cbr0"
@@ -263,7 +262,6 @@ nodePortAddresses:
 				Mode:              kubeproxyconfig.ProxyMode(tc.mode),
 				OOMScoreAdj:       pointer.Int32(17),
 				PortRange:         "2-7",
-				UDPIdleTimeout:    metav1.Duration{Duration: 123 * time.Millisecond},
 				NodePortAddresses: []string{"10.20.30.40/16", "fd00:1::0/64"},
 				DetectLocalMode:   kubeproxyconfig.LocalModeClusterCIDR,
 				DetectLocal: kubeproxyconfig.DetectLocalConfiguration{
@@ -457,8 +455,7 @@ mode: ""
 nodePortAddresses: null
 oomScoreAdj: -999
 portRange: ""
-detectLocalMode: "BridgeInterface"
-udpIdleTimeout: 250ms`)
+detectLocalMode: "BridgeInterface"`)
 	if err != nil {
 		return nil, "", fmt.Errorf("unexpected error when writing content to temp kube-proxy config file: %v", err)
 	}
@@ -33,7 +33,6 @@ import (
 
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
-	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/client-go/tools/events"
 	"k8s.io/component-base/configz"
 	"k8s.io/component-base/metrics"
@@ -43,11 +42,7 @@ import (
 	proxyconfigscheme "k8s.io/kubernetes/pkg/proxy/apis/config/scheme"
 	"k8s.io/kubernetes/pkg/proxy/healthcheck"
 	"k8s.io/kubernetes/pkg/proxy/winkernel"
-	"k8s.io/kubernetes/pkg/proxy/winuserspace"
-	utilnetsh "k8s.io/kubernetes/pkg/util/netsh"
 	utilnode "k8s.io/kubernetes/pkg/util/node"
-	"k8s.io/utils/exec"
-	netutils "k8s.io/utils/net"
 )
 
 // NewProxyServer returns a new ProxyServer.
@@ -101,13 +96,17 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, master string
 		healthzPort, _ = strconv.Atoi(port)
 	}
 
+	// Check if Kernel Space can be used.
+	canUseWinKernelProxy, err := winkernel.CanUseWinKernelProxier(winkernel.WindowsKernelCompatTester{})
+	if !canUseWinKernelProxy && err != nil {
+		return nil, err
+	}
+
 	var proxier proxy.Provider
-	proxyMode := getProxyMode(config.Mode, winkernel.WindowsKernelCompatTester{})
+	proxyMode := proxyconfigapi.ProxyModeKernelspace
 	dualStackMode := getDualStackMode(config.Winkernel.NetworkName, winkernel.DualStackCompatTester{})
-	if proxyMode == proxyconfigapi.ProxyModeKernelspace {
-		klog.InfoS("Using Kernelspace Proxier.")
 	if dualStackMode {
-		klog.InfoS("Creating dualStackProxier for Windows kernel.")
+		klog.V(0).InfoS("Creating dualStackProxier for Windows kernel.")
 
 		proxier, err = winkernel.NewDualStackProxier(
 			config.IPTables.SyncPeriod.Duration,
|
|||||||
healthzPort,
|
healthzPort,
|
||||||
)
|
)
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
proxier, err = winkernel.NewProxier(
|
proxier, err = winkernel.NewProxier(
|
||||||
config.IPTables.SyncPeriod.Duration,
|
config.IPTables.SyncPeriod.Duration,
|
||||||
config.IPTables.MinSyncPeriod.Duration,
|
config.IPTables.MinSyncPeriod.Duration,
|
||||||
@ -137,39 +135,12 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, master string
|
|||||||
config.Winkernel,
|
config.Winkernel,
|
||||||
healthzPort,
|
healthzPort,
|
||||||
)
|
)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("unable to create proxier: %v", err)
|
return nil, fmt.Errorf("unable to create proxier: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
winkernel.RegisterMetrics()
|
winkernel.RegisterMetrics()
|
||||||
} else {
|
|
||||||
klog.InfoS("Using userspace Proxier.")
|
|
||||||
klog.InfoS("The userspace proxier is now deprecated and will be removed in a future release, please use 'kernelspace' instead")
|
|
||||||
execer := exec.New()
|
|
||||||
var netshInterface utilnetsh.Interface
|
|
||||||
netshInterface = utilnetsh.New(execer)
|
|
||||||
|
|
||||||
proxier, err = winuserspace.NewProxier(
|
|
||||||
winuserspace.NewLoadBalancerRR(),
|
|
||||||
netutils.ParseIPSloppy(config.BindAddress),
|
|
||||||
netshInterface,
|
|
||||||
*utilnet.ParsePortRangeOrDie(config.PortRange),
|
|
||||||
// TODO @pires replace below with default values, if applicable
|
|
||||||
config.IPTables.SyncPeriod.Duration,
|
|
||||||
config.UDPIdleTimeout.Duration,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to create proxier: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
useEndpointSlices := true
|
|
||||||
if proxyMode == proxyconfigapi.ProxyModeUserspace {
|
|
||||||
// userspace mode doesn't support endpointslice.
|
|
||||||
useEndpointSlices = false
|
|
||||||
}
|
|
||||||
return &ProxyServer{
|
return &ProxyServer{
|
||||||
Client: client,
|
Client: client,
|
||||||
EventClient: eventClient,
|
EventClient: eventClient,
|
||||||
@ -184,7 +155,6 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, master string
|
|||||||
OOMScoreAdj: config.OOMScoreAdj,
|
OOMScoreAdj: config.OOMScoreAdj,
|
||||||
ConfigSyncPeriod: config.ConfigSyncPeriod.Duration,
|
ConfigSyncPeriod: config.ConfigSyncPeriod.Duration,
|
||||||
HealthzServer: healthzServer,
|
HealthzServer: healthzServer,
|
||||||
UseEndpointSlices: useEndpointSlices,
|
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -192,35 +162,10 @@ func getDualStackMode(networkname string, compatTester winkernel.StackCompatTest
|
|||||||
return compatTester.DualStackCompatible(networkname)
|
return compatTester.DualStackCompatible(networkname)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getProxyMode(proxyMode proxyconfigapi.ProxyMode, kcompat winkernel.KernelCompatTester) proxyconfigapi.ProxyMode {
|
|
||||||
if proxyMode == proxyconfigapi.ProxyModeKernelspace {
|
|
||||||
return tryWinKernelSpaceProxy(kcompat)
|
|
||||||
}
|
|
||||||
return proxyconfigapi.ProxyModeUserspace
|
|
||||||
}
|
|
||||||
|
|
||||||
func detectNumCPU() int {
|
func detectNumCPU() int {
|
||||||
return goruntime.NumCPU()
|
return goruntime.NumCPU()
|
||||||
}
|
}
|
||||||
|
|
||||||
func tryWinKernelSpaceProxy(kcompat winkernel.KernelCompatTester) proxyconfigapi.ProxyMode {
|
|
||||||
// Check for Windows Kernel Version if we can support Kernel Space proxy
|
|
||||||
// Check for Windows Version
|
|
||||||
|
|
||||||
// guaranteed false on error, error only necessary for debugging
|
|
||||||
useWinKernelProxy, err := winkernel.CanUseWinKernelProxier(kcompat)
|
|
||||||
if err != nil {
|
|
||||||
klog.ErrorS(err, "Can't determine whether to use windows kernel proxy, using userspace proxier")
|
|
||||||
return proxyconfigapi.ProxyModeUserspace
|
|
||||||
}
|
|
||||||
if useWinKernelProxy {
|
|
||||||
return proxyconfigapi.ProxyModeKernelspace
|
|
||||||
}
|
|
||||||
// Fallback.
|
|
||||||
klog.V(1).InfoS("Can't use winkernel proxy, using userspace proxier")
|
|
||||||
return proxyconfigapi.ProxyModeUserspace
|
|
||||||
}
|
|
||||||
|
|
||||||
// cleanupAndExit cleans up after a previous proxy run
|
// cleanupAndExit cleans up after a previous proxy run
|
||||||
func cleanupAndExit() error {
|
func cleanupAndExit() error {
|
||||||
return errors.New("--cleanup-and-exit is not implemented on Windows")
|
return errors.New("--cleanup-and-exit is not implemented on Windows")
|
||||||
|
pkg/generated/openapi/zz_generated.openapi.go (generated; 9 changed lines)
@@ -51138,13 +51138,6 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyConfiguration(ref common.ReferenceCallback) common.OpenAPIDefinition {
 							Format: "",
 						},
 					},
-					"udpIdleTimeout": {
-						SchemaProps: spec.SchemaProps{
-							Description: "udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxyMode=userspace.",
-							Default:     0,
-							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
-						},
-					},
 					"conntrack": {
 						SchemaProps: spec.SchemaProps{
 							Description: "conntrack contains conntrack-related configuration options.",
@@ -51205,7 +51198,7 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyConfiguration(ref common.ReferenceCallback) common.OpenAPIDefinition {
 					},
 				},
 			},
-			Required: []string{"bindAddress", "healthzBindAddress", "metricsBindAddress", "bindAddressHardFail", "enableProfiling", "clusterCIDR", "hostnameOverride", "clientConnection", "iptables", "ipvs", "oomScoreAdj", "mode", "portRange", "udpIdleTimeout", "conntrack", "configSyncPeriod", "nodePortAddresses", "winkernel", "showHiddenMetricsForVersion", "detectLocalMode", "detectLocal"},
+			Required: []string{"bindAddress", "healthzBindAddress", "metricsBindAddress", "bindAddressHardFail", "enableProfiling", "clusterCIDR", "hostnameOverride", "clientConnection", "iptables", "ipvs", "oomScoreAdj", "mode", "portRange", "conntrack", "configSyncPeriod", "nodePortAddresses", "winkernel", "showHiddenMetricsForVersion", "detectLocalMode", "detectLocal"},
 		},
 	},
 	Dependencies: []string{
@@ -126,7 +126,6 @@ func NewHollowProxyOrDie(
 			Recorder:          recorder,
 			ProxyMode:         "fake",
 			NodeRef:           nodeRef,
-			UseEndpointSlices: true,
 			OOMScoreAdj:       utilpointer.Int32Ptr(0),
 			ConfigSyncPeriod:  30 * time.Second,
 		},
@@ -42,7 +42,6 @@ nodePortAddresses: null
 oomScoreAdj: -999
 portRange: ""
 showHiddenMetricsForVersion: ""
-udpIdleTimeout: 250ms
 winkernel:
   enableDSR: false
   forwardHealthCheckVip: false
@@ -42,7 +42,6 @@ nodePortAddresses: null
 oomScoreAdj: -999
 portRange: ""
 showHiddenMetricsForVersion: ""
-udpIdleTimeout: 250ms
 winkernel:
   enableDSR: false
   forwardHealthCheckVip: false
@@ -164,9 +164,6 @@ type KubeProxyConfiguration struct {
 	// portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
 	// in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
 	PortRange string
-	// udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
-	// Must be greater than 0. Only applicable for proxyMode=userspace.
-	UDPIdleTimeout metav1.Duration
 	// conntrack contains conntrack-related configuration options.
 	Conntrack KubeProxyConntrackConfiguration
 	// configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater
@@ -190,24 +187,18 @@ type KubeProxyConfiguration struct {
 	DetectLocal DetectLocalConfiguration
 }
 
-// ProxyMode represents modes used by the Kubernetes proxy server. Currently, three modes of proxy are available in
-// Linux platform: 'userspace' (older, going to be EOL), 'iptables' (newer, faster), 'ipvs'(newest, better in performance
-// and scalability).
+// ProxyMode represents modes used by the Kubernetes proxy server.
 //
-// Two modes of proxy are available in Windows platform: 'userspace'(older, stable) and 'kernelspace' (newer, faster).
+// Currently, two modes of proxy are available in Linux platform: 'iptables' and 'ipvs'.
+// One mode of proxy is available in Windows platform: 'kernelspace'.
 //
-// In Linux platform, if proxy mode is blank, use the best-available proxy (currently iptables, but may change in the
-// future). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are
-// insufficient, this always falls back to the userspace proxy. IPVS mode will be enabled when proxy mode is set to 'ipvs',
-// and the fall back path is firstly iptables and then userspace.
-//
-// In Windows platform, if proxy mode is blank, use the best-available proxy (currently userspace, but may change in the
-// future). If winkernel proxy is selected, regardless of how, but the Windows kernel can't support this mode of proxy,
-// this always falls back to the userspace proxy.
+// If the proxy mode is unspecified, the best-available proxy mode will be used (currently this
+// is `iptables` on Linux and `kernelspace` on Windows). If the selected proxy mode cannot be
+// used (due to lack of kernel support, missing userspace components, etc) then kube-proxy
+// will exit with an error.
 type ProxyMode string
 
 const (
-	ProxyModeUserspace   ProxyMode = "userspace"
 	ProxyModeIPTables    ProxyMode = "iptables"
 	ProxyModeIPVS        ProxyMode = "ipvs"
 	ProxyModeKernelspace ProxyMode = "kernelspace"
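Aside (not part of the diff): the rewritten doc comment above describes the new contract — a blank mode resolves to a per-platform default, and an unsupported mode is now a hard error rather than a silent fallback to userspace. A standalone Go sketch of that contract (illustrative only; this is not the actual kube-proxy resolution code):

```go
package main

import (
	"fmt"
	"runtime"
)

type ProxyMode string

const (
	ProxyModeIPTables    ProxyMode = "iptables"
	ProxyModeIPVS        ProxyMode = "ipvs"
	ProxyModeKernelspace ProxyMode = "kernelspace"
)

// resolve picks the best-available default for a blank mode and rejects
// anything the platform cannot run, instead of falling back to userspace.
func resolve(mode ProxyMode) (ProxyMode, error) {
	if mode == "" {
		if runtime.GOOS == "windows" {
			return ProxyModeKernelspace, nil
		}
		return ProxyModeIPTables, nil
	}
	switch {
	case runtime.GOOS == "windows" && mode == ProxyModeKernelspace:
		return mode, nil
	case runtime.GOOS != "windows" && (mode == ProxyModeIPTables || mode == ProxyModeIPVS):
		return mode, nil
	}
	return "", fmt.Errorf("proxy mode %q is not supported on %s", mode, runtime.GOOS)
}

func main() {
	mode, err := resolve("") // blank: defaults per platform
	fmt.Println(mode, err)
}
```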
@@ -67,10 +67,6 @@ func SetDefaults_KubeProxyConfiguration(obj *kubeproxyconfigv1alpha1.KubeProxyCo
 	if obj.IPVS.SyncPeriod.Duration == 0 {
 		obj.IPVS.SyncPeriod = metav1.Duration{Duration: 30 * time.Second}
 	}
-	zero := metav1.Duration{}
-	if obj.UDPIdleTimeout == zero {
-		obj.UDPIdleTimeout = metav1.Duration{Duration: 250 * time.Millisecond}
-	}
 
 	if obj.Conntrack.MaxPerCore == nil {
 		obj.Conntrack.MaxPerCore = pointer.Int32(32 * 1024)
@@ -59,7 +59,6 @@ func TestDefaultsKubeProxyConfiguration(t *testing.T) {
 					SyncPeriod: metav1.Duration{Duration: 30 * time.Second},
 				},
 				OOMScoreAdj:    &oomScore,
-				UDPIdleTimeout: metav1.Duration{Duration: 250 * time.Millisecond},
 				Conntrack: kubeproxyconfigv1alpha1.KubeProxyConntrackConfiguration{
 					MaxPerCore: &ctMaxPerCore,
 					Min:        &ctMin,
@@ -95,7 +94,6 @@ func TestDefaultsKubeProxyConfiguration(t *testing.T) {
 					SyncPeriod: metav1.Duration{Duration: 30 * time.Second},
 				},
 				OOMScoreAdj:    &oomScore,
-				UDPIdleTimeout: metav1.Duration{Duration: 250 * time.Millisecond},
 				Conntrack: kubeproxyconfigv1alpha1.KubeProxyConntrackConfiguration{
 					MaxPerCore: &ctMaxPerCore,
 					Min:        &ctMin,
@@ -145,7 +145,6 @@ func autoConvert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguratio
 	out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj))
 	out.Mode = config.ProxyMode(in.Mode)
 	out.PortRange = in.PortRange
-	out.UDPIdleTimeout = in.UDPIdleTimeout
 	if err := Convert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil {
 		return err
 	}
@@ -188,7 +187,6 @@ func autoConvert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguratio
 	out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj))
 	out.Mode = v1alpha1.ProxyMode(in.Mode)
 	out.PortRange = in.PortRange
-	out.UDPIdleTimeout = in.UDPIdleTimeout
 	if err := Convert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil {
 		return err
 	}
@@ -57,10 +57,6 @@ func Validate(config *kubeproxyconfig.KubeProxyConfiguration) field.ErrorList {
 		allErrs = append(allErrs, field.Invalid(newPath.Child("OOMScoreAdj"), *config.OOMScoreAdj, "must be within the range [-1000, 1000]"))
 	}
 
-	if config.UDPIdleTimeout.Duration <= 0 {
-		allErrs = append(allErrs, field.Invalid(newPath.Child("UDPIdleTimeout"), config.UDPIdleTimeout, "must be greater than 0"))
-	}
-
 	if config.ConfigSyncPeriod.Duration <= 0 {
 		allErrs = append(allErrs, field.Invalid(newPath.Child("ConfigSyncPeriod"), config.ConfigSyncPeriod, "must be greater than 0"))
 	}
@@ -185,7 +181,6 @@ func validateProxyMode(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList {
 
 func validateProxyModeLinux(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList {
 	validModes := sets.NewString(
-		string(kubeproxyconfig.ProxyModeUserspace),
 		string(kubeproxyconfig.ProxyModeIPTables),
 		string(kubeproxyconfig.ProxyModeIPVS),
 	)
@@ -200,7 +195,6 @@ func validateProxyModeLinux(mode kubeproxyconfig.ProxyMode, fldPath *field.Path)
 
 func validateProxyModeWindows(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList {
 	validModes := sets.NewString(
-		string(kubeproxyconfig.ProxyModeUserspace),
 		string(kubeproxyconfig.ProxyModeKernelspace),
 	)
 
@@ -208,7 +202,7 @@ func validateProxyModeWindows(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList {
 		return nil
 	}
 
-	errMsg := fmt.Sprintf("must be %s or blank (blank means the most-available proxy [currently userspace(will be 'kernelspace' in a future release)])", strings.Join(validModes.List(), ","))
+	errMsg := fmt.Sprintf("must be %s or blank (blank means the most-available proxy [currently 'kernelspace'])", strings.Join(validModes.List(), ","))
 	return field.ErrorList{field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)}
 }
 
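Aside (not part of the diff): after this change both validators accept only the surviving modes, plus blank. The same shape in a standalone sketch, using a plain map where the real code uses `sets.NewString` (all names here are illustrative):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// validateProxyMode mirrors the post-change validators: blank is allowed
// (it means "use the best-available mode"), anything else must be listed.
func validateProxyMode(mode string, valid map[string]bool) error {
	if mode == "" || valid[mode] {
		return nil
	}
	names := make([]string, 0, len(valid))
	for m := range valid {
		names = append(names, m)
	}
	sort.Strings(names)
	return fmt.Errorf("must be %s or blank", strings.Join(names, ","))
}

func main() {
	linuxModes := map[string]bool{"iptables": true, "ipvs": true}
	fmt.Println(validateProxyMode("userspace", linuxModes)) // now rejected
	fmt.Println(validateProxyMode("", linuxModes))          // ok: defaulted later
}
```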
@@ -42,7 +42,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				HealthzBindAddress: "0.0.0.0:10256",
 				MetricsBindAddress: "127.0.0.1:10249",
 				ClusterCIDR:        "192.168.59.0/24",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -66,7 +65,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				HealthzBindAddress: "0.0.0.0:10256",
 				MetricsBindAddress: "127.0.0.1:10249",
 				ClusterCIDR:        "192.168.59.0/24",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -85,7 +83,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				HealthzBindAddress: "",
 				MetricsBindAddress: "127.0.0.1:10249",
 				ClusterCIDR:        "192.168.59.0/24",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -104,7 +101,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				HealthzBindAddress: "",
 				MetricsBindAddress: "[::1]:10249",
 				ClusterCIDR:        "fd00:192:168:59::/64",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -123,7 +119,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				HealthzBindAddress: "0.0.0.0:12345",
 				MetricsBindAddress: "127.0.0.1:10249",
 				ClusterCIDR:        "192.168.59.0/24",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -142,7 +137,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				HealthzBindAddress: "0.0.0.0:12345",
 				MetricsBindAddress: "127.0.0.1:10249",
 				ClusterCIDR:        "fd00:192:168::/64",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -161,7 +155,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				HealthzBindAddress: "0.0.0.0:12345",
 				MetricsBindAddress: "127.0.0.1:10249",
 				ClusterCIDR:        "192.168.59.0/24,fd00:192:168::/64",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -180,7 +173,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				HealthzBindAddress: "0.0.0.0:12345",
 				MetricsBindAddress: "127.0.0.1:10249",
 				ClusterCIDR:        "192.168.59.0/24",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -203,7 +195,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				HealthzBindAddress: "0.0.0.0:12345",
 				MetricsBindAddress: "127.0.0.1:10249",
 				ClusterCIDR:        "192.168.59.0/24",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -240,7 +231,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				HealthzBindAddress: "0.0.0.0:10256",
 				MetricsBindAddress: "127.0.0.1:10249",
 				ClusterCIDR:        "192.168.59.0/24",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -262,7 +252,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				HealthzBindAddress: "0.0.0.0",
 				MetricsBindAddress: "127.0.0.1:10249",
 				ClusterCIDR:        "192.168.59.0/24",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -284,7 +273,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				HealthzBindAddress: "0.0.0.0:12345",
 				MetricsBindAddress: "127.0.0.1",
 				ClusterCIDR:        "192.168.59.0/24",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -306,7 +294,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				HealthzBindAddress: "0.0.0.0:12345",
 				MetricsBindAddress: "127.0.0.1:10249",
 				ClusterCIDR:        "192.168.59.0",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -328,7 +315,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				HealthzBindAddress: "0.0.0.0:12345",
 				MetricsBindAddress: "127.0.0.1:10249",
 				ClusterCIDR:        "192.168.59.0/24,fd00:192:168::/64,10.0.0.0/16",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -344,35 +330,12 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				},
 			},
 			expectedErrs: field.ErrorList{field.Invalid(newPath.Child("ClusterCIDR"), "192.168.59.0/24,fd00:192:168::/64,10.0.0.0/16", "only one CIDR allowed or a valid DualStack CIDR (e.g. 10.100.0.0/16,fde4:8dba:82e1::/48)")},
 		},
-		"UDPIdleTimeout must be > 0": {
-			config: kubeproxyconfig.KubeProxyConfiguration{
-				BindAddress:        "10.10.12.11",
-				HealthzBindAddress: "0.0.0.0:12345",
-				MetricsBindAddress: "127.0.0.1:10249",
-				ClusterCIDR:        "192.168.59.0/24",
-				UDPIdleTimeout:     metav1.Duration{Duration: -1 * time.Second},
-				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
-				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
-					MasqueradeAll: true,
-					SyncPeriod:    metav1.Duration{Duration: 5 * time.Second},
-					MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second},
-				},
-				Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{
-					MaxPerCore:            pointer.Int32(1),
-					Min:                   pointer.Int32(1),
-					TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second},
-					TCPCloseWaitTimeout:   &metav1.Duration{Duration: 5 * time.Second},
-				},
-			},
-			expectedErrs: field.ErrorList{field.Invalid(newPath.Child("UDPIdleTimeout"), metav1.Duration{Duration: -1 * time.Second}, "must be greater than 0")},
-		},
 		"ConfigSyncPeriod must be > 0": {
 			config: kubeproxyconfig.KubeProxyConfiguration{
 				BindAddress:        "10.10.12.11",
 				HealthzBindAddress: "0.0.0.0:12345",
 				MetricsBindAddress: "127.0.0.1:10249",
 				ClusterCIDR:        "192.168.59.0/24",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: -1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -394,7 +357,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				HealthzBindAddress: "0.0.0.0:10256",
 				MetricsBindAddress: "127.0.0.1:10249",
 				ClusterCIDR:        "192.168.59.0/24",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -418,7 +380,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				HealthzBindAddress: "0.0.0.0:12345",
 				MetricsBindAddress: "127.0.0.1:10249",
 				ClusterCIDR:        "192.168.59.0/24",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -444,7 +405,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) {
 				HealthzBindAddress: "0.0.0.0:12345",
 				MetricsBindAddress: "127.0.0.1:10249",
 				ClusterCIDR:        "192.168.59.0/24",
-				UDPIdleTimeout:     metav1.Duration{Duration: 1 * time.Second},
 				ConfigSyncPeriod:   metav1.Duration{Duration: 1 * time.Second},
 				IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{
 					MasqueradeAll: true,
@@ -751,17 +711,13 @@ func TestValidateProxyMode(t *testing.T) {
 		mode         kubeproxyconfig.ProxyMode
 		expectedErrs field.ErrorList
 	}{
-		"valid Userspace mode": {
-			mode:         kubeproxyconfig.ProxyModeUserspace,
-			expectedErrs: field.ErrorList{},
-		},
 		"blank mode should default": {
 			mode:         kubeproxyconfig.ProxyMode(""),
 			expectedErrs: field.ErrorList{},
 		},
 		"invalid mode non-existent": {
 			mode:         kubeproxyconfig.ProxyMode("non-existing"),
-			expectedErrs: field.ErrorList{field.Invalid(newPath.Child("ProxyMode"), "non-existing", "must be iptables,ipvs,userspace or blank (blank means the best-available proxy [currently iptables])")},
+			expectedErrs: field.ErrorList{field.Invalid(newPath.Child("ProxyMode"), "non-existing", "must be iptables,ipvs or blank (blank means the best-available proxy [currently iptables])")},
 		},
 	}
 	for _, testCase := range testCases {
pkg/proxy/apis/config/zz_generated.deepcopy.go (generated; 1 changed line)
@@ -83,7 +83,6 @@ func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) {
 		*out = new(int32)
 		**out = **in
 	}
-	out.UDPIdleTimeout = in.UDPIdleTimeout
 	in.Conntrack.DeepCopyInto(&out.Conntrack)
 	out.ConfigSyncPeriod = in.ConfigSyncPeriod
 	if in.NodePortAddresses != nil {
@@ -1,10 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-  - sig-network-approvers
-reviewers:
-  - sig-network-reviewers
-  - lavalamp
-  - smarterclayton
-labels:
-  - sig/network
@@ -1,37 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package userspace
-
-import (
-	"k8s.io/api/core/v1"
-	"k8s.io/kubernetes/pkg/proxy"
-	proxyconfig "k8s.io/kubernetes/pkg/proxy/config"
-	"net"
-)
-
-// LoadBalancer is an interface for distributing incoming requests to service endpoints.
-type LoadBalancer interface {
-	// NextEndpoint returns the endpoint to handle a request for the given
-	// service-port and source address.
-	NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error)
-	NewService(service proxy.ServicePortName, sessionAffinityType v1.ServiceAffinity, stickyMaxAgeSeconds int) error
-	DeleteService(service proxy.ServicePortName)
-	CleanupStaleStickySessions(service proxy.ServicePortName)
-	ServiceHasEndpoints(service proxy.ServicePortName) bool
-
-	proxyconfig.EndpointsHandler
-}
@@ -1,158 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package userspace

import (
	"errors"
	"math/big"
	"math/rand"
	"sync"
	"time"

	"k8s.io/apimachinery/pkg/util/net"
	"k8s.io/apimachinery/pkg/util/wait"
)

var (
	errPortRangeNoPortsRemaining = errors.New("port allocation failed; there are no remaining ports left to allocate in the accepted range")
)

type PortAllocator interface {
	AllocateNext() (int, error)
	Release(int)
}

// randomAllocator is a PortAllocator that always yields port 0, deferring
// the choice of an actual free port to the operating system.
type randomAllocator struct{}

// AllocateNext always returns 0.
func (r *randomAllocator) AllocateNext() (int, error) {
	return 0, nil
}

// Release is a noop.
func (r *randomAllocator) Release(_ int) {
	// noop
}

// newPortAllocator builds a PortAllocator for a given PortRange. If the PortRange is empty
// then a random port allocator is returned; otherwise, a new range-based allocator
// is returned.
func newPortAllocator(r net.PortRange) PortAllocator {
	if r.Base == 0 {
		return &randomAllocator{}
	}
	return newPortRangeAllocator(r, true)
}

const (
	portsBufSize         = 16
	nextFreePortCooldown = 500 * time.Millisecond
	allocateNextTimeout  = 1 * time.Second
)

type rangeAllocator struct {
	net.PortRange
	ports chan int
	used  big.Int
	lock  sync.Mutex
	rand  *rand.Rand
}

func newPortRangeAllocator(r net.PortRange, autoFill bool) PortAllocator {
	if r.Base == 0 || r.Size == 0 {
		panic("illegal argument: may not specify an empty port range")
	}
	ra := &rangeAllocator{
		PortRange: r,
		ports:     make(chan int, portsBufSize),
		rand:      rand.New(rand.NewSource(time.Now().UnixNano())),
	}
	if autoFill {
		go wait.Forever(func() { ra.fillPorts() }, nextFreePortCooldown)
	}
	return ra
}

// fillPorts loops, searching for the next free port and, when one is found,
// pushing it into the ports buffer. It blocks while the buffer is full and
// returns only once no free ports remain.
func (r *rangeAllocator) fillPorts() {
	for {
		if !r.fillPortsOnce() {
			return
		}
	}
}

func (r *rangeAllocator) fillPortsOnce() bool {
	port := r.nextFreePort()
	if port == -1 {
		return false
	}
	r.ports <- port
	return true
}

// nextFreePort finds a free port, first picking a random port. If that port is
// already in use, the port range is scanned sequentially until either a port is
// found or the scan completes unsuccessfully. An unsuccessful scan returns -1.
func (r *rangeAllocator) nextFreePort() int {
	r.lock.Lock()
	defer r.lock.Unlock()

	// choose random port
	j := r.rand.Intn(r.Size)
	if b := r.used.Bit(j); b == 0 {
		r.used.SetBit(&r.used, j, 1)
		return j + r.Base
	}

	// search sequentially
	for i := j + 1; i < r.Size; i++ {
		if b := r.used.Bit(i); b == 0 {
			r.used.SetBit(&r.used, i, 1)
			return i + r.Base
		}
	}
	for i := 0; i < j; i++ {
		if b := r.used.Bit(i); b == 0 {
			r.used.SetBit(&r.used, i, 1)
			return i + r.Base
		}
	}
	return -1
}

func (r *rangeAllocator) AllocateNext() (port int, err error) {
	select {
	case port = <-r.ports:
	case <-time.After(allocateNextTimeout):
		err = errPortRangeNoPortsRemaining
	}
	return
}

func (r *rangeAllocator) Release(port int) {
	port -= r.Base
	if port < 0 || port >= r.Size {
		return
	}
	r.lock.Lock()
	defer r.lock.Unlock()
	r.used.SetBit(&r.used, port, 0)
}
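As a usage note, here is a small hedged sketch of the allocator's happy path under the types defined above. The function name examplePortAllocation is hypothetical and not part of the original file.

func examplePortAllocation() error {
	// A tiny four-port range; a Base of 0 would instead return the
	// randomAllocator, which defers port choice to the operating system.
	pa := newPortAllocator(net.PortRange{Base: 30000, Size: 4})
	port, err := pa.AllocateNext()
	if err != nil {
		// Every port stayed claimed for longer than allocateNextTimeout.
		return err
	}
	defer pa.Release(port) // hand the port back for later callers
	return nil
}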
@@ -1,178 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package userspace

import (
	"reflect"
	"testing"

	"k8s.io/apimachinery/pkg/util/net"
)

func TestRangeAllocatorEmpty(t *testing.T) {
	r := &net.PortRange{}
	r.Set("0-0")
	defer func() {
		if rv := recover(); rv == nil {
			t.Fatalf("expected panic because of empty port range: %#v", r)
		}
	}()
	_ = newPortRangeAllocator(*r, true)
}

func TestRangeAllocatorFullyAllocated(t *testing.T) {
	r := &net.PortRange{}
	r.Set("1-1")
	// Don't auto-fill ports; we'll manually turn the crank
	pra := newPortRangeAllocator(*r, false)
	a := pra.(*rangeAllocator)

	// Fill in the one available port
	if !a.fillPortsOnce() {
		t.Fatalf("Expected to be able to fill ports")
	}

	// There should be no ports available
	if a.fillPortsOnce() {
		t.Fatalf("Expected to be unable to fill ports")
	}

	p, err := a.AllocateNext()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if p != 1 {
		t.Fatalf("unexpected allocated port: %d", p)
	}

	a.lock.Lock()
	if bit := a.used.Bit(p - a.Base); bit != 1 {
		a.lock.Unlock()
		t.Fatalf("unexpected used bit for allocated port: %d", p)
	}
	a.lock.Unlock()

	_, err = a.AllocateNext()
	if err == nil {
		t.Fatalf("expected error because of fully-allocated range")
	}

	a.Release(p)
	a.lock.Lock()
	if bit := a.used.Bit(p - a.Base); bit != 0 {
		a.lock.Unlock()
		t.Fatalf("unexpected used bit for released port: %d", p)
	}
	a.lock.Unlock()

	// Fill in the one available port
	if !a.fillPortsOnce() {
		t.Fatalf("Expected to be able to fill ports")
	}

	p, err = a.AllocateNext()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if p != 1 {
		t.Fatalf("unexpected allocated port: %d", p)
	}
	a.lock.Lock()
	if bit := a.used.Bit(p - a.Base); bit != 1 {
		a.lock.Unlock()
		t.Fatalf("unexpected used bit for allocated port: %d", p)
	}
	a.lock.Unlock()

	_, err = a.AllocateNext()
	if err == nil {
		t.Fatalf("expected error because of fully-allocated range")
	}
}

func TestRangeAllocator_RandomishAllocation(t *testing.T) {
	r := &net.PortRange{}
	r.Set("1-100")
	pra := newPortRangeAllocator(*r, false)
	a := pra.(*rangeAllocator)

	// allocate all the ports
	var err error
	ports := make([]int, 100)
	for i := 0; i < 100; i++ {
		if !a.fillPortsOnce() {
			t.Fatalf("Expected to be able to fill ports")
		}
		ports[i], err = a.AllocateNext()
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if ports[i] < 1 || ports[i] > 100 {
			t.Fatalf("unexpected allocated port: %d", ports[i])
		}
		a.lock.Lock()
		if bit := a.used.Bit(ports[i] - a.Base); bit != 1 {
			a.lock.Unlock()
			t.Fatalf("unexpected used bit for allocated port: %d", ports[i])
		}
		a.lock.Unlock()
	}

	if a.fillPortsOnce() {
		t.Fatalf("Expected to be unable to fill ports")
	}

	// release them all
	for i := 0; i < 100; i++ {
		a.Release(ports[i])
		a.lock.Lock()
		if bit := a.used.Bit(ports[i] - a.Base); bit != 0 {
			a.lock.Unlock()
			t.Fatalf("unexpected used bit for released port: %d", ports[i])
		}
		a.lock.Unlock()
	}

	// allocate the ports again
	rports := make([]int, 100)
	for i := 0; i < 100; i++ {
		if !a.fillPortsOnce() {
			t.Fatalf("Expected to be able to fill ports")
		}
		rports[i], err = a.AllocateNext()
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if rports[i] < 1 || rports[i] > 100 {
			t.Fatalf("unexpected allocated port: %d", rports[i])
		}
		a.lock.Lock()
		if bit := a.used.Bit(rports[i] - a.Base); bit != 1 {
			a.lock.Unlock()
			t.Fatalf("unexpected used bit for allocated port: %d", rports[i])
		}
		a.lock.Unlock()
	}

	if a.fillPortsOnce() {
		t.Fatalf("Expected to be unable to fill ports")
	}

	if reflect.DeepEqual(ports, rports) {
		t.Fatalf("expected re-allocated ports to be in a somewhat random order")
	}
}
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,304 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package userspace

import (
	"fmt"
	"io"
	"net"
	"strconv"
	"strings"
	"sync"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/proxy"
)

// ProxySocket is an abstraction over the TCP/UDP sockets that are proxied.
type ProxySocket interface {
	// Addr gets the net.Addr for a ProxySocket.
	Addr() net.Addr
	// Close stops the ProxySocket from accepting incoming connections.
	// Each implementation should comment on the impact of calling Close
	// while sessions are active.
	Close() error
	// ProxyLoop proxies incoming connections for the specified service to the service endpoints.
	ProxyLoop(service proxy.ServicePortName, info *ServiceInfo, loadBalancer LoadBalancer)
	// ListenPort returns the host port that the ProxySocket is listening on.
	ListenPort() int
}

func newProxySocket(protocol v1.Protocol, ip net.IP, port int) (ProxySocket, error) {
	host := ""
	if ip != nil {
		host = ip.String()
	}

	switch strings.ToUpper(string(protocol)) {
	case "TCP":
		listener, err := net.Listen("tcp", net.JoinHostPort(host, strconv.Itoa(port)))
		if err != nil {
			return nil, err
		}
		return &tcpProxySocket{Listener: listener, port: port}, nil
	case "UDP":
		addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, strconv.Itoa(port)))
		if err != nil {
			return nil, err
		}
		conn, err := net.ListenUDP("udp", addr)
		if err != nil {
			return nil, err
		}
		return &udpProxySocket{UDPConn: conn, port: port}, nil
	case "SCTP":
		return nil, fmt.Errorf("SCTP is not supported for user space proxy")
	}
	return nil, fmt.Errorf("unknown protocol %q", protocol)
}

// EndpointDialTimeouts is the sequence of timeouts used when dialing a backend endpoint.
var EndpointDialTimeouts = []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 2 * time.Second}

// tcpProxySocket implements ProxySocket. Close() is implemented by net.Listener. When Close() is called,
// no new connections are allowed but existing connections are left untouched.
type tcpProxySocket struct {
	net.Listener
	port int
}

func (tcp *tcpProxySocket) ListenPort() int {
	return tcp.port
}

// TryConnectEndpoints attempts to connect to the next available endpoint for the given service, cycling
// through until it is able to successfully connect, or it has tried with all timeouts in EndpointDialTimeouts.
func TryConnectEndpoints(service proxy.ServicePortName, srcAddr net.Addr, protocol string, loadBalancer LoadBalancer) (out net.Conn, err error) {
	sessionAffinityReset := false
	for _, dialTimeout := range EndpointDialTimeouts {
		endpoint, err := loadBalancer.NextEndpoint(service, srcAddr, sessionAffinityReset)
		if err != nil {
			klog.ErrorS(err, "Couldn't find an endpoint for service", "service", service)
			return nil, err
		}
		klog.V(3).InfoS("Mapped service to endpoint", "service", service, "endpoint", endpoint)
		// TODO: This could spin up a new goroutine to make the outbound connection,
		// and keep accepting inbound traffic.
		outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout)
		if err != nil {
			if isTooManyFDsError(err) {
				panic("Dial failed: " + err.Error())
			}
			klog.ErrorS(err, "Dial failed")
			sessionAffinityReset = true
			continue
		}
		return outConn, nil
	}
	return nil, fmt.Errorf("failed to connect to an endpoint")
}

func (tcp *tcpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *ServiceInfo, loadBalancer LoadBalancer) {
	for {
		if !myInfo.IsAlive() {
			// The service port was closed or replaced.
			return
		}
		// Block until a connection is made.
		inConn, err := tcp.Accept()
		if err != nil {
			if isTooManyFDsError(err) {
				panic("Accept failed: " + err.Error())
			}

			if isClosedError(err) {
				return
			}
			if !myInfo.IsAlive() {
				// The service port was just closed, so the accept failure is expected.
				return
			}
			klog.ErrorS(err, "Accept failed")
			continue
		}
		klog.V(3).InfoS("Accepted TCP connection from remote", "remoteAddress", inConn.RemoteAddr(), "localAddress", inConn.LocalAddr())
		outConn, err := TryConnectEndpoints(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", loadBalancer)
		if err != nil {
			klog.ErrorS(err, "Failed to connect to balancer")
			inConn.Close()
			continue
		}
		// Spin up an async copy loop.
		go ProxyTCP(inConn.(*net.TCPConn), outConn.(*net.TCPConn))
	}
}

// ProxyTCP proxies data bi-directionally between in and out.
func ProxyTCP(in, out *net.TCPConn) {
	var wg sync.WaitGroup
	wg.Add(2)
	klog.V(4).InfoS("Creating proxy between remote and local addresses",
		"inRemoteAddress", in.RemoteAddr(), "inLocalAddress", in.LocalAddr(), "outLocalAddress", out.LocalAddr(), "outRemoteAddress", out.RemoteAddr())
	go copyBytes("from backend", in, out, &wg)
	go copyBytes("to backend", out, in, &wg)
	wg.Wait()
}

func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) {
	defer wg.Done()
	klog.V(4).InfoS("Copying remote address bytes", "direction", direction, "sourceRemoteAddress", src.RemoteAddr(), "destinationRemoteAddress", dest.RemoteAddr())
	n, err := io.Copy(dest, src)
	if err != nil {
		if !isClosedError(err) {
			klog.ErrorS(err, "I/O error occurred")
		}
	}
	klog.V(4).InfoS("Copied remote address bytes", "bytes", n, "direction", direction, "sourceRemoteAddress", src.RemoteAddr(), "destinationRemoteAddress", dest.RemoteAddr())
	dest.Close()
	src.Close()
}

// udpProxySocket implements ProxySocket. Close() is implemented by net.UDPConn. When Close() is called,
// no new connections are allowed and existing connections are broken.
// TODO: We could lame-duck this ourselves, if it becomes important.
type udpProxySocket struct {
	*net.UDPConn
	port int
}

func (udp *udpProxySocket) ListenPort() int {
	return udp.port
}

func (udp *udpProxySocket) Addr() net.Addr {
	return udp.LocalAddr()
}

// ClientCache holds all the known UDP clients that have not timed out.
type ClientCache struct {
	Mu      sync.Mutex
	Clients map[string]net.Conn // addr string -> connection
}

func newClientCache() *ClientCache {
	return &ClientCache{Clients: map[string]net.Conn{}}
}

func (udp *udpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *ServiceInfo, loadBalancer LoadBalancer) {
	var buffer [4096]byte // 4KiB should be enough for most whole packets
	for {
		if !myInfo.IsAlive() {
			// The service port was closed or replaced.
			break
		}

		// Block until data arrives.
		// TODO: Accumulate a histogram of n or something, to fine tune the buffer size.
		n, cliAddr, err := udp.ReadFrom(buffer[0:])
		if err != nil {
			if e, ok := err.(net.Error); ok {
				if e.Temporary() {
					klog.V(1).ErrorS(err, "ReadFrom had a temporary failure")
					continue
				}
			}
			klog.ErrorS(err, "ReadFrom failed, exiting ProxyLoop")
			break
		}
		// If this is a client we know already, reuse the connection and goroutine.
		svrConn, err := udp.getBackendConn(myInfo.ActiveClients, cliAddr, loadBalancer, service, myInfo.Timeout)
		if err != nil {
			continue
		}
		// TODO: It would be nice to let the goroutine handle this write, but we don't
		// really want to copy the buffer. We could do a pool of buffers or something.
		_, err = svrConn.Write(buffer[0:n])
		if err != nil {
			if !logTimeout(err) {
				klog.ErrorS(err, "Write failed")
				// TODO: Maybe tear down the goroutine for this client/server pair?
			}
			continue
		}
		err = svrConn.SetDeadline(time.Now().Add(myInfo.Timeout))
		if err != nil {
			klog.ErrorS(err, "SetDeadline failed")
			continue
		}
	}
}

func (udp *udpProxySocket) getBackendConn(activeClients *ClientCache, cliAddr net.Addr, loadBalancer LoadBalancer, service proxy.ServicePortName, timeout time.Duration) (net.Conn, error) {
	activeClients.Mu.Lock()
	defer activeClients.Mu.Unlock()

	svrConn, found := activeClients.Clients[cliAddr.String()]
	if !found {
		// TODO: This could spin up a new goroutine to make the outbound connection,
		// and keep accepting inbound traffic.
		klog.V(3).InfoS("New UDP connection from client", "address", cliAddr)
		var err error
		svrConn, err = TryConnectEndpoints(service, cliAddr, "udp", loadBalancer)
		if err != nil {
			return nil, err
		}
		if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
			klog.ErrorS(err, "SetDeadline failed")
			return nil, err
		}
		activeClients.Clients[cliAddr.String()] = svrConn
		go func(cliAddr net.Addr, svrConn net.Conn, activeClients *ClientCache, timeout time.Duration) {
			defer runtime.HandleCrash()
			udp.proxyClient(cliAddr, svrConn, activeClients, timeout)
		}(cliAddr, svrConn, activeClients, timeout)
	}
	return svrConn, nil
}

// proxyClient is expected to be called as a goroutine.
// TODO: Track and log bytes copied, like TCP
func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activeClients *ClientCache, timeout time.Duration) {
	defer svrConn.Close()
	var buffer [4096]byte
	for {
		n, err := svrConn.Read(buffer[0:])
		if err != nil {
			if !logTimeout(err) {
				klog.ErrorS(err, "Read failed")
			}
			break
		}
		err = svrConn.SetDeadline(time.Now().Add(timeout))
		if err != nil {
			klog.ErrorS(err, "SetDeadline failed")
			break
		}
		_, err = udp.WriteTo(buffer[0:n], cliAddr)
		if err != nil {
			if !logTimeout(err) {
				klog.ErrorS(err, "WriteTo failed")
			}
			break
		}
	}
	activeClients.Mu.Lock()
	delete(activeClients.Clients, cliAddr.String())
	activeClients.Mu.Unlock()
}
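For orientation, a minimal sketch of opening a socket through the newProxySocket factory above. The function name exampleOpenSocket is hypothetical and the error handling is illustrative only.

func exampleOpenSocket() error {
	// Port 0 asks the kernel to pick a free port; Addr() reports what was bound.
	sock, err := newProxySocket(v1.ProtocolTCP, net.ParseIP("127.0.0.1"), 0)
	if err != nil {
		return err
	}
	defer sock.Close()
	fmt.Println("proxying on", sock.Addr())
	return nil
}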
@@ -1,26 +0,0 @@
//go:build !windows
// +build !windows

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package userspace

import "golang.org/x/sys/unix"

func setRLimit(limit uint64) error {
	return unix.Setrlimit(unix.RLIMIT_NOFILE, &unix.Rlimit{Max: limit, Cur: limit})
}
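As a usage note: the proxier raised this limit once at startup so the userspace proxy could hold many concurrent connections. A hedged sketch of such a call site (the 64000 ceiling is an assumption for illustration, not taken from this diff):

// Hypothetical startup snippet; 64 * 1000 is an assumed file-descriptor ceiling.
if err := setRLimit(64 * 1000); err != nil {
	return fmt.Errorf("failed to set open file handler limit: %v", err)
}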
@@ -1,24 +0,0 @@
//go:build windows
// +build windows

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package userspace

// setRLimit is a no-op on Windows, which has no RLIMIT_NOFILE equivalent.
func setRLimit(limit uint64) error {
	return nil
}
@@ -1,343 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package userspace

import (
	"errors"
	"fmt"
	"net"
	"sort"
	"sync"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/proxy"
	"k8s.io/kubernetes/pkg/proxy/util"
	stringslices "k8s.io/utils/strings/slices"
)

var (
	ErrMissingServiceEntry = errors.New("missing service entry")
	ErrMissingEndpoints    = errors.New("missing endpoints")
)

type affinityState struct {
	clientIP string
	//clientProtocol api.Protocol //not yet used
	//sessionCookie  string       //not yet used
	endpoint string
	lastUsed time.Time
}

type affinityPolicy struct {
	affinityType v1.ServiceAffinity
	affinityMap  map[string]*affinityState // map client IP -> affinity info
	ttlSeconds   int
}

// LoadBalancerRR is a round-robin load balancer.
type LoadBalancerRR struct {
	lock     sync.RWMutex
	services map[proxy.ServicePortName]*balancerState
}

// Ensure this implements LoadBalancer.
var _ LoadBalancer = &LoadBalancerRR{}

type balancerState struct {
	endpoints []string // a list of "ip:port" style strings
	index     int      // current index into endpoints
	affinity  affinityPolicy
}

func newAffinityPolicy(affinityType v1.ServiceAffinity, ttlSeconds int) *affinityPolicy {
	return &affinityPolicy{
		affinityType: affinityType,
		affinityMap:  make(map[string]*affinityState),
		ttlSeconds:   ttlSeconds,
	}
}

// NewLoadBalancerRR returns a new LoadBalancerRR.
func NewLoadBalancerRR() *LoadBalancerRR {
	return &LoadBalancerRR{
		services: map[proxy.ServicePortName]*balancerState{},
	}
}

func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) error {
	klog.V(4).InfoS("LoadBalancerRR NewService", "servicePortName", svcPort)
	lb.lock.Lock()
	defer lb.lock.Unlock()
	lb.newServiceInternal(svcPort, affinityType, ttlSeconds)
	return nil
}

// newServiceInternal assumes that lb.lock is already held.
func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) *balancerState {
	if ttlSeconds == 0 {
		ttlSeconds = int(v1.DefaultClientIPServiceAffinitySeconds) // default to 3 hours if not specified. Should 0 be unlimited instead?
	}

	if state, exists := lb.services[svcPort]; !exists || state == nil {
		lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlSeconds)}
		klog.V(4).InfoS("LoadBalancerRR service did not exist, created", "servicePortName", svcPort)
	} else if affinityType != "" {
		lb.services[svcPort].affinity.affinityType = affinityType
	}
	return lb.services[svcPort]
}

func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
	klog.V(4).InfoS("LoadBalancerRR DeleteService", "servicePortName", svcPort)
	lb.lock.Lock()
	defer lb.lock.Unlock()
	delete(lb.services, svcPort)
}

// isSessionAffinity returns true if this service is using some form of session affinity.
func isSessionAffinity(affinity *affinityPolicy) bool {
	// Should never be empty string, but checking for it to be safe.
	if affinity.affinityType == "" || affinity.affinityType == v1.ServiceAffinityNone {
		return false
	}
	return true
}

// ServiceHasEndpoints checks whether a service entry has endpoints.
func (lb *LoadBalancerRR) ServiceHasEndpoints(svcPort proxy.ServicePortName) bool {
	lb.lock.RLock()
	defer lb.lock.RUnlock()
	state, exists := lb.services[svcPort]
	if !exists || state == nil {
		return false
	}
	return len(state.endpoints) > 0
}

// NextEndpoint returns a service endpoint.
// The service endpoint is chosen using the round-robin algorithm.
func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) {
	// Coarse locking is simple. We can get more fine-grained if/when we
	// can prove it matters.
	lb.lock.Lock()
	defer lb.lock.Unlock()

	state, exists := lb.services[svcPort]
	if !exists || state == nil {
		return "", ErrMissingServiceEntry
	}
	if len(state.endpoints) == 0 {
		return "", ErrMissingEndpoints
	}
	klog.V(4).InfoS("NextEndpoint for service", "servicePortName", svcPort, "address", srcAddr, "endpoints", state.endpoints)

	sessionAffinityEnabled := isSessionAffinity(&state.affinity)

	var ipaddr string
	if sessionAffinityEnabled {
		// Caution: don't shadow ipaddr
		var err error
		ipaddr, _, err = net.SplitHostPort(srcAddr.String())
		if err != nil {
			return "", fmt.Errorf("malformed source address %q: %v", srcAddr.String(), err)
		}
		if !sessionAffinityReset {
			sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
			if exists && int(time.Since(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
				// Affinity wins.
				endpoint := sessionAffinity.endpoint
				sessionAffinity.lastUsed = time.Now()
				klog.V(4).InfoS("NextEndpoint for service from IP with sessionAffinity", "servicePortName", svcPort, "IP", ipaddr, "sessionAffinity", sessionAffinity, "endpoint", endpoint)
				return endpoint, nil
			}
		}
	}
	// Take the next endpoint.
	endpoint := state.endpoints[state.index]
	state.index = (state.index + 1) % len(state.endpoints)

	if sessionAffinityEnabled {
		affinity := state.affinity.affinityMap[ipaddr]
		if affinity == nil {
			affinity = new(affinityState) //&affinityState{ipaddr, "TCP", "", endpoint, time.Now()}
			state.affinity.affinityMap[ipaddr] = affinity
		}
		affinity.lastUsed = time.Now()
		affinity.endpoint = endpoint
		affinity.clientIP = ipaddr
		klog.V(4).InfoS("Updated affinity key", "IP", ipaddr, "affinityState", state.affinity.affinityMap[ipaddr])
	}

	return endpoint, nil
}

// removeSessionAffinityByEndpoint removes any session affinity records associated
// with a particular endpoint (for example, when a pod goes down).
func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) {
	for _, affinity := range state.affinity.affinityMap {
		if affinity.endpoint == endpoint {
			klog.V(4).InfoS("Removing client from affinityMap for service", "endpoint", affinity.endpoint, "servicePortName", svcPort)
			delete(state.affinity.affinityMap, affinity.clientIP)
		}
	}
}

// removeStaleAffinity loops through the valid endpoints and the endpoints associated
// with the load balancer, then removes any session affinity records that are not in
// both lists. This assumes lb.lock is held.
func (lb *LoadBalancerRR) removeStaleAffinity(svcPort proxy.ServicePortName, newEndpoints []string) {
	newEndpointsSet := sets.NewString()
	for _, newEndpoint := range newEndpoints {
		newEndpointsSet.Insert(newEndpoint)
	}

	state, exists := lb.services[svcPort]
	if !exists || state == nil {
		return
	}
	for _, existingEndpoint := range state.endpoints {
		if !newEndpointsSet.Has(existingEndpoint) {
			klog.V(2).InfoS("Delete endpoint for service", "endpoint", existingEndpoint, "servicePortName", svcPort)
			removeSessionAffinityByEndpoint(state, svcPort, existingEndpoint)
		}
	}
}

func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *v1.Endpoints) {
	portsToEndpoints := util.BuildPortsToEndpointsMap(endpoints)

	lb.lock.Lock()
	defer lb.lock.Unlock()

	for portname := range portsToEndpoints {
		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
		newEndpoints := portsToEndpoints[portname]
		state, exists := lb.services[svcPort]

		if !exists || state == nil || len(newEndpoints) > 0 {
			klog.V(1).InfoS("LoadBalancerRR: Setting endpoints for service", "servicePortName", svcPort, "endpoints", newEndpoints)
			// OnEndpointsAdd can be called without NewService being called externally.
			// To be safe we will call it here. A new service will only be created
			// if one does not already exist.
			state = lb.newServiceInternal(svcPort, v1.ServiceAffinity(""), 0)
			state.endpoints = util.ShuffleStrings(newEndpoints)

			// Reset the round-robin index.
			state.index = 0
		}
	}
}

func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) {
	portsToEndpoints := util.BuildPortsToEndpointsMap(endpoints)
	oldPortsToEndpoints := util.BuildPortsToEndpointsMap(oldEndpoints)
	registeredEndpoints := make(map[proxy.ServicePortName]bool)

	lb.lock.Lock()
	defer lb.lock.Unlock()

	for portname := range portsToEndpoints {
		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
		newEndpoints := portsToEndpoints[portname]
		state, exists := lb.services[svcPort]

		curEndpoints := []string{}
		if state != nil {
			curEndpoints = state.endpoints
		}

		if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(stringslices.Clone(curEndpoints), newEndpoints) {
			klog.V(1).InfoS("LoadBalancerRR: Setting endpoints for service", "servicePortName", svcPort, "endpoints", newEndpoints)
			lb.removeStaleAffinity(svcPort, newEndpoints)
			// OnEndpointsUpdate can be called without NewService being called externally.
			// To be safe we will call it here. A new service will only be created
			// if one does not already exist. The affinity will be updated
			// later, once NewService is called.
			state = lb.newServiceInternal(svcPort, v1.ServiceAffinity(""), 0)
			state.endpoints = util.ShuffleStrings(newEndpoints)

			// Reset the round-robin index.
			state.index = 0
		}
		registeredEndpoints[svcPort] = true
	}

	// Now remove all endpoints missing from the update.
	for portname := range oldPortsToEndpoints {
		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: oldEndpoints.Namespace, Name: oldEndpoints.Name}, Port: portname}
		if _, exists := registeredEndpoints[svcPort]; !exists {
			lb.resetService(svcPort)
		}
	}
}

func (lb *LoadBalancerRR) resetService(svcPort proxy.ServicePortName) {
	// If the service is still around, reset but don't delete.
	if state, ok := lb.services[svcPort]; ok && state != nil {
		if len(state.endpoints) > 0 {
			klog.V(2).InfoS("LoadBalancerRR: Removing endpoints for service", "servicePortName", svcPort)
			state.endpoints = []string{}
		}
		state.index = 0
		state.affinity.affinityMap = map[string]*affinityState{}
	}
}

func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *v1.Endpoints) {
	portsToEndpoints := util.BuildPortsToEndpointsMap(endpoints)

	lb.lock.Lock()
	defer lb.lock.Unlock()

	for portname := range portsToEndpoints {
		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
		lb.resetService(svcPort)
	}
}

func (lb *LoadBalancerRR) OnEndpointsSynced() {
}

// slicesEquiv tests whether two slices are equivalent. It sorts both slices in place.
func slicesEquiv(lhs, rhs []string) bool {
	if len(lhs) != len(rhs) {
		return false
	}
	sort.Strings(lhs)
	sort.Strings(rhs)
	return stringslices.Equal(lhs, rhs)
}

func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortName) {
	lb.lock.Lock()
	defer lb.lock.Unlock()

	state, exists := lb.services[svcPort]
	if !exists || state == nil {
		return
	}
	for ip, affinity := range state.affinity.affinityMap {
		if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
			klog.V(4).InfoS("Removing client from affinityMap for service", "IP", affinity.clientIP, "servicePortName", svcPort)
			delete(state.affinity.affinityMap, ip)
		}
	}
}
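To tie the pieces together, a hedged sketch of driving LoadBalancerRR by hand. The function name exampleRoundRobin is hypothetical; metav1 refers to the apimachinery meta/v1 package imported by the tests below.

func exampleRoundRobin() {
	lb := NewLoadBalancerRR()
	svc := proxy.ServicePortName{
		NamespacedName: types.NamespacedName{Namespace: "default", Name: "demo"},
		Port:           "http",
	}
	// Feeding an Endpoints object registers (and shuffles) the backends.
	lb.OnEndpointsAdd(&v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "10.0.0.1"}, {IP: "10.0.0.2"}},
			Ports:     []v1.EndpointPort{{Name: "http", Port: 8080}},
		}},
	})
	// Successive calls walk the shuffled endpoint list round-robin.
	first, _ := lb.NextEndpoint(svc, nil, false)
	second, _ := lb.NextEndpoint(svc, nil, false)
	fmt.Println(first, second)
}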
@@ -1,678 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package userspace

import (
	"net"
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/proxy"
)

func TestLoadBalanceFailsWithNoEndpoints(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "does-not-exist"}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil {
		t.Errorf("Didn't fail with non-existent service")
	}
	if len(endpoint) != 0 {
		t.Errorf("Got an endpoint")
	}
}

func expectEndpoint(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
	endpoint, err := loadBalancer.NextEndpoint(service, netaddr, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
	}
	if endpoint != expected {
		t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
	}
}

func expectEndpointWithSessionAffinityReset(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
	endpoint, err := loadBalancer.NextEndpoint(service, netaddr, true)
	if err != nil {
		t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
	}
	if endpoint != expected {
		t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
	}
}

func TestLoadBalanceWorksWithSingleEndpoint(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpoints := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "endpoint1"}},
			Ports:     []v1.EndpointPort{{Name: "p", Port: 40}},
		}},
	}
	loadBalancer.OnEndpointsAdd(endpoints)
	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
}

func stringsInSlice(haystack []string, needles ...string) bool {
	for _, needle := range needles {
		found := false
		for i := range haystack {
			if haystack[i] == needle {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}

func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpoints := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
			Ports:     []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
		}},
	}
	loadBalancer.OnEndpointsAdd(endpoints)

	shuffledEndpoints := loadBalancer.services[service].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint:1", "endpoint:2", "endpoint:3") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
}

func TestLoadBalanceWorksWithMultipleEndpointsMultiplePorts(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
	endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpoints := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}},
			},
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint3"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints)

	shuffledEndpoints := loadBalancer.services[serviceP].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:1", "endpoint3:3") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)

	shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:2", "endpoint2:2", "endpoint3:4") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
}

func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
	endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpointsv1 := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint1"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
			},
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint2"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
			},
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint3"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpointsv1)

	shuffledEndpoints := loadBalancer.services[serviceP].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:2", "endpoint3:3") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)

	shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:10", "endpoint2:20", "endpoint3:30") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)

	// Then update the configuration with one fewer endpoints; make sure
	// we start at the beginning again
	endpointsv2 := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint4"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
			},
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint5"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
			},
		},
	}
	loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)

	shuffledEndpoints = loadBalancer.services[serviceP].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint4:4", "endpoint5:5") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)

	shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint4:40", "endpoint5:50") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)

	// Clear endpoints
	endpointsv3 := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
	loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)

	endpoint, err = loadBalancer.NextEndpoint(serviceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
}

func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	fooServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	barServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: "p"}
	endpoint, err := loadBalancer.NextEndpoint(fooServiceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpoints1 := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: fooServiceP.Name, Namespace: fooServiceP.Namespace},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 123}},
			},
		},
	}
	endpoints2 := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: barServiceP.Name, Namespace: barServiceP.Namespace},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 456}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints1)
	loadBalancer.OnEndpointsAdd(endpoints2)
	shuffledFooEndpoints := loadBalancer.services[fooServiceP].endpoints
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)

	shuffledBarEndpoints := loadBalancer.services[barServiceP].endpoints
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)

	// Then update the configuration by removing foo
	loadBalancer.OnEndpointsDelete(endpoints1)
	endpoint, err = loadBalancer.NextEndpoint(fooServiceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// but bar is still there, and we continue RR from where we left off.
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
}

func TestStickyLoadBalanceWorksWithNewServiceCalledFirst(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// Call NewService() before OnEndpointsUpdate()
	loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
	endpoints := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{
			{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
			{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
			{Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, Ports: []v1.EndpointPort{{Port: 3}}},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints)

	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}

	ep1, err := loadBalancer.NextEndpoint(service, client1, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep1, client1)

	ep2, err := loadBalancer.NextEndpoint(service, client2, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep2, client2)

	ep3, err := loadBalancer.NextEndpoint(service, client3, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep3, client3)

	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
}

func TestStickyLoadBalanceWorksWithNewServiceCalledSecond(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// Call OnEndpointsUpdate() before NewService()
	endpoints := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{
			{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
			{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints)
	loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))

	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}

	ep1, err := loadBalancer.NextEndpoint(service, client1, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep1, client1)

	ep2, err := loadBalancer.NextEndpoint(service, client2, false)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
|
||||||
}
|
|
||||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
|
||||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
|
||||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
|
||||||
|
|
||||||
ep3, err := loadBalancer.NextEndpoint(service, client3, false)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
|
||||||
}
|
|
||||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
|
||||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
|
||||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
|
||||||
|
|
||||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
|
||||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
|
||||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
|
||||||
expectEndpoint(t, loadBalancer, service, ep1, client1)
|
|
||||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
|
||||||
expectEndpoint(t, loadBalancer, service, ep3, client3)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
|
|
||||||
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
|
|
||||||
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
|
|
||||||
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
|
|
||||||
client4 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 4), Port: 0}
|
|
||||||
client5 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 5), Port: 0}
|
|
||||||
client6 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 6), Port: 0}
|
|
||||||
loadBalancer := NewLoadBalancerRR()
|
|
||||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
|
|
||||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
|
||||||
if err == nil || len(endpoint) != 0 {
|
|
||||||
t.Errorf("Didn't fail with non-existent service")
|
|
||||||
}
|
|
||||||
|
|
||||||
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
|
|
||||||
endpointsv1 := &v1.Endpoints{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
|
||||||
Subsets: []v1.EndpointSubset{
|
|
||||||
{
|
|
||||||
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
|
|
||||||
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
loadBalancer.OnEndpointsAdd(endpointsv1)
|
|
||||||
shuffledEndpoints := loadBalancer.services[service].endpoints
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
|
||||||
client1Endpoint := shuffledEndpoints[0]
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
|
||||||
client2Endpoint := shuffledEndpoints[1]
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
|
|
||||||
client3Endpoint := shuffledEndpoints[2]
|
|
||||||
|
|
||||||
endpointsv2 := &v1.Endpoints{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
|
||||||
Subsets: []v1.EndpointSubset{
|
|
||||||
{
|
|
||||||
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
|
|
||||||
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
|
|
||||||
shuffledEndpoints = loadBalancer.services[service].endpoints
|
|
||||||
if client1Endpoint == "endpoint:3" {
|
|
||||||
client1Endpoint = shuffledEndpoints[0]
|
|
||||||
} else if client2Endpoint == "endpoint:3" {
|
|
||||||
client2Endpoint = shuffledEndpoints[0]
|
|
||||||
} else if client3Endpoint == "endpoint:3" {
|
|
||||||
client3Endpoint = shuffledEndpoints[0]
|
|
||||||
}
|
|
||||||
expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
|
|
||||||
expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
|
|
||||||
expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
|
|
||||||
|
|
||||||
endpointsv3 := &v1.Endpoints{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
|
||||||
Subsets: []v1.EndpointSubset{
|
|
||||||
{
|
|
||||||
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
|
|
||||||
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
|
|
||||||
shuffledEndpoints = loadBalancer.services[service].endpoints
|
|
||||||
expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
|
|
||||||
expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
|
|
||||||
expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client4)
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client5)
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client6)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
|
|
||||||
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
|
|
||||||
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
|
|
||||||
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
|
|
||||||
loadBalancer := NewLoadBalancerRR()
|
|
||||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
|
|
||||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
|
||||||
if err == nil || len(endpoint) != 0 {
|
|
||||||
t.Errorf("Didn't fail with non-existent service")
|
|
||||||
}
|
|
||||||
|
|
||||||
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
|
|
||||||
endpointsv1 := &v1.Endpoints{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
|
||||||
Subsets: []v1.EndpointSubset{
|
|
||||||
{
|
|
||||||
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
|
|
||||||
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
loadBalancer.OnEndpointsAdd(endpointsv1)
|
|
||||||
shuffledEndpoints := loadBalancer.services[service].endpoints
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
|
||||||
// Then update the configuration with one fewer endpoints, make sure
|
|
||||||
// we start in the beginning again
|
|
||||||
endpointsv2 := &v1.Endpoints{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
|
||||||
Subsets: []v1.EndpointSubset{
|
|
||||||
{
|
|
||||||
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
|
|
||||||
Ports: []v1.EndpointPort{{Port: 4}, {Port: 5}},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
|
|
||||||
shuffledEndpoints = loadBalancer.services[service].endpoints
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
|
||||||
expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
|
|
||||||
|
|
||||||
// Clear endpoints
|
|
||||||
endpointsv3 := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
|
|
||||||
loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
|
|
||||||
|
|
||||||
endpoint, err = loadBalancer.NextEndpoint(service, nil, false)
|
|
||||||
if err == nil || len(endpoint) != 0 {
|
|
||||||
t.Errorf("Didn't fail with non-existent service")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStickyLoadBalanceWorksWithServiceRemoval(t *testing.T) {
|
|
||||||
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
|
|
||||||
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
|
|
||||||
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
|
|
||||||
loadBalancer := NewLoadBalancerRR()
|
|
||||||
fooService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
|
|
||||||
endpoint, err := loadBalancer.NextEndpoint(fooService, nil, false)
|
|
||||||
if err == nil || len(endpoint) != 0 {
|
|
||||||
t.Errorf("Didn't fail with non-existent service")
|
|
||||||
}
|
|
||||||
loadBalancer.NewService(fooService, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
|
|
||||||
endpoints1 := &v1.Endpoints{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: fooService.Name, Namespace: fooService.Namespace},
|
|
||||||
Subsets: []v1.EndpointSubset{
|
|
||||||
{
|
|
||||||
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
|
|
||||||
Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
barService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: ""}
|
|
||||||
loadBalancer.NewService(barService, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
|
|
||||||
endpoints2 := &v1.Endpoints{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: barService.Name, Namespace: barService.Namespace},
|
|
||||||
Subsets: []v1.EndpointSubset{
|
|
||||||
{
|
|
||||||
Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
|
|
||||||
Ports: []v1.EndpointPort{{Port: 4}, {Port: 5}},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
loadBalancer.OnEndpointsAdd(endpoints1)
|
|
||||||
loadBalancer.OnEndpointsAdd(endpoints2)
|
|
||||||
|
|
||||||
shuffledFooEndpoints := loadBalancer.services[fooService].endpoints
|
|
||||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
|
|
||||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
|
|
||||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
|
|
||||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
|
|
||||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
|
|
||||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
|
|
||||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
|
|
||||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
|
|
||||||
expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
|
|
||||||
|
|
||||||
shuffledBarEndpoints := loadBalancer.services[barService].endpoints
|
|
||||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
|
||||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
|
|
||||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
|
||||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
|
||||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
|
|
||||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
|
|
||||||
|
|
||||||
// Then update the configuration by removing foo
|
|
||||||
loadBalancer.OnEndpointsDelete(endpoints1)
|
|
||||||
endpoint, err = loadBalancer.NextEndpoint(fooService, nil, false)
|
|
||||||
if err == nil || len(endpoint) != 0 {
|
|
||||||
t.Errorf("Didn't fail with non-existent service")
|
|
||||||
}
|
|
||||||
|
|
||||||
// but bar is still there, and we continue RR from where we left off.
|
|
||||||
shuffledBarEndpoints = loadBalancer.services[barService].endpoints
|
|
||||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
|
||||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
|
|
||||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
|
||||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
|
|
||||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
|
||||||
expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStickyLoadBalanceWorksWithEndpointFails(t *testing.T) {
|
|
||||||
loadBalancer := NewLoadBalancerRR()
|
|
||||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
|
|
||||||
endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
|
|
||||||
if err == nil || len(endpoint) != 0 {
|
|
||||||
t.Errorf("Didn't fail with non-existent service")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Call NewService() before OnEndpointsUpdate()
|
|
||||||
loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
|
|
||||||
endpoints := &v1.Endpoints{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
|
||||||
Subsets: []v1.EndpointSubset{
|
|
||||||
{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
|
|
||||||
{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
|
|
||||||
{Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, Ports: []v1.EndpointPort{{Port: 3}}},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
loadBalancer.OnEndpointsAdd(endpoints)
|
|
||||||
|
|
||||||
client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
|
|
||||||
client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
|
|
||||||
client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
|
|
||||||
|
|
||||||
ep1, err := loadBalancer.NextEndpoint(service, client1, false)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ep2, err := loadBalancer.NextEndpoint(service, client2, false)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ep3, err := loadBalancer.NextEndpoint(service, client3, false)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Didn't find a service for %s: %v", service, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client1)
|
|
||||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client1)
|
|
||||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)
|
|
||||||
|
|
||||||
expectEndpoint(t, loadBalancer, service, ep2, client2)
|
|
||||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
|
|
||||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
|
|
||||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)
|
|
||||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
|
|
||||||
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
|
|
||||||
}
|
|
@@ -1,36 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
    "net"

    "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/proxy"
    proxyconfig "k8s.io/kubernetes/pkg/proxy/config"
)

// LoadBalancer is an interface for distributing incoming requests to service endpoints.
type LoadBalancer interface {
    // NextEndpoint returns the endpoint to handle a request for the given
    // service-port and source address.
    NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error)
    NewService(service proxy.ServicePortName, sessionAffinityType v1.ServiceAffinity, stickyMaxAgeMinutes int) error
    DeleteService(service proxy.ServicePortName)
    CleanupStaleStickySessions(service proxy.ServicePortName)

    proxyconfig.EndpointsHandler
}
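
// A minimal usage sketch of this contract, assuming the round-robin
// implementation (NewLoadBalancerRR) exercised by the tests above; the
// variable names and values here are illustrative only, not part of the API:
//
//    lb := NewLoadBalancerRR()
//    svc := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "ns", Name: "svc"}, Port: "p"}
//    lb.NewService(svc, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
//    lb.OnEndpointsAdd(eps)                               // eps: a *v1.Endpoints object for "ns/svc"
//    dest, err := lb.NextEndpoint(svc, clientAddr, false) // e.g. "10.0.0.5:80"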
@@ -1,496 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
    "fmt"
    "net"
    "strconv"
    "strings"
    "sync"
    "sync/atomic"
    "time"

    "k8s.io/klog/v2"
    netutils "k8s.io/utils/net"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    utilnet "k8s.io/apimachinery/pkg/util/net"
    "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/kubernetes/pkg/apis/core/v1/helper"
    "k8s.io/kubernetes/pkg/proxy"
    "k8s.io/kubernetes/pkg/proxy/config"
    "k8s.io/kubernetes/pkg/util/netsh"
)

const allAvailableInterfaces string = ""

type portal struct {
    ip         string
    port       int
    isExternal bool
}

type serviceInfo struct {
    isAliveAtomic       int32 // Only access this with atomic ops
    portal              portal
    protocol            v1.Protocol
    socket              proxySocket
    timeout             time.Duration
    activeClients       *clientCache
    sessionAffinityType v1.ServiceAffinity
}

func (info *serviceInfo) setAlive(b bool) {
    var i int32
    if b {
        i = 1
    }
    atomic.StoreInt32(&info.isAliveAtomic, i)
}

func (info *serviceInfo) isAlive() bool {
    return atomic.LoadInt32(&info.isAliveAtomic) != 0
}

func logTimeout(err error) bool {
    if e, ok := err.(net.Error); ok {
        if e.Timeout() {
            klog.V(3).InfoS("connection to endpoint closed due to inactivity")
            return true
        }
    }
    return false
}

// Proxier is a simple proxy for TCP connections between a localhost:lport
// and services that provide the actual implementations.
type Proxier struct {
    // EndpointSlice support has not been added for this proxier yet.
    config.NoopEndpointSliceHandler
    // TODO(imroc): implement node handler for winuserspace proxier.
    config.NoopNodeHandler

    loadBalancer   LoadBalancer
    mu             sync.Mutex // protects serviceMap
    serviceMap     map[ServicePortPortalName]*serviceInfo
    syncPeriod     time.Duration
    udpIdleTimeout time.Duration
    numProxyLoops  int32 // use atomic ops to access this; mostly for testing
    netsh          netsh.Interface
    hostIP         net.IP
}

// assert Proxier is a proxy.Provider
var _ proxy.Provider = &Proxier{}

var (
    // ErrProxyOnLocalhost is returned by NewProxier if the user requests a proxier on
    // the loopback address. May be checked for by callers of NewProxier to know whether
    // the caller provided invalid input.
    ErrProxyOnLocalhost = fmt.Errorf("cannot proxy on localhost")
)

// Used below.
var localhostIPv4 = netutils.ParseIPSloppy("127.0.0.1")
var localhostIPv6 = netutils.ParseIPSloppy("::1")

// NewProxier returns a new Proxier given a LoadBalancer and an address on
// which to listen. It is assumed that there is only a single Proxier active
// on a machine. An error will be returned if the proxier cannot be started
// due to an invalid ListenIP (loopback)
func NewProxier(loadBalancer LoadBalancer, listenIP net.IP, netsh netsh.Interface, pr utilnet.PortRange, syncPeriod, udpIdleTimeout time.Duration) (*Proxier, error) {
    if listenIP.Equal(localhostIPv4) || listenIP.Equal(localhostIPv6) {
        return nil, ErrProxyOnLocalhost
    }

    hostIP, err := utilnet.ChooseHostInterface()
    if err != nil {
        return nil, fmt.Errorf("failed to select a host interface: %v", err)
    }

    klog.V(2).InfoS("Setting proxy", "ip", hostIP)
    return createProxier(loadBalancer, listenIP, netsh, hostIP, syncPeriod, udpIdleTimeout)
}
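
// A hypothetical construction sequence mirroring the signature above; the
// listen IP, port range, and periods are placeholder values, and the fake
// netsh implementation comes from the test helpers (netshtest) used later
// in this commit, so a real deployment would pass a real netsh.Interface:
//
//    lb := NewLoadBalancerRR()
//    proxier, err := NewProxier(lb, netutils.ParseIPSloppy("10.0.0.2"), netshtest.NewFake(), utilnet.PortRange{}, 30*time.Second, 250*time.Millisecond)
//    if err != nil {
//        // e.g. ErrProxyOnLocalhost if a loopback listen IP was given
//    }
//    go proxier.SyncLoop()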

func createProxier(loadBalancer LoadBalancer, listenIP net.IP, netsh netsh.Interface, hostIP net.IP, syncPeriod, udpIdleTimeout time.Duration) (*Proxier, error) {
    return &Proxier{
        loadBalancer:   loadBalancer,
        serviceMap:     make(map[ServicePortPortalName]*serviceInfo),
        syncPeriod:     syncPeriod,
        udpIdleTimeout: udpIdleTimeout,
        netsh:          netsh,
        hostIP:         hostIP,
    }, nil
}

// Sync is called to immediately synchronize the proxier state
func (proxier *Proxier) Sync() {
    proxier.cleanupStaleStickySessions()
}

// SyncLoop runs periodic work. This is expected to run as a goroutine or as the main loop of the app. It does not return.
func (proxier *Proxier) SyncLoop() {
    t := time.NewTicker(proxier.syncPeriod)
    defer t.Stop()
    for {
        <-t.C
        klog.V(6).InfoS("Periodic sync")
        proxier.Sync()
    }
}

// cleanupStaleStickySessions cleans up any stale sticky session records in the hash map.
func (proxier *Proxier) cleanupStaleStickySessions() {
    proxier.mu.Lock()
    defer proxier.mu.Unlock()
    servicePortNameMap := make(map[proxy.ServicePortName]bool)
    for name := range proxier.serviceMap {
        servicePortName := proxy.ServicePortName{
            NamespacedName: types.NamespacedName{
                Namespace: name.Namespace,
                Name:      name.Name,
            },
            Port: name.Port,
        }
        if !servicePortNameMap[servicePortName] {
            // ensure cleanup sticky sessions only gets called once per serviceportname
            servicePortNameMap[servicePortName] = true
            proxier.loadBalancer.CleanupStaleStickySessions(servicePortName)
        }
    }
}

// This assumes proxier.mu is not locked.
func (proxier *Proxier) stopProxy(service ServicePortPortalName, info *serviceInfo) error {
    proxier.mu.Lock()
    defer proxier.mu.Unlock()
    return proxier.stopProxyInternal(service, info)
}

// This assumes proxier.mu is locked.
func (proxier *Proxier) stopProxyInternal(service ServicePortPortalName, info *serviceInfo) error {
    delete(proxier.serviceMap, service)
    info.setAlive(false)
    err := info.socket.Close()
    return err
}

func (proxier *Proxier) getServiceInfo(service ServicePortPortalName) (*serviceInfo, bool) {
    proxier.mu.Lock()
    defer proxier.mu.Unlock()
    info, ok := proxier.serviceMap[service]
    return info, ok
}

func (proxier *Proxier) setServiceInfo(service ServicePortPortalName, info *serviceInfo) {
    proxier.mu.Lock()
    defer proxier.mu.Unlock()
    proxier.serviceMap[service] = info
}

// addServicePortPortal starts listening for a new service, returning the serviceInfo.
// The timeout only applies to UDP connections, for now.
func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPortalName, protocol v1.Protocol, listenIP string, port int, timeout time.Duration) (*serviceInfo, error) {
    var serviceIP net.IP
    if listenIP != allAvailableInterfaces {
        if serviceIP = netutils.ParseIPSloppy(listenIP); serviceIP == nil {
            return nil, fmt.Errorf("could not parse ip '%q'", listenIP)
        }
        // add the IP address. Node port binds to all interfaces.
        args := proxier.netshIPv4AddressAddArgs(serviceIP)
        if existed, err := proxier.netsh.EnsureIPAddress(args, serviceIP); err != nil {
            return nil, err
        } else if !existed {
klog.V(3).InfoS("Added ip address to fowarder interface for service", "servicePortPortalName", servicePortPortalName.String(), "addr", net.JoinHostPort(listenIP, strconv.Itoa(port)), "protocol", protocol)
        }
    }

    // add the listener, proxy
    sock, err := newProxySocket(protocol, serviceIP, port)
    if err != nil {
        return nil, err
    }
    si := &serviceInfo{
        isAliveAtomic: 1,
        portal: portal{
            ip:         listenIP,
            port:       port,
            isExternal: false,
        },
        protocol:            protocol,
        socket:              sock,
        timeout:             timeout,
        activeClients:       newClientCache(),
        sessionAffinityType: v1.ServiceAffinityNone, // default
    }
    proxier.setServiceInfo(servicePortPortalName, si)

    klog.V(2).InfoS("Proxying for service", "servicePortPortalName", servicePortPortalName.String(), "addr", net.JoinHostPort(listenIP, strconv.Itoa(port)), "protocol", protocol)
    go func(service ServicePortPortalName, proxier *Proxier) {
        defer runtime.HandleCrash()
        atomic.AddInt32(&proxier.numProxyLoops, 1)
        sock.ProxyLoop(service, si, proxier)
        atomic.AddInt32(&proxier.numProxyLoops, -1)
    }(servicePortPortalName, proxier)

    return si, nil
}

func (proxier *Proxier) closeServicePortPortal(servicePortPortalName ServicePortPortalName, info *serviceInfo) error {
    // turn off the proxy
    if err := proxier.stopProxy(servicePortPortalName, info); err != nil {
        return err
    }

    // close the PortalProxy by deleting the service IP address
    if info.portal.ip != allAvailableInterfaces {
        serviceIP := netutils.ParseIPSloppy(info.portal.ip)
        args := proxier.netshIPv4AddressDeleteArgs(serviceIP)
        if err := proxier.netsh.DeleteIPAddress(args); err != nil {
            return err
        }
    }
    return nil
}

// getListenIPPortMap returns a map of all listen IPs to ports for a service.
func getListenIPPortMap(service *v1.Service, listenPort int, nodePort int) map[string]int {
    listenIPPortMap := make(map[string]int)
    listenIPPortMap[service.Spec.ClusterIP] = listenPort

    for _, ip := range service.Spec.ExternalIPs {
        listenIPPortMap[ip] = listenPort
    }

    for _, ingress := range service.Status.LoadBalancer.Ingress {
        listenIPPortMap[ingress.IP] = listenPort
    }

    if nodePort != 0 {
        listenIPPortMap[allAvailableInterfaces] = nodePort
    }

    return listenIPPortMap
}
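
// For illustration (hypothetical addresses): a Service with ClusterIP
// 10.0.0.10, one external IP 192.168.1.5, port 80, and NodePort 30080
// would produce
//
//    map[string]int{"10.0.0.10": 80, "192.168.1.5": 80, "": 30080}
//
// where "" is allAvailableInterfaces, i.e. the node port listens on all
// interfaces. Load-balancer ingress IPs, if present, are added the same way.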

func (proxier *Proxier) mergeService(service *v1.Service) map[ServicePortPortalName]bool {
    if service == nil {
        return nil
    }
    svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
    if !helper.IsServiceIPSet(service) {
        klog.V(3).InfoS("Skipping service due to clusterIP", "svcName", svcName, "ip", service.Spec.ClusterIP)
        return nil
    }
    existingPortPortals := make(map[ServicePortPortalName]bool)

    for i := range service.Spec.Ports {
        servicePort := &service.Spec.Ports[i]
        // collect the map of all the listen IP/port pairs to use for service port portals
        listenIPPortMap := getListenIPPortMap(service, int(servicePort.Port), int(servicePort.NodePort))
        protocol := servicePort.Protocol

        for listenIP, listenPort := range listenIPPortMap {
            servicePortPortalName := ServicePortPortalName{
                NamespacedName: svcName,
                Port:           servicePort.Name,
                PortalIPName:   listenIP,
            }
            existingPortPortals[servicePortPortalName] = true
            info, exists := proxier.getServiceInfo(servicePortPortalName)
            if exists && sameConfig(info, service, protocol, listenPort) {
                // Nothing changed.
                continue
            }
            if exists {
                klog.V(4).InfoS("Something changed for service: stopping it", "servicePortPortalName", servicePortPortalName.String())
                if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil {
                    klog.ErrorS(err, "Failed to close service port portal", "servicePortPortalName", servicePortPortalName.String())
                }
            }
            klog.V(1).InfoS("Adding new service", "servicePortPortalName", servicePortPortalName.String(), "addr", net.JoinHostPort(listenIP, strconv.Itoa(listenPort)), "protocol", protocol)
            info, err := proxier.addServicePortPortal(servicePortPortalName, protocol, listenIP, listenPort, proxier.udpIdleTimeout)
            if err != nil {
                klog.ErrorS(err, "Failed to start proxy", "servicePortPortalName", servicePortPortalName.String())
                continue
            }
            info.sessionAffinityType = service.Spec.SessionAffinity
            klog.V(10).InfoS("record serviceInfo", "info", info)
        }
        if len(listenIPPortMap) > 0 {
            // only one loadbalancer per service port portal
            servicePortName := proxy.ServicePortName{
                NamespacedName: types.NamespacedName{
                    Namespace: service.Namespace,
                    Name:      service.Name,
                },
                Port: servicePort.Name,
            }
            timeoutSeconds := 0
            if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
                timeoutSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
            }
            proxier.loadBalancer.NewService(servicePortName, service.Spec.SessionAffinity, timeoutSeconds)
        }
    }

    return existingPortPortals
}
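
// Sketch of the resulting fan-out (hypothetical values, shorthand notation):
// a service "ns/svc" with one port named "http" on ClusterIP 10.0.0.10 plus
// NodePort 30080 yields two portals but a single load-balancer entry shared
// by both:
//
//    ServicePortPortalName{ns/svc, Port: "http", PortalIPName: "10.0.0.10"}
//    ServicePortPortalName{ns/svc, Port: "http", PortalIPName: ""} // node port, all interfaces
//    proxy.ServicePortName{ns/svc, Port: "http"}                   // one LB entry for both portals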

func (proxier *Proxier) unmergeService(service *v1.Service, existingPortPortals map[ServicePortPortalName]bool) {
    if service == nil {
        return
    }
    svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
    if !helper.IsServiceIPSet(service) {
        klog.V(3).InfoS("Skipping service due to clusterIP", "svcName", svcName, "ip", service.Spec.ClusterIP)
        return
    }

    servicePortNameMap := make(map[proxy.ServicePortName]bool)
    for name := range existingPortPortals {
        servicePortName := proxy.ServicePortName{
            NamespacedName: types.NamespacedName{
                Namespace: name.Namespace,
                Name:      name.Name,
            },
            Port: name.Port,
        }
        servicePortNameMap[servicePortName] = true
    }

    for i := range service.Spec.Ports {
        servicePort := &service.Spec.Ports[i]
        serviceName := proxy.ServicePortName{NamespacedName: svcName, Port: servicePort.Name}
        // collect the map of all the listen IP/port pairs to use for service port portals
        listenIPPortMap := getListenIPPortMap(service, int(servicePort.Port), int(servicePort.NodePort))

        for listenIP := range listenIPPortMap {
            servicePortPortalName := ServicePortPortalName{
                NamespacedName: svcName,
                Port:           servicePort.Name,
                PortalIPName:   listenIP,
            }
            if existingPortPortals[servicePortPortalName] {
                continue
            }

            klog.V(1).InfoS("Stopping service", "servicePortPortalName", servicePortPortalName.String())
            info, exists := proxier.getServiceInfo(servicePortPortalName)
            if !exists {
                klog.ErrorS(nil, "Service is being removed but doesn't exist", "servicePortPortalName", servicePortPortalName.String())
                continue
            }

            if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil {
                klog.ErrorS(err, "Failed to close service port portal", "servicePortPortalName", servicePortPortalName)
            }
        }

        // Only delete load balancer if all listen ips per name/port show inactive.
        if !servicePortNameMap[serviceName] {
            proxier.loadBalancer.DeleteService(serviceName)
        }
    }
}

// OnServiceAdd is called whenever creation of new service object
// is observed.
func (proxier *Proxier) OnServiceAdd(service *v1.Service) {
    _ = proxier.mergeService(service)
}

// OnServiceUpdate is called whenever modification of an existing
// service object is observed.
func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) {
    existingPortPortals := proxier.mergeService(service)
    proxier.unmergeService(oldService, existingPortPortals)
}

// OnServiceDelete is called whenever deletion of an existing service
// object is observed.
func (proxier *Proxier) OnServiceDelete(service *v1.Service) {
    proxier.unmergeService(service, map[ServicePortPortalName]bool{})
}

// OnServiceSynced is called once all the initial event handlers were
// called and the state is fully propagated to local cache.
func (proxier *Proxier) OnServiceSynced() {
}

// OnEndpointsAdd is called whenever creation of new endpoints object
// is observed.
func (proxier *Proxier) OnEndpointsAdd(endpoints *v1.Endpoints) {
    proxier.loadBalancer.OnEndpointsAdd(endpoints)
}

// OnEndpointsUpdate is called whenever modification of an existing
// endpoints object is observed.
func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) {
    proxier.loadBalancer.OnEndpointsUpdate(oldEndpoints, endpoints)
}

// OnEndpointsDelete is called whenever deletion of an existing endpoints
// object is observed.
func (proxier *Proxier) OnEndpointsDelete(endpoints *v1.Endpoints) {
    proxier.loadBalancer.OnEndpointsDelete(endpoints)
}

// OnEndpointsSynced is called once all the initial event handlers were
// called and the state is fully propagated to local cache.
func (proxier *Proxier) OnEndpointsSynced() {
    proxier.loadBalancer.OnEndpointsSynced()
}

func sameConfig(info *serviceInfo, service *v1.Service, protocol v1.Protocol, listenPort int) bool {
    return info.protocol == protocol && info.portal.port == listenPort && info.sessionAffinityType == service.Spec.SessionAffinity
}

func isTooManyFDsError(err error) bool {
    return strings.Contains(err.Error(), "too many open files")
}

func isClosedError(err error) bool {
    // A brief discussion about handling closed error here:
    // https://code.google.com/p/go/issues/detail?id=4373#c14
    // TODO: maybe create a stoppable TCP listener that returns a StoppedError
    return strings.HasSuffix(err.Error(), "use of closed network connection")
}

func (proxier *Proxier) netshIPv4AddressAddArgs(destIP net.IP) []string {
    intName := proxier.netsh.GetInterfaceToAddIP()
    args := []string{
        "interface", "ipv4", "add", "address",
        "name=" + intName,
        "address=" + destIP.String(),
    }

    return args
}

func (proxier *Proxier) netshIPv4AddressDeleteArgs(destIP net.IP) []string {
    intName := proxier.netsh.GetInterfaceToAddIP()
    args := []string{
        "interface", "ipv4", "delete", "address",
        "name=" + intName,
        "address=" + destIP.String(),
    }

    return args
}
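
// The argument slices built above correspond to commands of the form
// (the address shown is an example only):
//
//    netsh interface ipv4 add address name=<forwarder interface> address=10.0.0.10
//    netsh interface ipv4 delete address name=<forwarder interface> address=10.0.0.10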
@@ -1,959 +0,0 @@
/*
|
|
||||||
Copyright 2016 The Kubernetes Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package winuserspace
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"sync/atomic"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
|
||||||
discovery "k8s.io/api/discovery/v1"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/types"
|
|
||||||
"k8s.io/kubernetes/pkg/proxy"
|
|
||||||
netshtest "k8s.io/kubernetes/pkg/util/netsh/testing"
|
|
||||||
netutils "k8s.io/utils/net"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
udpIdleTimeoutForTest = 250 * time.Millisecond
|
|
||||||
)
|
|
||||||
|
|
||||||
func joinHostPort(host string, port int) string {
|
|
||||||
return net.JoinHostPort(host, fmt.Sprintf("%d", port))
|
|
||||||
}
|
|
||||||
|
|
||||||
func waitForClosedPortTCP(p *Proxier, proxyPort int) error {
|
|
||||||
for i := 0; i < 50; i++ {
|
|
||||||
conn, err := net.Dial("tcp", joinHostPort("", proxyPort))
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
conn.Close()
|
|
||||||
time.Sleep(1 * time.Millisecond)
|
|
||||||
}
|
|
||||||
return fmt.Errorf("port %d still open", proxyPort)
|
|
||||||
}
|
|
||||||
|
|
||||||
func waitForClosedPortUDP(p *Proxier, proxyPort int) error {
|
|
||||||
for i := 0; i < 50; i++ {
|
|
||||||
conn, err := net.Dial("udp", joinHostPort("", proxyPort))
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
|
|
||||||
// To detect a closed UDP port write, then read.
|
|
||||||
_, err = conn.Write([]byte("x"))
|
|
||||||
if err != nil {
|
|
||||||
if e, ok := err.(net.Error); ok && !e.Timeout() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
var buf [4]byte
|
|
||||||
_, err = conn.Read(buf[0:])
|
|
||||||
if err != nil {
|
|
||||||
if e, ok := err.(net.Error); ok && !e.Timeout() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
conn.Close()
|
|
||||||
time.Sleep(1 * time.Millisecond)
|
|
||||||
}
|
|
||||||
return fmt.Errorf("port %d still open", proxyPort)
|
|
||||||
}
|
|
||||||
|
|
||||||
// udpEchoServer is a simple echo server in UDP, intended for testing the proxy.
|
|
||||||
type udpEchoServer struct {
|
|
||||||
net.PacketConn
|
|
||||||
}
|
|
||||||
|
|
||||||
func newUDPEchoServer() (*udpEchoServer, error) {
|
|
||||||
packetconn, err := net.ListenPacket("udp", ":0")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &udpEchoServer{packetconn}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *udpEchoServer) Loop() {
|
|
||||||
var buffer [4096]byte
|
|
||||||
for {
|
|
||||||
n, cliAddr, err := r.ReadFrom(buffer[0:])
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("ReadFrom failed: %v\n", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
r.WriteTo(buffer[0:n], cliAddr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var tcpServerPort int32
|
|
||||||
var udpServerPort int32
|
|
||||||
|
|
||||||
func TestMain(m *testing.M) {
|
|
||||||
// TCP setup.
|
|
||||||
tcp := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
w.Write([]byte(r.URL.Path[1:]))
|
|
||||||
}))
|
|
||||||
defer tcp.Close()
|
|
||||||
|
|
||||||
u, err := url.Parse(tcp.URL)
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("failed to parse: %v", err))
|
|
||||||
}
|
|
||||||
_, port, err := net.SplitHostPort(u.Host)
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("failed to parse: %v", err))
|
|
||||||
}
|
|
||||||
tcpServerPortValue, err := strconv.Atoi(port)
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
|
|
||||||
}
|
|
||||||
tcpServerPort = int32(tcpServerPortValue)
|
|
||||||
|
|
||||||
// UDP setup.
|
|
||||||
udp, err := newUDPEchoServer()
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("failed to make a UDP server: %v", err))
|
|
||||||
}
|
|
||||||
_, port, err = net.SplitHostPort(udp.LocalAddr().String())
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("failed to parse: %v", err))
|
|
||||||
}
|
|
||||||
udpServerPortValue, err := strconv.Atoi(port)
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("failed to atoi(%s): %v", port, err))
|
|
||||||
}
|
|
||||||
udpServerPort = int32(udpServerPortValue)
|
|
||||||
go udp.Loop()
|
|
||||||
|
|
||||||
ret := m.Run()
|
|
||||||
// it should be safe to call Close() multiple times.
|
|
||||||
tcp.Close()
|
|
||||||
os.Exit(ret)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testEchoTCP(t *testing.T, address string, port int) {
|
|
||||||
path := "aaaaa"
|
|
||||||
res, err := http.Get("http://" + address + ":" + fmt.Sprintf("%d", port) + "/" + path)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error connecting to server: %v", err)
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
data, err := ioutil.ReadAll(res.Body)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("error reading data: %v %v", err, string(data))
|
|
||||||
}
|
|
||||||
if string(data) != path {
|
|
||||||
t.Errorf("expected: %s, got %s", path, string(data))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func testEchoUDP(t *testing.T, address string, port int) {
|
|
||||||
data := "abc123"
|
|
||||||
|
|
||||||
conn, err := net.Dial("udp", joinHostPort(address, port))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error connecting to server: %v", err)
|
|
||||||
}
|
|
||||||
if _, err := conn.Write([]byte(data)); err != nil {
|
|
||||||
t.Fatalf("error sending to server: %v", err)
|
|
||||||
}
|
|
||||||
var resp [1024]byte
|
|
||||||
n, err := conn.Read(resp[0:])
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("error receiving data: %v", err)
|
|
||||||
}
|
|
||||||
if string(resp[0:n]) != data {
|
|
||||||
t.Errorf("expected: %s, got %s", data, string(resp[0:n]))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func waitForNumProxyLoops(t *testing.T, p *Proxier, want int32) {
|
|
||||||
var got int32
|
|
||||||
for i := 0; i < 600; i++ {
|
|
||||||
got = atomic.LoadInt32(&p.numProxyLoops)
|
|
||||||
if got == want {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
time.Sleep(100 * time.Millisecond)
|
|
||||||
}
|
|
||||||
t.Errorf("expected %d ProxyLoops running, got %d", want, got)
|
|
||||||
}
|
|
||||||
|
|
||||||
func waitForNumProxyClients(t *testing.T, s *serviceInfo, want int, timeout time.Duration) {
|
|
||||||
var got int
|
|
||||||
now := time.Now()
|
|
||||||
deadline := now.Add(timeout)
|
|
||||||
for time.Now().Before(deadline) {
|
|
||||||
s.activeClients.mu.Lock()
|
|
||||||
got = len(s.activeClients.clients)
|
|
||||||
s.activeClients.mu.Unlock()
|
|
||||||
if got == want {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
time.Sleep(500 * time.Millisecond)
|
|
||||||
}
|
|
||||||
t.Errorf("expected %d ProxyClients live, got %d", want, got)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getPortNum(t *testing.T, addr string) int {
|
|
||||||
_, portStr, err := net.SplitHostPort(addr)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("error getting port from %s", addr)
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
portNum, err := strconv.Atoi(portStr)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("error getting port from %s", addr)
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
return portNum
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTCPProxy(t *testing.T) {
|
|
||||||
lb := NewLoadBalancerRR()
|
|
||||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
|
||||||
lb.OnEndpointsAdd(&v1.Endpoints{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
|
||||||
Subsets: []v1.EndpointSubset{{
|
|
||||||
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
|
|
||||||
Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
|
|
||||||
}},
|
|
||||||
})
|
|
||||||
|
|
||||||
listenIP := "0.0.0.0"
|
|
||||||
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
waitForNumProxyLoops(t, p, 0)
|
|
||||||
|
|
||||||
servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
|
|
||||||
svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error adding new service: %#v", err)
|
|
||||||
}
|
|
||||||
testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
|
|
||||||
waitForNumProxyLoops(t, p, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUDPProxy(t *testing.T) {
|
|
||||||
lb := NewLoadBalancerRR()
|
|
||||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
|
||||||
lb.OnEndpointsAdd(&v1.Endpoints{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
|
||||||
Subsets: []v1.EndpointSubset{{
|
|
||||||
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
|
|
||||||
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
|
|
||||||
}},
|
|
||||||
})
|
|
||||||
|
|
||||||
listenIP := "0.0.0.0"
|
|
||||||
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
waitForNumProxyLoops(t, p, 0)
|
|
||||||
|
|
||||||
servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
|
|
||||||
svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error adding new service: %#v", err)
|
|
||||||
}
|
|
||||||
testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
|
|
||||||
waitForNumProxyLoops(t, p, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUDPProxyTimeout(t *testing.T) {
|
|
||||||
lb := NewLoadBalancerRR()
|
|
||||||
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
|
|
||||||
lb.OnEndpointsAdd(&v1.Endpoints{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
|
|
||||||
Subsets: []v1.EndpointSubset{{
|
|
||||||
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
|
|
||||||
Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
|
|
||||||
}},
|
|
||||||
})
|
|
||||||
|
|
||||||
listenIP := "0.0.0.0"
|
|
||||||
p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
waitForNumProxyLoops(t, p, 0)
|
|
||||||
|
|
||||||
servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
|
|
||||||
svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("error adding new service: %#v", err)
|
|
||||||
}
|
|
||||||
waitForNumProxyLoops(t, p, 1)
|
|
||||||
testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
|
|
||||||
// When connecting to a UDP service endpoint, there should be a Conn for proxy.
|
|
||||||
waitForNumProxyClients(t, svcInfo, 1, time.Second)
|
|
||||||
// If conn has no activity for serviceInfo.timeout since last Read/Write, it should be closed because of timeout.
|
|
||||||
waitForNumProxyClients(t, svcInfo, 0, 2*time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMultiPortProxy(t *testing.T) {
	lb := NewLoadBalancerRR()
	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-p"}, Port: "p"}
	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-q"}, Port: "q"}
	lb.OnEndpointsAdd(&v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []v1.EndpointPort{{Name: "p", Protocol: "TCP", Port: tcpServerPort}},
		}},
	})
	lb.OnEndpointsAdd(&v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceQ.Name, Namespace: serviceQ.Namespace},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []v1.EndpointPort{{Name: "q", Protocol: "UDP", Port: udpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalNameP := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceP.Namespace, Name: serviceP.Name}, Port: serviceP.Port, PortalIPName: listenIP}
	svcInfoP, err := p.addServicePortPortal(servicePortPortalNameP, "TCP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfoP.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)

	servicePortPortalNameQ := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceQ.Namespace, Name: serviceQ.Name}, Port: serviceQ.Port, PortalIPName: listenIP}
	svcInfoQ, err := p.addServicePortPortal(servicePortPortalNameQ, "UDP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfoQ.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 2)
}

func TestMultiPortOnServiceAdd(t *testing.T) {
	lb := NewLoadBalancerRR()
	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "q"}
	serviceX := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "x"}

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	p.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Spec: v1.ServiceSpec{ClusterIP: "0.0.0.0", Ports: []v1.ServicePort{{
			Name:     "p",
			Port:     0,
			Protocol: "TCP",
		}, {
			Name:     "q",
			Port:     0,
			Protocol: "UDP",
		}}},
	})
	waitForNumProxyLoops(t, p, 2)

	servicePortPortalNameP := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceP.Namespace, Name: serviceP.Name}, Port: serviceP.Port, PortalIPName: listenIP}
	svcInfo, exists := p.getServiceInfo(servicePortPortalNameP)
	if !exists {
		t.Fatalf("can't find serviceInfo for %s", servicePortPortalNameP)
	}
	if svcInfo.portal.ip != "0.0.0.0" || svcInfo.portal.port != 0 || svcInfo.protocol != "TCP" {
		t.Errorf("unexpected serviceInfo for %s: %#v", serviceP, svcInfo)
	}

	servicePortPortalNameQ := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceQ.Namespace, Name: serviceQ.Name}, Port: serviceQ.Port, PortalIPName: listenIP}
	svcInfo, exists = p.getServiceInfo(servicePortPortalNameQ)
	if !exists {
		t.Fatalf("can't find serviceInfo for %s", servicePortPortalNameQ)
	}
	if svcInfo.portal.ip != "0.0.0.0" || svcInfo.portal.port != 0 || svcInfo.protocol != "UDP" {
		t.Errorf("unexpected serviceInfo for %s: %#v", serviceQ, svcInfo)
	}

	servicePortPortalNameX := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceX.Namespace, Name: serviceX.Name}, Port: serviceX.Port, PortalIPName: listenIP}
	svcInfo, exists = p.getServiceInfo(servicePortPortalNameX)
	if exists {
		t.Fatalf("found unwanted serviceInfo for %s: %#v", serviceX, svcInfo)
	}
}

// stopProxyByName is a test helper that stops the proxy for the named service.
func stopProxyByName(proxier *Proxier, service ServicePortPortalName) error {
	info, found := proxier.getServiceInfo(service)
	if !found {
		return fmt.Errorf("unknown service: %s", service)
	}
	return proxier.stopProxy(service, info)
}

func TestTCPProxyStop(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	if !svcInfo.isAlive() {
		t.Fatalf("wrong value for isAlive(): expected true")
	}
	conn, err := net.Dial("tcp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	if err := stopProxyByName(p, servicePortPortalName); err != nil {
		t.Fatal(err)
	}
	if svcInfo.isAlive() {
		t.Fatalf("wrong value for isAlive(): expected false")
	}
	// Wait for the port to really close.
	if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)
}

func TestUDPProxyStop(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	conn, err := net.Dial("udp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	if err := stopProxyByName(p, servicePortPortalName); err != nil {
		t.Fatal(err)
	}
	// Wait for the port to really close.
	if err := waitForClosedPortUDP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)
}

func TestTCPProxyUpdateDelete(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	conn, err := net.Dial("tcp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceDelete(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
			Name:     "p",
			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
			Protocol: "TCP",
		}}},
	})
	if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)
}

func TestUDPProxyUpdateDelete(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	conn, err := net.Dial("udp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceDelete(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
			Name:     "p",
			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
			Protocol: "UDP",
		}}},
	})
	if err := waitForClosedPortUDP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)
}

func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	endpoint := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	}
	lb.OnEndpointsAdd(endpoint)

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	conn, err := net.Dial("tcp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceDelete(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
			Name:     "p",
			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
			Protocol: "TCP",
		}}},
	})
	if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	// The endpoint must be re-added here because it got cleaned up during the service delete.
	lb.OnEndpointsAdd(endpoint)
	p.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
			Name:     "p",
			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
			Protocol: "TCP",
		}}},
	})
	svcInfo, exists := p.getServiceInfo(servicePortPortalName)
	if !exists {
		t.Fatalf("can't find serviceInfo for %s", servicePortPortalName)
	}
	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)
}

func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	endpoint := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
		}},
	}
	lb.OnEndpointsAdd(endpoint)

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	conn, err := net.Dial("udp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String())))
	if err != nil {
		t.Fatalf("error connecting to proxy: %v", err)
	}
	conn.Close()
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceDelete(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
			Name:     "p",
			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
			Protocol: "UDP",
		}}},
	})
	if err := waitForClosedPortUDP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	// The endpoint must be re-added here because it got cleaned up during the service delete.
	lb.OnEndpointsAdd(endpoint)
	p.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
			Name:     "p",
			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
			Protocol: "UDP",
		}}},
	})
	svcInfo, exists := p.getServiceInfo(servicePortPortalName)
	if !exists {
		t.Fatalf("can't find serviceInfo for %s", servicePortPortalName)
	}
	testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)
}

func TestTCPProxyUpdatePort(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
			Name:     "p",
			Port:     0,
			Protocol: "TCP",
		}}},
	})
	// Wait for the socket to actually get freed.
	if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	svcInfo, exists := p.getServiceInfo(servicePortPortalName)
	if !exists {
		t.Fatalf("can't find serviceInfo for %s", servicePortPortalName)
	}
	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	// The shutdown is a bit asynchronous, but this sleep should be sufficient.
	time.Sleep(500 * time.Millisecond)
	waitForNumProxyLoops(t, p, 1)
}

func TestUDPProxyUpdatePort(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []v1.EndpointPort{{Name: "p", Port: udpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
			Name:     "p",
			Port:     0,
			Protocol: "UDP",
		}}},
	})
	// Wait for the socket to actually get freed.
	if err := waitForClosedPortUDP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	svcInfo, exists := p.getServiceInfo(servicePortPortalName)
	if !exists {
		t.Fatalf("can't find serviceInfo for %s", servicePortPortalName)
	}
	testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)
}

func TestProxyUpdatePublicIPs(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	lb.OnEndpointsAdd(&v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	})

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)

	p.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{{
				Name:     "p",
				Port:     int32(svcInfo.portal.port),
				Protocol: "TCP",
			}},
			ClusterIP:   svcInfo.portal.ip,
			ExternalIPs: []string{"0.0.0.0"},
		},
	})
	// Wait for the socket to actually get freed.
	if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil {
		t.Fatal(err)
	}
	svcInfo, exists := p.getServiceInfo(servicePortPortalName)
	if !exists {
		t.Fatalf("can't find serviceInfo for %s", servicePortPortalName)
	}
	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	// The shutdown is a bit asynchronous, but this sleep should be sufficient.
	time.Sleep(500 * time.Millisecond)
	waitForNumProxyLoops(t, p, 1)
}

func TestProxyUpdatePortal(t *testing.T) {
	lb := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
	endpoint := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
			Ports:     []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
		}},
	}
	lb.OnEndpointsAdd(endpoint)

	listenIP := "0.0.0.0"
	p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
	if err != nil {
		t.Fatal(err)
	}
	waitForNumProxyLoops(t, p, 0)

	servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP}
	svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second)
	if err != nil {
		t.Fatalf("error adding new service: %#v", err)
	}
	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)

	svcv0 := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
			Name:     "p",
			Port:     int32(svcInfo.portal.port),
			Protocol: "TCP",
		}}},
	}

	svcv1 := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: v1.ServiceSpec{ClusterIP: "", Ports: []v1.ServicePort{{
			Name:     "p",
			Port:     int32(svcInfo.portal.port),
			Protocol: "TCP",
		}}},
	}

	p.OnServiceUpdate(svcv0, svcv1)
	_, exists := p.getServiceInfo(servicePortPortalName)
	if exists {
		t.Fatalf("service with empty ClusterIP should not be included in the proxy")
	}

	svcv2 := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: v1.ServiceSpec{ClusterIP: "None", Ports: []v1.ServicePort{{
			Name:     "p",
			Port:     int32(getPortNum(t, svcInfo.socket.Addr().String())),
			Protocol: "TCP",
		}}},
	}
	p.OnServiceUpdate(svcv1, svcv2)
	_, exists = p.getServiceInfo(servicePortPortalName)
	if exists {
		t.Fatalf("service with 'None' as ClusterIP should not be included in the proxy")
	}

	svcv3 := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{
			Name:     "p",
			Port:     int32(svcInfo.portal.port),
			Protocol: "TCP",
		}}},
	}
	p.OnServiceUpdate(svcv2, svcv3)
	lb.OnEndpointsAdd(endpoint)
	svcInfo, exists = p.getServiceInfo(servicePortPortalName)
	if !exists {
		t.Fatalf("service with ClusterIP set not found in the proxy")
	}
	testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String()))
	waitForNumProxyLoops(t, p, 1)
}

func TestNoopEndpointSlice(t *testing.T) {
	p := Proxier{}
	p.OnEndpointSliceAdd(&discovery.EndpointSlice{})
	p.OnEndpointSliceUpdate(&discovery.EndpointSlice{}, &discovery.EndpointSlice{})
	p.OnEndpointSliceDelete(&discovery.EndpointSlice{})
	p.OnEndpointSlicesSynced()
}

// TODO(justinsb): Add test for nodePort conflict detection, once we have nodePort wired in

@@ -1,313 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
	"fmt"
	"io"
	"net"
	"strconv"
	"strings"
	"sync"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/proxy"
)

// Abstraction over TCP/UDP sockets which are proxied.
type proxySocket interface {
	// Addr gets the net.Addr for a proxySocket.
	Addr() net.Addr
	// Close stops the proxySocket from accepting incoming connections.
	// Each implementation should comment on the impact of calling Close
	// while sessions are active.
	Close() error
	// ProxyLoop proxies incoming connections for the specified service to the service endpoints.
	ProxyLoop(service ServicePortPortalName, info *serviceInfo, proxier *Proxier)
	// ListenPort returns the host port that the proxySocket is listening on.
	ListenPort() int
}

func newProxySocket(protocol v1.Protocol, ip net.IP, port int) (proxySocket, error) {
	host := ""
	if ip != nil {
		host = ip.String()
	}

	switch strings.ToUpper(string(protocol)) {
	case "TCP":
		listener, err := net.Listen("tcp", net.JoinHostPort(host, strconv.Itoa(port)))
		if err != nil {
			return nil, err
		}
		return &tcpProxySocket{Listener: listener, port: port}, nil
	case "UDP":
		addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, strconv.Itoa(port)))
		if err != nil {
			return nil, err
		}
		conn, err := net.ListenUDP("udp", addr)
		if err != nil {
			return nil, err
		}
		return &udpProxySocket{UDPConn: conn, port: port}, nil
	case "SCTP":
		return nil, fmt.Errorf("SCTP is not supported for user space proxy")
	}
	return nil, fmt.Errorf("unknown protocol %q", protocol)
}

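A minimal usage sketch (hedged: port 0 requests an ephemeral port from the kernel, so the bound address must be read back via Addr() rather than ListenPort()):

	sock, err := newProxySocket(v1.ProtocolTCP, net.ParseIP("127.0.0.1"), 0)
	if err != nil {
		klog.ErrorS(err, "failed to open proxy socket")
		return
	}
	defer sock.Close()
	klog.InfoS("proxy socket bound", "address", sock.Addr())
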
// endpointDialTimeout lists the dial timeouts we try, in order, when connecting to a backend.
var endpointDialTimeout = []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 2 * time.Second}

// tcpProxySocket implements proxySocket. Close() is implemented by net.Listener. When Close() is called,
// no new connections are allowed but existing connections are left untouched.
type tcpProxySocket struct {
	net.Listener
	port int
}

func (tcp *tcpProxySocket) ListenPort() int {
	return tcp.port
}

func tryConnect(service ServicePortPortalName, srcAddr net.Addr, protocol string, proxier *Proxier) (out net.Conn, err error) {
	sessionAffinityReset := false
	for _, dialTimeout := range endpointDialTimeout {
		servicePortName := proxy.ServicePortName{
			NamespacedName: types.NamespacedName{
				Namespace: service.Namespace,
				Name:      service.Name,
			},
			Port: service.Port,
		}
		endpoint, err := proxier.loadBalancer.NextEndpoint(servicePortName, srcAddr, sessionAffinityReset)
		if err != nil {
			klog.ErrorS(err, "Couldn't find an endpoint for service", "service", klog.KRef(service.Namespace, service.Name))
			return nil, err
		}
		klog.V(3).InfoS("Mapped service to endpoint", "service", klog.KRef(service.Namespace, service.Name), "endpoint", endpoint)
		// TODO: This could spin up a new goroutine to make the outbound connection,
		// and keep accepting inbound traffic.
		outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout)
		if err != nil {
			if isTooManyFDsError(err) {
				panic("Dial failed: " + err.Error())
			}
			klog.ErrorS(err, "Dial failed")
			sessionAffinityReset = true
			continue
		}
		return outConn, nil
	}
	return nil, fmt.Errorf("failed to connect to an endpoint")
}

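tryConnect walks the escalating budgets in endpointDialTimeout and resets session affinity after each failed dial, so the next attempt can pick a different backend. A rough standalone sketch of the escalation pattern (the names here are illustrative, not part of the package):

	var budgets = []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 2 * time.Second}

	func dialWithEscalation(network, addr string) (net.Conn, error) {
		var lastErr error
		for _, d := range budgets {
			conn, err := net.DialTimeout(network, addr, d)
			if err == nil {
				return conn, nil
			}
			lastErr = err // retry with a longer timeout
		}
		return nil, lastErr
	}
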
func (tcp *tcpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serviceInfo, proxier *Proxier) {
	for {
		if !myInfo.isAlive() {
			// The service port was closed or replaced.
			return
		}
		// Block until a connection is made.
		inConn, err := tcp.Accept()
		if err != nil {
			if isTooManyFDsError(err) {
				panic("Accept failed: " + err.Error())
			}

			if isClosedError(err) {
				return
			}
			if !myInfo.isAlive() {
				// The service port was just closed, so the accept failure is expected.
				return
			}
			klog.ErrorS(err, "Accept failed")
			continue
		}
		klog.V(3).InfoS("Accepted TCP connection from remote", "remoteAddress", inConn.RemoteAddr(), "localAddress", inConn.LocalAddr())
		outConn, err := tryConnect(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", proxier)
		if err != nil {
			klog.ErrorS(err, "Failed to connect to balancer")
			inConn.Close()
			continue
		}
		// Spin up an async copy loop.
		go proxyTCP(inConn.(*net.TCPConn), outConn.(*net.TCPConn))
	}
}

// proxyTCP proxies data bi-directionally between in and out.
func proxyTCP(in, out *net.TCPConn) {
	var wg sync.WaitGroup
	wg.Add(2)
	klog.V(4).InfoS("Creating proxy between remote and local addresses",
		"inRemoteAddress", in.RemoteAddr(), "inLocalAddress", in.LocalAddr(), "outLocalAddress", out.LocalAddr(), "outRemoteAddress", out.RemoteAddr())
	go copyBytes("from backend", in, out, &wg)
	go copyBytes("to backend", out, in, &wg)
	wg.Wait()
}

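proxyTCP blocks until both copy directions finish, so callers run it on its own goroutine, exactly as ProxyLoop does above:

	go proxyTCP(inConn.(*net.TCPConn), outConn.(*net.TCPConn))

Note the design choice in copyBytes below: when either direction hits EOF or an error, both conns are closed, which unblocks the peer goroutine's io.Copy and lets wg.Wait return.
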
func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) {
	defer wg.Done()
	klog.V(4).InfoS("Copying remote address bytes", "direction", direction, "sourceRemoteAddress", src.RemoteAddr(), "destinationRemoteAddress", dest.RemoteAddr())
	n, err := io.Copy(dest, src)
	if err != nil {
		if !isClosedError(err) {
			klog.ErrorS(err, "I/O error occurred")
		}
	}
	klog.V(4).InfoS("Copied remote address bytes", "bytes", n, "direction", direction, "sourceRemoteAddress", src.RemoteAddr(), "destinationRemoteAddress", dest.RemoteAddr())
	dest.Close()
	src.Close()
}

// udpProxySocket implements proxySocket. Close() is implemented by net.UDPConn. When Close() is called,
// no new connections are allowed and existing connections are broken.
// TODO: We could lame-duck this ourselves, if it becomes important.
type udpProxySocket struct {
	*net.UDPConn
	port int
}

func (udp *udpProxySocket) ListenPort() int {
	return udp.port
}

func (udp *udpProxySocket) Addr() net.Addr {
	return udp.LocalAddr()
}

// clientCache holds all the known UDP clients that have not timed out.
type clientCache struct {
	mu      sync.Mutex
	clients map[string]net.Conn // addr string -> connection
}

func newClientCache() *clientCache {
	return &clientCache{clients: map[string]net.Conn{}}
}

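A sketch of the intended access pattern for the cache (getBackendConn below is the real consumer; the address literal here is illustrative):

	cache := newClientCache()
	cache.mu.Lock()
	if _, found := cache.clients["10.1.2.3:40000"]; !found {
		// dial the backend and store the conn under the client's address
	}
	cache.mu.Unlock()
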
func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serviceInfo, proxier *Proxier) {
	var buffer [4096]byte // 4KiB should be enough for most whole packets

	for {
		if !myInfo.isAlive() {
			// The service port was closed or replaced.
			break
		}

		// Block until data arrives.
		// TODO: Accumulate a histogram of n or something, to fine tune the buffer size.
		n, cliAddr, err := udp.ReadFrom(buffer[0:])
		if err != nil {
			if e, ok := err.(net.Error); ok {
				if e.Temporary() {
					klog.V(1).ErrorS(err, "ReadFrom had a temporary failure")
					continue
				}
			}
			klog.ErrorS(err, "ReadFrom failed, exiting ProxyLoop")
			break
		}

		// If this is a client we know already, reuse the connection and goroutine.
		svrConn, err := udp.getBackendConn(myInfo.activeClients, cliAddr, proxier, service, myInfo.timeout)
		if err != nil {
			continue
		}
		// TODO: It would be nice to let the goroutine handle this write, but we don't
		// really want to copy the buffer. We could do a pool of buffers or something.
		_, err = svrConn.Write(buffer[0:n])
		if err != nil {
			if !logTimeout(err) {
				klog.ErrorS(err, "Write failed")
				// TODO: Maybe tear down the goroutine for this client/server pair?
			}
			continue
		}
		err = svrConn.SetDeadline(time.Now().Add(myInfo.timeout))
		if err != nil {
			klog.ErrorS(err, "SetDeadline failed")
			continue
		}
	}
}

func (udp *udpProxySocket) getBackendConn(activeClients *clientCache, cliAddr net.Addr, proxier *Proxier, service ServicePortPortalName, timeout time.Duration) (net.Conn, error) {
	activeClients.mu.Lock()
	defer activeClients.mu.Unlock()

	svrConn, found := activeClients.clients[cliAddr.String()]
	if !found {
		// TODO: This could spin up a new goroutine to make the outbound connection,
		// and keep accepting inbound traffic.
		klog.V(3).InfoS("New UDP connection from client", "address", cliAddr)
		var err error
		svrConn, err = tryConnect(service, cliAddr, "udp", proxier)
		if err != nil {
			return nil, err
		}
		if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
			klog.ErrorS(err, "SetDeadline failed")
			return nil, err
		}
		activeClients.clients[cliAddr.String()] = svrConn
		go func(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, service ServicePortPortalName, timeout time.Duration) {
			defer runtime.HandleCrash()
			udp.proxyClient(cliAddr, svrConn, activeClients, service, timeout)
		}(cliAddr, svrConn, activeClients, service, timeout)
	}
	return svrConn, nil
}

// This function is expected to be called as a goroutine.
// TODO: Track and log bytes copied, like TCP
func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, service ServicePortPortalName, timeout time.Duration) {
	defer svrConn.Close()
	var buffer [4096]byte
	for {
		n, err := svrConn.Read(buffer[0:])
		if err != nil {
			if !logTimeout(err) {
				klog.ErrorS(err, "Read failed")
			}
			break
		}

		err = svrConn.SetDeadline(time.Now().Add(timeout))
		if err != nil {
			klog.ErrorS(err, "SetDeadline failed")
			break
		}
		_, err = udp.WriteTo(buffer[0:n], cliAddr)
		if err != nil {
			if !logTimeout(err) {
				klog.ErrorS(err, "WriteTo failed")
			}
			break
		}
	}
	activeClients.mu.Lock()
	delete(activeClients.clients, cliAddr.String())
	activeClients.mu.Unlock()
}

@@ -1,332 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
	"errors"
	"fmt"
	"net"
	"sort"
	"sync"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/proxy"
	"k8s.io/kubernetes/pkg/proxy/util"
	stringslices "k8s.io/utils/strings/slices"
)

var (
	ErrMissingServiceEntry = errors.New("missing service entry")
	ErrMissingEndpoints    = errors.New("missing endpoints")
)

type affinityState struct {
	clientIP string
	//clientProtocol api.Protocol //not yet used
	//sessionCookie  string       //not yet used
	endpoint string
	lastUsed time.Time
}

type affinityPolicy struct {
	affinityType v1.ServiceAffinity
	affinityMap  map[string]*affinityState // map client IP -> affinity info
	ttlSeconds   int
}

// LoadBalancerRR is a round-robin load balancer.
type LoadBalancerRR struct {
	lock     sync.RWMutex
	services map[proxy.ServicePortName]*balancerState
}

// Ensure this implements LoadBalancer.
var _ LoadBalancer = &LoadBalancerRR{}

type balancerState struct {
	endpoints []string // a list of "ip:port" style strings
	index     int      // current index into endpoints
	affinity  affinityPolicy
}

func newAffinityPolicy(affinityType v1.ServiceAffinity, ttlSeconds int) *affinityPolicy {
	return &affinityPolicy{
		affinityType: affinityType,
		affinityMap:  make(map[string]*affinityState),
		ttlSeconds:   ttlSeconds,
	}
}

// NewLoadBalancerRR returns a new LoadBalancerRR.
func NewLoadBalancerRR() *LoadBalancerRR {
	return &LoadBalancerRR{
		services: map[proxy.ServicePortName]*balancerState{},
	}
}

func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) error {
	klog.V(4).InfoS("LoadBalancerRR NewService", "servicePortName", svcPort)
	lb.lock.Lock()
	defer lb.lock.Unlock()
	lb.newServiceInternal(svcPort, affinityType, ttlSeconds)
	return nil
}

// This assumes that lb.lock is already held.
func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) *balancerState {
	if ttlSeconds == 0 {
		// Default to v1.DefaultClientIPServiceAffinitySeconds (three hours) if unspecified; should 0 mean unlimited instead?
		ttlSeconds = int(v1.DefaultClientIPServiceAffinitySeconds)
	}

	if _, exists := lb.services[svcPort]; !exists {
		lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlSeconds)}
		klog.V(4).InfoS("LoadBalancerRR service did not exist, created", "servicePortName", svcPort)
	} else if affinityType != "" {
		lb.services[svcPort].affinity.affinityType = affinityType
	}
	return lb.services[svcPort]
}

func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
	klog.V(4).InfoS("LoadBalancerRR DeleteService", "servicePortName", svcPort)
	lb.lock.Lock()
	defer lb.lock.Unlock()
	delete(lb.services, svcPort)
}

// isSessionAffinity returns true if this service is using some form of session affinity.
func isSessionAffinity(affinity *affinityPolicy) bool {
	// Should never be an empty string, but checking for it to be safe.
	if affinity.affinityType == "" || affinity.affinityType == v1.ServiceAffinityNone {
		return false
	}
	return true
}

// NextEndpoint returns a service endpoint.
// The service endpoint is chosen using the round-robin algorithm.
func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) {
	// Coarse locking is simple. We can get more fine-grained if/when we
	// can prove it matters.
	lb.lock.Lock()
	defer lb.lock.Unlock()

	state, exists := lb.services[svcPort]
	if !exists || state == nil {
		return "", ErrMissingServiceEntry
	}
	if len(state.endpoints) == 0 {
		return "", ErrMissingEndpoints
	}
	klog.V(4).InfoS("NextEndpoint for service", "servicePortName", svcPort, "address", srcAddr, "endpoints", state.endpoints)
	sessionAffinityEnabled := isSessionAffinity(&state.affinity)

	var ipaddr string
	if sessionAffinityEnabled {
		// Caution: don't shadow ipaddr
		var err error
		ipaddr, _, err = net.SplitHostPort(srcAddr.String())
		if err != nil {
			return "", fmt.Errorf("malformed source address %q: %v", srcAddr.String(), err)
		}
		if !sessionAffinityReset {
			sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
			if exists && int(time.Since(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
				// Affinity wins.
				endpoint := sessionAffinity.endpoint
				sessionAffinity.lastUsed = time.Now()
				klog.V(4).InfoS("NextEndpoint for service from IP with sessionAffinity", "servicePortName", svcPort, "IP", ipaddr, "sessionAffinity", sessionAffinity, "endpoint", endpoint)
				return endpoint, nil
			}
		}
	}
	// Take the next endpoint.
	endpoint := state.endpoints[state.index]
	state.index = (state.index + 1) % len(state.endpoints)

	if sessionAffinityEnabled {
		affinity := state.affinity.affinityMap[ipaddr]
		if affinity == nil {
			affinity = new(affinityState) //&affinityState{ipaddr, "TCP", "", endpoint, time.Now()}
			state.affinity.affinityMap[ipaddr] = affinity
		}
		affinity.lastUsed = time.Now()
		affinity.endpoint = endpoint
		affinity.clientIP = ipaddr
		klog.V(4).InfoS("Updated affinity key", "IP", ipaddr, "affinityState", state.affinity.affinityMap[ipaddr])
	}

	return endpoint, nil
}

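An illustrative walk through the round-robin path (hedged: the initial endpoint order depends on ShuffleStrings, so only the alternation is deterministic; metav1 here is the usual k8s.io/apimachinery/pkg/apis/meta/v1 alias, imported in the test file rather than this one):

	lb := NewLoadBalancerRR()
	svc := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "ns", Name: "svc"}, Port: "p"}
	lb.OnEndpointsAdd(&v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "svc"},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "10.0.0.1"}, {IP: "10.0.0.2"}},
			Ports:     []v1.EndpointPort{{Name: "p", Port: 80}},
		}},
	})
	first, _ := lb.NextEndpoint(svc, nil, false)  // e.g. "10.0.0.1:80"
	second, _ := lb.NextEndpoint(svc, nil, false) // the other endpoint
	third, _ := lb.NextEndpoint(svc, nil, false)  // wraps around to first again
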
// removeSessionAffinityByEndpoint removes any session affinity records associated with a particular endpoint (for example when a pod goes down).
func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) {
	for _, affinity := range state.affinity.affinityMap {
		if affinity.endpoint == endpoint {
			klog.V(4).InfoS("Removing client from affinityMap for service", "endpoint", affinity.endpoint, "servicePortName", svcPort)
			delete(state.affinity.affinityMap, affinity.clientIP)
		}
	}
}

// Loop through the valid endpoints and then the endpoints associated with the Load Balancer.
// Then remove any session affinity records that are not in both lists.
// This assumes the lb.lock is held.
func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEndpoints []string) {
	allEndpoints := map[string]int{}
	for _, newEndpoint := range newEndpoints {
		allEndpoints[newEndpoint] = 1
	}
	state, exists := lb.services[svcPort]
	if !exists {
		return
	}
	for _, existingEndpoint := range state.endpoints {
		allEndpoints[existingEndpoint]++
	}
	for mKey, mVal := range allEndpoints {
		if mVal == 1 {
			klog.V(2).InfoS("Delete endpoint for service", "endpoint", mKey, "servicePortName", svcPort)
			removeSessionAffinityByEndpoint(state, svcPort, mKey)
		}
	}
}

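The counting trick above is a small set difference; a standalone sketch of how the counts fall out:

	counts := map[string]int{}
	for _, e := range []string{"a", "b"} { // new endpoints
		counts[e] = 1
	}
	for _, e := range []string{"b", "c"} { // existing endpoints
		counts[e]++
	}
	// counts: "a"=1, "b"=2, "c"=1. Entries equal to 1 occur in only one list,
	// so affinity records are purged for "c" (removed); "a" is new and has no
	// records yet, making its purge a harmless no-op.
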
func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *v1.Endpoints) {
	portsToEndpoints := util.BuildPortsToEndpointsMap(endpoints)

	lb.lock.Lock()
	defer lb.lock.Unlock()

	for portname := range portsToEndpoints {
		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
		newEndpoints := portsToEndpoints[portname]
		state, exists := lb.services[svcPort]

		if !exists || state == nil || len(newEndpoints) > 0 {
			klog.V(1).InfoS("LoadBalancerRR: Setting endpoints for service", "servicePortName", svcPort, "endpoints", newEndpoints)
			lb.updateAffinityMap(svcPort, newEndpoints)
			// OnEndpointsAdd can be called without NewService being called externally.
			// To be safe we will call it here. A new service will only be created
			// if one does not already exist. The affinity will be updated
			// later, once NewService is called.
			state = lb.newServiceInternal(svcPort, v1.ServiceAffinity(""), 0)
			state.endpoints = util.ShuffleStrings(newEndpoints)

			// Reset the round-robin index.
			state.index = 0
		}
	}
}

func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) {
	portsToEndpoints := util.BuildPortsToEndpointsMap(endpoints)
	oldPortsToEndpoints := util.BuildPortsToEndpointsMap(oldEndpoints)
	registeredEndpoints := make(map[proxy.ServicePortName]bool)

	lb.lock.Lock()
	defer lb.lock.Unlock()

	for portname := range portsToEndpoints {
		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
		newEndpoints := portsToEndpoints[portname]
		state, exists := lb.services[svcPort]

		curEndpoints := []string{}
		if state != nil {
			curEndpoints = state.endpoints
		}

		if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(stringslices.Clone(curEndpoints), newEndpoints) {
			klog.V(1).InfoS("LoadBalancerRR: Setting endpoints for service", "servicePortName", svcPort, "endpoints", newEndpoints)
			lb.updateAffinityMap(svcPort, newEndpoints)
			// OnEndpointsUpdate can be called without NewService being called externally.
			// To be safe we will call it here. A new service will only be created
			// if one does not already exist. The affinity will be updated
			// later, once NewService is called.
			state = lb.newServiceInternal(svcPort, v1.ServiceAffinity(""), 0)
			state.endpoints = util.ShuffleStrings(newEndpoints)

			// Reset the round-robin index.
			state.index = 0
		}
		registeredEndpoints[svcPort] = true
	}

	for portname := range oldPortsToEndpoints {
		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
		if _, exists := registeredEndpoints[svcPort]; !exists {
			klog.V(2).InfoS("LoadBalancerRR: Removing endpoints for service", "servicePortName", svcPort)
			// Reset but don't delete. Guard against a missing entry to avoid a nil dereference.
			if state, ok := lb.services[svcPort]; ok {
				state.endpoints = []string{}
				state.index = 0
				state.affinity.affinityMap = map[string]*affinityState{}
			}
		}
	}
}

func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *v1.Endpoints) {
	portsToEndpoints := util.BuildPortsToEndpointsMap(endpoints)

	lb.lock.Lock()
	defer lb.lock.Unlock()

	for portname := range portsToEndpoints {
		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
		klog.V(2).InfoS("LoadBalancerRR: Removing endpoints for service", "servicePortName", svcPort)
		// If the service is still around, reset but don't delete.
		if state, ok := lb.services[svcPort]; ok {
			state.endpoints = []string{}
			state.index = 0
			state.affinity.affinityMap = map[string]*affinityState{}
		}
	}
}

func (lb *LoadBalancerRR) OnEndpointsSynced() {
}

// Tests whether two slices are equivalent. This sorts both slices in-place.
|
|
||||||
func slicesEquiv(lhs, rhs []string) bool {
|
|
||||||
if len(lhs) != len(rhs) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
sort.Strings(lhs)
|
|
||||||
sort.Strings(rhs)
|
|
||||||
return stringslices.Equal(lhs, rhs)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortName) {
|
|
||||||
lb.lock.Lock()
|
|
||||||
defer lb.lock.Unlock()
|
|
||||||
|
|
||||||
state, exists := lb.services[svcPort]
|
|
||||||
if !exists {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for ip, affinity := range state.affinity.affinityMap {
|
|
||||||
if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
|
|
||||||
klog.V(4).InfoS("Removing client from affinityMap for service", "IP", affinity.clientIP, "servicePortName", svcPort)
|
|
||||||
delete(state.affinity.affinityMap, ip)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
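For readers skimming this removal, the bookkeeping above boils down to a per-service endpoint list with a wrap-around index that is reset only when the endpoint set actually changes. A minimal, self-contained sketch of the same idea (names such as `rrState`, `next`, and `equivalent` are illustrative, not part of the deleted proxy code):

```go
package main

import (
	"errors"
	"fmt"
	"sort"
)

// rrState mirrors the per-service state above: an endpoint list plus a
// wrap-around round-robin index.
type rrState struct {
	endpoints []string
	index     int
}

// next returns the current endpoint and advances the index, wrapping at
// the end of the list.
func (s *rrState) next() (string, error) {
	if len(s.endpoints) == 0 {
		return "", errors.New("no endpoints available")
	}
	ep := s.endpoints[s.index]
	s.index = (s.index + 1) % len(s.endpoints)
	return ep, nil
}

// equivalent reports whether two endpoint sets match ignoring order,
// analogous to slicesEquiv above, but sorting copies rather than the inputs.
func equivalent(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	x := append([]string(nil), a...)
	y := append([]string(nil), b...)
	sort.Strings(x)
	sort.Strings(y)
	for i := range x {
		if x[i] != y[i] {
			return false
		}
	}
	return true
}

func main() {
	s := &rrState{endpoints: []string{"10.0.0.1:80", "10.0.0.2:80"}}
	for i := 0; i < 3; i++ {
		ep, _ := s.next()
		fmt.Println(ep) // 10.0.0.1:80, 10.0.0.2:80, 10.0.0.1:80
	}
	// An order-only difference is not a real change, so no reset is needed.
	fmt.Println(equivalent(s.endpoints, []string{"10.0.0.2:80", "10.0.0.1:80"})) // true
}
```

Skipping the reset on order-only differences is what keeps existing round-robin positions (and sticky sessions) stable across no-op endpoint resyncs.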
@@ -1,678 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
	"net"
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/proxy"
)

func TestLoadBalanceFailsWithNoEndpoints(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "does-not-exist"}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil {
		t.Errorf("Didn't fail with non-existent service")
	}
	if len(endpoint) != 0 {
		t.Errorf("Got an endpoint")
	}
}

func expectEndpoint(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
	endpoint, err := loadBalancer.NextEndpoint(service, netaddr, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
	}
	if endpoint != expected {
		t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
	}
}

func expectEndpointWithSessionAffinityReset(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
	endpoint, err := loadBalancer.NextEndpoint(service, netaddr, true)
	if err != nil {
		t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
	}
	if endpoint != expected {
		t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
	}
}

func TestLoadBalanceWorksWithSingleEndpoint(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpoints := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "endpoint1"}},
			Ports:     []v1.EndpointPort{{Name: "p", Port: 40}},
		}},
	}
	loadBalancer.OnEndpointsAdd(endpoints)
	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
}

func stringsInSlice(haystack []string, needles ...string) bool {
	for _, needle := range needles {
		found := false
		for i := range haystack {
			if haystack[i] == needle {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}

func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpoints := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
			Ports:     []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
		}},
	}
	loadBalancer.OnEndpointsAdd(endpoints)

	shuffledEndpoints := loadBalancer.services[service].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint:1", "endpoint:2", "endpoint:3") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil)
}

func TestLoadBalanceWorksWithMultipleEndpointsMultiplePorts(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
	endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpoints := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}},
			},
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint3"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints)

	shuffledEndpoints := loadBalancer.services[serviceP].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:1", "endpoint3:3") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)

	shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:2", "endpoint2:2", "endpoint3:4") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
}

func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"}
	endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpointsv1 := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint1"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}},
			},
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint2"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}},
			},
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint3"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpointsv1)

	shuffledEndpoints := loadBalancer.services[serviceP].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:2", "endpoint3:3") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)

	shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint1:10", "endpoint2:20", "endpoint3:30") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)

	// Then update the configuration with one fewer endpoint; make sure
	// we start at the beginning again.
	endpointsv2 := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint4"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}},
			},
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint5"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}},
			},
		},
	}
	loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)

	shuffledEndpoints = loadBalancer.services[serviceP].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint4:4", "endpoint5:5") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil)

	shuffledEndpoints = loadBalancer.services[serviceQ].endpoints
	if !stringsInSlice(shuffledEndpoints, "endpoint4:40", "endpoint5:50") {
		t.Errorf("did not find expected endpoints: %v", shuffledEndpoints)
	}
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil)

	// Clear endpoints.
	endpointsv3 := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil}
	loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)

	endpoint, err = loadBalancer.NextEndpoint(serviceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
}

func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	fooServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
	barServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: "p"}
	endpoint, err := loadBalancer.NextEndpoint(fooServiceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	endpoints1 := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: fooServiceP.Name, Namespace: fooServiceP.Namespace},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 123}},
			},
		},
	}
	endpoints2 := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: barServiceP.Name, Namespace: barServiceP.Namespace},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}},
				Ports:     []v1.EndpointPort{{Name: "p", Port: 456}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints1)
	loadBalancer.OnEndpointsAdd(endpoints2)
	shuffledFooEndpoints := loadBalancer.services[fooServiceP].endpoints
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil)

	shuffledBarEndpoints := loadBalancer.services[barServiceP].endpoints
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)

	// Then update the configuration by removing foo.
	loadBalancer.OnEndpointsDelete(endpoints1)
	endpoint, err = loadBalancer.NextEndpoint(fooServiceP, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// But bar is still there, and we continue RR from where we left off.
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil)
	expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil)
}

func TestStickyLoadBalanceWorksWithNewServiceCalledFirst(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// Call NewService() before OnEndpointsUpdate().
	loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
	endpoints := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{
			{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
			{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
			{Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, Ports: []v1.EndpointPort{{Port: 3}}},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints)

	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}

	ep1, err := loadBalancer.NextEndpoint(service, client1, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep1, client1)

	ep2, err := loadBalancer.NextEndpoint(service, client2, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep2, client2)

	ep3, err := loadBalancer.NextEndpoint(service, client3, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep3, client3)

	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
}

func TestStickyLoadBalanceWorksWithNewServiceCalledSecond(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// Call OnEndpointsUpdate() before NewService().
	endpoints := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{
			{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
			{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints)
	loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))

	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}

	ep1, err := loadBalancer.NextEndpoint(service, client1, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep1, client1)

	ep2, err := loadBalancer.NextEndpoint(service, client2, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep2, client2)

	ep3, err := loadBalancer.NextEndpoint(service, client3, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep3, client3)

	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
	expectEndpoint(t, loadBalancer, service, ep1, client1)
	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpoint(t, loadBalancer, service, ep3, client3)
}

func TestStickyLoadBalanceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
	client4 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 4), Port: 0}
	client5 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 5), Port: 0}
	client6 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 6), Port: 0}
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
	endpointsv1 := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
				Ports:     []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpointsv1)
	shuffledEndpoints := loadBalancer.services[service].endpoints
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	client1Endpoint := shuffledEndpoints[0]
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	client2Endpoint := shuffledEndpoints[1]
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
	client3Endpoint := shuffledEndpoints[2]

	endpointsv2 := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
				Ports:     []v1.EndpointPort{{Port: 1}, {Port: 2}},
			},
		},
	}
	loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
	shuffledEndpoints = loadBalancer.services[service].endpoints
	if client1Endpoint == "endpoint:3" {
		client1Endpoint = shuffledEndpoints[0]
	} else if client2Endpoint == "endpoint:3" {
		client2Endpoint = shuffledEndpoints[0]
	} else if client3Endpoint == "endpoint:3" {
		client3Endpoint = shuffledEndpoints[0]
	}
	expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
	expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
	expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)

	endpointsv3 := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
				Ports:     []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}},
			},
		},
	}
	loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
	shuffledEndpoints = loadBalancer.services[service].endpoints
	expectEndpoint(t, loadBalancer, service, client1Endpoint, client1)
	expectEndpoint(t, loadBalancer, service, client2Endpoint, client2)
	expectEndpoint(t, loadBalancer, service, client3Endpoint, client3)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client4)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client5)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client6)
}

func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
	endpointsv1 := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
				Ports:     []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpointsv1)
	shuffledEndpoints := loadBalancer.services[service].endpoints
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	// Then update the configuration with one fewer endpoint; make sure
	// we start at the beginning again.
	endpointsv2 := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
				Ports:     []v1.EndpointPort{{Port: 4}, {Port: 5}},
			},
		},
	}
	loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
	shuffledEndpoints = loadBalancer.services[service].endpoints
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)

	// Clear endpoints.
	endpointsv3 := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
	loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)

	endpoint, err = loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
}

func TestStickyLoadBalanceWorksWithServiceRemoval(t *testing.T) {
	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
	loadBalancer := NewLoadBalancerRR()
	fooService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(fooService, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}
	loadBalancer.NewService(fooService, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
	endpoints1 := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: fooService.Name, Namespace: fooService.Namespace},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
				Ports:     []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
			},
		},
	}
	barService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: ""}
	loadBalancer.NewService(barService, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
	endpoints2 := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: barService.Name, Namespace: barService.Namespace},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
				Ports:     []v1.EndpointPort{{Port: 4}, {Port: 5}},
			},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints1)
	loadBalancer.OnEndpointsAdd(endpoints2)

	shuffledFooEndpoints := loadBalancer.services[fooService].endpoints
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
	expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)

	shuffledBarEndpoints := loadBalancer.services[barService].endpoints
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)

	// Then update the configuration by removing foo.
	loadBalancer.OnEndpointsDelete(endpoints1)
	endpoint, err = loadBalancer.NextEndpoint(fooService, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// But bar is still there, and we continue RR from where we left off.
	shuffledBarEndpoints = loadBalancer.services[barService].endpoints
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
	expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
}

func TestStickyLoadBalanceWorksWithEndpointFails(t *testing.T) {
	loadBalancer := NewLoadBalancerRR()
	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
	if err == nil || len(endpoint) != 0 {
		t.Errorf("Didn't fail with non-existent service")
	}

	// Call NewService() before OnEndpointsUpdate().
	loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
	endpoints := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
		Subsets: []v1.EndpointSubset{
			{Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
			{Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
			{Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, Ports: []v1.EndpointPort{{Port: 3}}},
		},
	}
	loadBalancer.OnEndpointsAdd(endpoints)

	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}

	ep1, err := loadBalancer.NextEndpoint(service, client1, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}

	ep2, err := loadBalancer.NextEndpoint(service, client2, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}

	ep3, err := loadBalancer.NextEndpoint(service, client3, false)
	if err != nil {
		t.Errorf("Didn't find a service for %s: %v", service, err)
	}

	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client1)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client1)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)

	expectEndpoint(t, loadBalancer, service, ep2, client2)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
	expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
}
@@ -1,35 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package winuserspace

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

// ServicePortPortalName carries a namespace + name + portname + portalip. This is the unique
// identifier for a Windows service port portal.
type ServicePortPortalName struct {
	types.NamespacedName
	Port         string
	PortalIPName string
}

func (spn ServicePortPortalName) String() string {
	return fmt.Sprintf("%s:%s:%s", spn.NamespacedName.String(), spn.Port, spn.PortalIPName)
}
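As a quick illustration of this identifier format: `types.NamespacedName` renders as `namespace/name`, so the full portal name renders as `namespace/name:port:portalIP`. A standalone sketch (the lowercase mirror type and all values are hypothetical, for demonstration only):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

// servicePortPortalName mirrors the ServicePortPortalName type above.
type servicePortPortalName struct {
	types.NamespacedName
	Port         string
	PortalIPName string
}

// String composes the three parts exactly as the deleted method does.
func (spn servicePortPortalName) String() string {
	return fmt.Sprintf("%s:%s:%s", spn.NamespacedName.String(), spn.Port, spn.PortalIPName)
}

func main() {
	spn := servicePortPortalName{
		NamespacedName: types.NamespacedName{Namespace: "default", Name: "web"},
		Port:           "http",
		PortalIPName:   "10.96.0.4",
	}
	fmt.Println(spn) // prints: default/web:http:10.96.0.4
}
```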
@@ -1,8 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners

reviewers:
  - sig-network-reviewers
approvers:
  - sig-network-approvers
labels:
  - sig/network
@@ -1,18 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package netsh provides an interface and implementations for running Windows netsh commands.
package netsh // import "k8s.io/kubernetes/pkg/util/netsh"
@@ -1,209 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package netsh

import (
	"fmt"
	"net"
	"os"
	"strings"
	"time"

	"k8s.io/klog/v2"
	utilexec "k8s.io/utils/exec"
)

// Interface is an injectable interface for running netsh commands. Implementations must be goroutine-safe.
type Interface interface {
	// EnsurePortProxyRule checks if the specified redirect exists and, if not, creates it.
	EnsurePortProxyRule(args []string) (bool, error)
	// DeletePortProxyRule deletes the specified portproxy rule. If the rule did not exist, an error is returned.
	DeletePortProxyRule(args []string) error
	// EnsureIPAddress checks if the specified IP address is added to the interface returned by GetInterfaceToAddIP and, if not, adds it. If the address already existed, it returns true.
	EnsureIPAddress(args []string, ip net.IP) (bool, error)
	// DeleteIPAddress checks if the specified IP address is present and, if so, deletes it.
	DeleteIPAddress(args []string) error
	// Restore runs `netsh exec` to restore portproxy or addresses using a file.
	// TODO Check if this is required, most likely not
	Restore(args []string) error

	// GetInterfaceToAddIP returns the interface name to which the Service IP needs to be added.
	// The IP address needs to be added for netsh portproxy to redirect traffic.
	// It reads the environment variable INTERFACE_TO_ADD_SERVICE_IP; if that is not defined, "vEthernet (HNS Internal NIC)" is returned.
	GetInterfaceToAddIP() string
}

const (
	cmdNetsh string = "netsh"
)

// runner implements Interface in terms of exec("netsh").
type runner struct {
	exec utilexec.Interface
}

// New returns a new Interface which will exec netsh.
func New(exec utilexec.Interface) Interface {
	runner := &runner{
		exec: exec,
	}
	return runner
}

// EnsurePortProxyRule checks if the specified redirect exists and, if not, creates it.
func (runner *runner) EnsurePortProxyRule(args []string) (bool, error) {
	klog.V(4).InfoS("Running netsh interface portproxy add v4tov4", "arguments", args)
	out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput()

	if err == nil {
		return true, nil
	}
	if ee, ok := err.(utilexec.ExitError); ok {
		// netsh uses exit(0) to indicate success of the operation,
		// as opposed to, for example, a malformed command line.
		if ee.Exited() && ee.ExitStatus() != 0 {
			return false, nil
		}
	}
	return false, fmt.Errorf("error checking portproxy rule: %v: %s", err, out)
}

// DeletePortProxyRule deletes the specified portproxy rule. If the rule did not exist, an error is returned.
func (runner *runner) DeletePortProxyRule(args []string) error {
	klog.V(4).InfoS("Running netsh interface portproxy delete v4tov4", "arguments", args)
	out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput()

	if err == nil {
		return nil
	}
	if ee, ok := err.(utilexec.ExitError); ok {
		// netsh uses exit(0) to indicate success of the operation,
		// as opposed to, for example, a malformed command line.
		if ee.Exited() && ee.ExitStatus() == 0 {
			return nil
		}
	}
	return fmt.Errorf("error deleting portproxy rule: %v: %s", err, out)
}

// EnsureIPAddress checks if the specified IP address is added to the interface identified by the environment variable INTERFACE_TO_ADD_SERVICE_IP and, if not, adds it. If the address already existed, it returns true.
func (runner *runner) EnsureIPAddress(args []string, ip net.IP) (bool, error) {
	// Check if the IP address already exists.
	intName := runner.GetInterfaceToAddIP()
	argsShowAddress := []string{
		"interface", "ipv4", "show", "address",
		"name=" + intName,
	}

	ipToCheck := ip.String()

	exists, _ := checkIPExists(ipToCheck, argsShowAddress, runner)
	if exists {
		klog.V(4).InfoS("Not adding IP address, as it already exists", "IP", ipToCheck)
		return true, nil
	}

	// The IP address is not yet added; add it now.
	klog.V(4).InfoS("Running netsh interface IPv4 add address", "IP", args)
	out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput()

	if err == nil {
		// Once the IP address is added, it takes a bit to initialize and show up when querying for it.
		// Query all the IP addresses and see if the one we added is present.
		// PS: We are using netsh interface IPv4 show address here to query all the IP addresses, instead of
		// querying net.InterfaceAddrs(), as the latter returns the IP address as soon as it is added even though it is uninitialized.
		klog.V(3).InfoS("Waiting until IP is added to the network adapter", "IP", ipToCheck)
		for {
			if exists, _ := checkIPExists(ipToCheck, argsShowAddress, runner); exists {
				return true, nil
			}
			time.Sleep(500 * time.Millisecond)
		}
	}
	if ee, ok := err.(utilexec.ExitError); ok {
		// netsh uses exit(0) to indicate success of the operation,
		// as opposed to, for example, a malformed command line.
		if ee.Exited() && ee.ExitStatus() != 0 {
			return false, nil
		}
	}
	return false, fmt.Errorf("error adding IPv4 address: %v: %s", err, out)
}

// DeleteIPAddress checks if the specified IP address is present and, if so, deletes it.
func (runner *runner) DeleteIPAddress(args []string) error {
	klog.V(4).InfoS("Running netsh interface IPv4 delete address", "IP", args)
	out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput()

	if err == nil {
		return nil
	}
	if ee, ok := err.(utilexec.ExitError); ok {
		// netsh uses exit(0) to indicate success of the operation,
		// as opposed to, for example, a malformed command line.
		if ee.Exited() && ee.ExitStatus() == 0 {
			return nil
		}
	}
	return fmt.Errorf("error deleting IPv4 address: %v: %s", err, out)
}

// GetInterfaceToAddIP returns the interface name to which the Service IP needs to be added.
// The IP address needs to be added for netsh portproxy to redirect traffic.
// It reads the environment variable INTERFACE_TO_ADD_SERVICE_IP; if that is not defined, "vEthernet (HNS Internal NIC)" is returned.
func (runner *runner) GetInterfaceToAddIP() string {
	if iface := os.Getenv("INTERFACE_TO_ADD_SERVICE_IP"); len(iface) > 0 {
		return iface
	}
	return "vEthernet (HNS Internal NIC)"
}

// Restore is part of Interface.
func (runner *runner) Restore(args []string) error {
	return nil
}

// checkIPExists checks if an IP address exists in 'netsh interface IPv4 show address' output.
func checkIPExists(ipToCheck string, args []string, runner *runner) (bool, error) {
	ipAddress, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput()
	if err != nil {
		return false, err
	}
	ipAddressString := string(ipAddress[:])
	klog.V(3).InfoS("Searching for IP in IP dump", "IP", ipToCheck, "IPDump", ipAddressString)
	showAddressArray := strings.Split(ipAddressString, "\n")
	for _, showAddress := range showAddressArray {
		if strings.Contains(showAddress, "IP") {
			ipFromNetsh := getIP(showAddress)
			if ipFromNetsh == ipToCheck {
				return true, nil
			}
		}
	}

	return false, nil
}

// getIP extracts the IP from a showAddress line (e.g. "IP Address: 10.96.0.4").
func getIP(showAddress string) string {
	list := strings.SplitN(showAddress, ":", 2)
	if len(list) != 2 {
		return ""
	}
	return strings.TrimSpace(list[1])
}
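The address detection above is plain text parsing of `netsh` output: split each line once on `:` and trim the remainder. A minimal standalone sketch of that approach (the sample dump is illustrative, not captured netsh output):

```go
package main

import (
	"fmt"
	"strings"
)

// getIP extracts the address from a line such as "IP Address: 10.96.0.4",
// mirroring the helper above: split once on ':' and trim whitespace.
func getIP(showAddress string) string {
	list := strings.SplitN(showAddress, ":", 2)
	if len(list) != 2 {
		return ""
	}
	return strings.TrimSpace(list[1])
}

func main() {
	dump := "IP Address: 10.96.0.4\nIP Address: 10.96.0.5"
	for _, line := range strings.Split(dump, "\n") {
		if strings.Contains(line, "IP") {
			fmt.Println(getIP(line)) // 10.96.0.4, then 10.96.0.5
		}
	}
}
```

Using SplitN with a limit of 2 keeps any further colons inside the value intact, which is why the helper tolerates arbitrary text after the first separator.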
@ -1,483 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2017 The Kubernetes Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package netsh
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"k8s.io/utils/exec"
|
|
||||||
fakeexec "k8s.io/utils/exec/testing"
|
|
||||||
|
|
||||||
"errors"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func fakeCommonRunner() *runner {
|
|
||||||
fakeCmd := fakeexec.FakeCmd{
|
|
||||||
CombinedOutputScript: []fakeexec.FakeAction{
|
|
||||||
// Success
|
|
||||||
func() ([]byte, []byte, error) {
|
|
||||||
return []byte{}, nil, nil
|
|
||||||
},
|
|
||||||
// utilexec.ExitError exists, and status is not 0
|
|
||||||
func() ([]byte, []byte, error) {
|
|
||||||
return nil, nil, &fakeexec.FakeExitError{Status: 1}
|
|
||||||
},
|
|
||||||
// utilexec.ExitError exists, and status is 0
|
|
||||||
func() ([]byte, []byte, error) {
|
|
||||||
return nil, nil, &fakeexec.FakeExitError{Status: 0}
|
|
||||||
},
|
|
||||||
// other error exists
|
|
||||||
func() ([]byte, []byte, error) {
|
|
||||||
return nil, nil, errors.New("not ExitError")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
return &runner{
|
|
||||||
exec: &fakeexec.FakeExec{
|
|
||||||
CommandScript: []fakeexec.FakeCommandAction{
|
|
||||||
func(cmd string, args ...string) exec.Cmd {
|
|
||||||
return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
|
|
||||||
},
|
|
||||||
func(cmd string, args ...string) exec.Cmd {
|
|
||||||
return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
|
|
||||||
},
|
|
||||||
func(cmd string, args ...string) exec.Cmd {
|
|
||||||
return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
|
|
||||||
},
|
|
||||||
func(cmd string, args ...string) exec.Cmd {
|
|
||||||
return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEnsurePortProxyRule(t *testing.T) {
|
|
||||||
runner := fakeCommonRunner()
|
|
||||||
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
arguments []string
|
|
||||||
expectedResult bool
|
|
||||||
expectedError bool
|
|
||||||
}{
|
|
||||||
{"Success", []string{"ensure-port-proxy-rule"}, true, false},
|
|
||||||
{"utilexec.ExitError exists, and status is not 0", []string{"ensure-port-proxy-rule"}, false, false},
|
|
||||||
{"utilexec.ExitError exists, and status is 0", []string{"ensure-port-proxy-rule"}, false, true},
|
|
||||||
{"other error exists", []string{"ensure-port-proxy-rule"}, false, true},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, test := range tests {
|
|
||||||
result, err := runner.EnsurePortProxyRule(test.arguments)
|
|
||||||
if test.expectedError {
|
|
||||||
assert.Errorf(t, err, "Failed to test: %s", test.name)
|
|
||||||
} else {
|
|
||||||
if err != nil {
|
|
||||||
assert.NoErrorf(t, err, "Failed to test: %s", test.name)
|
|
||||||
} else {
|
|
||||||
assert.EqualValuesf(t, test.expectedResult, result, "Failed to test: %s", test.name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDeletePortProxyRule(t *testing.T) {
|
|
||||||
runner := fakeCommonRunner()
|
|
||||||
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
arguments []string
|
|
||||||
expectedError bool
|
|
||||||
}{
|
|
||||||
{"Success", []string{"delete-port-proxy-rule"}, false},
|
|
||||||
{"utilexec.ExitError exists, and status is not 0", []string{"delete-port-proxy-rule"}, true},
|
|
||||||
{"utilexec.ExitError exists, and status is 0", []string{"delete-port-proxy-rule"}, false},
|
|
||||||
{"other error exists", []string{"delete-port-proxy-rule"}, true},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, test := range tests {
|
|
||||||
err := runner.DeletePortProxyRule(test.arguments)
|
|
||||||
if test.expectedError {
|
|
||||||
assert.Errorf(t, err, "Failed to test: %s", test.name)
|
|
||||||
} else {
|
|
||||||
assert.NoErrorf(t, err, "Failed to test: %s", test.name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}

func TestEnsureIPAddress(t *testing.T) {
	tests := []struct {
		name           string
		arguments      []string
		ip             net.IP
		fakeCmdAction  []fakeexec.FakeCommandAction
		expectedError  bool
		expectedResult bool
	}{
		{
			"IP address exists",
			[]string{"ensure-ip-address"},
			net.IPv4(10, 10, 10, 20),
			[]fakeexec.FakeCommandAction{
				func(cmd string, args ...string) exec.Cmd {
					return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
						CombinedOutputScript: []fakeexec.FakeAction{
							// IP address exists
							func() ([]byte, []byte, error) {
								return []byte("IP Address:10.10.10.10\nIP Address:10.10.10.20"), nil, nil
							},
						},
					}, cmd, args...)
				},
			},
			false,
			true,
		},
		{
			"IP address does not exist, but setting it succeeds (found on the second check)",
			[]string{"ensure-ip-address"},
			net.IPv4(10, 10, 10, 20),
			[]fakeexec.FakeCommandAction{
				func(cmd string, args ...string) exec.Cmd {
					return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
						CombinedOutputScript: []fakeexec.FakeAction{
							// IP address does not exist
							func() ([]byte, []byte, error) {
								return []byte("IP Address:10.10.10.10"), nil, nil
							},
						},
					}, cmd, args...)
				},
				func(cmd string, args ...string) exec.Cmd {
					return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
						CombinedOutputScript: []fakeexec.FakeAction{
							// Setting the IP succeeds
							func() ([]byte, []byte, error) {
								return []byte(""), nil, nil
							},
						},
					}, cmd, args...)
				},
				func(cmd string, args ...string) exec.Cmd {
					return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
						CombinedOutputScript: []fakeexec.FakeAction{
							// IP address still does not exist
							func() ([]byte, []byte, error) {
								return []byte("IP Address:10.10.10.10"), nil, nil
							},
						},
					}, cmd, args...)
				},
				func(cmd string, args ...string) exec.Cmd {
					return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
						CombinedOutputScript: []fakeexec.FakeAction{
							// IP address exists
							func() ([]byte, []byte, error) {
								return []byte("IP Address:10.10.10.10\nIP Address:10.10.10.20"), nil, nil
							},
						},
					}, cmd, args...)
				},
			},
			false,
			true,
		},
		{
			"IP address does not exist; utilexec.ExitError occurs and status is not 0",
			[]string{"ensure-ip-address"},
			net.IPv4(10, 10, 10, 20),
			[]fakeexec.FakeCommandAction{
				func(cmd string, args ...string) exec.Cmd {
					return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
						CombinedOutputScript: []fakeexec.FakeAction{
							// IP address does not exist
							func() ([]byte, []byte, error) {
								return []byte("IP Address:10.10.10.10"), nil, nil
							},
						},
					}, cmd, args...)
				},
				func(cmd string, args ...string) exec.Cmd {
					return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
						CombinedOutputScript: []fakeexec.FakeAction{
							// Setting the IP fails: utilexec.ExitError with non-zero status
							func() ([]byte, []byte, error) {
								return nil, nil, &fakeexec.FakeExitError{Status: 1}
							},
						},
					}, cmd, args...)
				},
			},
			false,
			false,
		},
		{
			"IP address does not exist; utilexec.ExitError occurs and status is 0",
			[]string{"ensure-ip-address"},
			net.IPv4(10, 10, 10, 20),
			[]fakeexec.FakeCommandAction{
				func(cmd string, args ...string) exec.Cmd {
					return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
						CombinedOutputScript: []fakeexec.FakeAction{
							// IP address does not exist
							func() ([]byte, []byte, error) {
								return []byte("IP Address:10.10.10.10"), nil, nil
							},
						},
					}, cmd, args...)
				},
				func(cmd string, args ...string) exec.Cmd {
					return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
						CombinedOutputScript: []fakeexec.FakeAction{
							// Setting the IP fails: utilexec.ExitError with status 0
							func() ([]byte, []byte, error) {
								return nil, nil, &fakeexec.FakeExitError{Status: 0}
							},
						},
					}, cmd, args...)
				},
			},
			true,
			false,
		},
		{
			"IP address does not exist, and the error is not utilexec.ExitError",
			[]string{"ensure-ip-address"},
			net.IPv4(10, 10, 10, 20),
			[]fakeexec.FakeCommandAction{
				func(cmd string, args ...string) exec.Cmd {
					return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
						CombinedOutputScript: []fakeexec.FakeAction{
							// IP address does not exist
							func() ([]byte, []byte, error) {
								return []byte("IP Address:10.10.10.10"), nil, nil
							},
						},
					}, cmd, args...)
				},
				func(cmd string, args ...string) exec.Cmd {
					return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
						CombinedOutputScript: []fakeexec.FakeAction{
							// Setting the IP fails with some other error
							func() ([]byte, []byte, error) {
								return nil, nil, errors.New("not ExitError")
							},
						},
					}, cmd, args...)
				},
			},
			true,
			false,
		},
	}

	for _, test := range tests {
		runner := New(&fakeexec.FakeExec{CommandScript: test.fakeCmdAction})
		result, err := runner.EnsureIPAddress(test.arguments, test.ip)
		if test.expectedError {
			assert.Errorf(t, err, "Failed to test: %s", test.name)
		} else {
			if err != nil {
				assert.NoErrorf(t, err, "Failed to test: %s", test.name)
			} else {
				assert.EqualValuesf(t, test.expectedResult, result, "Failed to test: %s", test.name)
			}
		}
	}
}
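
// The four-command script in the second case above reflects the shape of
// EnsureIPAddress: query existing addresses, add the IP only if it is
// missing, then re-query until the freshly added address actually shows
// up, since Windows can take a moment to report it. A hedged sketch of
// that check -> add -> poll flow with illustrative helper parameters,
// not the shipped code (a "time" import is assumed for the poll delay):
func sketchEnsureIP(check func() (bool, error), add func() error) (bool, error) {
	if exists, _ := check(); exists {
		return true, nil // nothing to do
	}
	if err := add(); err != nil {
		// Same exit-status triage as sketched after TestDeletePortProxyRule.
		return sketchEnsureResult(err)
	}
	for {
		if exists, _ := check(); exists {
			return true, nil
		}
		time.Sleep(500 * time.Millisecond) // assumed polling interval
	}
}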

func TestDeleteIPAddress(t *testing.T) {
	runner := fakeCommonRunner()

	tests := []struct {
		name          string
		arguments     []string
		expectedError bool
	}{
		{"Success", []string{"delete-ip-address"}, false},
		{"utilexec.ExitError exists, and status is not 0", []string{"delete-ip-address"}, true},
		{"utilexec.ExitError exists, and status is 0", []string{"delete-ip-address"}, false},
		{"other error exists", []string{"delete-ip-address"}, true},
	}

	for _, test := range tests {
		err := runner.DeleteIPAddress(test.arguments)
		if test.expectedError {
			assert.Errorf(t, err, "Failed to test: %s", test.name)
		} else {
			assert.NoErrorf(t, err, "Failed to test: %s", test.name)
		}
	}
}

func TestGetInterfaceToAddIP(t *testing.T) {
	// Back up the 'INTERFACE_TO_ADD_SERVICE_IP' environment variable
	backupValue := os.Getenv("INTERFACE_TO_ADD_SERVICE_IP")
	// and restore it when the test finishes.
	defer os.Setenv("INTERFACE_TO_ADD_SERVICE_IP", backupValue)

	tests := []struct {
		name           string
		envToBeSet     string
		expectedResult string
	}{
		{"env_value_is_empty", "", "vEthernet (HNS Internal NIC)"},
		{"env_value_is_not_empty", "eth0", "eth0"},
	}

	fakeExec := fakeexec.FakeExec{
		CommandScript: []fakeexec.FakeCommandAction{},
	}
	netsh := New(&fakeExec)

	for _, test := range tests {
		os.Setenv("INTERFACE_TO_ADD_SERVICE_IP", test.envToBeSet)
		result := netsh.GetInterfaceToAddIP()

		assert.EqualValuesf(t, test.expectedResult, result, "Failed to test: %s", test.name)
	}
}
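
// The table above fixes GetInterfaceToAddIP's contract: an explicitly set
// INTERFACE_TO_ADD_SERVICE_IP wins, otherwise the HNS default interface
// name is returned. A sketch consistent with those two cases (only the
// os package is needed; the shipped method hangs off *runner):
func sketchInterfaceToAddIP() string {
	if iface := os.Getenv("INTERFACE_TO_ADD_SERVICE_IP"); iface != "" {
		return iface
	}
	return "vEthernet (HNS Internal NIC)"
}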

func TestRestore(t *testing.T) {
	runner := New(&fakeexec.FakeExec{
		CommandScript: []fakeexec.FakeCommandAction{},
	})

	result := runner.Restore([]string{})
	assert.NoErrorf(t, result, "The return value must be nil")
}

func TestCheckIPExists(t *testing.T) {
	fakeCmd := fakeexec.FakeCmd{
		CombinedOutputScript: []fakeexec.FakeAction{
			// Error occurs
			func() ([]byte, []byte, error) {
				return nil, nil, &fakeexec.FakeExitError{Status: 1}
			},
			// IP address string is empty
			func() ([]byte, []byte, error) {
				return []byte(""), nil, nil
			},
			// "IP Address:" field is missing
			func() ([]byte, []byte, error) {
				return []byte("10.10.10.10"), nil, nil
			},
			// IP does not exist
			func() ([]byte, []byte, error) {
				return []byte("IP Address:10.10.10.10"), nil, nil
			},
			// IP exists
			func() ([]byte, []byte, error) {
				return []byte("IP Address:10.10.10.10\nIP Address:10.10.10.20"), nil, nil
			},
		},
	}
	fakeExec := fakeexec.FakeExec{
		CommandScript: []fakeexec.FakeCommandAction{
			func(cmd string, args ...string) exec.Cmd {
				return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
			},
			func(cmd string, args ...string) exec.Cmd {
				return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
			},
			func(cmd string, args ...string) exec.Cmd {
				return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
			},
			func(cmd string, args ...string) exec.Cmd {
				return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
			},
			func(cmd string, args ...string) exec.Cmd {
				return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
			},
		},
	}
	fakeRunner := &runner{
		exec: &fakeExec,
	}

	tests := []struct {
		name           string
		ipToCheck      string
		arguments      []string
		expectedError  bool
		expectedResult bool
	}{
		{"Error occurs", "10.10.10.20", []string{"check-IP-exists"}, true, false},
		{"IP address string is empty", "10.10.10.20", []string{"check-IP-exists"}, false, false},
		{"'IP Address:' field is missing", "10.10.10.20", []string{"check-IP-exists"}, false, false},
		{"IP does not exist", "10.10.10.20", []string{"check-IP-exists"}, false, false},
		{"IP exists", "10.10.10.20", []string{"check-IP-exists"}, false, true},
	}

	for _, test := range tests {
		result, err := checkIPExists(test.ipToCheck, test.arguments, fakeRunner)
		if test.expectedError {
			assert.Errorf(t, err, "Failed to test: %s", test.name)
		} else {
			assert.EqualValuesf(t, test.expectedResult, result, "Failed to test: %s", test.name)
		}
	}
}
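
// checkIPExists, exercised above, shells out to netsh, splits the output
// into lines, and reports whether any line carries the address we are
// looking for. A minimal sketch of the line-matching half, consistent
// with the five cases above (a "strings" import is assumed; the shipped
// helper also runs the command and propagates its error):
func sketchOutputHasIP(output, ipToCheck string) bool {
	for _, line := range strings.Split(output, "\n") {
		// Only lines that look like an address field are considered.
		if strings.Contains(line, "IP") && getIP(line) == ipToCheck {
			return true
		}
	}
	return false
}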

func TestGetIP(t *testing.T) {
	testcases := []struct {
		name          string
		showAddress   string
		expectAddress string
	}{
		{
			name:          "IP address displayed in Chinese",
			showAddress:   "IP 地址: 10.96.0.2",
			expectAddress: "10.96.0.2",
		},
		{
			name:          "IP address displayed in English",
			showAddress:   "IP Address: 10.96.0.3",
			expectAddress: "10.96.0.3",
		},
		{
			name:          "IP address without spaces",
			showAddress:   "IP Address:10.96.0.4",
			expectAddress: "10.96.0.4",
		},
		{
			name:          "Only the 'IP Address:' field exists",
			showAddress:   "IP Address:",
			expectAddress: "",
		},
		{
			name:          "IP address without ':' separator",
			showAddress:   "IP Address10.6.9.2",
			expectAddress: "",
		},
	}

	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			address := getIP(tc.showAddress)
			if address != tc.expectAddress {
				t.Errorf("expected address=%q, got %q", tc.expectAddress, address)
			}
		})
	}
}
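
// The five cases above imply a deliberately locale-agnostic parser:
// everything after the first ':' (whitespace-trimmed) is the address, so
// "IP Address:" and the localized "IP 地址:" are handled identically, and
// a line without ':' yields "". A sketch satisfying all five cases
// (a "strings" import is assumed; the shipped getIP may differ in detail):
func sketchGetIP(showAddress string) string {
	parts := strings.SplitN(showAddress, ":", 2)
	if len(parts) != 2 {
		return "" // no separator, nothing to parse
	}
	return strings.TrimSpace(parts[1])
}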

@@ -1,71 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package testing
-
-import (
-	"net"
-
-	"k8s.io/kubernetes/pkg/util/netsh"
-)
-
-// FakeNetsh is a no-op implementation of the netsh Interface
-type FakeNetsh struct {
-}
-
-// NewFake returns a FakeNetsh no-op implementation of the netsh Interface
-func NewFake() *FakeNetsh {
-	return &FakeNetsh{}
-}
-
-// EnsurePortProxyRule implements the netsh Interface; it always returns true with no error
-func (*FakeNetsh) EnsurePortProxyRule(args []string) (bool, error) {
-	// Do Nothing
-	return true, nil
-}
-
-// DeletePortProxyRule deletes the specified portproxy rule. If the rule did not exist, return an error.
-func (*FakeNetsh) DeletePortProxyRule(args []string) error {
-	// Do Nothing
-	return nil
-}
-
-// EnsureIPAddress checks if the specified IP address is added to the vEthernet (HNSTransparent) interface and, if not, adds it. If the address already existed, return true.
-func (*FakeNetsh) EnsureIPAddress(args []string, ip net.IP) (bool, error) {
-	return true, nil
-}
-
-// DeleteIPAddress checks if the specified IP address is present and, if so, deletes it.
-func (*FakeNetsh) DeleteIPAddress(args []string) error {
-	// Do Nothing
-	return nil
-}
-
-// Restore runs `netsh exec` to restore portproxy rules or addresses from a file.
-// TODO Check if this is required, most likely not
-func (*FakeNetsh) Restore(args []string) error {
-	// Do Nothing
-	return nil
-}
-
-// GetInterfaceToAddIP returns the name of the interface where the Service IP needs to be added,
-// since an IP address must be present for netsh portproxy to redirect traffic.
-// It reads the INTERFACE_TO_ADD_SERVICE_IP environment variable; if that is not defined, "vEthernet (HNSTransparent)" is returned.
-func (*FakeNetsh) GetInterfaceToAddIP() string {
-	return "Interface 1"
-}
-
-var _ = netsh.Interface(&FakeNetsh{})

@@ -160,9 +160,6 @@ type KubeProxyConfiguration struct {
 	// portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
 	// in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
 	PortRange string `json:"portRange"`
-	// udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
-	// Must be greater than 0. Only applicable for proxyMode=userspace.
-	UDPIdleTimeout metav1.Duration `json:"udpIdleTimeout"`
 	// conntrack contains conntrack-related configuration options.
 	Conntrack KubeProxyConntrackConfiguration `json:"conntrack"`
 	// configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater

@@ -188,19 +185,13 @@ type KubeProxyConfiguration struct {
 // ProxyMode represents modes used by the Kubernetes proxy server.
 //
-// Currently, three modes of proxy are available in Linux platform: 'userspace' (older, going to be EOL), 'iptables'
-// (newer, faster), 'ipvs'(newest, better in performance and scalability).
+// Currently, two modes of proxy are available in Linux platform: 'iptables' and 'ipvs'.
+// One mode of proxy is available in Windows platform: 'kernelspace'.
 //
-// Two modes of proxy are available in Windows platform: 'userspace'(older, stable) and 'kernelspace' (newer, faster).
-//
-// In Linux platform, if proxy mode is blank, use the best-available proxy (currently iptables, but may change in the
-// future). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are
-// insufficient, this always falls back to the userspace proxy. IPVS mode will be enabled when proxy mode is set to 'ipvs',
-// and the fall back path is firstly iptables and then userspace.
-//
-// In Windows platform, if proxy mode is blank, use the best-available proxy (currently userspace, but may change in the
-// future). If winkernel proxy is selected, regardless of how, but the Windows kernel can't support this mode of proxy,
-// this always falls back to the userspace proxy.
+// If the proxy mode is unspecified, the best-available proxy mode will be used (currently this
+// is `iptables` on Linux and `kernelspace` on Windows). If the selected proxy mode cannot be
+// used (due to lack of kernel support, missing userspace components, etc) then kube-proxy
+// will exit with an error.
 type ProxyMode string

 // LocalMode represents modes to detect local traffic from the node
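
The rewritten ProxyMode comment drops the old fallback chains: an unusable
mode now makes kube-proxy exit instead of silently degrading to the
userspace proxy. A hedged sketch of what per-platform defaulting amounts
to after this change (illustrative Go, not the actual kube-proxy source):

	// defaultProxyMode is a hypothetical helper mirroring the documented
	// defaults: iptables on Linux, kernelspace on Windows.
	func defaultProxyMode(goos string) string {
		if goos == "windows" {
			return "kernelspace"
		}
		return "iptables"
	}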

@@ -61,7 +61,6 @@ func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) {
 		*out = new(int32)
 		**out = **in
 	}
-	out.UDPIdleTimeout = in.UDPIdleTimeout
 	in.Conntrack.DeepCopyInto(&out.Conntrack)
 	out.ConfigSyncPeriod = in.ConfigSyncPeriod
 	if in.NodePortAddresses != nil {

@@ -168,7 +168,6 @@ rules:
   - k8s.io/kubernetes/pkg/proxy/ipvs
   - k8s.io/kubernetes/pkg/proxy/metaproxier
   - k8s.io/kubernetes/pkg/proxy/metrics
-  - k8s.io/kubernetes/pkg/proxy/userspace
   - k8s.io/kubernetes/pkg/proxy/util
   - k8s.io/kubernetes/pkg/registry/core/service/allocator
   - k8s.io/kubernetes/pkg/registry/core/service/portallocator