diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index 51bfac2dbb9..37f7ffc194c 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -160,7 +160,7 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) { fs.Var(&utilflag.IPPortVar{Val: &o.config.MetricsBindAddress}, "metrics-bind-address", "The IP address with port for the metrics server to serve on (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. This parameter is ignored if a config file is specified by --config.") fs.BoolVar(&o.config.BindAddressHardFail, "bind-address-hard-fail", o.config.BindAddressHardFail, "If true kube-proxy will treat failure to bind to a port as fatal and exit") fs.Var(utilflag.PortRangeVar{Val: &o.config.PortRange}, "proxy-port-range", "Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) that may be consumed in order to proxy service traffic. If (unspecified, 0, or 0-0) then ports will be randomly chosen.") - fs.Var(&o.config.Mode, "proxy-mode", "Which proxy mode to use: 'iptables' (Linux-only), 'ipvs' (Linux-only), 'kernelspace' (Windows-only), or 'userspace' (Linux/Windows, deprecated). The default value is 'iptables' on Linux and 'userspace' on Windows(will be 'kernelspace' in a future release). "+ + fs.Var(&o.config.Mode, "proxy-mode", "Which proxy mode to use: on Linux this can be 'iptables' (default) or 'ipvs'. On Windows the only supported value is 'kernelspace'. "+ "This parameter is ignored if a config file is specified by --config.") fs.Var(cliflag.NewMapStringBool(&o.config.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features. "+ "Options are:\n"+strings.Join(utilfeature.DefaultFeatureGate.KnownFeatures(), "\n")+"\n"+ @@ -191,7 +191,6 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) { o.config.Conntrack.TCPCloseWaitTimeout.Duration, "NAT timeout for TCP connections in the CLOSE_WAIT state") fs.DurationVar(&o.config.ConfigSyncPeriod.Duration, "config-sync-period", o.config.ConfigSyncPeriod.Duration, "How often configuration from the apiserver is refreshed. Must be greater than 0.") - fs.DurationVar(&o.config.UDPIdleTimeout.Duration, "udp-timeout", o.config.UDPIdleTimeout.Duration, "How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace") fs.BoolVar(&o.config.IPVS.StrictARP, "ipvs-strict-arp", o.config.IPVS.StrictARP, "Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2") fs.BoolVar(&o.config.IPTables.MasqueradeAll, "masquerade-all", o.config.IPTables.MasqueradeAll, "If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed)") @@ -539,7 +538,6 @@ type ProxyServer struct { MetricsBindAddress string BindAddressHardFail bool EnableProfiling bool - UseEndpointSlices bool OOMScoreAdj *int32 ConfigSyncPeriod time.Duration HealthzServer healthcheck.ProxierHealthUpdater @@ -738,7 +736,7 @@ func (s *ProxyServer) Run() error { options.LabelSelector = labelSelector.String() })) - // Create configs (i.e. Watches for Services and Endpoints or EndpointSlices) + // Create configs (i.e. Watches for Services and EndpointSlices) // Note: RegisterHandler() calls need to happen before creation of Sources because sources // only notify on changes, and the initial update (on process start) may be lost if no handlers // are registered yet. 
@@ -746,18 +744,12 @@ func (s *ProxyServer) Run() error { serviceConfig.RegisterEventHandler(s.Proxier) go serviceConfig.Run(wait.NeverStop) - if endpointsHandler, ok := s.Proxier.(config.EndpointsHandler); ok && !s.UseEndpointSlices { - endpointsConfig := config.NewEndpointsConfig(informerFactory.Core().V1().Endpoints(), s.ConfigSyncPeriod) - endpointsConfig.RegisterEventHandler(endpointsHandler) - go endpointsConfig.Run(wait.NeverStop) - } else { - endpointSliceConfig := config.NewEndpointSliceConfig(informerFactory.Discovery().V1().EndpointSlices(), s.ConfigSyncPeriod) - endpointSliceConfig.RegisterEventHandler(s.Proxier) - go endpointSliceConfig.Run(wait.NeverStop) - } + endpointSliceConfig := config.NewEndpointSliceConfig(informerFactory.Discovery().V1().EndpointSlices(), s.ConfigSyncPeriod) + endpointSliceConfig.RegisterEventHandler(s.Proxier) + go endpointSliceConfig.Run(wait.NeverStop) - // This has to start after the calls to NewServiceConfig and NewEndpointsConfig because those - // functions must configure their shared informer event handlers first. + // This has to start after the calls to NewServiceConfig and NewEndpointSliceConfig because + // those functions must configure their shared informer event handlers first. informerFactory.Start(wait.NeverStop) if utilfeature.DefaultFeatureGate.Enabled(features.TopologyAwareHints) { diff --git a/cmd/kube-proxy/app/server_others.go b/cmd/kube-proxy/app/server_others.go index 7e880f847f3..e938cdfab7b 100644 --- a/cmd/kube-proxy/app/server_others.go +++ b/cmd/kube-proxy/app/server_others.go @@ -42,7 +42,6 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - utilnet "k8s.io/apimachinery/pkg/util/net" clientset "k8s.io/client-go/kubernetes" toolswatch "k8s.io/client-go/tools/watch" "k8s.io/component-base/configz" @@ -55,7 +54,6 @@ import ( "k8s.io/kubernetes/pkg/proxy/iptables" "k8s.io/kubernetes/pkg/proxy/ipvs" proxymetrics "k8s.io/kubernetes/pkg/proxy/metrics" - "k8s.io/kubernetes/pkg/proxy/userspace" proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables" utilipset "k8s.io/kubernetes/pkg/util/ipset" utiliptables "k8s.io/kubernetes/pkg/util/iptables" @@ -157,22 +155,20 @@ func newProxyServer( var ipt [2]utiliptables.Interface dualStack := true // While we assume that node supports, we do further checks below - if proxyMode != proxyconfigapi.ProxyModeUserspace { - // Create iptables handlers for both families, one is already created - // Always ordered as IPv4, IPv6 - if primaryProtocol == utiliptables.ProtocolIPv4 { - ipt[0] = iptInterface - ipt[1] = utiliptables.New(execer, utiliptables.ProtocolIPv6) - } else { - ipt[0] = utiliptables.New(execer, utiliptables.ProtocolIPv4) - ipt[1] = iptInterface - } + // Create iptables handlers for both families, one is already created + // Always ordered as IPv4, IPv6 + if primaryProtocol == utiliptables.ProtocolIPv4 { + ipt[0] = iptInterface + ipt[1] = utiliptables.New(execer, utiliptables.ProtocolIPv6) + } else { + ipt[0] = utiliptables.New(execer, utiliptables.ProtocolIPv4) + ipt[1] = iptInterface + } - for _, perFamilyIpt := range ipt { - if !perFamilyIpt.Present() { - klog.InfoS("kube-proxy running in single-stack mode, this ipFamily is not supported", "ipFamily", perFamilyIpt.Protocol()) - dualStack = false - } + for _, perFamilyIpt := range ipt { + if !perFamilyIpt.Present() { + klog.V(0).InfoS("kube-proxy running in single-stack mode, this ipFamily is not supported", "ipFamily", perFamilyIpt.Protocol()) + dualStack = false } } @@ -320,31 +316,6 @@ func 
newProxyServer( return nil, fmt.Errorf("unable to create proxier: %v", err) } proxymetrics.RegisterMetrics() - } else { - klog.InfoS("Using userspace Proxier") - klog.InfoS("The userspace proxier is now deprecated and will be removed in a future release, please use 'iptables' or 'ipvs' instead") - - // TODO this has side effects that should only happen when Run() is invoked. - proxier, err = userspace.NewProxier( - userspace.NewLoadBalancerRR(), - netutils.ParseIPSloppy(config.BindAddress), - iptInterface, - execer, - *utilnet.ParsePortRangeOrDie(config.PortRange), - config.IPTables.SyncPeriod.Duration, - config.IPTables.MinSyncPeriod.Duration, - config.UDPIdleTimeout.Duration, - config.NodePortAddresses, - ) - if err != nil { - return nil, fmt.Errorf("unable to create proxier: %v", err) - } - } - - useEndpointSlices := true - if proxyMode == proxyconfigapi.ProxyModeUserspace { - // userspace mode doesn't support endpointslice. - useEndpointSlices = false } return &ProxyServer{ @@ -367,7 +338,6 @@ func newProxyServer( OOMScoreAdj: config.OOMScoreAdj, ConfigSyncPeriod: config.ConfigSyncPeriod.Duration, HealthzServer: healthzServer, - UseEndpointSlices: useEndpointSlices, }, nil } @@ -571,7 +541,6 @@ func cleanupAndExit() error { var encounteredError bool for _, ipt := range ipts { - encounteredError = userspace.CleanupLeftovers(ipt) || encounteredError encounteredError = iptables.CleanupLeftovers(ipt) || encounteredError encounteredError = ipvs.CleanupLeftovers(ipvsInterface, ipt, ipsetInterface) || encounteredError } diff --git a/cmd/kube-proxy/app/server_test.go b/cmd/kube-proxy/app/server_test.go index 739b29ccf00..7e65919c309 100644 --- a/cmd/kube-proxy/app/server_test.go +++ b/cmd/kube-proxy/app/server_test.go @@ -120,7 +120,6 @@ metricsBindAddress: "%s" mode: "%s" oomScoreAdj: 17 portRange: "2-7" -udpIdleTimeout: 123ms detectLocalMode: "ClusterCIDR" detectLocal: bridgeInterface: "cbr0" @@ -263,7 +262,6 @@ nodePortAddresses: Mode: kubeproxyconfig.ProxyMode(tc.mode), OOMScoreAdj: pointer.Int32(17), PortRange: "2-7", - UDPIdleTimeout: metav1.Duration{Duration: 123 * time.Millisecond}, NodePortAddresses: []string{"10.20.30.40/16", "fd00:1::0/64"}, DetectLocalMode: kubeproxyconfig.LocalModeClusterCIDR, DetectLocal: kubeproxyconfig.DetectLocalConfiguration{ @@ -457,8 +455,7 @@ mode: "" nodePortAddresses: null oomScoreAdj: -999 portRange: "" -detectLocalMode: "BridgeInterface" -udpIdleTimeout: 250ms`) +detectLocalMode: "BridgeInterface"`) if err != nil { return nil, "", fmt.Errorf("unexpected error when writing content to temp kube-proxy config file: %v", err) } diff --git a/cmd/kube-proxy/app/server_windows.go b/cmd/kube-proxy/app/server_windows.go index cea3279e8ce..067ad97371c 100644 --- a/cmd/kube-proxy/app/server_windows.go +++ b/cmd/kube-proxy/app/server_windows.go @@ -33,7 +33,6 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/client-go/tools/events" "k8s.io/component-base/configz" "k8s.io/component-base/metrics" @@ -43,11 +42,7 @@ import ( proxyconfigscheme "k8s.io/kubernetes/pkg/proxy/apis/config/scheme" "k8s.io/kubernetes/pkg/proxy/healthcheck" "k8s.io/kubernetes/pkg/proxy/winkernel" - "k8s.io/kubernetes/pkg/proxy/winuserspace" - utilnetsh "k8s.io/kubernetes/pkg/util/netsh" utilnode "k8s.io/kubernetes/pkg/util/node" - "k8s.io/utils/exec" - netutils "k8s.io/utils/net" ) // NewProxyServer returns a new ProxyServer. 
@@ -101,75 +96,51 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, master string healthzPort, _ = strconv.Atoi(port) } + // Check if Kernel Space can be used. + canUseWinKernelProxy, err := winkernel.CanUseWinKernelProxier(winkernel.WindowsKernelCompatTester{}) + if !canUseWinKernelProxy && err != nil { + return nil, err + } + var proxier proxy.Provider - proxyMode := getProxyMode(config.Mode, winkernel.WindowsKernelCompatTester{}) + proxyMode := proxyconfigapi.ProxyModeKernelspace dualStackMode := getDualStackMode(config.Winkernel.NetworkName, winkernel.DualStackCompatTester{}) - if proxyMode == proxyconfigapi.ProxyModeKernelspace { - klog.InfoS("Using Kernelspace Proxier.") - if dualStackMode { - klog.InfoS("Creating dualStackProxier for Windows kernel.") + if dualStackMode { + klog.V(0).InfoS("Creating dualStackProxier for Windows kernel.") - proxier, err = winkernel.NewDualStackProxier( - config.IPTables.SyncPeriod.Duration, - config.IPTables.MinSyncPeriod.Duration, - config.IPTables.MasqueradeAll, - int(*config.IPTables.MasqueradeBit), - config.ClusterCIDR, - hostname, - nodeIPTuple(config.BindAddress), - recorder, - healthzServer, - config.Winkernel, - healthzPort, - ) - } else { - - proxier, err = winkernel.NewProxier( - config.IPTables.SyncPeriod.Duration, - config.IPTables.MinSyncPeriod.Duration, - config.IPTables.MasqueradeAll, - int(*config.IPTables.MasqueradeBit), - config.ClusterCIDR, - hostname, - nodeIP, - recorder, - healthzServer, - config.Winkernel, - healthzPort, - ) - - } - - if err != nil { - return nil, fmt.Errorf("unable to create proxier: %v", err) - } - - winkernel.RegisterMetrics() - } else { - klog.InfoS("Using userspace Proxier.") - klog.InfoS("The userspace proxier is now deprecated and will be removed in a future release, please use 'kernelspace' instead") - execer := exec.New() - var netshInterface utilnetsh.Interface - netshInterface = utilnetsh.New(execer) - - proxier, err = winuserspace.NewProxier( - winuserspace.NewLoadBalancerRR(), - netutils.ParseIPSloppy(config.BindAddress), - netshInterface, - *utilnet.ParsePortRangeOrDie(config.PortRange), - // TODO @pires replace below with default values, if applicable + proxier, err = winkernel.NewDualStackProxier( config.IPTables.SyncPeriod.Duration, - config.UDPIdleTimeout.Duration, + config.IPTables.MinSyncPeriod.Duration, + config.IPTables.MasqueradeAll, + int(*config.IPTables.MasqueradeBit), + config.ClusterCIDR, + hostname, + nodeIPTuple(config.BindAddress), + recorder, + healthzServer, + config.Winkernel, + healthzPort, + ) + } else { + proxier, err = winkernel.NewProxier( + config.IPTables.SyncPeriod.Duration, + config.IPTables.MinSyncPeriod.Duration, + config.IPTables.MasqueradeAll, + int(*config.IPTables.MasqueradeBit), + config.ClusterCIDR, + hostname, + nodeIP, + recorder, + healthzServer, + config.Winkernel, + healthzPort, ) - if err != nil { - return nil, fmt.Errorf("unable to create proxier: %v", err) - } } - useEndpointSlices := true - if proxyMode == proxyconfigapi.ProxyModeUserspace { - // userspace mode doesn't support endpointslice. 
- useEndpointSlices = false + if err != nil { + return nil, fmt.Errorf("unable to create proxier: %v", err) } + winkernel.RegisterMetrics() + return &ProxyServer{ Client: client, EventClient: eventClient, @@ -184,7 +155,6 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, master string OOMScoreAdj: config.OOMScoreAdj, ConfigSyncPeriod: config.ConfigSyncPeriod.Duration, HealthzServer: healthzServer, - UseEndpointSlices: useEndpointSlices, }, nil } @@ -192,35 +162,10 @@ func getDualStackMode(networkname string, compatTester winkernel.StackCompatTest return compatTester.DualStackCompatible(networkname) } -func getProxyMode(proxyMode proxyconfigapi.ProxyMode, kcompat winkernel.KernelCompatTester) proxyconfigapi.ProxyMode { - if proxyMode == proxyconfigapi.ProxyModeKernelspace { - return tryWinKernelSpaceProxy(kcompat) - } - return proxyconfigapi.ProxyModeUserspace -} - func detectNumCPU() int { return goruntime.NumCPU() } -func tryWinKernelSpaceProxy(kcompat winkernel.KernelCompatTester) proxyconfigapi.ProxyMode { - // Check for Windows Kernel Version if we can support Kernel Space proxy - // Check for Windows Version - - // guaranteed false on error, error only necessary for debugging - useWinKernelProxy, err := winkernel.CanUseWinKernelProxier(kcompat) - if err != nil { - klog.ErrorS(err, "Can't determine whether to use windows kernel proxy, using userspace proxier") - return proxyconfigapi.ProxyModeUserspace - } - if useWinKernelProxy { - return proxyconfigapi.ProxyModeKernelspace - } - // Fallback. - klog.V(1).InfoS("Can't use winkernel proxy, using userspace proxier") - return proxyconfigapi.ProxyModeUserspace -} - // cleanupAndExit cleans up after a previous proxy run func cleanupAndExit() error { return errors.New("--cleanup-and-exit is not implemented on Windows") diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index 03e44137a17..7abb1422d5d 100644 --- a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -51138,13 +51138,6 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyConfiguration(ref common.R Format: "", }, }, - "udpIdleTimeout": { - SchemaProps: spec.SchemaProps{ - Description: "udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. 
Only applicable for proxyMode=userspace.", - Default: 0, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), - }, - }, "conntrack": { SchemaProps: spec.SchemaProps{ Description: "conntrack contains conntrack-related configuration options.", @@ -51205,7 +51198,7 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyConfiguration(ref common.R }, }, }, - Required: []string{"bindAddress", "healthzBindAddress", "metricsBindAddress", "bindAddressHardFail", "enableProfiling", "clusterCIDR", "hostnameOverride", "clientConnection", "iptables", "ipvs", "oomScoreAdj", "mode", "portRange", "udpIdleTimeout", "conntrack", "configSyncPeriod", "nodePortAddresses", "winkernel", "showHiddenMetricsForVersion", "detectLocalMode", "detectLocal"}, + Required: []string{"bindAddress", "healthzBindAddress", "metricsBindAddress", "bindAddressHardFail", "enableProfiling", "clusterCIDR", "hostnameOverride", "clientConnection", "iptables", "ipvs", "oomScoreAdj", "mode", "portRange", "conntrack", "configSyncPeriod", "nodePortAddresses", "winkernel", "showHiddenMetricsForVersion", "detectLocalMode", "detectLocal"}, }, }, Dependencies: []string{ diff --git a/pkg/kubemark/hollow_proxy.go b/pkg/kubemark/hollow_proxy.go index 968dd452654..6ca4d25d6b3 100644 --- a/pkg/kubemark/hollow_proxy.go +++ b/pkg/kubemark/hollow_proxy.go @@ -118,17 +118,16 @@ func NewHollowProxyOrDie( } return &HollowProxy{ ProxyServer: &proxyapp.ProxyServer{ - Client: client, - EventClient: eventClient, - IptInterface: iptInterface, - Proxier: proxier, - Broadcaster: broadcaster, - Recorder: recorder, - ProxyMode: "fake", - NodeRef: nodeRef, - UseEndpointSlices: true, - OOMScoreAdj: utilpointer.Int32Ptr(0), - ConfigSyncPeriod: 30 * time.Second, + Client: client, + EventClient: eventClient, + IptInterface: iptInterface, + Proxier: proxier, + Broadcaster: broadcaster, + Recorder: recorder, + ProxyMode: "fake", + NodeRef: nodeRef, + OOMScoreAdj: utilpointer.Int32Ptr(0), + ConfigSyncPeriod: 30 * time.Second, }, }, nil } diff --git a/pkg/proxy/apis/config/scheme/testdata/KubeProxyConfiguration/after/v1alpha1.yaml b/pkg/proxy/apis/config/scheme/testdata/KubeProxyConfiguration/after/v1alpha1.yaml index d4084f9e6a6..569d47ee6c5 100644 --- a/pkg/proxy/apis/config/scheme/testdata/KubeProxyConfiguration/after/v1alpha1.yaml +++ b/pkg/proxy/apis/config/scheme/testdata/KubeProxyConfiguration/after/v1alpha1.yaml @@ -42,7 +42,6 @@ nodePortAddresses: null oomScoreAdj: -999 portRange: "" showHiddenMetricsForVersion: "" -udpIdleTimeout: 250ms winkernel: enableDSR: false forwardHealthCheckVip: false diff --git a/pkg/proxy/apis/config/scheme/testdata/KubeProxyConfiguration/roundtrip/default/v1alpha1.yaml b/pkg/proxy/apis/config/scheme/testdata/KubeProxyConfiguration/roundtrip/default/v1alpha1.yaml index d4084f9e6a6..569d47ee6c5 100644 --- a/pkg/proxy/apis/config/scheme/testdata/KubeProxyConfiguration/roundtrip/default/v1alpha1.yaml +++ b/pkg/proxy/apis/config/scheme/testdata/KubeProxyConfiguration/roundtrip/default/v1alpha1.yaml @@ -42,7 +42,6 @@ nodePortAddresses: null oomScoreAdj: -999 portRange: "" showHiddenMetricsForVersion: "" -udpIdleTimeout: 250ms winkernel: enableDSR: false forwardHealthCheckVip: false diff --git a/pkg/proxy/apis/config/types.go b/pkg/proxy/apis/config/types.go index 0f162043d90..11d5022b82a 100644 --- a/pkg/proxy/apis/config/types.go +++ b/pkg/proxy/apis/config/types.go @@ -164,9 +164,6 @@ type KubeProxyConfiguration struct { // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed // in order to 
proxy service traffic. If unspecified (0-0) then ports will be randomly chosen. PortRange string - // udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). - // Must be greater than 0. Only applicable for proxyMode=userspace. - UDPIdleTimeout metav1.Duration // conntrack contains conntrack-related configuration options. Conntrack KubeProxyConntrackConfiguration // configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater @@ -190,24 +187,18 @@ type KubeProxyConfiguration struct { DetectLocal DetectLocalConfiguration } -// ProxyMode represents modes used by the Kubernetes proxy server. Currently, three modes of proxy are available in -// Linux platform: 'userspace' (older, going to be EOL), 'iptables' (newer, faster), 'ipvs'(newest, better in performance -// and scalability). +// ProxyMode represents modes used by the Kubernetes proxy server. // -// Two modes of proxy are available in Windows platform: 'userspace'(older, stable) and 'kernelspace' (newer, faster). +// Currently, two proxy modes are available on Linux: 'iptables' and 'ipvs'. +// One proxy mode is available on Windows: 'kernelspace'. // -// In Linux platform, if proxy mode is blank, use the best-available proxy (currently iptables, but may change in the -// future). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are -// insufficient, this always falls back to the userspace proxy. IPVS mode will be enabled when proxy mode is set to 'ipvs', -// and the fall back path is firstly iptables and then userspace. -// -// In Windows platform, if proxy mode is blank, use the best-available proxy (currently userspace, but may change in the -// future). If winkernel proxy is selected, regardless of how, but the Windows kernel can't support this mode of proxy, -// this always falls back to the userspace proxy. +// If the proxy mode is unspecified, the best-available proxy mode will be used (currently this +// is `iptables` on Linux and `kernelspace` on Windows). If the selected proxy mode cannot be +// used (due to lack of kernel support, missing userspace components, etc.) then kube-proxy +// will exit with an error. 
type ProxyMode string const ( - ProxyModeUserspace ProxyMode = "userspace" ProxyModeIPTables ProxyMode = "iptables" ProxyModeIPVS ProxyMode = "ipvs" ProxyModeKernelspace ProxyMode = "kernelspace" diff --git a/pkg/proxy/apis/config/v1alpha1/defaults.go b/pkg/proxy/apis/config/v1alpha1/defaults.go index ac345d6d48c..667486624c9 100644 --- a/pkg/proxy/apis/config/v1alpha1/defaults.go +++ b/pkg/proxy/apis/config/v1alpha1/defaults.go @@ -67,10 +67,6 @@ func SetDefaults_KubeProxyConfiguration(obj *kubeproxyconfigv1alpha1.KubeProxyCo if obj.IPVS.SyncPeriod.Duration == 0 { obj.IPVS.SyncPeriod = metav1.Duration{Duration: 30 * time.Second} } - zero := metav1.Duration{} - if obj.UDPIdleTimeout == zero { - obj.UDPIdleTimeout = metav1.Duration{Duration: 250 * time.Millisecond} - } if obj.Conntrack.MaxPerCore == nil { obj.Conntrack.MaxPerCore = pointer.Int32(32 * 1024) diff --git a/pkg/proxy/apis/config/v1alpha1/defaults_test.go b/pkg/proxy/apis/config/v1alpha1/defaults_test.go index 878ce77f011..8485063513a 100644 --- a/pkg/proxy/apis/config/v1alpha1/defaults_test.go +++ b/pkg/proxy/apis/config/v1alpha1/defaults_test.go @@ -58,8 +58,7 @@ func TestDefaultsKubeProxyConfiguration(t *testing.T) { IPVS: kubeproxyconfigv1alpha1.KubeProxyIPVSConfiguration{ SyncPeriod: metav1.Duration{Duration: 30 * time.Second}, }, - OOMScoreAdj: &oomScore, - UDPIdleTimeout: metav1.Duration{Duration: 250 * time.Millisecond}, + OOMScoreAdj: &oomScore, Conntrack: kubeproxyconfigv1alpha1.KubeProxyConntrackConfiguration{ MaxPerCore: &ctMaxPerCore, Min: &ctMin, @@ -94,8 +93,7 @@ func TestDefaultsKubeProxyConfiguration(t *testing.T) { IPVS: kubeproxyconfigv1alpha1.KubeProxyIPVSConfiguration{ SyncPeriod: metav1.Duration{Duration: 30 * time.Second}, }, - OOMScoreAdj: &oomScore, - UDPIdleTimeout: metav1.Duration{Duration: 250 * time.Millisecond}, + OOMScoreAdj: &oomScore, Conntrack: kubeproxyconfigv1alpha1.KubeProxyConntrackConfiguration{ MaxPerCore: &ctMaxPerCore, Min: &ctMin, diff --git a/pkg/proxy/apis/config/v1alpha1/zz_generated.conversion.go b/pkg/proxy/apis/config/v1alpha1/zz_generated.conversion.go index 3f4e9eed6a9..8c46561fce9 100644 --- a/pkg/proxy/apis/config/v1alpha1/zz_generated.conversion.go +++ b/pkg/proxy/apis/config/v1alpha1/zz_generated.conversion.go @@ -145,7 +145,6 @@ func autoConvert_v1alpha1_KubeProxyConfiguration_To_config_KubeProxyConfiguratio out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj)) out.Mode = config.ProxyMode(in.Mode) out.PortRange = in.PortRange - out.UDPIdleTimeout = in.UDPIdleTimeout if err := Convert_v1alpha1_KubeProxyConntrackConfiguration_To_config_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil { return err } @@ -188,7 +187,6 @@ func autoConvert_config_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguratio out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj)) out.Mode = v1alpha1.ProxyMode(in.Mode) out.PortRange = in.PortRange - out.UDPIdleTimeout = in.UDPIdleTimeout if err := Convert_config_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil { return err } diff --git a/pkg/proxy/apis/config/validation/validation.go b/pkg/proxy/apis/config/validation/validation.go index 2110f21f082..40c58d8104e 100644 --- a/pkg/proxy/apis/config/validation/validation.go +++ b/pkg/proxy/apis/config/validation/validation.go @@ -57,10 +57,6 @@ func Validate(config *kubeproxyconfig.KubeProxyConfiguration) field.ErrorList { allErrs = append(allErrs, field.Invalid(newPath.Child("OOMScoreAdj"), 
*config.OOMScoreAdj, "must be within the range [-1000, 1000]")) } - if config.UDPIdleTimeout.Duration <= 0 { - allErrs = append(allErrs, field.Invalid(newPath.Child("UDPIdleTimeout"), config.UDPIdleTimeout, "must be greater than 0")) - } - if config.ConfigSyncPeriod.Duration <= 0 { allErrs = append(allErrs, field.Invalid(newPath.Child("ConfigSyncPeriod"), config.ConfigSyncPeriod, "must be greater than 0")) } @@ -185,7 +181,6 @@ func validateProxyMode(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) fiel func validateProxyModeLinux(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList { validModes := sets.NewString( - string(kubeproxyconfig.ProxyModeUserspace), string(kubeproxyconfig.ProxyModeIPTables), string(kubeproxyconfig.ProxyModeIPVS), ) @@ -200,7 +195,6 @@ func validateProxyModeWindows(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList { validModes := sets.NewString( - string(kubeproxyconfig.ProxyModeUserspace), string(kubeproxyconfig.ProxyModeKernelspace), ) @@ -208,7 +202,7 @@ func validateProxyModeWindows(mode kubeproxyconfig.ProxyMode, fldPath *field.Pat return nil } - errMsg := fmt.Sprintf("must be %s or blank (blank means the most-available proxy [currently userspace(will be 'kernelspace' in a future release)])", strings.Join(validModes.List(), ",")) + errMsg := fmt.Sprintf("must be %s or blank (blank means the best-available proxy [currently 'kernelspace'])", strings.Join(validModes.List(), ",")) return field.ErrorList{field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)} } diff --git a/pkg/proxy/apis/config/validation/validation_test.go b/pkg/proxy/apis/config/validation/validation_test.go index 85f1b68b35f..f340067bb41 100644 --- a/pkg/proxy/apis/config/validation/validation_test.go +++ b/pkg/proxy/apis/config/validation/validation_test.go @@ -42,7 +42,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { HealthzBindAddress: "0.0.0.0:10256", MetricsBindAddress: "127.0.0.1:10249", ClusterCIDR: "192.168.59.0/24", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -66,7 +65,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { HealthzBindAddress: "0.0.0.0:10256", MetricsBindAddress: "127.0.0.1:10249", ClusterCIDR: "192.168.59.0/24", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -85,7 +83,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { HealthzBindAddress: "", MetricsBindAddress: "127.0.0.1:10249", ClusterCIDR: "192.168.59.0/24", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -104,7 +101,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { HealthzBindAddress: "", MetricsBindAddress: "[::1]:10249", ClusterCIDR: "fd00:192:168:59::/64", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -123,7 +119,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { HealthzBindAddress: "0.0.0.0:12345", 
MetricsBindAddress: "127.0.0.1:10249", ClusterCIDR: "192.168.59.0/24", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -142,7 +137,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { HealthzBindAddress: "0.0.0.0:12345", MetricsBindAddress: "127.0.0.1:10249", ClusterCIDR: "fd00:192:168::/64", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -161,7 +155,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { HealthzBindAddress: "0.0.0.0:12345", MetricsBindAddress: "127.0.0.1:10249", ClusterCIDR: "192.168.59.0/24,fd00:192:168::/64", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -180,7 +173,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { HealthzBindAddress: "0.0.0.0:12345", MetricsBindAddress: "127.0.0.1:10249", ClusterCIDR: "192.168.59.0/24", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -203,7 +195,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { HealthzBindAddress: "0.0.0.0:12345", MetricsBindAddress: "127.0.0.1:10249", ClusterCIDR: "192.168.59.0/24", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -240,7 +231,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { HealthzBindAddress: "0.0.0.0:10256", MetricsBindAddress: "127.0.0.1:10249", ClusterCIDR: "192.168.59.0/24", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -262,7 +252,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { HealthzBindAddress: "0.0.0.0", MetricsBindAddress: "127.0.0.1:10249", ClusterCIDR: "192.168.59.0/24", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -284,7 +273,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { HealthzBindAddress: "0.0.0.0:12345", MetricsBindAddress: "127.0.0.1", ClusterCIDR: "192.168.59.0/24", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -306,7 +294,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { HealthzBindAddress: "0.0.0.0:12345", MetricsBindAddress: "127.0.0.1:10249", ClusterCIDR: "192.168.59.0", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -328,7 +315,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { HealthzBindAddress: "0.0.0.0:12345", MetricsBindAddress: "127.0.0.1:10249", ClusterCIDR: 
"192.168.59.0/24,fd00:192:168::/64,10.0.0.0/16", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -344,35 +330,12 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { }, expectedErrs: field.ErrorList{field.Invalid(newPath.Child("ClusterCIDR"), "192.168.59.0/24,fd00:192:168::/64,10.0.0.0/16", "only one CIDR allowed or a valid DualStack CIDR (e.g. 10.100.0.0/16,fde4:8dba:82e1::/48)")}, }, - "UDPIdleTimeout must be > 0": { - config: kubeproxyconfig.KubeProxyConfiguration{ - BindAddress: "10.10.12.11", - HealthzBindAddress: "0.0.0.0:12345", - MetricsBindAddress: "127.0.0.1:10249", - ClusterCIDR: "192.168.59.0/24", - UDPIdleTimeout: metav1.Duration{Duration: -1 * time.Second}, - ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, - IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ - MasqueradeAll: true, - SyncPeriod: metav1.Duration{Duration: 5 * time.Second}, - MinSyncPeriod: metav1.Duration{Duration: 2 * time.Second}, - }, - Conntrack: kubeproxyconfig.KubeProxyConntrackConfiguration{ - MaxPerCore: pointer.Int32(1), - Min: pointer.Int32(1), - TCPEstablishedTimeout: &metav1.Duration{Duration: 5 * time.Second}, - TCPCloseWaitTimeout: &metav1.Duration{Duration: 5 * time.Second}, - }, - }, - expectedErrs: field.ErrorList{field.Invalid(newPath.Child("UDPIdleTimeout"), metav1.Duration{Duration: -1 * time.Second}, "must be greater than 0")}, - }, "ConfigSyncPeriod must be > 0": { config: kubeproxyconfig.KubeProxyConfiguration{ BindAddress: "10.10.12.11", HealthzBindAddress: "0.0.0.0:12345", MetricsBindAddress: "127.0.0.1:10249", ClusterCIDR: "192.168.59.0/24", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: -1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -394,7 +357,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { HealthzBindAddress: "0.0.0.0:10256", MetricsBindAddress: "127.0.0.1:10249", ClusterCIDR: "192.168.59.0/24", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -418,7 +380,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { HealthzBindAddress: "0.0.0.0:12345", MetricsBindAddress: "127.0.0.1:10249", ClusterCIDR: "192.168.59.0/24", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -444,7 +405,6 @@ func TestValidateKubeProxyConfiguration(t *testing.T) { HealthzBindAddress: "0.0.0.0:12345", MetricsBindAddress: "127.0.0.1:10249", ClusterCIDR: "192.168.59.0/24", - UDPIdleTimeout: metav1.Duration{Duration: 1 * time.Second}, ConfigSyncPeriod: metav1.Duration{Duration: 1 * time.Second}, IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeAll: true, @@ -751,17 +711,13 @@ func TestValidateProxyMode(t *testing.T) { mode kubeproxyconfig.ProxyMode expectedErrs field.ErrorList }{ - "valid Userspace mode": { - mode: kubeproxyconfig.ProxyModeUserspace, - expectedErrs: field.ErrorList{}, - }, "blank mode should default": { mode: kubeproxyconfig.ProxyMode(""), expectedErrs: field.ErrorList{}, }, "invalid mode non-existent": { mode: kubeproxyconfig.ProxyMode("non-existing"), - 
expectedErrs: field.ErrorList{field.Invalid(newPath.Child("ProxyMode"), "non-existing", "must be iptables,ipvs,userspace or blank (blank means the best-available proxy [currently iptables])")}, + expectedErrs: field.ErrorList{field.Invalid(newPath.Child("ProxyMode"), "non-existing", "must be iptables,ipvs or blank (blank means the best-available proxy [currently iptables])")}, }, } for _, testCase := range testCases { diff --git a/pkg/proxy/apis/config/zz_generated.deepcopy.go b/pkg/proxy/apis/config/zz_generated.deepcopy.go index 89adbaae11b..8ceac506314 100644 --- a/pkg/proxy/apis/config/zz_generated.deepcopy.go +++ b/pkg/proxy/apis/config/zz_generated.deepcopy.go @@ -83,7 +83,6 @@ func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) { *out = new(int32) **out = **in } - out.UDPIdleTimeout = in.UDPIdleTimeout in.Conntrack.DeepCopyInto(&out.Conntrack) out.ConfigSyncPeriod = in.ConfigSyncPeriod if in.NodePortAddresses != nil { diff --git a/pkg/proxy/userspace/OWNERS b/pkg/proxy/userspace/OWNERS deleted file mode 100644 index 1484574c2bf..00000000000 --- a/pkg/proxy/userspace/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - sig-network-approvers -reviewers: - - sig-network-reviewers - - lavalamp - - smarterclayton -labels: - - sig/network diff --git a/pkg/proxy/userspace/loadbalancer.go b/pkg/proxy/userspace/loadbalancer.go deleted file mode 100644 index 0b853477851..00000000000 --- a/pkg/proxy/userspace/loadbalancer.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package userspace - -import ( - "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/proxy" - proxyconfig "k8s.io/kubernetes/pkg/proxy/config" - "net" -) - -// LoadBalancer is an interface for distributing incoming requests to service endpoints. -type LoadBalancer interface { - // NextEndpoint returns the endpoint to handle a request for the given - // service-port and source address. - NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) - NewService(service proxy.ServicePortName, sessionAffinityType v1.ServiceAffinity, stickyMaxAgeSeconds int) error - DeleteService(service proxy.ServicePortName) - CleanupStaleStickySessions(service proxy.ServicePortName) - ServiceHasEndpoints(service proxy.ServicePortName) bool - - proxyconfig.EndpointsHandler -} diff --git a/pkg/proxy/userspace/port_allocator.go b/pkg/proxy/userspace/port_allocator.go deleted file mode 100644 index 51c10cc719e..00000000000 --- a/pkg/proxy/userspace/port_allocator.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package userspace - -import ( - "errors" - "math/big" - "math/rand" - "sync" - "time" - - "k8s.io/apimachinery/pkg/util/net" - "k8s.io/apimachinery/pkg/util/wait" -) - -var ( - errPortRangeNoPortsRemaining = errors.New("port allocation failed; there are no remaining ports left to allocate in the accepted range") -) - -type PortAllocator interface { - AllocateNext() (int, error) - Release(int) -} - -// randomAllocator is a PortAllocator implementation that allocates random ports, yielding -// a port value of 0 for every call to AllocateNext(). -type randomAllocator struct{} - -// AllocateNext always returns 0 -func (r *randomAllocator) AllocateNext() (int, error) { - return 0, nil -} - -// Release is a noop -func (r *randomAllocator) Release(_ int) { - // noop -} - -// newPortAllocator builds PortAllocator for a given PortRange. If the PortRange is empty -// then a random port allocator is returned; otherwise, a new range-based allocator -// is returned. -func newPortAllocator(r net.PortRange) PortAllocator { - if r.Base == 0 { - return &randomAllocator{} - } - return newPortRangeAllocator(r, true) -} - -const ( - portsBufSize = 16 - nextFreePortCooldown = 500 * time.Millisecond - allocateNextTimeout = 1 * time.Second -) - -type rangeAllocator struct { - net.PortRange - ports chan int - used big.Int - lock sync.Mutex - rand *rand.Rand -} - -func newPortRangeAllocator(r net.PortRange, autoFill bool) PortAllocator { - if r.Base == 0 || r.Size == 0 { - panic("illegal argument: may not specify an empty port range") - } - ra := &rangeAllocator{ - PortRange: r, - ports: make(chan int, portsBufSize), - rand: rand.New(rand.NewSource(time.Now().UnixNano())), - } - if autoFill { - go wait.Forever(func() { ra.fillPorts() }, nextFreePortCooldown) - } - return ra -} - -// fillPorts loops, always searching for the next free port and, if found, fills the ports buffer with it. -// this func blocks unless there are no remaining free ports. -func (r *rangeAllocator) fillPorts() { - for { - if !r.fillPortsOnce() { - return - } - } -} - -func (r *rangeAllocator) fillPortsOnce() bool { - port := r.nextFreePort() - if port == -1 { - return false - } - r.ports <- port - return true -} - -// nextFreePort finds a free port, first picking a random port. if that port is already in use -// then the port range is scanned sequentially until either a port is found or the scan completes -// unsuccessfully. an unsuccessful scan returns a port of -1. 
-func (r *rangeAllocator) nextFreePort() int { - r.lock.Lock() - defer r.lock.Unlock() - - // choose random port - j := r.rand.Intn(r.Size) - if b := r.used.Bit(j); b == 0 { - r.used.SetBit(&r.used, j, 1) - return j + r.Base - } - - // search sequentially - for i := j + 1; i < r.Size; i++ { - if b := r.used.Bit(i); b == 0 { - r.used.SetBit(&r.used, i, 1) - return i + r.Base - } - } - for i := 0; i < j; i++ { - if b := r.used.Bit(i); b == 0 { - r.used.SetBit(&r.used, i, 1) - return i + r.Base - } - } - return -1 -} - -func (r *rangeAllocator) AllocateNext() (port int, err error) { - select { - case port = <-r.ports: - case <-time.After(allocateNextTimeout): - err = errPortRangeNoPortsRemaining - } - return -} - -func (r *rangeAllocator) Release(port int) { - port -= r.Base - if port < 0 || port >= r.Size { - return - } - r.lock.Lock() - defer r.lock.Unlock() - r.used.SetBit(&r.used, port, 0) -} diff --git a/pkg/proxy/userspace/port_allocator_test.go b/pkg/proxy/userspace/port_allocator_test.go deleted file mode 100644 index f669bb4112a..00000000000 --- a/pkg/proxy/userspace/port_allocator_test.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package userspace - -import ( - "reflect" - "testing" - - "k8s.io/apimachinery/pkg/util/net" -) - -func TestRangeAllocatorEmpty(t *testing.T) { - r := &net.PortRange{} - r.Set("0-0") - defer func() { - if rv := recover(); rv == nil { - t.Fatalf("expected panic because of empty port range: %#v", r) - } - }() - _ = newPortRangeAllocator(*r, true) -} - -func TestRangeAllocatorFullyAllocated(t *testing.T) { - r := &net.PortRange{} - r.Set("1-1") - // Don't auto-fill ports, we'll manually turn the crank - pra := newPortRangeAllocator(*r, false) - a := pra.(*rangeAllocator) - - // Fill in the one available port - if !a.fillPortsOnce() { - t.Fatalf("Expected to be able to fill ports") - } - - // There should be no ports available - if a.fillPortsOnce() { - t.Fatalf("Expected to be unable to fill ports") - } - - p, err := a.AllocateNext() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if p != 1 { - t.Fatalf("unexpected allocated port: %d", p) - } - - a.lock.Lock() - if bit := a.used.Bit(p - a.Base); bit != 1 { - a.lock.Unlock() - t.Fatalf("unexpected used bit for allocated port: %d", p) - } - a.lock.Unlock() - - _, err = a.AllocateNext() - if err == nil { - t.Fatalf("expected error because of fully-allocated range") - } - - a.Release(p) - a.lock.Lock() - if bit := a.used.Bit(p - a.Base); bit != 0 { - a.lock.Unlock() - t.Fatalf("unexpected used bit for allocated port: %d", p) - } - a.lock.Unlock() - - // Fill in the one available port - if !a.fillPortsOnce() { - t.Fatalf("Expected to be able to fill ports") - } - - p, err = a.AllocateNext() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if p != 1 { - t.Fatalf("unexpected allocated port: %d", p) - } - a.lock.Lock() - if bit := a.used.Bit(p - a.Base); bit != 1 { - a.lock.Unlock() - t.Fatalf("unexpected used bit for 
allocated port: %d", p) - } - a.lock.Unlock() - - _, err = a.AllocateNext() - if err == nil { - t.Fatalf("expected error because of fully-allocated range") - } -} - -func TestRangeAllocator_RandomishAllocation(t *testing.T) { - r := &net.PortRange{} - r.Set("1-100") - pra := newPortRangeAllocator(*r, false) - a := pra.(*rangeAllocator) - - // allocate all the ports - var err error - ports := make([]int, 100, 100) - for i := 0; i < 100; i++ { - if !a.fillPortsOnce() { - t.Fatalf("Expected to be able to fill ports") - } - ports[i], err = a.AllocateNext() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if ports[i] < 1 || ports[i] > 100 { - t.Fatalf("unexpected allocated port: %d", ports[i]) - } - a.lock.Lock() - if bit := a.used.Bit(ports[i] - a.Base); bit != 1 { - a.lock.Unlock() - t.Fatalf("unexpected used bit for allocated port: %d", ports[i]) - } - a.lock.Unlock() - } - - if a.fillPortsOnce() { - t.Fatalf("Expected to be unable to fill ports") - } - - // release them all - for i := 0; i < 100; i++ { - a.Release(ports[i]) - a.lock.Lock() - if bit := a.used.Bit(ports[i] - a.Base); bit != 0 { - a.lock.Unlock() - t.Fatalf("unexpected used bit for allocated port: %d", ports[i]) - } - a.lock.Unlock() - } - - // allocate the ports again - rports := make([]int, 100, 100) - for i := 0; i < 100; i++ { - if !a.fillPortsOnce() { - t.Fatalf("Expected to be able to fill ports") - } - rports[i], err = a.AllocateNext() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if rports[i] < 1 || rports[i] > 100 { - t.Fatalf("unexpected allocated port: %d", rports[i]) - } - a.lock.Lock() - if bit := a.used.Bit(rports[i] - a.Base); bit != 1 { - a.lock.Unlock() - t.Fatalf("unexpected used bit for allocated port: %d", rports[i]) - } - a.lock.Unlock() - } - - if a.fillPortsOnce() { - t.Fatalf("Expected to be unable to fill ports") - } - - if reflect.DeepEqual(ports, rports) { - t.Fatalf("expected re-allocated ports to be in a somewhat random order") - } -} diff --git a/pkg/proxy/userspace/proxier.go b/pkg/proxy/userspace/proxier.go deleted file mode 100644 index bf36cee0ff1..00000000000 --- a/pkg/proxy/userspace/proxier.go +++ /dev/null @@ -1,1268 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package userspace - -import ( - "fmt" - "net" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - libcontaineruserns "github.com/opencontainers/runc/libcontainer/userns" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - utilnet "k8s.io/apimachinery/pkg/util/net" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/sets" - utilfeature "k8s.io/apiserver/pkg/util/feature" - servicehelper "k8s.io/cloud-provider/service/helpers" - "k8s.io/klog/v2" - kubefeatures "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/proxy" - "k8s.io/kubernetes/pkg/proxy/config" - utilproxy "k8s.io/kubernetes/pkg/proxy/util" - "k8s.io/kubernetes/pkg/util/async" - "k8s.io/kubernetes/pkg/util/conntrack" - "k8s.io/kubernetes/pkg/util/iptables" - utilexec "k8s.io/utils/exec" - netutils "k8s.io/utils/net" -) - -type portal struct { - ip net.IP - port int - isExternal bool -} - -// ServiceInfo contains information and state for a particular proxied service -type ServiceInfo struct { - // Timeout is the read/write timeout (used for UDP connections) - Timeout time.Duration - // ActiveClients is the cache of active UDP clients being proxied by this proxy for this service - ActiveClients *ClientCache - - isAliveAtomic int32 // Only access this with atomic ops - portal portal - protocol v1.Protocol - proxyPort int - socket ProxySocket - nodePort int - loadBalancerStatus v1.LoadBalancerStatus - sessionAffinityType v1.ServiceAffinity - stickyMaxAgeSeconds int - // Deprecated, but required for back-compat (including e2e) - externalIPs []string - - // isStartedAtomic is set to non-zero when the service's socket begins - // accepting requests. Used in testcases. Only access this with atomic ops. - isStartedAtomic int32 - // isFinishedAtomic is set to non-zero when the service's socket shuts - // down. Used in testcases. Only access this with atomic ops. - isFinishedAtomic int32 -} - -func (info *ServiceInfo) setStarted() { - atomic.StoreInt32(&info.isStartedAtomic, 1) -} - -func (info *ServiceInfo) IsStarted() bool { - return atomic.LoadInt32(&info.isStartedAtomic) != 0 -} - -func (info *ServiceInfo) setFinished() { - atomic.StoreInt32(&info.isFinishedAtomic, 1) -} - -func (info *ServiceInfo) IsFinished() bool { - return atomic.LoadInt32(&info.isFinishedAtomic) != 0 -} - -func (info *ServiceInfo) setAlive(b bool) { - var i int32 - if b { - i = 1 - } - atomic.StoreInt32(&info.isAliveAtomic, i) -} - -func (info *ServiceInfo) IsAlive() bool { - return atomic.LoadInt32(&info.isAliveAtomic) != 0 -} - -func logTimeout(err error) bool { - if e, ok := err.(net.Error); ok { - if e.Timeout() { - klog.V(3).InfoS("Connection to endpoint closed due to inactivity") - return true - } - } - return false -} - -// ProxySocketFunc is a function which constructs a ProxySocket from a protocol, ip, and port -type ProxySocketFunc func(protocol v1.Protocol, ip net.IP, port int) (ProxySocket, error) - -const numBurstSyncs int = 2 - -type serviceChange struct { - current *v1.Service - previous *v1.Service -} - -// Interface for async runner; abstracted for testing -type asyncRunnerInterface interface { - Run() - Loop(<-chan struct{}) -} - -// Proxier is a simple proxy for TCP connections between a localhost:lport -// and services that provide the actual implementations. -type Proxier struct { - // EndpointSlice support has not been added for this proxier yet. 
- config.NoopEndpointSliceHandler - // TODO(imroc): implement node handler for userspace proxier. - config.NoopNodeHandler - - loadBalancer LoadBalancer - mu sync.Mutex // protects serviceMap - serviceMap map[proxy.ServicePortName]*ServiceInfo - syncPeriod time.Duration - minSyncPeriod time.Duration - udpIdleTimeout time.Duration - portMapMutex sync.Mutex - portMap map[portMapKey]*portMapValue - listenIP net.IP - iptables iptables.Interface - hostIP net.IP - localAddrs netutils.IPSet - proxyPorts PortAllocator - makeProxySocket ProxySocketFunc - exec utilexec.Interface - // endpointsSynced and servicesSynced are set to 1 when the corresponding - // objects are synced after startup. This is used to avoid updating iptables - // with some partial data after kube-proxy restart. - endpointsSynced int32 - servicesSynced int32 - initialized int32 - // protects serviceChanges - serviceChangesLock sync.Mutex - serviceChanges map[types.NamespacedName]*serviceChange // map of service changes - syncRunner asyncRunnerInterface // governs calls to syncProxyRules - - stopChan chan struct{} -} - -// assert Proxier is a proxy.Provider -var _ proxy.Provider = &Proxier{} - -// A key for the portMap. The ip has to be a string because slices can't be map -// keys. -type portMapKey struct { - ip string - port int - protocol v1.Protocol -} - -func (k *portMapKey) String() string { - return fmt.Sprintf("%s/%s", net.JoinHostPort(k.ip, strconv.Itoa(k.port)), k.protocol) -} - -// A value for the portMap -type portMapValue struct { - owner proxy.ServicePortName - socket interface { - Close() error - } -} - -var ( - // ErrProxyOnLocalhost is returned by NewProxier if the user requests a proxier on - // the loopback address. May be checked for by callers of NewProxier to know whether - // the caller provided invalid input. - ErrProxyOnLocalhost = fmt.Errorf("cannot proxy on localhost") -) - -// NewProxier returns a new Proxier given a LoadBalancer and an address on -// which to listen. Because of the iptables logic, It is assumed that there -// is only a single Proxier active on a machine. An error will be returned if -// the proxier cannot be started due to an invalid ListenIP (loopback) or -// if iptables fails to update or acquire the initial lock. Once a proxier is -// created, it will keep iptables up to date in the background and will not -// terminate if a particular iptables call fails. -func NewProxier(loadBalancer LoadBalancer, listenIP net.IP, iptables iptables.Interface, exec utilexec.Interface, pr utilnet.PortRange, syncPeriod, minSyncPeriod, udpIdleTimeout time.Duration, nodePortAddresses []string) (*Proxier, error) { - return NewCustomProxier(loadBalancer, listenIP, iptables, exec, pr, syncPeriod, minSyncPeriod, udpIdleTimeout, nodePortAddresses, newProxySocket) -} - -// NewCustomProxier functions similarly to NewProxier, returning a new Proxier -// for the given LoadBalancer and address. The new proxier is constructed using -// the ProxySocket constructor provided, however, instead of constructing the -// default ProxySockets. -func NewCustomProxier(loadBalancer LoadBalancer, listenIP net.IP, iptables iptables.Interface, exec utilexec.Interface, pr utilnet.PortRange, syncPeriod, minSyncPeriod, udpIdleTimeout time.Duration, nodePortAddresses []string, makeProxySocket ProxySocketFunc) (*Proxier, error) { - if listenIP.Equal(localhostIPv4) || listenIP.Equal(localhostIPv6) { - return nil, ErrProxyOnLocalhost - } - - // If listenIP is given, assume that is the intended host IP. 
Otherwise - // try to find a suitable host IP address from network interfaces. - var err error - hostIP := listenIP - if hostIP.Equal(net.IPv4zero) || hostIP.Equal(net.IPv6zero) { - hostIP, err = utilnet.ChooseHostInterface() - if err != nil { - return nil, fmt.Errorf("failed to select a host interface: %v", err) - } - } - - err = setRLimit(64 * 1000) - if err != nil { - if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.KubeletInUserNamespace) && libcontaineruserns.RunningInUserNS() { - klog.V(2).InfoS("Failed to set open file handler limit to 64000 (running in UserNS, ignoring)", "err", err) - } else { - return nil, fmt.Errorf("failed to set open file handler limit to 64000: %w", err) - } - } - - proxyPorts := newPortAllocator(pr) - - klog.V(2).InfoS("Setting proxy IP and initializing iptables", "ip", hostIP) - return createProxier(loadBalancer, listenIP, iptables, exec, hostIP, proxyPorts, syncPeriod, minSyncPeriod, udpIdleTimeout, makeProxySocket) -} - -func createProxier(loadBalancer LoadBalancer, listenIP net.IP, iptables iptables.Interface, exec utilexec.Interface, hostIP net.IP, proxyPorts PortAllocator, syncPeriod, minSyncPeriod, udpIdleTimeout time.Duration, makeProxySocket ProxySocketFunc) (*Proxier, error) { - // convenient to pass nil for tests.. - if proxyPorts == nil { - proxyPorts = newPortAllocator(utilnet.PortRange{}) - } - // Set up the iptables foundations we need. - if err := iptablesInit(iptables); err != nil { - return nil, fmt.Errorf("failed to initialize iptables: %v", err) - } - // Flush old iptables rules (since the bound ports will be invalid after a restart). - // When OnUpdate() is first called, the rules will be recreated. - if err := iptablesFlush(iptables); err != nil { - return nil, fmt.Errorf("failed to flush iptables: %v", err) - } - proxier := &Proxier{ - loadBalancer: loadBalancer, - serviceMap: make(map[proxy.ServicePortName]*ServiceInfo), - serviceChanges: make(map[types.NamespacedName]*serviceChange), - portMap: make(map[portMapKey]*portMapValue), - syncPeriod: syncPeriod, - minSyncPeriod: minSyncPeriod, - udpIdleTimeout: udpIdleTimeout, - listenIP: listenIP, - iptables: iptables, - hostIP: hostIP, - proxyPorts: proxyPorts, - makeProxySocket: makeProxySocket, - exec: exec, - stopChan: make(chan struct{}), - } - klog.V(3).InfoS("Record sync param", "minSyncPeriod", minSyncPeriod, "syncPeriod", syncPeriod, "burstSyncs", numBurstSyncs) - proxier.syncRunner = async.NewBoundedFrequencyRunner("userspace-proxy-sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, numBurstSyncs) - return proxier, nil -} - -// CleanupLeftovers removes all iptables rules and chains created by the Proxier -// It returns true if an error was encountered. Errors are logged. -func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) { - // NOTE: Warning, this needs to be kept in sync with the userspace Proxier, - // we want to ensure we remove all of the iptables rules it creates. 
- // Currently they are all in iptablesInit() - // Delete Rules first, then Flush and Delete Chains - args := []string{"-m", "comment", "--comment", "handle ClusterIPs; NOTE: this must be before the NodePort rules"} - if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainOutput, append(args, "-j", string(iptablesHostPortalChain))...); err != nil { - if !iptables.IsNotFoundError(err) { - klog.ErrorS(err, "Error removing userspace rule") - encounteredError = true - } - } - if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainPrerouting, append(args, "-j", string(iptablesContainerPortalChain))...); err != nil { - if !iptables.IsNotFoundError(err) { - klog.ErrorS(err, "Error removing userspace rule") - encounteredError = true - } - } - args = []string{"-m", "addrtype", "--dst-type", "LOCAL"} - args = append(args, "-m", "comment", "--comment", "handle service NodePorts; NOTE: this must be the last rule in the chain") - if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainOutput, append(args, "-j", string(iptablesHostNodePortChain))...); err != nil { - if !iptables.IsNotFoundError(err) { - klog.ErrorS(err, "Error removing userspace rule") - encounteredError = true - } - } - if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainPrerouting, append(args, "-j", string(iptablesContainerNodePortChain))...); err != nil { - if !iptables.IsNotFoundError(err) { - klog.ErrorS(err, "Error removing userspace rule") - encounteredError = true - } - } - args = []string{"-m", "comment", "--comment", "Ensure that non-local NodePort traffic can flow"} - if err := ipt.DeleteRule(iptables.TableFilter, iptables.ChainInput, append(args, "-j", string(iptablesNonLocalNodePortChain))...); err != nil { - if !iptables.IsNotFoundError(err) { - klog.ErrorS(err, "Error removing userspace rule") - encounteredError = true - } - } - - // flush and delete chains. - tableChains := map[iptables.Table][]iptables.Chain{ - iptables.TableNAT: {iptablesContainerPortalChain, iptablesHostPortalChain, iptablesHostNodePortChain, iptablesContainerNodePortChain}, - iptables.TableFilter: {iptablesNonLocalNodePortChain}, - } - for table, chains := range tableChains { - for _, c := range chains { - // flush chain, then if successful delete, delete will fail if flush fails. - if err := ipt.FlushChain(table, c); err != nil { - if !iptables.IsNotFoundError(err) { - klog.ErrorS(err, "Error flushing userspace chain") - encounteredError = true - } - } else { - if err = ipt.DeleteChain(table, c); err != nil { - if !iptables.IsNotFoundError(err) { - klog.ErrorS(err, "Error deleting userspace chain") - encounteredError = true - } - } - } - } - } - return encounteredError -} - -// shutdown closes all service port proxies and returns from the proxy's -// sync loop. Used from testcases. -func (proxier *Proxier) shutdown() { - proxier.mu.Lock() - defer proxier.mu.Unlock() - - for serviceName, info := range proxier.serviceMap { - proxier.stopProxy(serviceName, info) - } - proxier.cleanupStaleStickySessions() - close(proxier.stopChan) -} - -func (proxier *Proxier) isInitialized() bool { - return atomic.LoadInt32(&proxier.initialized) > 0 -} - -// Sync is called to synchronize the proxier state to iptables as soon as possible. 
-func (proxier *Proxier) Sync() { - proxier.syncRunner.Run() -} - -func (proxier *Proxier) syncProxyRules() { - start := time.Now() - defer func() { - klog.V(4).InfoS("Userspace syncProxyRules complete", "elapsed", time.Since(start)) - }() - - // don't sync rules till we've received services and endpoints - if !proxier.isInitialized() { - klog.V(2).InfoS("Not syncing userspace proxy until Services and Endpoints have been received from master") - return - } - - if err := iptablesInit(proxier.iptables); err != nil { - klog.ErrorS(err, "Failed to ensure iptables") - } - - proxier.serviceChangesLock.Lock() - changes := proxier.serviceChanges - proxier.serviceChanges = make(map[types.NamespacedName]*serviceChange) - proxier.serviceChangesLock.Unlock() - - proxier.mu.Lock() - defer proxier.mu.Unlock() - - klog.V(4).InfoS("userspace proxy: processing service events", "count", len(changes)) - for _, change := range changes { - existingPorts := proxier.mergeService(change.current) - proxier.unmergeService(change.previous, existingPorts) - } - - proxier.localAddrs = utilproxy.GetLocalAddrSet() - - proxier.ensurePortals() - proxier.cleanupStaleStickySessions() -} - -// SyncLoop runs periodic work. This is expected to run as a goroutine or as the main loop of the app. It does not return. -func (proxier *Proxier) SyncLoop() { - proxier.syncRunner.Loop(proxier.stopChan) -} - -// Ensure that portals exist for all services. -func (proxier *Proxier) ensurePortals() { - // NB: This does not remove rules that should not be present. - for name, info := range proxier.serviceMap { - err := proxier.openPortal(name, info) - if err != nil { - klog.ErrorS(err, "Failed to ensure portal", "servicePortName", name) - } - } -} - -// clean up any stale sticky session records in the hash map. -func (proxier *Proxier) cleanupStaleStickySessions() { - for name := range proxier.serviceMap { - proxier.loadBalancer.CleanupStaleStickySessions(name) - } -} - -func (proxier *Proxier) stopProxy(service proxy.ServicePortName, info *ServiceInfo) error { - delete(proxier.serviceMap, service) - info.setAlive(false) - err := info.socket.Close() - port := info.socket.ListenPort() - proxier.proxyPorts.Release(port) - return err -} - -func (proxier *Proxier) getServiceInfo(service proxy.ServicePortName) (*ServiceInfo, bool) { - proxier.mu.Lock() - defer proxier.mu.Unlock() - info, ok := proxier.serviceMap[service] - return info, ok -} - -// addServiceOnPortInternal starts listening for a new service, returning the ServiceInfo. -// Pass proxyPort=0 to allocate a random port. The timeout only applies to UDP -// connections, for now. 
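The Sync/SyncLoop pair above delegates everything to the syncRunner built in createProxier. The real async.BoundedFrequencyRunner additionally enforces a minimum interval and a burst budget; the stripped-down sketch below shows only the coalescing-plus-periodic behavior (miniRunner is a made-up name, not the k8s type):

package main

import (
	"fmt"
	"time"
)

// miniRunner coalesces Run() requests and also fires periodically,
// loosely imitating what the proxier's syncRunner does.
type miniRunner struct {
	fn     func()
	kick   chan struct{}
	period time.Duration
}

func newMiniRunner(fn func(), period time.Duration) *miniRunner {
	return &miniRunner{fn: fn, kick: make(chan struct{}, 1), period: period}
}

// Run requests a sync; duplicate requests collapse into one.
func (r *miniRunner) Run() {
	select {
	case r.kick <- struct{}{}:
	default: // a run is already pending
	}
}

// Loop services requests and the periodic timer until stop is closed.
func (r *miniRunner) Loop(stop <-chan struct{}) {
	t := time.NewTicker(r.period)
	defer t.Stop()
	for {
		select {
		case <-stop:
			return
		case <-r.kick:
			r.fn()
		case <-t.C:
			r.fn()
		}
	}
}

func main() {
	stop := make(chan struct{})
	r := newMiniRunner(func() { fmt.Println("sync") }, 50*time.Millisecond)
	go r.Loop(stop)
	r.Run()
	r.Run() // coalesced with the first request
	time.Sleep(120 * time.Millisecond)
	close(stop)
}

In the proxier, the coalesced function is syncProxyRules, which mergeService and addServiceOnPortInternal (defined next) ultimately serve.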
-func (proxier *Proxier) addServiceOnPortInternal(service proxy.ServicePortName, protocol v1.Protocol, proxyPort int, timeout time.Duration) (*ServiceInfo, error) { - sock, err := proxier.makeProxySocket(protocol, proxier.listenIP, proxyPort) - if err != nil { - return nil, err - } - _, portStr, err := net.SplitHostPort(sock.Addr().String()) - if err != nil { - sock.Close() - return nil, err - } - portNum, err := strconv.Atoi(portStr) - if err != nil { - sock.Close() - return nil, err - } - si := &ServiceInfo{ - Timeout: timeout, - ActiveClients: newClientCache(), - isAliveAtomic: 1, - proxyPort: portNum, - protocol: protocol, - socket: sock, - sessionAffinityType: v1.ServiceAffinityNone, // default - } - proxier.serviceMap[service] = si - - klog.V(2).InfoS("Proxying for service", "service", service, "protocol", protocol, "portNum", portNum) - go func() { - defer runtime.HandleCrash() - sock.ProxyLoop(service, si, proxier.loadBalancer) - }() - - return si, nil -} - -func (proxier *Proxier) cleanupPortalAndProxy(serviceName proxy.ServicePortName, info *ServiceInfo) error { - if err := proxier.closePortal(serviceName, info); err != nil { - return fmt.Errorf("failed to close portal for %q: %w", serviceName, err) - } - if err := proxier.stopProxy(serviceName, info); err != nil { - return fmt.Errorf("failed to stop service %q: %w", serviceName, err) - } - return nil -} - -func (proxier *Proxier) mergeService(service *v1.Service) sets.String { - if service == nil { - return nil - } - if utilproxy.ShouldSkipService(service) { - return nil - } - existingPorts := sets.NewString() - svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - for i := range service.Spec.Ports { - servicePort := &service.Spec.Ports[i] - serviceName := proxy.ServicePortName{NamespacedName: svcName, Port: servicePort.Name} - existingPorts.Insert(servicePort.Name) - info, exists := proxier.serviceMap[serviceName] - // TODO: check health of the socket? What if ProxyLoop exited? - if exists && sameConfig(info, service, servicePort) { - // Nothing changed. 
- continue - } - if exists { - klog.V(4).InfoS("Something changed for service: stopping it", "serviceName", serviceName) - if err := proxier.cleanupPortalAndProxy(serviceName, info); err != nil { - klog.ErrorS(err, "Failed to cleanup portal and proxy") - } - info.setFinished() - } - proxyPort, err := proxier.proxyPorts.AllocateNext() - if err != nil { - klog.ErrorS(err, "Failed to allocate proxy port", "serviceName", serviceName) - continue - } - - serviceIP := netutils.ParseIPSloppy(service.Spec.ClusterIP) - klog.V(1).InfoS("Adding new service", "serviceName", serviceName, "addr", net.JoinHostPort(serviceIP.String(), strconv.Itoa(int(servicePort.Port))), "protocol", servicePort.Protocol) - info, err = proxier.addServiceOnPortInternal(serviceName, servicePort.Protocol, proxyPort, proxier.udpIdleTimeout) - if err != nil { - klog.ErrorS(err, "Failed to start proxy", "serviceName", serviceName) - continue - } - info.portal.ip = serviceIP - info.portal.port = int(servicePort.Port) - info.externalIPs = service.Spec.ExternalIPs - // Deep-copy in case the service instance changes - info.loadBalancerStatus = *service.Status.LoadBalancer.DeepCopy() - info.nodePort = int(servicePort.NodePort) - info.sessionAffinityType = service.Spec.SessionAffinity - // Kube-apiserver side guarantees SessionAffinityConfig won't be nil when session affinity type is ClientIP - if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP { - info.stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds) - } - - klog.V(4).InfoS("Record serviceInfo", "serviceInfo", info) - - if err := proxier.openPortal(serviceName, info); err != nil { - klog.ErrorS(err, "Failed to open portal", "serviceName", serviceName) - } - proxier.loadBalancer.NewService(serviceName, info.sessionAffinityType, info.stickyMaxAgeSeconds) - - info.setStarted() - } - - return existingPorts -} - -func (proxier *Proxier) unmergeService(service *v1.Service, existingPorts sets.String) { - if service == nil { - return - } - - if utilproxy.ShouldSkipService(service) { - return - } - staleUDPServices := sets.NewString() - svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - for i := range service.Spec.Ports { - servicePort := &service.Spec.Ports[i] - if existingPorts.Has(servicePort.Name) { - continue - } - serviceName := proxy.ServicePortName{NamespacedName: svcName, Port: servicePort.Name} - - klog.V(1).InfoS("Stopping service", "serviceName", serviceName) - info, exists := proxier.serviceMap[serviceName] - if !exists { - klog.ErrorS(nil, "Service is being removed but doesn't exist", "serviceName", serviceName) - continue - } - - if proxier.serviceMap[serviceName].protocol == v1.ProtocolUDP { - staleUDPServices.Insert(proxier.serviceMap[serviceName].portal.ip.String()) - } - - if err := proxier.cleanupPortalAndProxy(serviceName, info); err != nil { - klog.ErrorS(err, "Clean up portal and proxy") - } - proxier.loadBalancer.DeleteService(serviceName) - info.setFinished() - } - for _, svcIP := range staleUDPServices.UnsortedList() { - if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, v1.ProtocolUDP); err != nil { - klog.ErrorS(err, "Failed to delete stale service IP connections", "ip", svcIP) - } - } -} - -func (proxier *Proxier) serviceChange(previous, current *v1.Service, detail string) { - var svcName types.NamespacedName - if current != nil { - svcName = types.NamespacedName{Namespace: current.Namespace, Name: current.Name} - } else { - svcName = types.NamespacedName{Namespace: 
previous.Namespace, Name: previous.Name} - } - klog.V(4).InfoS("Record service change", "action", detail, "svcName", svcName) - - proxier.serviceChangesLock.Lock() - defer proxier.serviceChangesLock.Unlock() - - change, exists := proxier.serviceChanges[svcName] - if !exists { - // change.previous is only set for new changes. We must keep - // the oldest service info (or nil) because correct unmerging - // depends on the next update/del after a merge, not subsequent - // updates. - change = &serviceChange{previous: previous} - proxier.serviceChanges[svcName] = change - } - - // Always use the most current service (or nil) as change.current - change.current = current - - if reflect.DeepEqual(change.previous, change.current) { - // collapsed change had no effect - delete(proxier.serviceChanges, svcName) - } else if proxier.isInitialized() { - // change will have an effect, ask the proxy to sync - proxier.syncRunner.Run() - } -} - -// OnServiceAdd is called whenever creation of new service object -// is observed. -func (proxier *Proxier) OnServiceAdd(service *v1.Service) { - proxier.serviceChange(nil, service, "OnServiceAdd") -} - -// OnServiceUpdate is called whenever modification of an existing -// service object is observed. -func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) { - proxier.serviceChange(oldService, service, "OnServiceUpdate") -} - -// OnServiceDelete is called whenever deletion of an existing service -// object is observed. -func (proxier *Proxier) OnServiceDelete(service *v1.Service) { - proxier.serviceChange(service, nil, "OnServiceDelete") -} - -// OnServiceSynced is called once all the initial event handlers were -// called and the state is fully propagated to local cache. -func (proxier *Proxier) OnServiceSynced() { - klog.V(2).InfoS("Userspace OnServiceSynced") - - // Mark services as initialized and (if endpoints are already - // initialized) the entire proxy as initialized - atomic.StoreInt32(&proxier.servicesSynced, 1) - if atomic.LoadInt32(&proxier.endpointsSynced) > 0 { - atomic.StoreInt32(&proxier.initialized, 1) - } - - // Must sync from a goroutine to avoid blocking the - // service event handler on startup with large numbers - // of initial objects - go proxier.syncProxyRules() -} - -// OnEndpointsAdd is called whenever creation of new endpoints object -// is observed. -func (proxier *Proxier) OnEndpointsAdd(endpoints *v1.Endpoints) { - proxier.loadBalancer.OnEndpointsAdd(endpoints) -} - -// OnEndpointsUpdate is called whenever modification of an existing -// endpoints object is observed. -func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) { - proxier.loadBalancer.OnEndpointsUpdate(oldEndpoints, endpoints) -} - -// OnEndpointsDelete is called whenever deletion of an existing endpoints -// object is observed. -func (proxier *Proxier) OnEndpointsDelete(endpoints *v1.Endpoints) { - proxier.loadBalancer.OnEndpointsDelete(endpoints) -} - -// OnEndpointsSynced is called once all the initial event handlers were -// called and the state is fully propagated to local cache. 
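A note on the serviceChange bookkeeping above, since it is subtle: the first event for a key pins change.previous, every later event overwrites change.current, and a pair that collapses to equality is dropped entirely. A toy version over strings (all names here are illustrative) showing an add/update/delete sequence netting out to nothing:

package main

import (
	"fmt"
	"reflect"
)

type change struct{ previous, current *string }

// record coalesces a stream of (old, new) updates per key, keeping the
// oldest previous and the newest current, mirroring serviceChange above.
func record(pending map[string]*change, key string, previous, current *string) {
	c, ok := pending[key]
	if !ok {
		c = &change{previous: previous}
		pending[key] = c
	}
	c.current = current
	if reflect.DeepEqual(c.previous, c.current) {
		delete(pending, key) // the collapsed change had no effect
	}
}

func main() {
	pending := map[string]*change{}
	v1, v2 := "cluster-ip: 1.2.3.4", "cluster-ip: 5.6.7.8"
	record(pending, "ns/svc", nil, &v1) // add
	record(pending, "ns/svc", &v1, &v2) // update
	record(pending, "ns/svc", &v2, nil) // delete
	fmt.Println(len(pending))           // 0: add then delete between syncs is a no-op
}

OnEndpointsSynced, below, is the endpoints-side half of the initialization handshake that gates the first real sync.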
-func (proxier *Proxier) OnEndpointsSynced() { - klog.V(2).InfoS("Userspace OnEndpointsSynced") - proxier.loadBalancer.OnEndpointsSynced() - - // Mark endpoints as initialized and (if services are already - // initialized) the entire proxy as initialized - atomic.StoreInt32(&proxier.endpointsSynced, 1) - if atomic.LoadInt32(&proxier.servicesSynced) > 0 { - atomic.StoreInt32(&proxier.initialized, 1) - } - - // Must sync from a goroutine to avoid blocking the - // service event handler on startup with large numbers - // of initial objects - go proxier.syncProxyRules() -} - -func sameConfig(info *ServiceInfo, service *v1.Service, port *v1.ServicePort) bool { - if info.protocol != port.Protocol || info.portal.port != int(port.Port) || info.nodePort != int(port.NodePort) { - return false - } - if !info.portal.ip.Equal(netutils.ParseIPSloppy(service.Spec.ClusterIP)) { - return false - } - if !ipsEqual(info.externalIPs, service.Spec.ExternalIPs) { - return false - } - if !servicehelper.LoadBalancerStatusEqual(&info.loadBalancerStatus, &service.Status.LoadBalancer) { - return false - } - if info.sessionAffinityType != service.Spec.SessionAffinity { - return false - } - return true -} - -func ipsEqual(lhs, rhs []string) bool { - if len(lhs) != len(rhs) { - return false - } - for i := range lhs { - if lhs[i] != rhs[i] { - return false - } - } - return true -} - -func (proxier *Proxier) openPortal(service proxy.ServicePortName, info *ServiceInfo) error { - err := proxier.openOnePortal(info.portal, info.protocol, proxier.listenIP, info.proxyPort, service) - if err != nil { - return err - } - for _, publicIP := range info.externalIPs { - err = proxier.openOnePortal(portal{netutils.ParseIPSloppy(publicIP), info.portal.port, true}, info.protocol, proxier.listenIP, info.proxyPort, service) - if err != nil { - return err - } - } - for _, ingress := range info.loadBalancerStatus.Ingress { - if ingress.IP != "" { - err = proxier.openOnePortal(portal{netutils.ParseIPSloppy(ingress.IP), info.portal.port, false}, info.protocol, proxier.listenIP, info.proxyPort, service) - if err != nil { - return err - } - } - } - if info.nodePort != 0 { - err = proxier.openNodePort(info.nodePort, info.protocol, proxier.listenIP, info.proxyPort, service) - if err != nil { - return err - } - } - return nil -} - -func (proxier *Proxier) openOnePortal(portal portal, protocol v1.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) error { - if proxier.localAddrs.Has(portal.ip) { - err := proxier.claimNodePort(portal.ip, portal.port, protocol, name) - if err != nil { - return err - } - } - - // Handle traffic from containers. - args := proxier.iptablesContainerPortalArgs(portal.ip, portal.isExternal, false, portal.port, protocol, proxyIP, proxyPort, name) - portalAddress := net.JoinHostPort(portal.ip.String(), strconv.Itoa(portal.port)) - existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerPortalChain, args...) 
- if err != nil { - klog.ErrorS(err, "Failed to install iptables rule for service", "chain", iptablesContainerPortalChain, "servicePortName", name, "args", args) - return err - } - if !existed { - klog.V(3).InfoS("Opened iptables from-containers portal for service", "servicePortName", name, "protocol", protocol, "portalAddress", portalAddress) - } - if portal.isExternal { - args := proxier.iptablesContainerPortalArgs(portal.ip, false, true, portal.port, protocol, proxyIP, proxyPort, name) - existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerPortalChain, args...) - if err != nil { - klog.ErrorS(err, "Failed to install iptables rule that opens service for local traffic", "chain", iptablesContainerPortalChain, "servicePortName", name, "args", args) - return err - } - if !existed { - klog.V(3).InfoS("Opened iptables from-containers portal for service for local traffic", "servicePortName", name, "protocol", protocol, "portalAddress", portalAddress) - } - - args = proxier.iptablesHostPortalArgs(portal.ip, true, portal.port, protocol, proxyIP, proxyPort, name) - existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostPortalChain, args...) - if err != nil { - klog.ErrorS(err, "Failed to install iptables rule for service for dst-local traffic", "chain", iptablesHostPortalChain, "servicePortName", name) - return err - } - if !existed { - klog.V(3).InfoS("Opened iptables from-host portal for service for dst-local traffic", "servicePortName", name, "protocol", protocol, "portalAddress", portalAddress) - } - return nil - } - - // Handle traffic from the host. - args = proxier.iptablesHostPortalArgs(portal.ip, false, portal.port, protocol, proxyIP, proxyPort, name) - existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostPortalChain, args...) - if err != nil { - klog.ErrorS(err, "Failed to install iptables rule for service", "chain", iptablesHostPortalChain, "servicePortName", name) - return err - } - if !existed { - klog.V(3).InfoS("Opened iptables from-host portal for service", "servicePortName", name, "protocol", protocol, "portalAddress", portalAddress) - } - return nil -} - -// Marks a port as being owned by a particular service, or returns error if already claimed. -// Idempotent: reclaiming with the same owner is not an error -func (proxier *Proxier) claimNodePort(ip net.IP, port int, protocol v1.Protocol, owner proxy.ServicePortName) error { - proxier.portMapMutex.Lock() - defer proxier.portMapMutex.Unlock() - - // TODO: We could pre-populate some reserved ports into portMap and/or blacklist some well-known ports - - key := portMapKey{ip: ip.String(), port: port, protocol: protocol} - existing, found := proxier.portMap[key] - if !found { - // Hold the actual port open, even though we use iptables to redirect - // it. This ensures that a) it's safe to take and b) that stays true. - // NOTE: We should not need to have a real listen()ing socket - bind() - // should be enough, but I can't figure out a way to e2e test without - // it. Tools like 'ss' and 'netstat' do not show sockets that are - // bind()ed but not listen()ed, and at least the default debian netcat - // has no way to avoid about 10 seconds of retries. 
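The NOTE above is the crux of claimNodePort: the port is genuinely held open by a socket even though iptables, not the socket, carries the traffic. A self-contained sketch of the claim-by-listening trick (claimPort is a hypothetical helper, not the proxier's API, which goes through makeProxySocket as the code below shows):

package main

import (
	"fmt"
	"net"
)

// claimPort holds a TCP port open so nothing else on the host can bind
// it. A bare bind() without listen() would be enough to reserve the
// port, but a listening socket is far easier to observe in tests and
// with tools like ss or netstat, as the note above explains.
func claimPort(ip net.IP, port int) (net.Listener, error) {
	return net.ListenTCP("tcp", &net.TCPAddr{IP: ip, Port: port})
}

func main() {
	l, err := claimPort(nil, 30080) // nil IP means all addresses
	if err != nil {
		fmt.Println("port already taken:", err)
		return
	}
	defer l.Close() // closing the socket releases the claim
	fmt.Println("holding", l.Addr())
}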
- socket, err := proxier.makeProxySocket(protocol, ip, port)
- if err != nil {
- return fmt.Errorf("can't open node port for %s: %v", key.String(), err)
- }
- proxier.portMap[key] = &portMapValue{owner: owner, socket: socket}
- klog.V(2).InfoS("Claimed local port", "port", key.String())
- return nil
- }
- if existing.owner == owner {
- // We are idempotent
- return nil
- }
- return fmt.Errorf("Port conflict detected on port %s. %v vs %v", key.String(), owner, existing)
-}
-
-// Release a claim on a port. Returns an error if the owner does not match the claim.
-// Tolerates release on an unclaimed port, to simplify cleanup of failed allocations.
-func (proxier *Proxier) releaseNodePort(ip net.IP, port int, protocol v1.Protocol, owner proxy.ServicePortName) error {
- proxier.portMapMutex.Lock()
- defer proxier.portMapMutex.Unlock()
-
- key := portMapKey{ip: ip.String(), port: port, protocol: protocol}
- existing, found := proxier.portMap[key]
- if !found {
- // We tolerate this, it happens if we are cleaning up a failed allocation
- klog.InfoS("Ignoring release on unowned port", "port", key)
- return nil
- }
- if existing.owner != owner {
- return fmt.Errorf("Port conflict detected on port %v (unowned unlock). %v vs %v", key, owner, existing)
- }
- delete(proxier.portMap, key)
- existing.socket.Close()
- return nil
-}
-
-func (proxier *Proxier) openNodePort(nodePort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) error {
- // TODO: Do we want to allow containers to access public services? Probably yes.
- // TODO: We could refactor this to be the same code as portal, but with IP == nil
-
- err := proxier.claimNodePort(nil, nodePort, protocol, name)
- if err != nil {
- return err
- }
-
- // Handle traffic from containers.
- args := proxier.iptablesContainerPortalArgs(nil, false, false, nodePort, protocol, proxyIP, proxyPort, name)
- existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerNodePortChain, args...)
- if err != nil {
- klog.ErrorS(err, "Failed to install iptables rule for service", "chain", iptablesContainerNodePortChain, "servicePortName", name)
- return err
- }
- if !existed {
- klog.InfoS("Opened iptables from-containers public port for service", "servicePortName", name, "protocol", protocol, "nodePort", nodePort)
- }
-
- // Handle traffic from the host.
- args = proxier.iptablesHostNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
- existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostNodePortChain, args...)
- if err != nil {
- klog.ErrorS(err, "Failed to install iptables rule for service", "chain", iptablesHostNodePortChain, "servicePortName", name)
- return err
- }
- if !existed {
- klog.InfoS("Opened iptables from-host public port for service", "servicePortName", name, "protocol", protocol, "nodePort", nodePort)
- }
-
- args = proxier.iptablesNonLocalNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
- existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableFilter, iptablesNonLocalNodePortChain, args...)
- if err != nil { - klog.ErrorS(err, "Failed to install iptables rule for service", "chain", iptablesNonLocalNodePortChain, "servicePortName", name) - return err - } - if !existed { - klog.InfoS("Opened iptables from-non-local public port for service", "servicePortName", name, "protocol", protocol, "nodePort", nodePort) - } - - return nil -} - -func (proxier *Proxier) closePortal(service proxy.ServicePortName, info *ServiceInfo) error { - // Collect errors and report them all at the end. - el := proxier.closeOnePortal(info.portal, info.protocol, proxier.listenIP, info.proxyPort, service) - for _, publicIP := range info.externalIPs { - el = append(el, proxier.closeOnePortal(portal{netutils.ParseIPSloppy(publicIP), info.portal.port, true}, info.protocol, proxier.listenIP, info.proxyPort, service)...) - } - for _, ingress := range info.loadBalancerStatus.Ingress { - if ingress.IP != "" { - el = append(el, proxier.closeOnePortal(portal{netutils.ParseIPSloppy(ingress.IP), info.portal.port, false}, info.protocol, proxier.listenIP, info.proxyPort, service)...) - } - } - if info.nodePort != 0 { - el = append(el, proxier.closeNodePort(info.nodePort, info.protocol, proxier.listenIP, info.proxyPort, service)...) - } - if len(el) == 0 { - klog.V(3).InfoS("Closed iptables portals for service", "servicePortName", service) - } else { - klog.ErrorS(nil, "Some errors closing iptables portals for service", "servicePortName", service) - } - return utilerrors.NewAggregate(el) -} - -func (proxier *Proxier) closeOnePortal(portal portal, protocol v1.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) []error { - el := []error{} - if proxier.localAddrs.Has(portal.ip) { - if err := proxier.releaseNodePort(portal.ip, portal.port, protocol, name); err != nil { - el = append(el, err) - } - } - - // Handle traffic from containers. - args := proxier.iptablesContainerPortalArgs(portal.ip, portal.isExternal, false, portal.port, protocol, proxyIP, proxyPort, name) - if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerPortalChain, args...); err != nil { - klog.ErrorS(err, "Failed to delete iptables rule for service", "chain", iptablesContainerPortalChain, "servicePortName", name) - el = append(el, err) - } - - if portal.isExternal { - args := proxier.iptablesContainerPortalArgs(portal.ip, false, true, portal.port, protocol, proxyIP, proxyPort, name) - if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerPortalChain, args...); err != nil { - klog.ErrorS(err, "Failed to delete iptables rule for service", "chain", iptablesContainerPortalChain, "servicePortName", name) - el = append(el, err) - } - - args = proxier.iptablesHostPortalArgs(portal.ip, true, portal.port, protocol, proxyIP, proxyPort, name) - if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostPortalChain, args...); err != nil { - klog.ErrorS(err, "Failed to delete iptables rule for service", "chain", iptablesHostPortalChain, "servicePortName", name) - el = append(el, err) - } - return el - } - - // Handle traffic from the host (portalIP is not external). 
- args = proxier.iptablesHostPortalArgs(portal.ip, false, portal.port, protocol, proxyIP, proxyPort, name) - if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostPortalChain, args...); err != nil { - klog.ErrorS(err, "Failed to delete iptables rule for service", "chain", iptablesHostPortalChain, "servicePortName", name) - el = append(el, err) - } - - return el -} - -func (proxier *Proxier) closeNodePort(nodePort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, name proxy.ServicePortName) []error { - el := []error{} - - // Handle traffic from containers. - args := proxier.iptablesContainerPortalArgs(nil, false, false, nodePort, protocol, proxyIP, proxyPort, name) - if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerNodePortChain, args...); err != nil { - klog.ErrorS(err, "Failed to delete iptables rule for service", "chain", iptablesContainerNodePortChain, "servicePortName", name) - el = append(el, err) - } - - // Handle traffic from the host. - args = proxier.iptablesHostNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name) - if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostNodePortChain, args...); err != nil { - klog.ErrorS(err, "Failed to delete iptables rule for service", "chain", iptablesHostNodePortChain, "servicePortName", name) - el = append(el, err) - } - - // Handle traffic not local to the host - args = proxier.iptablesNonLocalNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name) - if err := proxier.iptables.DeleteRule(iptables.TableFilter, iptablesNonLocalNodePortChain, args...); err != nil { - klog.ErrorS(err, "Failed to delete iptables rule for service", "chain", iptablesNonLocalNodePortChain, "servicePortName", name) - el = append(el, err) - } - - if err := proxier.releaseNodePort(nil, nodePort, protocol, name); err != nil { - el = append(el, err) - } - - return el -} - -// See comments in the *PortalArgs() functions for some details about why we -// use two chains for portals. -var iptablesContainerPortalChain iptables.Chain = "KUBE-PORTALS-CONTAINER" -var iptablesHostPortalChain iptables.Chain = "KUBE-PORTALS-HOST" - -// Chains for NodePort services -var iptablesContainerNodePortChain iptables.Chain = "KUBE-NODEPORT-CONTAINER" -var iptablesHostNodePortChain iptables.Chain = "KUBE-NODEPORT-HOST" -var iptablesNonLocalNodePortChain iptables.Chain = "KUBE-NODEPORT-NON-LOCAL" - -// Ensure that the iptables infrastructure we use is set up. This can safely be called periodically. -func iptablesInit(ipt iptables.Interface) error { - // TODO: There is almost certainly room for optimization here. E.g. If - // we knew the service-cluster-ip-range CIDR we could fast-track outbound packets not - // destined for a service. There's probably more, help wanted. - - // Danger - order of these rules matters here: - // - // We match portal rules first, then NodePort rules. For NodePort rules, we filter primarily on --dst-type LOCAL, - // because we want to listen on all local addresses, but don't match internet traffic with the same dst port number. - // - // There is one complication (per thockin): - // -m addrtype --dst-type LOCAL is what we want except that it is broken (by intent without foresight to our usecase) - // on at least GCE. Specifically, GCE machines have a daemon which learns what external IPs are forwarded to that - // machine, and configure a local route for that IP, making a match for --dst-type LOCAL when we don't want it to. 
- // Removing the route gives correct behavior until the daemon recreates it. - // Killing the daemon is an option, but means that any non-kubernetes use of the machine with external IP will be broken. - // - // This applies to IPs on GCE that are actually from a load-balancer; they will be categorized as LOCAL. - // _If_ the chains were in the wrong order, and the LB traffic had dst-port == a NodePort on some other service, - // the NodePort would take priority (incorrectly). - // This is unlikely (and would only affect outgoing traffic from the cluster to the load balancer, which seems - // doubly-unlikely), but we need to be careful to keep the rules in the right order. - args := []string{ /* service-cluster-ip-range matching could go here */ } - args = append(args, "-m", "comment", "--comment", "handle ClusterIPs; NOTE: this must be before the NodePort rules") - if _, err := ipt.EnsureChain(iptables.TableNAT, iptablesContainerPortalChain); err != nil { - return err - } - if _, err := ipt.EnsureRule(iptables.Prepend, iptables.TableNAT, iptables.ChainPrerouting, append(args, "-j", string(iptablesContainerPortalChain))...); err != nil { - return err - } - if _, err := ipt.EnsureChain(iptables.TableNAT, iptablesHostPortalChain); err != nil { - return err - } - if _, err := ipt.EnsureRule(iptables.Prepend, iptables.TableNAT, iptables.ChainOutput, append(args, "-j", string(iptablesHostPortalChain))...); err != nil { - return err - } - - // This set of rules matches broadly (addrtype & destination port), and therefore must come after the portal rules - args = []string{"-m", "addrtype", "--dst-type", "LOCAL"} - args = append(args, "-m", "comment", "--comment", "handle service NodePorts; NOTE: this must be the last rule in the chain") - if _, err := ipt.EnsureChain(iptables.TableNAT, iptablesContainerNodePortChain); err != nil { - return err - } - if _, err := ipt.EnsureRule(iptables.Append, iptables.TableNAT, iptables.ChainPrerouting, append(args, "-j", string(iptablesContainerNodePortChain))...); err != nil { - return err - } - if _, err := ipt.EnsureChain(iptables.TableNAT, iptablesHostNodePortChain); err != nil { - return err - } - if _, err := ipt.EnsureRule(iptables.Append, iptables.TableNAT, iptables.ChainOutput, append(args, "-j", string(iptablesHostNodePortChain))...); err != nil { - return err - } - - // Create a chain intended to explicitly allow non-local NodePort - // traffic to work around default-deny iptables configurations - // that would otherwise reject such traffic. - args = []string{"-m", "comment", "--comment", "Ensure that non-local NodePort traffic can flow"} - if _, err := ipt.EnsureChain(iptables.TableFilter, iptablesNonLocalNodePortChain); err != nil { - return err - } - if _, err := ipt.EnsureRule(iptables.Prepend, iptables.TableFilter, iptables.ChainInput, append(args, "-j", string(iptablesNonLocalNodePortChain))...); err != nil { - return err - } - - // TODO: Verify order of rules. - return nil -} - -// Flush all of our custom iptables rules. 
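To restate the ordering constraint iptablesInit enforces: portal jumps are prepended to PREROUTING/OUTPUT while NodePort jumps are appended, so the narrow ClusterIP portal rules always match before the broad --dst-type LOCAL NodePort rules. A toy model of the resulting chain (plain Go, not real iptables calls):

package main

import "fmt"

// chain is a toy stand-in for an iptables chain's ordered rule list.
type chain []string

func (c *chain) pushFront(rule string) { *c = append(chain{rule}, *c...) }
func (c *chain) pushBack(rule string)  { *c = append(*c, rule) }

func main() {
	var prerouting chain
	// The call order mirrors iptablesInit: Prepend portals, Append NodePorts.
	prerouting.pushFront("-j KUBE-PORTALS-CONTAINER")
	prerouting.pushBack("-m addrtype --dst-type LOCAL -j KUBE-NODEPORT-CONTAINER")
	for i, r := range prerouting {
		// Rules are evaluated top-down, so portals (index 0) win even when
		// a load-balancer IP is mislabeled LOCAL, as described above.
		fmt.Println(i, r)
	}
}

iptablesFlush, defined next, empties these same five chains, which createProxier relies on at startup.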
-func iptablesFlush(ipt iptables.Interface) error { - el := []error{} - if err := ipt.FlushChain(iptables.TableNAT, iptablesContainerPortalChain); err != nil { - el = append(el, err) - } - if err := ipt.FlushChain(iptables.TableNAT, iptablesHostPortalChain); err != nil { - el = append(el, err) - } - if err := ipt.FlushChain(iptables.TableNAT, iptablesContainerNodePortChain); err != nil { - el = append(el, err) - } - if err := ipt.FlushChain(iptables.TableNAT, iptablesHostNodePortChain); err != nil { - el = append(el, err) - } - if err := ipt.FlushChain(iptables.TableFilter, iptablesNonLocalNodePortChain); err != nil { - el = append(el, err) - } - if len(el) != 0 { - klog.ErrorS(utilerrors.NewAggregate(el), "Some errors flushing old iptables portals") - } - return utilerrors.NewAggregate(el) -} - -// Used below. -var zeroIPv4 = netutils.ParseIPSloppy("0.0.0.0") -var localhostIPv4 = netutils.ParseIPSloppy("127.0.0.1") - -var zeroIPv6 = netutils.ParseIPSloppy("::") -var localhostIPv6 = netutils.ParseIPSloppy("::1") - -// Build a slice of iptables args that are common to from-container and from-host portal rules. -func iptablesCommonPortalArgs(destIP net.IP, addPhysicalInterfaceMatch bool, addDstLocalMatch bool, destPort int, protocol v1.Protocol, service proxy.ServicePortName) []string { - // This list needs to include all fields as they are eventually spit out - // by iptables-save. This is because some systems do not support the - // 'iptables -C' arg, and so fall back on parsing iptables-save output. - // If this does not match, it will not pass the check. For example: - // adding the /32 on the destination IP arg is not strictly required, - // but causes this list to not match the final iptables-save output. - // This is fragile and I hope one day we can stop supporting such old - // iptables versions. - args := []string{ - "-m", "comment", - "--comment", service.String(), - "-p", strings.ToLower(string(protocol)), - "-m", strings.ToLower(string(protocol)), - "--dport", fmt.Sprintf("%d", destPort), - } - - if destIP != nil { - args = append(args, "-d", destIP.String()) - } - - if addPhysicalInterfaceMatch { - args = append(args, "-m", "physdev", "!", "--physdev-is-in") - } - - if addDstLocalMatch { - args = append(args, "-m", "addrtype", "--dst-type", "LOCAL") - } - - return args -} - -// Build a slice of iptables args for a from-container portal rule. -func (proxier *Proxier) iptablesContainerPortalArgs(destIP net.IP, addPhysicalInterfaceMatch bool, addDstLocalMatch bool, destPort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string { - args := iptablesCommonPortalArgs(destIP, addPhysicalInterfaceMatch, addDstLocalMatch, destPort, protocol, service) - - // This is tricky. - // - // If the proxy is bound (see Proxier.listenIP) to 0.0.0.0 ("any - // interface") we want to use REDIRECT, which sends traffic to the - // "primary address of the incoming interface" which means the container - // bridge, if there is one. When the response comes, it comes from that - // same interface, so the NAT matches and the response packet is - // correct. This matters for UDP, since there is no per-connection port - // number. - // - // The alternative would be to use DNAT, except that it doesn't work - // (empirically): - // * DNAT to 127.0.0.1 = Packets just disappear - this seems to be a - // well-known limitation of iptables. - // * DNAT to eth0's IP = Response packets come from the bridge, which - // breaks the NAT, and makes things like DNS not accept them. 
If
- // this could be resolved, it would simplify all of this code.
- //
- // If the proxy is bound to a specific IP, then we have to use DNAT to
- // that IP. Unlike the previous case, this works because the proxy is
- // ONLY listening on that IP, not the bridge.
- //
- // Why would anyone bind to an address that is not inclusive of
- // localhost? Apparently some cloud environments have their public IP
- // exposed as a real network interface AND do not have firewalling. We
- // don't want to expose everything out to the world.
- //
- // Unfortunately, I don't know of any way to listen on some (N > 1)
- // interfaces but not ALL interfaces, short of doing it manually, and
- // this is simpler than that.
- //
- // If the proxy is bound to localhost only, all of this is broken. Not
- // allowed.
- if proxyIP.Equal(zeroIPv4) || proxyIP.Equal(zeroIPv6) {
- // TODO: Can we REDIRECT with IPv6?
- args = append(args, "-j", "REDIRECT", "--to-ports", fmt.Sprintf("%d", proxyPort))
- } else {
- // TODO: Can we DNAT with IPv6?
- args = append(args, "-j", "DNAT", "--to-destination", net.JoinHostPort(proxyIP.String(), strconv.Itoa(proxyPort)))
- }
- return args
-}
-
-// Build a slice of iptables args for a from-host portal rule.
-func (proxier *Proxier) iptablesHostPortalArgs(destIP net.IP, addDstLocalMatch bool, destPort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
- args := iptablesCommonPortalArgs(destIP, false, addDstLocalMatch, destPort, protocol, service)
-
- // This is tricky.
- //
- // If the proxy is bound (see Proxier.listenIP) to 0.0.0.0 ("any
- // interface") we want to do the same as from-container traffic and use
- // REDIRECT. Except that it doesn't work (empirically). REDIRECT on
- // local packets sends the traffic to localhost (special case, but it is
- // documented) but the response comes from the eth0 IP (not sure why,
- // truthfully), which makes DNS unhappy.
- //
- // So we have to use DNAT. DNAT to 127.0.0.1 can't work for the same
- // reason.
- //
- // So we do our best to find an interface that is not a loopback and
- // DNAT to that. This works (again, empirically).
- //
- // If the proxy is bound to a specific IP, then we have to use DNAT to
- // that IP. Unlike the previous case, this works because the proxy is
- // ONLY listening on that IP, not the bridge.
- //
- // If the proxy is bound to localhost only, this should work, but we
- // don't allow it for now.
- if proxyIP.Equal(zeroIPv4) || proxyIP.Equal(zeroIPv6) {
- proxyIP = proxier.hostIP
- }
- // TODO: Can we DNAT with IPv6?
- args = append(args, "-j", "DNAT", "--to-destination", net.JoinHostPort(proxyIP.String(), strconv.Itoa(proxyPort)))
- return args
-}
-
-// Build a slice of iptables args for a from-host public-port rule.
-// See iptablesHostPortalArgs.
-// TODO: Should we just reuse iptablesHostPortalArgs?
-func (proxier *Proxier) iptablesHostNodePortArgs(nodePort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string {
- args := iptablesCommonPortalArgs(nil, false, false, nodePort, protocol, service)
-
- if proxyIP.Equal(zeroIPv4) || proxyIP.Equal(zeroIPv6) {
- proxyIP = proxier.hostIP
- }
- // TODO: Can we DNAT with IPv6?
- args = append(args, "-j", "DNAT", "--to-destination", net.JoinHostPort(proxyIP.String(), strconv.Itoa(proxyPort)))
- return args
-}
-
-// Build a slice of iptables args for a from-non-local public-port rule.
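Condensing the two comment blocks above into the decision they produce: with an unspecified listen address the portal rules REDIRECT to the proxy port, otherwise they DNAT to the concrete listen IP. A sketch (jumpArgs is illustrative; the real code compares against zeroIPv4/zeroIPv6 rather than calling IsUnspecified):

package main

import (
	"fmt"
	"net"
	"strconv"
)

// jumpArgs picks the target clause of a portal rule: REDIRECT when the
// proxy listens on the unspecified address, DNAT to a concrete IP:port
// otherwise.
func jumpArgs(proxyIP net.IP, proxyPort int) []string {
	if proxyIP.IsUnspecified() { // 0.0.0.0 or ::
		return []string{"-j", "REDIRECT", "--to-ports", strconv.Itoa(proxyPort)}
	}
	return []string{"-j", "DNAT", "--to-destination",
		net.JoinHostPort(proxyIP.String(), strconv.Itoa(proxyPort))}
}

func main() {
	fmt.Println(jumpArgs(net.ParseIP("0.0.0.0"), 10250))
	fmt.Println(jumpArgs(net.ParseIP("10.0.0.5"), 10250))
}

The from-non-local filter rule built next needs no jump target of this kind at all; it simply ACCEPTs new connections.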
-func (proxier *Proxier) iptablesNonLocalNodePortArgs(nodePort int, protocol v1.Protocol, proxyIP net.IP, proxyPort int, service proxy.ServicePortName) []string { - args := iptablesCommonPortalArgs(nil, false, false, proxyPort, protocol, service) - args = append(args, "-m", "state", "--state", "NEW", "-j", "ACCEPT") - return args -} - -func isTooManyFDsError(err error) bool { - return strings.Contains(err.Error(), "too many open files") -} - -func isClosedError(err error) bool { - // A brief discussion about handling closed error here: - // https://code.google.com/p/go/issues/detail?id=4373#c14 - // TODO: maybe create a stoppable TCP listener that returns a StoppedError - return strings.HasSuffix(err.Error(), "use of closed network connection") -} diff --git a/pkg/proxy/userspace/proxier_test.go b/pkg/proxy/userspace/proxier_test.go deleted file mode 100644 index 9b9fddeeb1f..00000000000 --- a/pkg/proxy/userspace/proxier_test.go +++ /dev/null @@ -1,1024 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package userspace - -import ( - "fmt" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "net/url" - "os" - "reflect" - "strconv" - "testing" - "time" - - v1 "k8s.io/api/core/v1" - discovery "k8s.io/api/discovery/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/proxy" - ipttest "k8s.io/kubernetes/pkg/util/iptables/testing" - "k8s.io/utils/exec" - fakeexec "k8s.io/utils/exec/testing" - netutils "k8s.io/utils/net" -) - -const ( - udpIdleTimeoutForTest = 250 * time.Millisecond -) - -func joinHostPort(host string, port int) string { - return net.JoinHostPort(host, fmt.Sprintf("%d", port)) -} - -func waitForClosedPortTCP(p *Proxier, proxyPort int) error { - for i := 0; i < 50; i++ { - conn, err := net.Dial("tcp", joinHostPort("", proxyPort)) - if err != nil { - return nil - } - conn.Close() - time.Sleep(1 * time.Millisecond) - } - return fmt.Errorf("port %d still open", proxyPort) -} - -func waitForClosedPortUDP(p *Proxier, proxyPort int) error { - for i := 0; i < 50; i++ { - conn, err := net.Dial("udp", joinHostPort("", proxyPort)) - if err != nil { - return nil - } - conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) - // To detect a closed UDP port write, then read. 
- _, err = conn.Write([]byte("x"))
- if err != nil {
- if e, ok := err.(net.Error); ok && !e.Timeout() {
- return nil
- }
- }
- var buf [4]byte
- _, err = conn.Read(buf[0:])
- if err != nil {
- if e, ok := err.(net.Error); ok && !e.Timeout() {
- return nil
- }
- }
- conn.Close()
- time.Sleep(1 * time.Millisecond)
- }
- return fmt.Errorf("port %d still open", proxyPort)
-}
-
-func waitForProxyFinished(t *testing.T, svcInfo *ServiceInfo) {
- if err := wait.PollImmediate(50*time.Millisecond, 30*time.Second, func() (bool, error) {
- return svcInfo.IsFinished(), nil
- }); err != nil {
- t.Errorf("timed out waiting for proxy socket to finish: %v", err)
- }
-}
-
-func waitForServiceInfo(t *testing.T, p *Proxier, servicePortName proxy.ServicePortName, service *v1.Service) *ServiceInfo {
- var svcInfo *ServiceInfo
- var exists bool
- wait.PollImmediate(50*time.Millisecond, 3*time.Second, func() (bool, error) {
- svcInfo, exists = p.getServiceInfo(servicePortName)
- return exists, nil
- })
- if !exists {
- t.Fatalf("can't find serviceInfo for %s", servicePortName)
- }
- if !svcInfo.IsAlive() {
- t.Fatalf("expected IsAlive() true for %s", servicePortName)
- }
-
- var servicePort *v1.ServicePort
- for _, port := range service.Spec.Ports {
- if port.Name == servicePortName.Port {
- servicePort = &port
- break
- }
- }
- if servicePort == nil {
- t.Errorf("failed to find service %s port with name %q", servicePortName.NamespacedName, servicePortName.Port)
- }
- if svcInfo.portal.ip.String() != service.Spec.ClusterIP || int32(svcInfo.portal.port) != servicePort.Port || svcInfo.protocol != servicePort.Protocol {
- t.Errorf("unexpected serviceInfo for %s: %#v", servicePortName, svcInfo)
- }
-
- // Wait for proxy socket to start up
- if err := wait.PollImmediate(50*time.Millisecond, 30*time.Second, func() (bool, error) {
- return svcInfo.IsStarted(), nil
- }); err != nil {
- t.Errorf("timed out waiting for proxy socket %s to start: %v", servicePortName, err)
- }
-
- return svcInfo
-}
-
-// addServiceAndWaitForInfo adds the service to the proxy and waits for the
-// named port to be ready.
-func addServiceAndWaitForInfo(t *testing.T, p *Proxier, servicePortName proxy.ServicePortName, service *v1.Service) *ServiceInfo {
- p.OnServiceAdd(service)
- return waitForServiceInfo(t, p, servicePortName, service)
-}
-
-// deleteServiceAndWait deletes the service in the proxy and waits until it
-// has been cleaned up. waitFunc will be called to wait for the service
-// port's socket to close.
-func deleteServiceAndWait(t *testing.T, p *Proxier, svcInfo *ServiceInfo, service *v1.Service, waitFunc func(*Proxier, int) error) {
- p.OnServiceDelete(service)
- // Wait for the port to really close.
- if err := waitFunc(p, svcInfo.proxyPort); err != nil {
- t.Fatalf(err.Error())
- }
- waitForProxyFinished(t, svcInfo)
- if svcInfo.IsAlive() {
- t.Fatalf("wrong value for IsAlive(): expected false")
- }
-}
-
-// udpEchoServer is a simple echo server in UDP, intended for testing the proxy.
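Why the write-then-read dance in waitForClosedPortUDP above: UDP has no handshake, so Dial alone proves nothing. A closed port typically answers the probe with ICMP port-unreachable, which surfaces as a non-timeout error on a connected UDP socket. A standalone sketch of the same check (udpPortClosed is a hypothetical helper):

package main

import (
	"fmt"
	"net"
	"time"
)

// udpPortClosed makes a best-effort guess at whether a UDP port refuses
// traffic, using the write-then-read trick from the test helper above.
func udpPortClosed(addr string) bool {
	conn, err := net.Dial("udp", addr)
	if err != nil {
		return true
	}
	defer conn.Close()
	conn.SetDeadline(time.Now().Add(100 * time.Millisecond))
	if _, err := conn.Write([]byte("x")); err != nil {
		return true
	}
	var buf [4]byte
	if _, err := conn.Read(buf[:]); err != nil {
		if e, ok := err.(net.Error); ok && e.Timeout() {
			return false // no answer either way; assume still open
		}
		return true // e.g. "connection refused" raised by ICMP
	}
	return false // got a reply, so something is listening
}

func main() {
	fmt.Println(udpPortClosed("127.0.0.1:1")) // very likely true
}

The udpEchoServer defined next is the listening peer these tests probe.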
-type udpEchoServer struct { - net.PacketConn -} - -func newUDPEchoServer() (*udpEchoServer, error) { - packetconn, err := net.ListenPacket("udp", ":0") - if err != nil { - return nil, err - } - return &udpEchoServer{packetconn}, nil -} - -func (r *udpEchoServer) Loop() { - var buffer [4096]byte - for { - n, cliAddr, err := r.ReadFrom(buffer[0:]) - if err != nil { - fmt.Printf("ReadFrom failed: %v\n", err) - continue - } - r.WriteTo(buffer[0:n], cliAddr) - } -} - -var tcpServerPort int32 -var udpServerPort int32 - -func TestMain(m *testing.M) { - // TCP setup. - tcp := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - w.Write([]byte(r.URL.Path[1:])) - })) - defer tcp.Close() - - u, err := url.Parse(tcp.URL) - if err != nil { - panic(fmt.Sprintf("failed to parse: %v", err)) - } - _, port, err := net.SplitHostPort(u.Host) - if err != nil { - panic(fmt.Sprintf("failed to parse: %v", err)) - } - tcpServerPortValue, err := strconv.Atoi(port) - if err != nil { - panic(fmt.Sprintf("failed to atoi(%s): %v", port, err)) - } - tcpServerPort = int32(tcpServerPortValue) - - // UDP setup. - udp, err := newUDPEchoServer() - if err != nil { - panic(fmt.Sprintf("failed to make a UDP server: %v", err)) - } - _, port, err = net.SplitHostPort(udp.LocalAddr().String()) - if err != nil { - panic(fmt.Sprintf("failed to parse: %v", err)) - } - udpServerPortValue, err := strconv.Atoi(port) - if err != nil { - panic(fmt.Sprintf("failed to atoi(%s): %v", port, err)) - } - udpServerPort = int32(udpServerPortValue) - go udp.Loop() - - ret := m.Run() - // it should be safe to call Close() multiple times. - tcp.Close() - os.Exit(ret) -} - -func testEchoTCP(t *testing.T, address string, port int) { - path := "aaaaa" - res, err := http.Get("http://" + address + ":" + fmt.Sprintf("%d", port) + "/" + path) - if err != nil { - t.Fatalf("error connecting to server: %v", err) - } - defer res.Body.Close() - data, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Errorf("error reading data: %v %v", err, string(data)) - } - if string(data) != path { - t.Errorf("expected: %s, got %s", path, string(data)) - } -} - -func testEchoUDP(t *testing.T, address string, port int) { - data := "abc123" - - conn, err := net.Dial("udp", joinHostPort(address, port)) - if err != nil { - t.Fatalf("error connecting to server: %v", err) - } - if _, err := conn.Write([]byte(data)); err != nil { - t.Fatalf("error sending to server: %v", err) - } - var resp [1024]byte - n, err := conn.Read(resp[0:]) - if err != nil { - t.Errorf("error receiving data: %v", err) - } - if string(resp[0:n]) != data { - t.Errorf("expected: %s, got %s", data, string(resp[0:n])) - } -} - -func waitForNumProxyClients(t *testing.T, s *ServiceInfo, want int, timeout time.Duration) { - var got int - now := time.Now() - deadline := now.Add(timeout) - for time.Now().Before(deadline) { - s.ActiveClients.Mu.Lock() - got = len(s.ActiveClients.Clients) - s.ActiveClients.Mu.Unlock() - if got == want { - return - } - time.Sleep(500 * time.Millisecond) - } - t.Errorf("expected %d ProxyClients live, got %d", want, got) -} - -func startProxier(p *Proxier, t *testing.T) { - go func() { - p.SyncLoop() - }() - p.OnServiceSynced() - p.OnEndpointsSynced() -} - -func newServiceObject(namespace, name, clusterIP string, ports []v1.ServicePort) (*v1.Service, []proxy.ServicePortName) { - service := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, - Spec: v1.ServiceSpec{ - ClusterIP: clusterIP, - 
Ports: ports, - }, - } - - servicePorts := make([]proxy.ServicePortName, len(ports)) - for i, port := range ports { - servicePorts[i] = proxy.ServicePortName{ - NamespacedName: types.NamespacedName{ - Namespace: namespace, - Name: name, - }, - Port: port.Name, - } - } - - return service, servicePorts -} - -func TestTCPProxy(t *testing.T) { - service, ports := newServiceObject("testnamespace", "echo", "1.2.3.4", []v1.ServicePort{{Name: "p", Port: 80, Protocol: "TCP"}}) - - lb := NewLoadBalancerRR() - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: service.ObjectMeta, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: ports[0].Port, Port: tcpServerPort}}, - }}, - }) - - fexec := makeFakeExec() - - p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) - if err != nil { - t.Fatal(err) - } - startProxier(p, t) - defer p.shutdown() - - svcInfo := addServiceAndWaitForInfo(t, p, ports[0], service) - testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort) -} - -func TestUDPProxy(t *testing.T) { - service, ports := newServiceObject("testnamespace", "echo", "1.2.3.4", []v1.ServicePort{{Name: "p", Port: 80, Protocol: "UDP"}}) - - lb := NewLoadBalancerRR() - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: service.ObjectMeta, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: ports[0].Port, Port: udpServerPort}}, - }}, - }) - - fexec := makeFakeExec() - - p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) - if err != nil { - t.Fatal(err) - } - startProxier(p, t) - defer p.shutdown() - - svcInfo := addServiceAndWaitForInfo(t, p, ports[0], service) - testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort) -} - -func TestUDPProxyTimeout(t *testing.T) { - service, ports := newServiceObject("testnamespace", "echo", "1.2.3.4", []v1.ServicePort{{Name: "p", Port: 80, Protocol: "UDP"}}) - - lb := NewLoadBalancerRR() - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: service.ObjectMeta, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: ports[0].Port, Port: udpServerPort}}, - }}, - }) - - fexec := makeFakeExec() - - p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) - if err != nil { - t.Fatal(err) - } - startProxier(p, t) - defer p.shutdown() - - svcInfo := addServiceAndWaitForInfo(t, p, ports[0], service) - testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort) - // When connecting to a UDP service endpoint, there should be a Conn for proxy. - waitForNumProxyClients(t, svcInfo, 1, time.Second) - // If conn has no activity for serviceInfo.timeout since last Read/Write, it should be closed because of timeout. 
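The idle expiry this test waits for is implemented by a per-client read deadline in the UDP proxy socket (proxysocket.go, which this diff does not show). A minimal sketch of the pattern under that assumption, with made-up helper names:

package main

import (
	"fmt"
	"net"
	"time"
)

// relayUntilIdle copies datagrams from a per-client upstream socket
// until it has been idle for `timeout`, then drops the client, which is
// roughly how the proxy expires UDP clients.
func relayUntilIdle(upstream net.Conn, reply func([]byte), timeout time.Duration) {
	buf := make([]byte, 4096)
	for {
		upstream.SetReadDeadline(time.Now().Add(timeout))
		n, err := upstream.Read(buf)
		if err != nil {
			if e, ok := err.(net.Error); ok && e.Timeout() {
				fmt.Println("idle timeout, dropping client")
			}
			return
		}
		reply(buf[:n])
	}
}

func main() {
	server, err := net.ListenPacket("udp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer server.Close()
	conn, err := net.Dial("udp", server.LocalAddr().String())
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// The server never replies, so the relay exits after ~250ms idle.
	relayUntilIdle(conn, func([]byte) {}, 250*time.Millisecond)
}

The waitForNumProxyClients call that follows observes exactly this expiry: the client count drops back to zero once the deadline fires.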
- waitForNumProxyClients(t, svcInfo, 0, 2*time.Second) -} - -func TestMultiPortProxy(t *testing.T) { - service, ports := newServiceObject("testnamespace", "echo", "1.2.3.4", []v1.ServicePort{ - {Name: "p", Port: 80, Protocol: "TCP"}, - {Name: "q", Port: 80, Protocol: "UDP"}, - }) - - lb := NewLoadBalancerRR() - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: service.ObjectMeta, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: ports[0].Port, Protocol: service.Spec.Ports[0].Protocol, Port: tcpServerPort}}, - }}, - }) - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: service.ObjectMeta, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: ports[1].Port, Protocol: service.Spec.Ports[1].Protocol, Port: udpServerPort}}, - }}, - }) - - fexec := makeFakeExec() - - p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) - if err != nil { - t.Fatal(err) - } - startProxier(p, t) - defer p.shutdown() - - svcInfo := addServiceAndWaitForInfo(t, p, ports[0], service) - testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort) - - svcInfo = waitForServiceInfo(t, p, ports[1], service) - testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort) -} - -func TestMultiPortOnServiceAdd(t *testing.T) { - service, ports := newServiceObject("testnamespace", "echo", "1.2.3.4", []v1.ServicePort{ - {Name: "p", Port: 80, Protocol: "TCP"}, - {Name: "q", Port: 81, Protocol: "UDP"}, - }) - - lb := NewLoadBalancerRR() - fexec := makeFakeExec() - - p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) - if err != nil { - t.Fatal(err) - } - startProxier(p, t) - defer p.shutdown() - - // ports p and q should exist - _ = addServiceAndWaitForInfo(t, p, ports[0], service) - _ = waitForServiceInfo(t, p, ports[1], service) - - // non-existent port x should not exist - serviceX := proxy.ServicePortName{NamespacedName: ports[0].NamespacedName, Port: "x"} - svcInfo, exists := p.getServiceInfo(serviceX) - if exists { - t.Fatalf("found unwanted serviceInfo for %s: %#v", serviceX, svcInfo) - } -} - -func TestTCPProxyStop(t *testing.T) { - service, ports := newServiceObject("testnamespace", "echo", "1.2.3.4", []v1.ServicePort{{Name: "p", Port: 80, Protocol: "TCP"}}) - - lb := NewLoadBalancerRR() - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: service.ObjectMeta, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: ports[0].Port, Port: tcpServerPort}}, - }}, - }) - - fexec := makeFakeExec() - - p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) - if err != nil { - t.Fatal(err) - } - startProxier(p, t) - defer p.shutdown() - - svcInfo := addServiceAndWaitForInfo(t, p, ports[0], service) - conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort)) - if err != nil { - t.Fatalf("error connecting to proxy: %v", err) - } - conn.Close() - - // Wait for the port to really close. 
- deleteServiceAndWait(t, p, svcInfo, service, waitForClosedPortTCP) -} - -func TestUDPProxyStop(t *testing.T) { - service, ports := newServiceObject("testnamespace", "echo", "1.2.3.4", []v1.ServicePort{{Name: "p", Port: 80, Protocol: "UDP"}}) - - lb := NewLoadBalancerRR() - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: service.ObjectMeta, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: ports[0].Port, Port: udpServerPort}}, - }}, - }) - - fexec := makeFakeExec() - - p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) - if err != nil { - t.Fatal(err) - } - startProxier(p, t) - defer p.shutdown() - - svcInfo := addServiceAndWaitForInfo(t, p, ports[0], service) - conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort)) - if err != nil { - t.Fatalf("error connecting to proxy: %v", err) - } - conn.Close() - - // Wait for the port to really close. - deleteServiceAndWait(t, p, svcInfo, service, waitForClosedPortUDP) -} - -func TestTCPProxyUpdateDelete(t *testing.T) { - service, ports := newServiceObject("testnamespace", "echo", "1.2.3.4", []v1.ServicePort{{Name: "p", Port: 9997, Protocol: "TCP"}}) - - lb := NewLoadBalancerRR() - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: service.ObjectMeta, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: ports[0].Port, Port: tcpServerPort}}, - }}, - }) - - fexec := makeFakeExec() - - p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) - if err != nil { - t.Fatal(err) - } - startProxier(p, t) - defer p.shutdown() - - svcInfo := addServiceAndWaitForInfo(t, p, ports[0], service) - conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort)) - if err != nil { - t.Fatalf("error connecting to proxy: %v", err) - } - conn.Close() - - // Wait for the port to really close. - deleteServiceAndWait(t, p, svcInfo, service, waitForClosedPortTCP) -} - -func TestUDPProxyUpdateDelete(t *testing.T) { - service, ports := newServiceObject("testnamespace", "echo", "1.2.3.4", []v1.ServicePort{{Name: "p", Port: 9997, Protocol: "UDP"}}) - - lb := NewLoadBalancerRR() - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: service.ObjectMeta, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: ports[0].Port, Port: udpServerPort}}, - }}, - }) - - fexec := makeFakeExec() - - p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) - if err != nil { - t.Fatal(err) - } - startProxier(p, t) - defer p.shutdown() - - svcInfo := addServiceAndWaitForInfo(t, p, ports[0], service) - conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort)) - if err != nil { - t.Fatalf("error connecting to proxy: %v", err) - } - conn.Close() - - // Wait for the port to really close. 
- deleteServiceAndWait(t, p, svcInfo, service, waitForClosedPortUDP) -} - -func TestTCPProxyUpdateDeleteUpdate(t *testing.T) { - service, ports := newServiceObject("testnamespace", "echo", "1.2.3.4", []v1.ServicePort{{Name: "p", Port: 9997, Protocol: "TCP"}}) - - lb := NewLoadBalancerRR() - endpoint := &v1.Endpoints{ - ObjectMeta: service.ObjectMeta, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: ports[0].Port, Port: tcpServerPort}}, - }}, - } - lb.OnEndpointsAdd(endpoint) - - fexec := makeFakeExec() - - p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) - if err != nil { - t.Fatal(err) - } - startProxier(p, t) - defer p.shutdown() - - svcInfo := addServiceAndWaitForInfo(t, p, ports[0], service) - conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort)) - if err != nil { - t.Fatalf("error connecting to proxy: %v", err) - } - conn.Close() - - // Wait for the port to really close. - deleteServiceAndWait(t, p, svcInfo, service, waitForClosedPortTCP) - - // need to add the endpoint here because it got cleaned up during service delete - lb.OnEndpointsAdd(endpoint) - svcInfo = addServiceAndWaitForInfo(t, p, ports[0], service) - testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort) -} - -func TestUDPProxyUpdateDeleteUpdate(t *testing.T) { - service, ports := newServiceObject("testnamespace", "echo", "1.2.3.4", []v1.ServicePort{{Name: "p", Port: 9997, Protocol: "UDP"}}) - - lb := NewLoadBalancerRR() - endpoint := &v1.Endpoints{ - ObjectMeta: service.ObjectMeta, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: ports[0].Port, Port: udpServerPort}}, - }}, - } - lb.OnEndpointsAdd(endpoint) - - fexec := makeFakeExec() - - p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) - if err != nil { - t.Fatal(err) - } - startProxier(p, t) - defer p.shutdown() - - svcInfo := addServiceAndWaitForInfo(t, p, ports[0], service) - conn, err := net.Dial("udp", joinHostPort("", svcInfo.proxyPort)) - if err != nil { - t.Fatalf("error connecting to proxy: %v", err) - } - conn.Close() - - // Wait for the port to really close.
- deleteServiceAndWait(t, p, svcInfo, service, waitForClosedPortUDP) - - // need to add the endpoint here because it got cleaned up during service delete - lb.OnEndpointsAdd(endpoint) - svcInfo = addServiceAndWaitForInfo(t, p, ports[0], service) - testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort) -} - -func TestTCPProxyUpdatePort(t *testing.T) { - origPort := int32(99) - service, ports := newServiceObject("testnamespace", "echo", "1.2.3.4", []v1.ServicePort{{Name: "p", Port: origPort, Protocol: "TCP"}}) - - lb := NewLoadBalancerRR() - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: service.ObjectMeta, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: ports[0].Port, Port: tcpServerPort}}, - }}, - }) - - fexec := makeFakeExec() - - p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) - if err != nil { - t.Fatal(err) - } - startProxier(p, t) - defer p.shutdown() - - svcInfo := addServiceAndWaitForInfo(t, p, ports[0], service) - testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort) - - newService := service.DeepCopy() - newService.Spec.Ports[0].Port = 100 - p.OnServiceUpdate(service, newService) - // Wait for the socket to actually get free. - if err := waitForClosedPortTCP(p, int(origPort)); err != nil { - t.Fatal(err) - } - waitForProxyFinished(t, svcInfo) - - svcInfo = waitForServiceInfo(t, p, ports[0], newService) - testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort) -} - -func TestUDPProxyUpdatePort(t *testing.T) { - origPort := int32(99) - service, ports := newServiceObject("testnamespace", "echo", "1.2.3.4", []v1.ServicePort{{Name: "p", Port: origPort, Protocol: "UDP"}}) - - lb := NewLoadBalancerRR() - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: service.ObjectMeta, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: ports[0].Port, Port: udpServerPort}}, - }}, - }) - - fexec := makeFakeExec() - - p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) - if err != nil { - t.Fatal(err) - } - startProxier(p, t) - defer p.shutdown() - - svcInfo := addServiceAndWaitForInfo(t, p, ports[0], service) - testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort) - - newService := service.DeepCopy() - newService.Spec.Ports[0].Port = 100 - p.OnServiceUpdate(service, newService) - // Wait for the socket to actually get free.
- if err := waitForClosedPortUDP(p, int(origPort)); err != nil { - t.Fatal(err) - } - waitForProxyFinished(t, svcInfo) - - svcInfo = waitForServiceInfo(t, p, ports[0], newService) - testEchoUDP(t, "127.0.0.1", svcInfo.proxyPort) -} - -func TestProxyUpdatePublicIPs(t *testing.T) { - origPort := int32(9997) - service, ports := newServiceObject("testnamespace", "echo", "1.2.3.4", []v1.ServicePort{{Name: "p", Port: origPort, Protocol: "TCP"}}) - - lb := NewLoadBalancerRR() - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: service.ObjectMeta, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: ports[0].Port, Port: tcpServerPort}}, - }}, - }) - - fexec := makeFakeExec() - - p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) - if err != nil { - t.Fatal(err) - } - startProxier(p, t) - defer p.shutdown() - - svcInfo := addServiceAndWaitForInfo(t, p, ports[0], service) - testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort) - - newService := service.DeepCopy() - newService.Spec.ExternalIPs = []string{"4.3.2.1"} - p.OnServiceUpdate(service, newService) - - // Wait for the socket to actually get free. - if err := waitForClosedPortTCP(p, int(origPort)); err != nil { - t.Fatal(err) - } - waitForProxyFinished(t, svcInfo) - - svcInfo = waitForServiceInfo(t, p, ports[0], newService) - testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort) -} - -func TestProxyUpdatePortal(t *testing.T) { - svcv0, ports := newServiceObject("testnamespace", "echo", "1.2.3.4", []v1.ServicePort{{Name: "p", Port: 9997, Protocol: "TCP"}}) - - lb := NewLoadBalancerRR() - endpoint := &v1.Endpoints{ - ObjectMeta: svcv0.ObjectMeta, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: ports[0].Port, Port: tcpServerPort}}, - }}, - } - lb.OnEndpointsAdd(endpoint) - - fexec := makeFakeExec() - - p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) - if err != nil { - t.Fatal(err) - } - startProxier(p, t) - defer p.shutdown() - - svcInfo := addServiceAndWaitForInfo(t, p, ports[0], svcv0) - testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort) - - svcv1 := svcv0.DeepCopy() - svcv1.Spec.ClusterIP = "" - p.OnServiceUpdate(svcv0, svcv1) - - // Wait for the service to be removed because it had an empty ClusterIP - var exists bool - for i := 0; i < 50; i++ { - _, exists = p.getServiceInfo(ports[0]) - if !exists { - break - } - time.Sleep(50 * time.Millisecond) - } - if exists { - t.Fatalf("service with empty ClusterIP should not be included in the proxy") - } - waitForProxyFinished(t, svcInfo) - - svcv2 := svcv0.DeepCopy() - svcv2.Spec.ClusterIP = "None" - p.OnServiceUpdate(svcv1, svcv2) - _, exists = p.getServiceInfo(ports[0]) - if exists { - t.Fatalf("service with 'None' as ClusterIP should not be included in the proxy") - } - - // Set the ClusterIP again and make sure the proxy opens the port - lb.OnEndpointsAdd(endpoint) - p.OnServiceUpdate(svcv2, svcv0) - svcInfo = waitForServiceInfo(t, p, ports[0], svcv0) - testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort) -} - -type fakeRunner struct{} - -// assert fakeRunner implements asyncRunnerInterface -var _ asyncRunnerInterface = &fakeRunner{} - -func (f fakeRunner) Run() { -} - -func (f
fakeRunner) Loop(stop <-chan struct{}) { -} - -func TestOnServiceAddChangeMap(t *testing.T) { - fexec := makeFakeExec() - - // Use long minSyncPeriod so we can test that immediate syncs work - p, err := createProxier(NewLoadBalancerRR(), netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Minute, udpIdleTimeoutForTest, newProxySocket) - if err != nil { - t.Fatal(err) - } - - // Fake out sync runner - p.syncRunner = fakeRunner{} - - serviceMeta := metav1.ObjectMeta{Namespace: "testnamespace", Name: "testname"} - service := &v1.Service{ - ObjectMeta: serviceMeta, - Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{ - Name: "p", - Port: 99, - Protocol: "TCP", - }}}, - } - - serviceUpdate := &v1.Service{ - ObjectMeta: serviceMeta, - Spec: v1.ServiceSpec{ClusterIP: "1.2.3.5", Ports: []v1.ServicePort{{ - Name: "p", - Port: 100, - Protocol: "TCP", - }}}, - } - - serviceUpdate2 := &v1.Service{ - ObjectMeta: serviceMeta, - Spec: v1.ServiceSpec{ClusterIP: "1.2.3.6", Ports: []v1.ServicePort{{ - Name: "p", - Port: 101, - Protocol: "TCP", - }}}, - } - - type onServiceTest struct { - detail string - changes []serviceChange - expectedChange *serviceChange - } - - tests := []onServiceTest{ - { - detail: "add", - changes: []serviceChange{ - {current: service}, - }, - expectedChange: &serviceChange{ - current: service, - }, - }, - { - detail: "add+update=add", - changes: []serviceChange{ - {current: service}, - { - previous: service, - current: serviceUpdate, - }, - }, - expectedChange: &serviceChange{ - current: serviceUpdate, - }, - }, - { - detail: "add+del=none", - changes: []serviceChange{ - {current: service}, - {previous: service}, - }, - }, - { - detail: "update+update=update", - changes: []serviceChange{ - { - previous: service, - current: serviceUpdate, - }, - { - previous: serviceUpdate, - current: serviceUpdate2, - }, - }, - expectedChange: &serviceChange{ - previous: service, - current: serviceUpdate2, - }, - }, - { - detail: "update+del=del", - changes: []serviceChange{ - { - previous: service, - current: serviceUpdate, - }, - {previous: serviceUpdate}, - }, - // change collapsing always keeps the oldest service - // info since correct unmerging depends on the least - // recent update, not the most current. 
- expectedChange: &serviceChange{ - previous: service, - }, - }, - { - detail: "del+add=update", - changes: []serviceChange{ - {previous: service}, - {current: serviceUpdate}, - }, - expectedChange: &serviceChange{ - previous: service, - current: serviceUpdate, - }, - }, - } - - for _, test := range tests { - for _, change := range test.changes { - p.serviceChange(change.previous, change.current, test.detail) - } - - if test.expectedChange != nil { - if len(p.serviceChanges) != 1 { - t.Fatalf("[%s] expected 1 service change but found %d", test.detail, len(p.serviceChanges)) - } - expectedService := test.expectedChange.current - if expectedService == nil { - expectedService = test.expectedChange.previous - } - svcName := types.NamespacedName{Namespace: expectedService.Namespace, Name: expectedService.Name} - - change, ok := p.serviceChanges[svcName] - if !ok { - t.Fatalf("[%s] did not find service change for %v", test.detail, svcName) - } - if !reflect.DeepEqual(change.previous, test.expectedChange.previous) { - t.Fatalf("[%s] change previous service and expected previous service don't match\nchange: %+v\nexp: %+v", test.detail, change.previous, test.expectedChange.previous) - } - if !reflect.DeepEqual(change.current, test.expectedChange.current) { - t.Fatalf("[%s] change current service and expected current service don't match\nchange: %+v\nexp: %+v", test.detail, change.current, test.expectedChange.current) - } - } else { - if len(p.serviceChanges) != 0 { - t.Fatalf("[%s] expected no service changes but found %d", test.detail, len(p.serviceChanges)) - } - } - } -} - -func TestNoopEndpointSlice(t *testing.T) { - p := Proxier{} - p.OnEndpointSliceAdd(&discovery.EndpointSlice{}) - p.OnEndpointSliceUpdate(&discovery.EndpointSlice{}, &discovery.EndpointSlice{}) - p.OnEndpointSliceDelete(&discovery.EndpointSlice{}) - p.OnEndpointSlicesSynced() -} - -func makeFakeExec() *fakeexec.FakeExec { - fcmd := fakeexec.FakeCmd{ - CombinedOutputScript: []fakeexec.FakeAction{ - func() ([]byte, []byte, error) { return []byte("1 flow entries have been deleted"), nil, nil }, - }, - } - return &fakeexec.FakeExec{ - CommandScript: []fakeexec.FakeCommandAction{ - func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) }, - }, - LookPathFunc: func(cmd string) (string, error) { return cmd, nil }, - } -} - -// TODO(justinsb): Add test for nodePort conflict detection, once we have nodePort wired in diff --git a/pkg/proxy/userspace/proxysocket.go b/pkg/proxy/userspace/proxysocket.go deleted file mode 100644 index e8a89b8e34f..00000000000 --- a/pkg/proxy/userspace/proxysocket.go +++ /dev/null @@ -1,304 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package userspace - -import ( - "fmt" - "io" - "net" - "strconv" - "strings" - "sync" - "time" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/proxy" -) - -// Abstraction over TCP/UDP sockets which are proxied. 
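Before the deleted proxysocket.go sources below, one behavior from TestOnServiceAddChangeMap above is worth isolating: pending service events collapse to at most one entry per service, pinning the oldest previous state (what the dataplane still reflects) while only the newest current state advances, and an add followed by a delete cancels out entirely. A runnable sketch of that merge rule, using illustrative types rather than the proxier's own serviceChange:

package main

import "fmt"

// change mirrors the shape of a pending service transition: previous is the
// state the dataplane still reflects, current is the latest observed state,
// and nil means "service does not exist".
type change struct {
    previous, current *string
}

// merge collapses one more event into the pending map. Only the first event
// for a service sets previous; later events just advance current, which is
// exactly why update+delete keeps the oldest service info.
func merge(pending map[string]*change, name string, previous, current *string) {
    c, ok := pending[name]
    if !ok {
        c = &change{previous: previous}
        pending[name] = c
    }
    c.current = current
    if c.previous == nil && c.current == nil {
        delete(pending, name) // add followed by delete: nothing to sync
    }
}

func main() {
    pending := map[string]*change{}
    a, b := "ClusterIP 1.2.3.4", "ClusterIP 1.2.3.5"

    // update + delete collapses to a delete that still remembers the old state.
    merge(pending, "echo", &a, &b)
    merge(pending, "echo", &b, nil)
    fmt.Println(*pending["echo"].previous, pending["echo"].current) // ClusterIP 1.2.3.4 <nil>

    // add + delete cancels out.
    merge(pending, "other", nil, &a)
    merge(pending, "other", &a, nil)
    fmt.Println(len(pending)) // 1: only "echo" remains
}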
-type ProxySocket interface { - // Addr gets the net.Addr for a ProxySocket. - Addr() net.Addr - // Close stops the ProxySocket from accepting incoming connections. - // Each implementation should comment on the impact of calling Close - // while sessions are active. - Close() error - // ProxyLoop proxies incoming connections for the specified service to the service endpoints. - ProxyLoop(service proxy.ServicePortName, info *ServiceInfo, loadBalancer LoadBalancer) - // ListenPort returns the host port that the ProxySocket is listening on - ListenPort() int -} - -func newProxySocket(protocol v1.Protocol, ip net.IP, port int) (ProxySocket, error) { - host := "" - if ip != nil { - host = ip.String() - } - - switch strings.ToUpper(string(protocol)) { - case "TCP": - listener, err := net.Listen("tcp", net.JoinHostPort(host, strconv.Itoa(port))) - if err != nil { - return nil, err - } - return &tcpProxySocket{Listener: listener, port: port}, nil - case "UDP": - addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, strconv.Itoa(port))) - if err != nil { - return nil, err - } - conn, err := net.ListenUDP("udp", addr) - if err != nil { - return nil, err - } - return &udpProxySocket{UDPConn: conn, port: port}, nil - case "SCTP": - return nil, fmt.Errorf("SCTP is not supported for user space proxy") - } - return nil, fmt.Errorf("unknown protocol %q", protocol) -} - -// How long we wait for a connection to a backend in seconds -var EndpointDialTimeouts = []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 2 * time.Second} - -// tcpProxySocket implements ProxySocket. Close() is implemented by net.Listener. When Close() is called, -// no new connections are allowed but existing connections are left untouched. -type tcpProxySocket struct { - net.Listener - port int -} - -func (tcp *tcpProxySocket) ListenPort() int { - return tcp.port -} - -// TryConnectEndpoints attempts to connect to the next available endpoint for the given service, cycling -// through until it is able to successfully connect, or it has tried with all timeouts in EndpointDialTimeouts. -func TryConnectEndpoints(service proxy.ServicePortName, srcAddr net.Addr, protocol string, loadBalancer LoadBalancer) (out net.Conn, err error) { - sessionAffinityReset := false - for _, dialTimeout := range EndpointDialTimeouts { - endpoint, err := loadBalancer.NextEndpoint(service, srcAddr, sessionAffinityReset) - if err != nil { - klog.ErrorS(err, "Couldn't find an endpoint for service", "service", service) - return nil, err - } - klog.V(3).InfoS("Mapped service to endpoint", "service", service, "endpoint", endpoint) - // TODO: This could spin up a new goroutine to make the outbound connection, - // and keep accepting inbound traffic. - outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout) - if err != nil { - if isTooManyFDsError(err) { - panic("Dial failed: " + err.Error()) - } - klog.ErrorS(err, "Dial failed") - sessionAffinityReset = true - continue - } - return outConn, nil - } - return nil, fmt.Errorf("failed to connect to an endpoint") -} - -func (tcp *tcpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *ServiceInfo, loadBalancer LoadBalancer) { - for { - if !myInfo.IsAlive() { - // The service port was closed or replaced. - return - } - // Block until a connection is made. 
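The accept loop below hands every inbound connection to TryConnectEndpoints above, whose core trick is escalating dial timeouts: each failed dial flips sessionAffinityReset so the balancer stops returning the dead backend, and the next attempt gets a longer deadline. A reduced, standalone sketch of that pattern; pickEndpoint stands in for LoadBalancer.NextEndpoint and is an assumption of this sketch:

package main

import (
    "fmt"
    "net"
    "time"
)

// dialTimeouts mirrors EndpointDialTimeouts: retries get progressively
// longer deadlines before the proxy gives up on the service entirely.
var dialTimeouts = []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 2 * time.Second}

// tryConnect cycles through candidate endpoints with escalating timeouts.
// After a failed dial it asks for an endpoint with affinity reset, so a
// client stuck to a dead backend gets rebalanced instead of retried forever.
func tryConnect(pickEndpoint func(resetAffinity bool) (string, error)) (net.Conn, error) {
    reset := false
    for _, timeout := range dialTimeouts {
        endpoint, err := pickEndpoint(reset)
        if err != nil {
            return nil, err
        }
        conn, err := net.DialTimeout("tcp", endpoint, timeout)
        if err != nil {
            reset = true // drop affinity and try another endpoint
            continue
        }
        return conn, nil
    }
    return nil, fmt.Errorf("failed to connect to an endpoint")
}

func main() {
    backends := []string{"10.0.0.1:9376", "10.0.0.2:9376"}
    i := 0
    _, err := tryConnect(func(resetAffinity bool) (string, error) {
        ep := backends[i%len(backends)]
        i++
        return ep, nil
    })
    fmt.Println(err) // nothing listens on these illustrative addresses
}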
- inConn, err := tcp.Accept() - if err != nil { - if isTooManyFDsError(err) { - panic("Accept failed: " + err.Error()) - } - - if isClosedError(err) { - return - } - if !myInfo.IsAlive() { - // Then the service port was just closed so the accept failure is to be expected. - return - } - klog.ErrorS(err, "Accept failed") - continue - } - klog.V(3).InfoS("Accepted TCP connection from remote", "remoteAddress", inConn.RemoteAddr(), "localAddress", inConn.LocalAddr()) - outConn, err := TryConnectEndpoints(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", loadBalancer) - if err != nil { - klog.ErrorS(err, "Failed to connect to balancer") - inConn.Close() - continue - } - // Spin up an async copy loop. - go ProxyTCP(inConn.(*net.TCPConn), outConn.(*net.TCPConn)) - } -} - -// ProxyTCP proxies data bi-directionally between in and out. -func ProxyTCP(in, out *net.TCPConn) { - var wg sync.WaitGroup - wg.Add(2) - klog.V(4).InfoS("Creating proxy between remote and local addresses", - "inRemoteAddress", in.RemoteAddr(), "inLocalAddress", in.LocalAddr(), "outLocalAddress", out.LocalAddr(), "outRemoteAddress", out.RemoteAddr()) - go copyBytes("from backend", in, out, &wg) - go copyBytes("to backend", out, in, &wg) - wg.Wait() -} - -func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) { - defer wg.Done() - klog.V(4).InfoS("Copying remote address bytes", "direction", direction, "sourceRemoteAddress", src.RemoteAddr(), "destinationRemoteAddress", dest.RemoteAddr()) - n, err := io.Copy(dest, src) - if err != nil { - if !isClosedError(err) { - klog.ErrorS(err, "I/O error occurred") - } - } - klog.V(4).InfoS("Copied remote address bytes", "bytes", n, "direction", direction, "sourceRemoteAddress", src.RemoteAddr(), "destinationRemoteAddress", dest.RemoteAddr()) - dest.Close() - src.Close() -} - -// udpProxySocket implements ProxySocket. Close() is implemented by net.UDPConn. When Close() is called, -// no new connections are allowed and existing connections are broken. -// TODO: We could lame-duck this ourselves, if it becomes important. -type udpProxySocket struct { - *net.UDPConn - port int -} - -func (udp *udpProxySocket) ListenPort() int { - return udp.port -} - -func (udp *udpProxySocket) Addr() net.Addr { - return udp.LocalAddr() -} - -// Holds all the known UDP clients that have not timed out. -type ClientCache struct { - Mu sync.Mutex - Clients map[string]net.Conn // addr string -> connection -} - -func newClientCache() *ClientCache { - return &ClientCache{Clients: map[string]net.Conn{}} -} - -func (udp *udpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *ServiceInfo, loadBalancer LoadBalancer) { - var buffer [4096]byte // 4KiB should be enough for most whole-packets - for { - if !myInfo.IsAlive() { - // The service port was closed or replaced. - break - } - - // Block until data arrives. - // TODO: Accumulate a histogram of n or something, to fine tune the buffer size. - n, cliAddr, err := udp.ReadFrom(buffer[0:]) - if err != nil { - if e, ok := err.(net.Error); ok { - if e.Temporary() { - klog.V(1).ErrorS(err, "ReadFrom had a temporary failure") - continue - } - } - klog.ErrorS(err, "ReadFrom failed, exiting ProxyLoop") - break - } - // If this is a client we know already, reuse the connection and goroutine. - svrConn, err := udp.getBackendConn(myInfo.ActiveClients, cliAddr, loadBalancer, service, myInfo.Timeout) - if err != nil { - continue - } - // TODO: It would be nice to let the goroutine handle this write, but we don't - // really want to copy the buffer. 
We could do a pool of buffers or something. - _, err = svrConn.Write(buffer[0:n]) - if err != nil { - if !logTimeout(err) { - klog.ErrorS(err, "Write failed") - // TODO: Maybe tear down the goroutine for this client/server pair? - } - continue - } - err = svrConn.SetDeadline(time.Now().Add(myInfo.Timeout)) - if err != nil { - klog.ErrorS(err, "SetDeadline failed") - continue - } - } -} - -func (udp *udpProxySocket) getBackendConn(activeClients *ClientCache, cliAddr net.Addr, loadBalancer LoadBalancer, service proxy.ServicePortName, timeout time.Duration) (net.Conn, error) { - activeClients.Mu.Lock() - defer activeClients.Mu.Unlock() - - svrConn, found := activeClients.Clients[cliAddr.String()] - if !found { - // TODO: This could spin up a new goroutine to make the outbound connection, - // and keep accepting inbound traffic. - klog.V(3).InfoS("New UDP connection from client", "address", cliAddr) - var err error - svrConn, err = TryConnectEndpoints(service, cliAddr, "udp", loadBalancer) - if err != nil { - return nil, err - } - if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil { - klog.ErrorS(err, "SetDeadline failed") - return nil, err - } - activeClients.Clients[cliAddr.String()] = svrConn - go func(cliAddr net.Addr, svrConn net.Conn, activeClients *ClientCache, timeout time.Duration) { - defer runtime.HandleCrash() - udp.proxyClient(cliAddr, svrConn, activeClients, timeout) - }(cliAddr, svrConn, activeClients, timeout) - } - return svrConn, nil -} - -// This function is expected to be called as a goroutine. -// TODO: Track and log bytes copied, like TCP -func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activeClients *ClientCache, timeout time.Duration) { - defer svrConn.Close() - var buffer [4096]byte - for { - n, err := svrConn.Read(buffer[0:]) - if err != nil { - if !logTimeout(err) { - klog.ErrorS(err, "Read failed") - } - break - } - err = svrConn.SetDeadline(time.Now().Add(timeout)) - if err != nil { - klog.ErrorS(err, "SetDeadline failed") - break - } - _, err = udp.WriteTo(buffer[0:n], cliAddr) - if err != nil { - if !logTimeout(err) { - klog.ErrorS(err, "WriteTo failed") - } - break - } - } - activeClients.Mu.Lock() - delete(activeClients.Clients, cliAddr.String()) - activeClients.Mu.Unlock() -} diff --git a/pkg/proxy/userspace/rlimit.go b/pkg/proxy/userspace/rlimit.go deleted file mode 100644 index 4b67c884819..00000000000 --- a/pkg/proxy/userspace/rlimit.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build !windows -// +build !windows - -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package userspace - -import "golang.org/x/sys/unix" - -func setRLimit(limit uint64) error { - return unix.Setrlimit(unix.RLIMIT_NOFILE, &unix.Rlimit{Max: limit, Cur: limit}) -} diff --git a/pkg/proxy/userspace/rlimit_windows.go b/pkg/proxy/userspace/rlimit_windows.go deleted file mode 100644 index dfe495e6e20..00000000000 --- a/pkg/proxy/userspace/rlimit_windows.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build windows -// +build windows - -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package userspace - -func setRLimit(limit uint64) error { - return nil -} diff --git a/pkg/proxy/userspace/roundrobin.go b/pkg/proxy/userspace/roundrobin.go deleted file mode 100644 index f0551071124..00000000000 --- a/pkg/proxy/userspace/roundrobin.go +++ /dev/null @@ -1,343 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package userspace - -import ( - "errors" - "fmt" - "net" - "sort" - "sync" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/proxy" - "k8s.io/kubernetes/pkg/proxy/util" - stringslices "k8s.io/utils/strings/slices" -) - -var ( - ErrMissingServiceEntry = errors.New("missing service entry") - ErrMissingEndpoints = errors.New("missing endpoints") -) - -type affinityState struct { - clientIP string - //clientProtocol api.Protocol //not yet used - //sessionCookie string //not yet used - endpoint string - lastUsed time.Time -} - -type affinityPolicy struct { - affinityType v1.ServiceAffinity - affinityMap map[string]*affinityState // map client IP -> affinity info - ttlSeconds int -} - -// LoadBalancerRR is a round-robin load balancer. -type LoadBalancerRR struct { - lock sync.RWMutex - services map[proxy.ServicePortName]*balancerState -} - -// Ensure this implements LoadBalancer. -var _ LoadBalancer = &LoadBalancerRR{} - -type balancerState struct { - endpoints []string // a list of "ip:port" style strings - index int // current index into endpoints - affinity affinityPolicy -} - -func newAffinityPolicy(affinityType v1.ServiceAffinity, ttlSeconds int) *affinityPolicy { - return &affinityPolicy{ - affinityType: affinityType, - affinityMap: make(map[string]*affinityState), - ttlSeconds: ttlSeconds, - } -} - -// NewLoadBalancerRR returns a new LoadBalancerRR. 
-func NewLoadBalancerRR() *LoadBalancerRR { - return &LoadBalancerRR{ - services: map[proxy.ServicePortName]*balancerState{}, - } -} - -func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) error { - klog.V(4).InfoS("LoadBalancerRR NewService", "servicePortName", svcPort) - lb.lock.Lock() - defer lb.lock.Unlock() - lb.newServiceInternal(svcPort, affinityType, ttlSeconds) - return nil -} - -// This assumes that lb.lock is already held. -func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) *balancerState { - if ttlSeconds == 0 { - ttlSeconds = int(v1.DefaultClientIPServiceAffinitySeconds) //default to 3 hours if not specified. Should 0 be unlimited instead???? - } - - if state, exists := lb.services[svcPort]; !exists || state == nil { - lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlSeconds)} - klog.V(4).InfoS("LoadBalancerRR service does not exist, created", "servicePortName", svcPort) - } else if affinityType != "" { - lb.services[svcPort].affinity.affinityType = affinityType - } - return lb.services[svcPort] -} - -func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) { - klog.V(4).InfoS("LoadBalancerRR DeleteService", "servicePortName", svcPort) - lb.lock.Lock() - defer lb.lock.Unlock() - delete(lb.services, svcPort) -} - -// return true if this service is using some form of session affinity. -func isSessionAffinity(affinity *affinityPolicy) bool { - // Should never be empty string, but checking for it to be safe. - if affinity.affinityType == "" || affinity.affinityType == v1.ServiceAffinityNone { - return false - } - return true -} - -// ServiceHasEndpoints checks whether a service entry has endpoints. -func (lb *LoadBalancerRR) ServiceHasEndpoints(svcPort proxy.ServicePortName) bool { - lb.lock.RLock() - defer lb.lock.RUnlock() - state, exists := lb.services[svcPort] - if !exists || state == nil { - return false - } - return len(state.endpoints) > 0 -} - -// NextEndpoint returns a service endpoint. -// The service endpoint is chosen using the round-robin algorithm. -func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) { - // Coarse locking is simple. We can get more fine-grained if/when we - // can prove it matters. - lb.lock.Lock() - defer lb.lock.Unlock() - - state, exists := lb.services[svcPort] - if !exists || state == nil { - return "", ErrMissingServiceEntry - } - if len(state.endpoints) == 0 { - return "", ErrMissingEndpoints - } - klog.V(4).InfoS("NextEndpoint for service", "servicePortName", svcPort, "address", srcAddr, "endpoints", state.endpoints) - - sessionAffinityEnabled := isSessionAffinity(&state.affinity) - - var ipaddr string - if sessionAffinityEnabled { - // Caution: don't shadow ipaddr - var err error - ipaddr, _, err = net.SplitHostPort(srcAddr.String()) - if err != nil { - return "", fmt.Errorf("malformed source address %q: %v", srcAddr.String(), err) - } - if !sessionAffinityReset { - sessionAffinity, exists := state.affinity.affinityMap[ipaddr] - if exists && int(time.Since(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds { - // Affinity wins. 
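Condensing NextEndpoint's decision order as it continues below: a live affinity entry for the client's IP wins outright; otherwise the round-robin cursor advances and the chosen endpoint becomes the client's new affinity. A runnable distillation with deliberately simplified types (the real balancerState also handles per-service TTL configuration and locking):

package main

import (
    "fmt"
    "time"
)

// rrState is a cut-down balancerState: the endpoint list, the round-robin
// cursor, and a per-client-IP affinity table.
type rrState struct {
    endpoints []string
    index     int
    affinity  map[string]affinityEntry
    ttl       time.Duration
}

type affinityEntry struct {
    endpoint string
    lastUsed time.Time
}

// next returns an endpoint: a fresh affinity entry for this client wins;
// otherwise take the next endpoint in round-robin order and record it as
// the client's new affinity.
func (s *rrState) next(clientIP string) string {
    if e, ok := s.affinity[clientIP]; ok && time.Since(e.lastUsed) < s.ttl {
        e.lastUsed = time.Now()
        s.affinity[clientIP] = e
        return e.endpoint
    }
    ep := s.endpoints[s.index]
    s.index = (s.index + 1) % len(s.endpoints)
    s.affinity[clientIP] = affinityEntry{endpoint: ep, lastUsed: time.Now()}
    return ep
}

func main() {
    s := &rrState{
        endpoints: []string{"10.0.0.1:80", "10.0.0.2:80"},
        affinity:  map[string]affinityEntry{},
        ttl:       3 * time.Hour,
    }
    fmt.Println(s.next("1.1.1.1")) // 10.0.0.1:80
    fmt.Println(s.next("2.2.2.2")) // 10.0.0.2:80 (round-robin advanced)
    fmt.Println(s.next("1.1.1.1")) // 10.0.0.1:80 again (affinity wins)
}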
- endpoint := sessionAffinity.endpoint - sessionAffinity.lastUsed = time.Now() - klog.V(4).InfoS("NextEndpoint for service from IP with sessionAffinity", "servicePortName", svcPort, "IP", ipaddr, "sessionAffinity", sessionAffinity, "endpoint", endpoint) - return endpoint, nil - } - } - } - // Take the next endpoint. - endpoint := state.endpoints[state.index] - state.index = (state.index + 1) % len(state.endpoints) - - if sessionAffinityEnabled { - var affinity *affinityState - affinity = state.affinity.affinityMap[ipaddr] - if affinity == nil { - affinity = new(affinityState) //&affinityState{ipaddr, "TCP", "", endpoint, time.Now()} - state.affinity.affinityMap[ipaddr] = affinity - } - affinity.lastUsed = time.Now() - affinity.endpoint = endpoint - affinity.clientIP = ipaddr - klog.V(4).InfoS("Updated affinity key", "IP", ipaddr, "affinityState", state.affinity.affinityMap[ipaddr]) - } - - return endpoint, nil -} - -// Remove any session affinity records associated to a particular endpoint (for example when a pod goes down). -func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) { - for _, affinity := range state.affinity.affinityMap { - if affinity.endpoint == endpoint { - klog.V(4).InfoS("Removing client from affinityMap for service", "endpoint", affinity.endpoint, "servicePortName", svcPort) - delete(state.affinity.affinityMap, affinity.clientIP) - } - } -} - -// Loop through the valid endpoints and then the endpoints associated with the Load Balancer. -// Then remove any session affinity records that are not in both lists. -// This assumes the lb.lock is held. -func (lb *LoadBalancerRR) removeStaleAffinity(svcPort proxy.ServicePortName, newEndpoints []string) { - newEndpointsSet := sets.NewString() - for _, newEndpoint := range newEndpoints { - newEndpointsSet.Insert(newEndpoint) - } - - state, exists := lb.services[svcPort] - if !exists || state == nil { - return - } - for _, existingEndpoint := range state.endpoints { - if !newEndpointsSet.Has(existingEndpoint) { - klog.V(2).InfoS("Delete endpoint for service", "endpoint", existingEndpoint, "servicePortName", svcPort) - removeSessionAffinityByEndpoint(state, svcPort, existingEndpoint) - } - } -} - -func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *v1.Endpoints) { - portsToEndpoints := util.BuildPortsToEndpointsMap(endpoints) - - lb.lock.Lock() - defer lb.lock.Unlock() - - for portname := range portsToEndpoints { - svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname} - newEndpoints := portsToEndpoints[portname] - state, exists := lb.services[svcPort] - - if !exists || state == nil || len(newEndpoints) > 0 { - klog.V(1).InfoS("LoadBalancerRR: Setting endpoints service", "servicePortName", svcPort, "endpoints", newEndpoints) - // OnEndpointsAdd can be called without NewService being called externally. - // To be safe we will call it here. A new service will only be created - // if one does not already exist. - state = lb.newServiceInternal(svcPort, v1.ServiceAffinity(""), 0) - state.endpoints = util.ShuffleStrings(newEndpoints) - - // Reset the round-robin index. 
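For context around this index reset: every time the endpoint set for a service port changes, the balancer drops affinity records that point at vanished endpoints, installs a shuffled copy of the new list, and restarts round-robin from zero. Sketched here with pared-down types; the shuffle mirrors what util.ShuffleStrings does:

package main

import (
    "fmt"
    "math/rand"
    "sort"
)

// state is a pared-down balancerState for one service port.
type state struct {
    endpoints []string
    index     int
    affinity  map[string]string // client IP -> endpoint
}

// setEndpoints installs a new endpoint set the way the update path does:
// forget affinity for endpoints that no longer exist, shuffle the new list
// so load spreads across proxies, and restart the round-robin cursor.
func (s *state) setEndpoints(newEndpoints []string) {
    keep := map[string]bool{}
    for _, ep := range newEndpoints {
        keep[ep] = true
    }
    for ip, ep := range s.affinity {
        if !keep[ep] {
            delete(s.affinity, ip) // endpoint went away; drop the stickiness
        }
    }
    s.endpoints = append([]string(nil), newEndpoints...)
    rand.Shuffle(len(s.endpoints), func(i, j int) {
        s.endpoints[i], s.endpoints[j] = s.endpoints[j], s.endpoints[i]
    })
    s.index = 0
}

func main() {
    s := &state{
        endpoints: []string{"10.0.0.1:80", "10.0.0.2:80"},
        affinity:  map[string]string{"1.1.1.1": "10.0.0.2:80"},
    }
    s.setEndpoints([]string{"10.0.0.1:80", "10.0.0.3:80"})
    sort.Strings(s.endpoints)
    fmt.Println(s.endpoints, s.affinity) // the 10.0.0.2 affinity was dropped
}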
- state.index = 0 - } - } -} - -func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) { - portsToEndpoints := util.BuildPortsToEndpointsMap(endpoints) - oldPortsToEndpoints := util.BuildPortsToEndpointsMap(oldEndpoints) - registeredEndpoints := make(map[proxy.ServicePortName]bool) - - lb.lock.Lock() - defer lb.lock.Unlock() - - for portname := range portsToEndpoints { - svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname} - newEndpoints := portsToEndpoints[portname] - state, exists := lb.services[svcPort] - - curEndpoints := []string{} - if state != nil { - curEndpoints = state.endpoints - } - - if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(stringslices.Clone(curEndpoints), newEndpoints) { - klog.V(1).InfoS("LoadBalancerRR: Setting endpoints for service", "servicePortName", svcPort, "endpoints", newEndpoints) - lb.removeStaleAffinity(svcPort, newEndpoints) - // OnEndpointsUpdate can be called without NewService being called externally. - // To be safe we will call it here. A new service will only be created - // if one does not already exist. The affinity will be updated - // later, once NewService is called. - state = lb.newServiceInternal(svcPort, v1.ServiceAffinity(""), 0) - state.endpoints = util.ShuffleStrings(newEndpoints) - - // Reset the round-robin index. - state.index = 0 - } - registeredEndpoints[svcPort] = true - } - - // Now remove all endpoints missing from the update. - for portname := range oldPortsToEndpoints { - svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: oldEndpoints.Namespace, Name: oldEndpoints.Name}, Port: portname} - if _, exists := registeredEndpoints[svcPort]; !exists { - lb.resetService(svcPort) - } - } -} - -func (lb *LoadBalancerRR) resetService(svcPort proxy.ServicePortName) { - // If the service is still around, reset but don't delete. - if state, ok := lb.services[svcPort]; ok && state != nil { - if len(state.endpoints) > 0 { - klog.V(2).InfoS("LoadBalancerRR: Removing endpoints service", "servicePortName", svcPort) - state.endpoints = []string{} - } - state.index = 0 - state.affinity.affinityMap = map[string]*affinityState{} - } -} - -func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *v1.Endpoints) { - portsToEndpoints := util.BuildPortsToEndpointsMap(endpoints) - - lb.lock.Lock() - defer lb.lock.Unlock() - - for portname := range portsToEndpoints { - svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname} - lb.resetService(svcPort) - } -} - -func (lb *LoadBalancerRR) OnEndpointsSynced() { -} - -// Tests whether two slices are equivalent. This sorts both slices in-place. 
-func slicesEquiv(lhs, rhs []string) bool { - if len(lhs) != len(rhs) { - return false - } - sort.Strings(lhs) - sort.Strings(rhs) - return stringslices.Equal(lhs, rhs) -} - -func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortName) { - lb.lock.Lock() - defer lb.lock.Unlock() - - state, exists := lb.services[svcPort] - if !exists || state == nil { - return - } - for ip, affinity := range state.affinity.affinityMap { - if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds { - klog.V(4).InfoS("Removing client from affinityMap for service", "IP", affinity.clientIP, "servicePortName", svcPort) - delete(state.affinity.affinityMap, ip) - } - } -} diff --git a/pkg/proxy/userspace/roundrobin_test.go b/pkg/proxy/userspace/roundrobin_test.go deleted file mode 100644 index 315a2ea9865..00000000000 --- a/pkg/proxy/userspace/roundrobin_test.go +++ /dev/null @@ -1,678 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package userspace - -import ( - "net" - "testing" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/proxy" -) - -func TestLoadBalanceFailsWithNoEndpoints(t *testing.T) { - loadBalancer := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "does-not-exist"} - endpoint, err := loadBalancer.NextEndpoint(service, nil, false) - if err == nil { - t.Errorf("Didn't fail with non-existent service") - } - if len(endpoint) != 0 { - t.Errorf("Got an endpoint") - } -} - -func expectEndpoint(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) { - endpoint, err := loadBalancer.NextEndpoint(service, netaddr, false) - if err != nil { - t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err) - } - if endpoint != expected { - t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint) - } -} - -func expectEndpointWithSessionAffinityReset(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) { - endpoint, err := loadBalancer.NextEndpoint(service, netaddr, true) - if err != nil { - t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err) - } - if endpoint != expected { - t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint) - } -} - -func TestLoadBalanceWorksWithSingleEndpoint(t *testing.T) { - loadBalancer := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"} - endpoint, err := loadBalancer.NextEndpoint(service, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - endpoints := &v1.Endpoints{ 
- ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 40}}, - }}, - } - loadBalancer.OnEndpointsAdd(endpoints) - expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil) - expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil) - expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil) - expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil) -} - -func stringsInSlice(haystack []string, needles ...string) bool { - for _, needle := range needles { - found := false - for i := range haystack { - if haystack[i] == needle { - found = true - break - } - } - if found == false { - return false - } - } - return true -} - -func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) { - loadBalancer := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"} - endpoint, err := loadBalancer.NextEndpoint(service, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - endpoints := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "endpoint"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}}, - }}, - } - loadBalancer.OnEndpointsAdd(endpoints) - - shuffledEndpoints := loadBalancer.services[service].endpoints - if !stringsInSlice(shuffledEndpoints, "endpoint:1", "endpoint:2", "endpoint:3") { - t.Errorf("did not find expected endpoints: %v", shuffledEndpoints) - } - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], nil) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], nil) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil) -} - -func TestLoadBalanceWorksWithMultipleEndpointsMultiplePorts(t *testing.T) { - loadBalancer := NewLoadBalancerRR() - serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"} - serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"} - endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - endpoints := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}}, - }, - { - Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}}, - }, - }, - } - loadBalancer.OnEndpointsAdd(endpoints) - - shuffledEndpoints := loadBalancer.services[serviceP].endpoints - if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:1", "endpoint3:3") { - t.Errorf("did not find expected endpoints: %v", shuffledEndpoints) - } - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil) - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil) - expectEndpoint(t, loadBalancer, serviceP, 
shuffledEndpoints[0], nil) - - shuffledEndpoints = loadBalancer.services[serviceQ].endpoints - if !stringsInSlice(shuffledEndpoints, "endpoint1:2", "endpoint2:2", "endpoint3:4") { - t.Errorf("did not find expected endpoints: %v", shuffledEndpoints) - } - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil) -} - -func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) { - loadBalancer := NewLoadBalancerRR() - serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"} - serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"} - endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - endpointsv1 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}}, - }, - { - Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}}, - }, - { - Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}}, - }, - }, - } - loadBalancer.OnEndpointsAdd(endpointsv1) - - shuffledEndpoints := loadBalancer.services[serviceP].endpoints - if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:2", "endpoint3:3") { - t.Errorf("did not find expected endpoints: %v", shuffledEndpoints) - } - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil) - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil) - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil) - - shuffledEndpoints = loadBalancer.services[serviceQ].endpoints - if !stringsInSlice(shuffledEndpoints, "endpoint1:10", "endpoint2:20", "endpoint3:30") { - t.Errorf("did not find expected endpoints: %v", shuffledEndpoints) - } - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil) - - // Then update the configuration with one fewer endpoints, make sure - // we start in the beginning again - endpointsv2 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint4"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}}, - }, - { - Addresses: []v1.EndpointAddress{{IP: "endpoint5"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}}, - }, - }, - } - loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2) - - shuffledEndpoints = loadBalancer.services[serviceP].endpoints - if !stringsInSlice(shuffledEndpoints, "endpoint4:4", "endpoint5:5") { - t.Errorf("did not find expected endpoints: %v", shuffledEndpoints) - } - expectEndpoint(t, loadBalancer, 
serviceP, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil) - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil) - - shuffledEndpoints = loadBalancer.services[serviceQ].endpoints - if !stringsInSlice(shuffledEndpoints, "endpoint4:40", "endpoint5:50") { - t.Errorf("did not find expected endpoints: %v", shuffledEndpoints) - } - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil) - - // Clear endpoints - endpointsv3 := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil} - loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3) - - endpoint, err = loadBalancer.NextEndpoint(serviceP, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } -} - -func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) { - loadBalancer := NewLoadBalancerRR() - fooServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"} - barServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: "p"} - endpoint, err := loadBalancer.NextEndpoint(fooServiceP, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - endpoints1 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: fooServiceP.Name, Namespace: fooServiceP.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 123}}, - }, - }, - } - endpoints2 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: barServiceP.Name, Namespace: barServiceP.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 456}}, - }, - }, - } - loadBalancer.OnEndpointsAdd(endpoints1) - loadBalancer.OnEndpointsAdd(endpoints2) - shuffledFooEndpoints := loadBalancer.services[fooServiceP].endpoints - expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil) - expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil) - expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[2], nil) - expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil) - expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil) - - shuffledBarEndpoints := loadBalancer.services[barServiceP].endpoints - expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil) - expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil) - expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil) - expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil) - expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil) - - // Then update the configuration by removing foo - loadBalancer.OnEndpointsDelete(endpoints1) - endpoint, err = loadBalancer.NextEndpoint(fooServiceP, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - - // but 
bar is still there, and we continue RR from where we left off. - expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil) - expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil) - expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil) - expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil) -} - -func TestStickyLoadBalanceWorksWithNewServiceCalledFirst(t *testing.T) { - loadBalancer := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""} - endpoint, err := loadBalancer.NextEndpoint(service, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - - // Call NewService() before OnEndpointsUpdate() - loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds)) - endpoints := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{ - {Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}}, - {Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}}, - {Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, Ports: []v1.EndpointPort{{Port: 3}}}, - }, - } - loadBalancer.OnEndpointsAdd(endpoints) - - client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} - client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0} - client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0} - - ep1, err := loadBalancer.NextEndpoint(service, client1, false) - if err != nil { - t.Errorf("Didn't find a service for %s: %v", service, err) - } - expectEndpoint(t, loadBalancer, service, ep1, client1) - expectEndpoint(t, loadBalancer, service, ep1, client1) - expectEndpoint(t, loadBalancer, service, ep1, client1) - - ep2, err := loadBalancer.NextEndpoint(service, client2, false) - if err != nil { - t.Errorf("Didn't find a service for %s: %v", service, err) - } - expectEndpoint(t, loadBalancer, service, ep2, client2) - expectEndpoint(t, loadBalancer, service, ep2, client2) - expectEndpoint(t, loadBalancer, service, ep2, client2) - - ep3, err := loadBalancer.NextEndpoint(service, client3, false) - if err != nil { - t.Errorf("Didn't find a service for %s: %v", service, err) - } - expectEndpoint(t, loadBalancer, service, ep3, client3) - expectEndpoint(t, loadBalancer, service, ep3, client3) - expectEndpoint(t, loadBalancer, service, ep3, client3) - - expectEndpoint(t, loadBalancer, service, ep1, client1) - expectEndpoint(t, loadBalancer, service, ep2, client2) - expectEndpoint(t, loadBalancer, service, ep3, client3) - expectEndpoint(t, loadBalancer, service, ep1, client1) - expectEndpoint(t, loadBalancer, service, ep2, client2) - expectEndpoint(t, loadBalancer, service, ep3, client3) -} - -func TestStickyLoadBalanceWorksWithNewServiceCalledSecond(t *testing.T) { - loadBalancer := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""} - endpoint, err := loadBalancer.NextEndpoint(service, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - - // Call OnEndpointsUpdate() before NewService() - endpoints := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{ - {Addresses: []v1.EndpointAddress{{IP: 
"endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}}, - {Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}}, - }, - } - loadBalancer.OnEndpointsAdd(endpoints) - loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds)) - - client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} - client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0} - client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0} - - ep1, err := loadBalancer.NextEndpoint(service, client1, false) - if err != nil { - t.Errorf("Didn't find a service for %s: %v", service, err) - } - expectEndpoint(t, loadBalancer, service, ep1, client1) - expectEndpoint(t, loadBalancer, service, ep1, client1) - expectEndpoint(t, loadBalancer, service, ep1, client1) - - ep2, err := loadBalancer.NextEndpoint(service, client2, false) - if err != nil { - t.Errorf("Didn't find a service for %s: %v", service, err) - } - expectEndpoint(t, loadBalancer, service, ep2, client2) - expectEndpoint(t, loadBalancer, service, ep2, client2) - expectEndpoint(t, loadBalancer, service, ep2, client2) - - ep3, err := loadBalancer.NextEndpoint(service, client3, false) - if err != nil { - t.Errorf("Didn't find a service for %s: %v", service, err) - } - expectEndpoint(t, loadBalancer, service, ep3, client3) - expectEndpoint(t, loadBalancer, service, ep3, client3) - expectEndpoint(t, loadBalancer, service, ep3, client3) - - expectEndpoint(t, loadBalancer, service, ep1, client1) - expectEndpoint(t, loadBalancer, service, ep2, client2) - expectEndpoint(t, loadBalancer, service, ep3, client3) - expectEndpoint(t, loadBalancer, service, ep1, client1) - expectEndpoint(t, loadBalancer, service, ep2, client2) - expectEndpoint(t, loadBalancer, service, ep3, client3) -} - -func TestStickyLoadBalanaceWorksWithMultipleEndpointsRemoveOne(t *testing.T) { - client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} - client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0} - client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0} - client4 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 4), Port: 0} - client5 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 5), Port: 0} - client6 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 6), Port: 0} - loadBalancer := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""} - endpoint, err := loadBalancer.NextEndpoint(service, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - - loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds)) - endpointsv1 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint"}}, - Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}}, - }, - }, - } - loadBalancer.OnEndpointsAdd(endpointsv1) - shuffledEndpoints := loadBalancer.services[service].endpoints - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1) - client1Endpoint := shuffledEndpoints[0] - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2) - client2Endpoint := shuffledEndpoints[1] - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3) - 
client3Endpoint := shuffledEndpoints[2] - - endpointsv2 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint"}}, - Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}}, - }, - }, - } - loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2) - shuffledEndpoints = loadBalancer.services[service].endpoints - if client1Endpoint == "endpoint:3" { - client1Endpoint = shuffledEndpoints[0] - } else if client2Endpoint == "endpoint:3" { - client2Endpoint = shuffledEndpoints[0] - } else if client3Endpoint == "endpoint:3" { - client3Endpoint = shuffledEndpoints[0] - } - expectEndpoint(t, loadBalancer, service, client1Endpoint, client1) - expectEndpoint(t, loadBalancer, service, client2Endpoint, client2) - expectEndpoint(t, loadBalancer, service, client3Endpoint, client3) - - endpointsv3 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint"}}, - Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}}, - }, - }, - } - loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3) - shuffledEndpoints = loadBalancer.services[service].endpoints - expectEndpoint(t, loadBalancer, service, client1Endpoint, client1) - expectEndpoint(t, loadBalancer, service, client2Endpoint, client2) - expectEndpoint(t, loadBalancer, service, client3Endpoint, client3) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client4) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client5) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client6) -} - -func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) { - client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} - client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0} - client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0} - loadBalancer := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""} - endpoint, err := loadBalancer.NextEndpoint(service, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - - loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds)) - endpointsv1 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint"}}, - Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}}, - }, - }, - } - loadBalancer.OnEndpointsAdd(endpointsv1) - shuffledEndpoints := loadBalancer.services[service].endpoints - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2) - // Then update the configuration with one fewer endpoints, make sure - // we start in the beginning again - endpointsv2 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: 
service.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint"}}, - Ports: []v1.EndpointPort{{Port: 4}, {Port: 5}}, - }, - }, - } - loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2) - shuffledEndpoints = loadBalancer.services[service].endpoints - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2) - - // Clear endpoints - endpointsv3 := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil} - loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3) - - endpoint, err = loadBalancer.NextEndpoint(service, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } -} - -func TestStickyLoadBalanceWorksWithServiceRemoval(t *testing.T) { - client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} - client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0} - client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0} - loadBalancer := NewLoadBalancerRR() - fooService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""} - endpoint, err := loadBalancer.NextEndpoint(fooService, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - loadBalancer.NewService(fooService, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds)) - endpoints1 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: fooService.Name, Namespace: fooService.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint"}}, - Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}}, - }, - }, - } - barService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: ""} - loadBalancer.NewService(barService, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds)) - endpoints2 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: barService.Name, Namespace: barService.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint"}}, - Ports: []v1.EndpointPort{{Port: 4}, {Port: 5}}, - }, - }, - } - loadBalancer.OnEndpointsAdd(endpoints1) - loadBalancer.OnEndpointsAdd(endpoints2) - - shuffledFooEndpoints := loadBalancer.services[fooService].endpoints - expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1) - expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2) - expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3) - expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1) - expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1) - expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2) - expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2) - expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3) - expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3) - - shuffledBarEndpoints := 
loadBalancer.services[barService].endpoints - expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1) - expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2) - expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1) - expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1) - expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2) - expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2) - - // Then update the configuration by removing foo - loadBalancer.OnEndpointsDelete(endpoints1) - endpoint, err = loadBalancer.NextEndpoint(fooService, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - - // but bar is still there, and we continue RR from where we left off. - shuffledBarEndpoints = loadBalancer.services[barService].endpoints - expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1) - expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2) - expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1) - expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2) - expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1) - expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1) -} - -func TestStickyLoadBalanceWorksWithEndpointFails(t *testing.T) { - loadBalancer := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""} - endpoint, err := loadBalancer.NextEndpoint(service, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - - // Call NewService() before OnEndpointsUpdate() - loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds)) - endpoints := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{ - {Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}}, - {Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}}, - {Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, Ports: []v1.EndpointPort{{Port: 3}}}, - }, - } - loadBalancer.OnEndpointsAdd(endpoints) - - client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} - client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0} - client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0} - - ep1, err := loadBalancer.NextEndpoint(service, client1, false) - if err != nil { - t.Errorf("Didn't find a service for %s: %v", service, err) - } - - ep2, err := loadBalancer.NextEndpoint(service, client2, false) - if err != nil { - t.Errorf("Didn't find a service for %s: %v", service, err) - } - - ep3, err := loadBalancer.NextEndpoint(service, client3, false) - if err != nil { - t.Errorf("Didn't find a service for %s: %v", service, err) - } - - expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client1) - expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client1) - expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1) - - expectEndpoint(t, loadBalancer, service, ep2, client2) - expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2) - expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3) - 
expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1) - expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2) - expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3) -} diff --git a/pkg/proxy/winuserspace/loadbalancer.go b/pkg/proxy/winuserspace/loadbalancer.go deleted file mode 100644 index d96a2951f48..00000000000 --- a/pkg/proxy/winuserspace/loadbalancer.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package winuserspace - -import ( - "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/proxy" - proxyconfig "k8s.io/kubernetes/pkg/proxy/config" - "net" -) - -// LoadBalancer is an interface for distributing incoming requests to service endpoints. -type LoadBalancer interface { - // NextEndpoint returns the endpoint to handle a request for the given - // service-port and source address. - NextEndpoint(service proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) - NewService(service proxy.ServicePortName, sessionAffinityType v1.ServiceAffinity, stickyMaxAgeMinutes int) error - DeleteService(service proxy.ServicePortName) - CleanupStaleStickySessions(service proxy.ServicePortName) - - proxyconfig.EndpointsHandler -} diff --git a/pkg/proxy/winuserspace/proxier.go b/pkg/proxy/winuserspace/proxier.go deleted file mode 100644 index 0713542f261..00000000000 --- a/pkg/proxy/winuserspace/proxier.go +++ /dev/null @@ -1,496 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package winuserspace - -import ( - "fmt" - "net" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "k8s.io/klog/v2" - netutils "k8s.io/utils/net" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - utilnet "k8s.io/apimachinery/pkg/util/net" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "k8s.io/kubernetes/pkg/proxy" - "k8s.io/kubernetes/pkg/proxy/config" - "k8s.io/kubernetes/pkg/util/netsh" -) - -const allAvailableInterfaces string = "" - -type portal struct { - ip string - port int - isExternal bool -} - -type serviceInfo struct { - isAliveAtomic int32 // Only access this with atomic ops - portal portal - protocol v1.Protocol - socket proxySocket - timeout time.Duration - activeClients *clientCache - sessionAffinityType v1.ServiceAffinity -} - -func (info *serviceInfo) setAlive(b bool) { - var i int32 - if b { - i = 1 - } - atomic.StoreInt32(&info.isAliveAtomic, i) -} - -func (info *serviceInfo) isAlive() bool { - return atomic.LoadInt32(&info.isAliveAtomic) != 0 -} - -func logTimeout(err error) bool { - if e, ok := err.(net.Error); ok { - if e.Timeout() { - klog.V(3).InfoS("connection to endpoint closed due to inactivity") - return true - } - } - return false -} - -// Proxier is a simple proxy for TCP connections between a localhost:lport -// and services that provide the actual implementations. -type Proxier struct { - // EndpointSlice support has not been added for this proxier yet. - config.NoopEndpointSliceHandler - // TODO(imroc): implement node handler for winuserspace proxier. - config.NoopNodeHandler - - loadBalancer LoadBalancer - mu sync.Mutex // protects serviceMap - serviceMap map[ServicePortPortalName]*serviceInfo - syncPeriod time.Duration - udpIdleTimeout time.Duration - numProxyLoops int32 // use atomic ops to access this; mostly for testing - netsh netsh.Interface - hostIP net.IP -} - -// assert Proxier is a proxy.Provider -var _ proxy.Provider = &Proxier{} - -var ( - // ErrProxyOnLocalhost is returned by NewProxier if the user requests a proxier on - // the loopback address. May be checked for by callers of NewProxier to know whether - // the caller provided invalid input. - ErrProxyOnLocalhost = fmt.Errorf("cannot proxy on localhost") -) - -// Used below. -var localhostIPv4 = netutils.ParseIPSloppy("127.0.0.1") -var localhostIPv6 = netutils.ParseIPSloppy("::1") - -// NewProxier returns a new Proxier given a LoadBalancer and an address on -// which to listen. It is assumed that there is only a single Proxier active -// on a machine. 
An error will be returned if the proxier cannot be started -// due to an invalid ListenIP (loopback) -func NewProxier(loadBalancer LoadBalancer, listenIP net.IP, netsh netsh.Interface, pr utilnet.PortRange, syncPeriod, udpIdleTimeout time.Duration) (*Proxier, error) { - if listenIP.Equal(localhostIPv4) || listenIP.Equal(localhostIPv6) { - return nil, ErrProxyOnLocalhost - } - - hostIP, err := utilnet.ChooseHostInterface() - if err != nil { - return nil, fmt.Errorf("failed to select a host interface: %v", err) - } - - klog.V(2).InfoS("Setting proxy", "ip", hostIP) - return createProxier(loadBalancer, listenIP, netsh, hostIP, syncPeriod, udpIdleTimeout) -} - -func createProxier(loadBalancer LoadBalancer, listenIP net.IP, netsh netsh.Interface, hostIP net.IP, syncPeriod, udpIdleTimeout time.Duration) (*Proxier, error) { - return &Proxier{ - loadBalancer: loadBalancer, - serviceMap: make(map[ServicePortPortalName]*serviceInfo), - syncPeriod: syncPeriod, - udpIdleTimeout: udpIdleTimeout, - netsh: netsh, - hostIP: hostIP, - }, nil -} - -// Sync is called to immediately synchronize the proxier state -func (proxier *Proxier) Sync() { - proxier.cleanupStaleStickySessions() -} - -// SyncLoop runs periodic work. This is expected to run as a goroutine or as the main loop of the app. It does not return. -func (proxier *Proxier) SyncLoop() { - t := time.NewTicker(proxier.syncPeriod) - defer t.Stop() - for { - <-t.C - klog.V(6).InfoS("Periodic sync") - proxier.Sync() - } -} - -// cleanupStaleStickySessions cleans up any stale sticky session records in the hash map. -func (proxier *Proxier) cleanupStaleStickySessions() { - proxier.mu.Lock() - defer proxier.mu.Unlock() - servicePortNameMap := make(map[proxy.ServicePortName]bool) - for name := range proxier.serviceMap { - servicePortName := proxy.ServicePortName{ - NamespacedName: types.NamespacedName{ - Namespace: name.Namespace, - Name: name.Name, - }, - Port: name.Port, - } - if !servicePortNameMap[servicePortName] { - // ensure cleanup sticky sessions only gets called once per serviceportname - servicePortNameMap[servicePortName] = true - proxier.loadBalancer.CleanupStaleStickySessions(servicePortName) - } - } -} - -// This assumes proxier.mu is not locked. -func (proxier *Proxier) stopProxy(service ServicePortPortalName, info *serviceInfo) error { - proxier.mu.Lock() - defer proxier.mu.Unlock() - return proxier.stopProxyInternal(service, info) -} - -// This assumes proxier.mu is locked. -func (proxier *Proxier) stopProxyInternal(service ServicePortPortalName, info *serviceInfo) error { - delete(proxier.serviceMap, service) - info.setAlive(false) - err := info.socket.Close() - return err -} - -func (proxier *Proxier) getServiceInfo(service ServicePortPortalName) (*serviceInfo, bool) { - proxier.mu.Lock() - defer proxier.mu.Unlock() - info, ok := proxier.serviceMap[service] - return info, ok -} - -func (proxier *Proxier) setServiceInfo(service ServicePortPortalName, info *serviceInfo) { - proxier.mu.Lock() - defer proxier.mu.Unlock() - proxier.serviceMap[service] = info -} - -// addServicePortPortal starts listening for a new service, returning the serviceInfo. -// The timeout only applies to UDP connections, for now. 
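- // (A note, inferred from the relay sockets this file drives, not stated in it:
- // the TCP relay simply copies bytes until either side closes, so only the UDP
- // relay enforces this per-client idle timeout, via the activeClients cache.)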
-func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPortalName, protocol v1.Protocol, listenIP string, port int, timeout time.Duration) (*serviceInfo, error) { - var serviceIP net.IP - if listenIP != allAvailableInterfaces { - if serviceIP = netutils.ParseIPSloppy(listenIP); serviceIP == nil { - return nil, fmt.Errorf("could not parse ip %q", listenIP) - } - // add the IP address. Node port binds to all interfaces. - args := proxier.netshIPv4AddressAddArgs(serviceIP) - if existed, err := proxier.netsh.EnsureIPAddress(args, serviceIP); err != nil { - return nil, err - } else if !existed { - klog.V(3).InfoS("Added ip address to forwarder interface for service", "servicePortPortalName", servicePortPortalName.String(), "addr", net.JoinHostPort(listenIP, strconv.Itoa(port)), "protocol", protocol) - } - } - - // add the listener, proxy - sock, err := newProxySocket(protocol, serviceIP, port) - if err != nil { - return nil, err - } - si := &serviceInfo{ - isAliveAtomic: 1, - portal: portal{ - ip: listenIP, - port: port, - isExternal: false, - }, - protocol: protocol, - socket: sock, - timeout: timeout, - activeClients: newClientCache(), - sessionAffinityType: v1.ServiceAffinityNone, // default - } - proxier.setServiceInfo(servicePortPortalName, si) - - klog.V(2).InfoS("Proxying for service", "servicePortPortalName", servicePortPortalName.String(), "addr", net.JoinHostPort(listenIP, strconv.Itoa(port)), "protocol", protocol) - go func(service ServicePortPortalName, proxier *Proxier) { - defer runtime.HandleCrash() - atomic.AddInt32(&proxier.numProxyLoops, 1) - sock.ProxyLoop(service, si, proxier) - atomic.AddInt32(&proxier.numProxyLoops, -1) - }(servicePortPortalName, proxier) - - return si, nil -} - -func (proxier *Proxier) closeServicePortPortal(servicePortPortalName ServicePortPortalName, info *serviceInfo) error { - // turn off the proxy - if err := proxier.stopProxy(servicePortPortalName, info); err != nil { - return err - } - - // close the PortalProxy by deleting the service IP address - if info.portal.ip != allAvailableInterfaces { - serviceIP := netutils.ParseIPSloppy(info.portal.ip) - args := proxier.netshIPv4AddressDeleteArgs(serviceIP) - if err := proxier.netsh.DeleteIPAddress(args); err != nil { - return err - } - } - return nil -} - -// getListenIPPortMap returns a map of listen IP to port for a service.
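- // For example, with hypothetical values
- //
- //	svc := &v1.Service{Spec: v1.ServiceSpec{
- //		ClusterIP:   "10.0.0.1",
- //		ExternalIPs: []string{"1.2.3.4"},
- //	}}
- //	getListenIPPortMap(svc, 80, 30080)
- //
- // the result is {"10.0.0.1": 80, "1.2.3.4": 80, "": 30080}: the cluster IP and
- // each external IP map to the service port, and the empty key
- // (allAvailableInterfaces) makes the node port bind on every interface.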
-func getListenIPPortMap(service *v1.Service, listenPort int, nodePort int) map[string]int { - listenIPPortMap := make(map[string]int) - listenIPPortMap[service.Spec.ClusterIP] = listenPort - - for _, ip := range service.Spec.ExternalIPs { - listenIPPortMap[ip] = listenPort - } - - for _, ingress := range service.Status.LoadBalancer.Ingress { - listenIPPortMap[ingress.IP] = listenPort - } - - if nodePort != 0 { - listenIPPortMap[allAvailableInterfaces] = nodePort - } - - return listenIPPortMap -} - -func (proxier *Proxier) mergeService(service *v1.Service) map[ServicePortPortalName]bool { - if service == nil { - return nil - } - svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - if !helper.IsServiceIPSet(service) { - klog.V(3).InfoS("Skipping service due to clusterIP", "svcName", svcName, "ip", service.Spec.ClusterIP) - return nil - } - existingPortPortals := make(map[ServicePortPortalName]bool) - - for i := range service.Spec.Ports { - servicePort := &service.Spec.Ports[i] - // create a slice of all the source IPs to use for service port portals - listenIPPortMap := getListenIPPortMap(service, int(servicePort.Port), int(servicePort.NodePort)) - protocol := servicePort.Protocol - - for listenIP, listenPort := range listenIPPortMap { - servicePortPortalName := ServicePortPortalName{ - NamespacedName: svcName, - Port: servicePort.Name, - PortalIPName: listenIP, - } - existingPortPortals[servicePortPortalName] = true - info, exists := proxier.getServiceInfo(servicePortPortalName) - if exists && sameConfig(info, service, protocol, listenPort) { - // Nothing changed. - continue - } - if exists { - klog.V(4).InfoS("Something changed for service: stopping it", "servicePortPortalName", servicePortPortalName.String()) - if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil { - klog.ErrorS(err, "Failed to close service port portal", "servicePortPortalName", servicePortPortalName.String()) - } - } - klog.V(1).InfoS("Adding new service", "servicePortPortalName", servicePortPortalName.String(), "addr", net.JoinHostPort(listenIP, strconv.Itoa(listenPort)), "protocol", protocol) - info, err := proxier.addServicePortPortal(servicePortPortalName, protocol, listenIP, listenPort, proxier.udpIdleTimeout) - if err != nil { - klog.ErrorS(err, "Failed to start proxy", "servicePortPortalName", servicePortPortalName.String()) - continue - } - info.sessionAffinityType = service.Spec.SessionAffinity - klog.V(10).InfoS("record serviceInfo", "info", info) - } - if len(listenIPPortMap) > 0 { - // only one loadbalancer per service port portal - servicePortName := proxy.ServicePortName{ - NamespacedName: types.NamespacedName{ - Namespace: service.Namespace, - Name: service.Name, - }, - Port: servicePort.Name, - } - timeoutSeconds := 0 - if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP { - timeoutSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds) - } - proxier.loadBalancer.NewService(servicePortName, service.Spec.SessionAffinity, timeoutSeconds) - } - } - - return existingPortPortals -} - -func (proxier *Proxier) unmergeService(service *v1.Service, existingPortPortals map[ServicePortPortalName]bool) { - if service == nil { - return - } - svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - if !helper.IsServiceIPSet(service) { - klog.V(3).InfoS("Skipping service due to clusterIP", "svcName", svcName, "ip", service.Spec.ClusterIP) - return - } - - servicePortNameMap := 
make(map[proxy.ServicePortName]bool) - for name := range existingPortPortals { - servicePortName := proxy.ServicePortName{ - NamespacedName: types.NamespacedName{ - Namespace: name.Namespace, - Name: name.Name, - }, - Port: name.Port, - } - servicePortNameMap[servicePortName] = true - } - - for i := range service.Spec.Ports { - servicePort := &service.Spec.Ports[i] - serviceName := proxy.ServicePortName{NamespacedName: svcName, Port: servicePort.Name} - // create a slice of all the source IPs to use for service port portals - listenIPPortMap := getListenIPPortMap(service, int(servicePort.Port), int(servicePort.NodePort)) - - for listenIP := range listenIPPortMap { - servicePortPortalName := ServicePortPortalName{ - NamespacedName: svcName, - Port: servicePort.Name, - PortalIPName: listenIP, - } - if existingPortPortals[servicePortPortalName] { - continue - } - - klog.V(1).InfoS("Stopping service", "servicePortPortalName", servicePortPortalName.String()) - info, exists := proxier.getServiceInfo(servicePortPortalName) - if !exists { - klog.ErrorS(nil, "Service is being removed but doesn't exist", "servicePortPortalName", servicePortPortalName.String()) - continue - } - - if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil { - klog.ErrorS(err, "Failed to close service port portal", "servicePortPortalName", servicePortPortalName) - } - } - - // Only delete load balancer if all listen ips per name/port show inactive. - if !servicePortNameMap[serviceName] { - proxier.loadBalancer.DeleteService(serviceName) - } - } -} - -// OnServiceAdd is called whenever creation of new service object -// is observed. -func (proxier *Proxier) OnServiceAdd(service *v1.Service) { - _ = proxier.mergeService(service) -} - -// OnServiceUpdate is called whenever modification of an existing -// service object is observed. -func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) { - existingPortPortals := proxier.mergeService(service) - proxier.unmergeService(oldService, existingPortPortals) -} - -// OnServiceDelete is called whenever deletion of an existing service -// object is observed. -func (proxier *Proxier) OnServiceDelete(service *v1.Service) { - proxier.unmergeService(service, map[ServicePortPortalName]bool{}) -} - -// OnServiceSynced is called once all the initial event handlers were -// called and the state is fully propagated to local cache. -func (proxier *Proxier) OnServiceSynced() { -} - -// OnEndpointsAdd is called whenever creation of new endpoints object -// is observed. -func (proxier *Proxier) OnEndpointsAdd(endpoints *v1.Endpoints) { - proxier.loadBalancer.OnEndpointsAdd(endpoints) -} - -// OnEndpointsUpdate is called whenever modification of an existing -// endpoints object is observed. -func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) { - proxier.loadBalancer.OnEndpointsUpdate(oldEndpoints, endpoints) -} - -// OnEndpointsDelete is called whenever deletion of an existing endpoints -// object is observed. -func (proxier *Proxier) OnEndpointsDelete(endpoints *v1.Endpoints) { - proxier.loadBalancer.OnEndpointsDelete(endpoints) -} - -// OnEndpointsSynced is called once all the initial event handlers were -// called and the state is fully propagated to local cache. 
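- // For this proxier that simply means forwarding the signal to the load balancer.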
-func (proxier *Proxier) OnEndpointsSynced() { - proxier.loadBalancer.OnEndpointsSynced() -} - -func sameConfig(info *serviceInfo, service *v1.Service, protocol v1.Protocol, listenPort int) bool { - return info.protocol == protocol && info.portal.port == listenPort && info.sessionAffinityType == service.Spec.SessionAffinity -} - -func isTooManyFDsError(err error) bool { - return strings.Contains(err.Error(), "too many open files") -} - -func isClosedError(err error) bool { - // A brief discussion about handling closed error here: - // https://code.google.com/p/go/issues/detail?id=4373#c14 - // TODO: maybe create a stoppable TCP listener that returns a StoppedError - return strings.HasSuffix(err.Error(), "use of closed network connection") -} - -func (proxier *Proxier) netshIPv4AddressAddArgs(destIP net.IP) []string { - intName := proxier.netsh.GetInterfaceToAddIP() - args := []string{ - "interface", "ipv4", "add", "address", - "name=" + intName, - "address=" + destIP.String(), - } - - return args -} - -func (proxier *Proxier) netshIPv4AddressDeleteArgs(destIP net.IP) []string { - intName := proxier.netsh.GetInterfaceToAddIP() - args := []string{ - "interface", "ipv4", "delete", "address", - "name=" + intName, - "address=" + destIP.String(), - } - - return args -} diff --git a/pkg/proxy/winuserspace/proxier_test.go b/pkg/proxy/winuserspace/proxier_test.go deleted file mode 100644 index b5b517d8ac5..00000000000 --- a/pkg/proxy/winuserspace/proxier_test.go +++ /dev/null @@ -1,959 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package winuserspace - -import ( - "fmt" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "net/url" - "os" - "strconv" - "sync/atomic" - "testing" - "time" - - v1 "k8s.io/api/core/v1" - discovery "k8s.io/api/discovery/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/proxy" - netshtest "k8s.io/kubernetes/pkg/util/netsh/testing" - netutils "k8s.io/utils/net" -) - -const ( - udpIdleTimeoutForTest = 250 * time.Millisecond -) - -func joinHostPort(host string, port int) string { - return net.JoinHostPort(host, fmt.Sprintf("%d", port)) -} - -func waitForClosedPortTCP(p *Proxier, proxyPort int) error { - for i := 0; i < 50; i++ { - conn, err := net.Dial("tcp", joinHostPort("", proxyPort)) - if err != nil { - return nil - } - conn.Close() - time.Sleep(1 * time.Millisecond) - } - return fmt.Errorf("port %d still open", proxyPort) -} - -func waitForClosedPortUDP(p *Proxier, proxyPort int) error { - for i := 0; i < 50; i++ { - conn, err := net.Dial("udp", joinHostPort("", proxyPort)) - if err != nil { - return nil - } - conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) - // To detect a closed UDP port write, then read. 
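- // A UDP write usually succeeds even when nothing is listening; the kernel
- // only surfaces the ICMP "port unreachable" reply as an error on a later
- // Write or Read, while a plain timeout just means no answer arrived.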
- _, err = conn.Write([]byte("x")) - if err != nil { - if e, ok := err.(net.Error); ok && !e.Timeout() { - return nil - } - } - var buf [4]byte - _, err = conn.Read(buf[0:]) - if err != nil { - if e, ok := err.(net.Error); ok && !e.Timeout() { - return nil - } - } - conn.Close() - time.Sleep(1 * time.Millisecond) - } - return fmt.Errorf("port %d still open", proxyPort) -} - -// udpEchoServer is a simple echo server in UDP, intended for testing the proxy. -type udpEchoServer struct { - net.PacketConn -} - -func newUDPEchoServer() (*udpEchoServer, error) { - packetconn, err := net.ListenPacket("udp", ":0") - if err != nil { - return nil, err - } - return &udpEchoServer{packetconn}, nil -} - -func (r *udpEchoServer) Loop() { - var buffer [4096]byte - for { - n, cliAddr, err := r.ReadFrom(buffer[0:]) - if err != nil { - fmt.Printf("ReadFrom failed: %v\n", err) - continue - } - r.WriteTo(buffer[0:n], cliAddr) - } -} - -var tcpServerPort int32 -var udpServerPort int32 - -func TestMain(m *testing.M) { - // TCP setup. - tcp := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - w.Write([]byte(r.URL.Path[1:])) - })) - defer tcp.Close() - - u, err := url.Parse(tcp.URL) - if err != nil { - panic(fmt.Sprintf("failed to parse: %v", err)) - } - _, port, err := net.SplitHostPort(u.Host) - if err != nil { - panic(fmt.Sprintf("failed to parse: %v", err)) - } - tcpServerPortValue, err := strconv.Atoi(port) - if err != nil { - panic(fmt.Sprintf("failed to atoi(%s): %v", port, err)) - } - tcpServerPort = int32(tcpServerPortValue) - - // UDP setup. - udp, err := newUDPEchoServer() - if err != nil { - panic(fmt.Sprintf("failed to make a UDP server: %v", err)) - } - _, port, err = net.SplitHostPort(udp.LocalAddr().String()) - if err != nil { - panic(fmt.Sprintf("failed to parse: %v", err)) - } - udpServerPortValue, err := strconv.Atoi(port) - if err != nil { - panic(fmt.Sprintf("failed to atoi(%s): %v", port, err)) - } - udpServerPort = int32(udpServerPortValue) - go udp.Loop() - - ret := m.Run() - // it should be safe to call Close() multiple times. 
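- // (os.Exit below skips deferred calls, so the defer near the top of TestMain
- // never fires; this explicit Close does the real work, and httptest.Server
- // guards against being closed twice anyway.)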
- tcp.Close() - os.Exit(ret) -} - -func testEchoTCP(t *testing.T, address string, port int) { - path := "aaaaa" - res, err := http.Get("http://" + address + ":" + fmt.Sprintf("%d", port) + "/" + path) - if err != nil { - t.Fatalf("error connecting to server: %v", err) - } - defer res.Body.Close() - data, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Errorf("error reading data: %v %v", err, string(data)) - } - if string(data) != path { - t.Errorf("expected: %s, got %s", path, string(data)) - } -} - -func testEchoUDP(t *testing.T, address string, port int) { - data := "abc123" - - conn, err := net.Dial("udp", joinHostPort(address, port)) - if err != nil { - t.Fatalf("error connecting to server: %v", err) - } - if _, err := conn.Write([]byte(data)); err != nil { - t.Fatalf("error sending to server: %v", err) - } - var resp [1024]byte - n, err := conn.Read(resp[0:]) - if err != nil { - t.Errorf("error receiving data: %v", err) - } - if string(resp[0:n]) != data { - t.Errorf("expected: %s, got %s", data, string(resp[0:n])) - } -} - -func waitForNumProxyLoops(t *testing.T, p *Proxier, want int32) { - var got int32 - for i := 0; i < 600; i++ { - got = atomic.LoadInt32(&p.numProxyLoops) - if got == want { - return - } - time.Sleep(100 * time.Millisecond) - } - t.Errorf("expected %d ProxyLoops running, got %d", want, got) -} - -func waitForNumProxyClients(t *testing.T, s *serviceInfo, want int, timeout time.Duration) { - var got int - now := time.Now() - deadline := now.Add(timeout) - for time.Now().Before(deadline) { - s.activeClients.mu.Lock() - got = len(s.activeClients.clients) - s.activeClients.mu.Unlock() - if got == want { - return - } - time.Sleep(500 * time.Millisecond) - } - t.Errorf("expected %d ProxyClients live, got %d", want, got) -} - -func getPortNum(t *testing.T, addr string) int { - _, portStr, err := net.SplitHostPort(addr) - if err != nil { - t.Errorf("error getting port from %s", addr) - return 0 - } - portNum, err := strconv.Atoi(portStr) - if err != nil { - t.Errorf("error getting port from %s", addr) - return 0 - } - - return portNum -} - -func TestTCPProxy(t *testing.T) { - lb := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"} - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}}, - }}, - }) - - listenIP := "0.0.0.0" - p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) - if err != nil { - t.Fatal(err) - } - waitForNumProxyLoops(t, p, 0) - - servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP} - svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second) - if err != nil { - t.Fatalf("error adding new service: %#v", err) - } - testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String())) - waitForNumProxyLoops(t, p, 1) -} - -func TestUDPProxy(t *testing.T) { - lb := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"} - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: 
service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}}, - }}, - }) - - listenIP := "0.0.0.0" - p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) - if err != nil { - t.Fatal(err) - } - waitForNumProxyLoops(t, p, 0) - - servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP} - svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second) - if err != nil { - t.Fatalf("error adding new service: %#v", err) - } - testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String())) - waitForNumProxyLoops(t, p, 1) -} - -func TestUDPProxyTimeout(t *testing.T) { - lb := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"} - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}}, - }}, - }) - - listenIP := "0.0.0.0" - p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) - if err != nil { - t.Fatal(err) - } - waitForNumProxyLoops(t, p, 0) - - servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP} - svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second) - if err != nil { - t.Fatalf("error adding new service: %#v", err) - } - waitForNumProxyLoops(t, p, 1) - testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String())) - // When connecting to a UDP service endpoint, there should be a Conn for proxy. - waitForNumProxyClients(t, svcInfo, 1, time.Second) - // If conn has no activity for serviceInfo.timeout since last Read/Write, it should be closed because of timeout. 
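- // (The portal above was added with a one-second timeout, so a two-second
- // wait gives the idle reaper comfortable margin.)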
- waitForNumProxyClients(t, svcInfo, 0, 2*time.Second) -} - -func TestMultiPortProxy(t *testing.T) { - lb := NewLoadBalancerRR() - serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-p"}, Port: "p"} - serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo-q"}, Port: "q"} - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: "p", Protocol: "TCP", Port: tcpServerPort}}, - }}, - }) - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: serviceQ.Name, Namespace: serviceQ.Namespace}, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: "q", Protocol: "UDP", Port: udpServerPort}}, - }}, - }) - - listenIP := "0.0.0.0" - p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) - if err != nil { - t.Fatal(err) - } - waitForNumProxyLoops(t, p, 0) - - servicePortPortalNameP := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceP.Namespace, Name: serviceP.Name}, Port: serviceP.Port, PortalIPName: listenIP} - svcInfoP, err := p.addServicePortPortal(servicePortPortalNameP, "TCP", listenIP, 0, time.Second) - if err != nil { - t.Fatalf("error adding new service: %#v", err) - } - testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfoP.socket.Addr().String())) - waitForNumProxyLoops(t, p, 1) - - servicePortPortalNameQ := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceQ.Namespace, Name: serviceQ.Name}, Port: serviceQ.Port, PortalIPName: listenIP} - svcInfoQ, err := p.addServicePortPortal(servicePortPortalNameQ, "UDP", listenIP, 0, time.Second) - if err != nil { - t.Fatalf("error adding new service: %#v", err) - } - testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfoQ.socket.Addr().String())) - waitForNumProxyLoops(t, p, 2) -} - -func TestMultiPortOnServiceAdd(t *testing.T) { - lb := NewLoadBalancerRR() - serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"} - serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "q"} - serviceX := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "x"} - - listenIP := "0.0.0.0" - p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) - if err != nil { - t.Fatal(err) - } - waitForNumProxyLoops(t, p, 0) - - p.OnServiceAdd(&v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, - Spec: v1.ServiceSpec{ClusterIP: "0.0.0.0", Ports: []v1.ServicePort{{ - Name: "p", - Port: 0, - Protocol: "TCP", - }, { - Name: "q", - Port: 0, - Protocol: "UDP", - }}}, - }) - waitForNumProxyLoops(t, p, 2) - - servicePortPortalNameP := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceP.Namespace, Name: serviceP.Name}, Port: serviceP.Port, PortalIPName: listenIP} - svcInfo, exists := p.getServiceInfo(servicePortPortalNameP) - if !exists { - t.Fatalf("can't find serviceInfo for %s", servicePortPortalNameP) - } - if 
svcInfo.portal.ip != "0.0.0.0" || svcInfo.portal.port != 0 || svcInfo.protocol != "TCP" { - t.Errorf("unexpected serviceInfo for %s: %#v", serviceP, svcInfo) - } - - servicePortPortalNameQ := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceQ.Namespace, Name: serviceQ.Name}, Port: serviceQ.Port, PortalIPName: listenIP} - svcInfo, exists = p.getServiceInfo(servicePortPortalNameQ) - if !exists { - t.Fatalf("can't find serviceInfo for %s", servicePortPortalNameQ) - } - if svcInfo.portal.ip != "0.0.0.0" || svcInfo.portal.port != 0 || svcInfo.protocol != "UDP" { - t.Errorf("unexpected serviceInfo for %s: %#v", serviceQ, svcInfo) - } - - servicePortPortalNameX := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: serviceX.Namespace, Name: serviceX.Name}, Port: serviceX.Port, PortalIPName: listenIP} - svcInfo, exists = p.getServiceInfo(servicePortPortalNameX) - if exists { - t.Fatalf("found unwanted serviceInfo for %s: %#v", serviceX, svcInfo) - } -} - -// Helper: Stops the proxy for the named service. -func stopProxyByName(proxier *Proxier, service ServicePortPortalName) error { - info, found := proxier.getServiceInfo(service) - if !found { - return fmt.Errorf("unknown service: %s", service) - } - return proxier.stopProxy(service, info) -} - -func TestTCPProxyStop(t *testing.T) { - lb := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"} - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name}, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}}, - }}, - }) - - listenIP := "0.0.0.0" - p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) - if err != nil { - t.Fatal(err) - } - waitForNumProxyLoops(t, p, 0) - - servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP} - svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second) - if err != nil { - t.Fatalf("error adding new service: %#v", err) - } - if !svcInfo.isAlive() { - t.Fatalf("wrong value for isAlive(): expected true") - } - conn, err := net.Dial("tcp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String()))) - if err != nil { - t.Fatalf("error connecting to proxy: %v", err) - } - conn.Close() - waitForNumProxyLoops(t, p, 1) - - stopProxyByName(p, servicePortPortalName) - if svcInfo.isAlive() { - t.Fatalf("wrong value for isAlive(): expected false") - } - // Wait for the port to really close. 
- if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil { - t.Fatalf(err.Error()) - } - waitForNumProxyLoops(t, p, 0) -} - -func TestUDPProxyStop(t *testing.T) { - lb := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"} - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name}, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}}, - }}, - }) - - listenIP := "0.0.0.0" - p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) - if err != nil { - t.Fatal(err) - } - waitForNumProxyLoops(t, p, 0) - - servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP} - svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second) - if err != nil { - t.Fatalf("error adding new service: %#v", err) - } - conn, err := net.Dial("udp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String()))) - if err != nil { - t.Fatalf("error connecting to proxy: %v", err) - } - conn.Close() - waitForNumProxyLoops(t, p, 1) - - stopProxyByName(p, servicePortPortalName) - // Wait for the port to really close. - if err := waitForClosedPortUDP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil { - t.Fatalf(err.Error()) - } - waitForNumProxyLoops(t, p, 0) -} - -func TestTCPProxyUpdateDelete(t *testing.T) { - lb := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"} - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name}, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}}, - }}, - }) - - listenIP := "0.0.0.0" - p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) - if err != nil { - t.Fatal(err) - } - waitForNumProxyLoops(t, p, 0) - - servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP} - svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second) - if err != nil { - t.Fatalf("error adding new service: %#v", err) - } - conn, err := net.Dial("tcp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String()))) - if err != nil { - t.Fatalf("error connecting to proxy: %v", err) - } - conn.Close() - waitForNumProxyLoops(t, p, 1) - - p.OnServiceDelete(&v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{ - Name: "p", - Port: int32(getPortNum(t, svcInfo.socket.Addr().String())), - Protocol: "TCP", - }}}, - }) - if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil { - t.Fatalf(err.Error()) - } - waitForNumProxyLoops(t, p, 0) -} - -func TestUDPProxyUpdateDelete(t *testing.T) { - lb := NewLoadBalancerRR() -
service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"} - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name}, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}}, - }}, - }) - - listenIP := "0.0.0.0" - p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) - if err != nil { - t.Fatal(err) - } - waitForNumProxyLoops(t, p, 0) - - servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP} - svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second) - if err != nil { - t.Fatalf("error adding new service: %#v", err) - } - conn, err := net.Dial("udp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String()))) - if err != nil { - t.Fatalf("error connecting to proxy: %v", err) - } - conn.Close() - waitForNumProxyLoops(t, p, 1) - - p.OnServiceDelete(&v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{ - Name: "p", - Port: int32(getPortNum(t, svcInfo.socket.Addr().String())), - Protocol: "UDP", - }}}, - }) - if err := waitForClosedPortUDP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil { - t.Fatalf(err.Error()) - } - waitForNumProxyLoops(t, p, 0) -} - -func TestTCPProxyUpdateDeleteUpdate(t *testing.T) { - lb := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"} - endpoint := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}}, - }}, - } - lb.OnEndpointsAdd(endpoint) - - listenIP := "0.0.0.0" - p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) - if err != nil { - t.Fatal(err) - } - waitForNumProxyLoops(t, p, 0) - - servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP} - svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second) - if err != nil { - t.Fatalf("error adding new service: %#v", err) - } - conn, err := net.Dial("tcp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String()))) - if err != nil { - t.Fatalf("error connecting to proxy: %v", err) - } - conn.Close() - waitForNumProxyLoops(t, p, 1) - - p.OnServiceDelete(&v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{ - Name: "p", - Port: int32(getPortNum(t, svcInfo.socket.Addr().String())), - Protocol: "TCP", - }}}, - }) - if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil { - t.Fatalf(err.Error()) - } - waitForNumProxyLoops(t, p, 0) - - // need to add endpoint here because it got cleaned up during service delete -
lb.OnEndpointsAdd(endpoint) - p.OnServiceAdd(&v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{ - Name: "p", - Port: int32(getPortNum(t, svcInfo.socket.Addr().String())), - Protocol: "TCP", - }}}, - }) - svcInfo, exists := p.getServiceInfo(servicePortPortalName) - if !exists { - t.Fatalf("can't find serviceInfo for %s", servicePortPortalName) - } - testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String())) - waitForNumProxyLoops(t, p, 1) -} - -func TestUDPProxyUpdateDeleteUpdate(t *testing.T) { - lb := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"} - endpoint := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}}, - }}, - } - lb.OnEndpointsAdd(endpoint) - - listenIP := "0.0.0.0" - p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) - if err != nil { - t.Fatal(err) - } - waitForNumProxyLoops(t, p, 0) - - servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP} - svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second) - if err != nil { - t.Fatalf("error adding new service: %#v", err) - } - conn, err := net.Dial("udp", joinHostPort("", getPortNum(t, svcInfo.socket.Addr().String()))) - if err != nil { - t.Fatalf("error connecting to proxy: %v", err) - } - conn.Close() - waitForNumProxyLoops(t, p, 1) - - p.OnServiceDelete(&v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{ - Name: "p", - Port: int32(getPortNum(t, svcInfo.socket.Addr().String())), - Protocol: "UDP", - }}}, - }) - if err := waitForClosedPortUDP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil { - t.Fatalf(err.Error()) - } - waitForNumProxyLoops(t, p, 0) - - // need to add endpoint here because it got cleaned up during service delete - lb.OnEndpointsAdd(endpoint) - p.OnServiceAdd(&v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{ - Name: "p", - Port: int32(getPortNum(t, svcInfo.socket.Addr().String())), - Protocol: "UDP", - }}}, - }) - svcInfo, exists := p.getServiceInfo(servicePortPortalName) - if !exists { - t.Fatalf("can't find serviceInfo for %s", servicePortPortalName) - } - testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String())) - waitForNumProxyLoops(t, p, 1) -} - -func TestTCPProxyUpdatePort(t *testing.T) { - lb := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"} - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}}, - }}, - }) - - listenIP := "0.0.0.0" - p, err := createProxier(lb,
netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) - if err != nil { - t.Fatal(err) - } - waitForNumProxyLoops(t, p, 0) - - servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP} - svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second) - if err != nil { - t.Fatalf("error adding new service: %#v", err) - } - testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String())) - waitForNumProxyLoops(t, p, 1) - - p.OnServiceAdd(&v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{ - Name: "p", - Port: 0, - Protocol: "TCP", - }}}, - }) - // Wait for the socket to actually get free. - if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil { - t.Fatalf(err.Error()) - } - svcInfo, exists := p.getServiceInfo(servicePortPortalName) - if !exists { - t.Fatalf("can't find serviceInfo for %s", servicePortPortalName) - } - testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String())) - // This is a bit async, but this should be sufficient. - time.Sleep(500 * time.Millisecond) - waitForNumProxyLoops(t, p, 1) -} - -func TestUDPProxyUpdatePort(t *testing.T) { - lb := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"} - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: udpServerPort}}, - }}, - }) - - listenIP := "0.0.0.0" - p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) - if err != nil { - t.Fatal(err) - } - waitForNumProxyLoops(t, p, 0) - - servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP} - svcInfo, err := p.addServicePortPortal(servicePortPortalName, "UDP", listenIP, 0, time.Second) - if err != nil { - t.Fatalf("error adding new service: %#v", err) - } - waitForNumProxyLoops(t, p, 1) - - p.OnServiceAdd(&v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{ - Name: "p", - Port: 0, - Protocol: "UDP", - }}}, - }) - // Wait for the socket to actually get free. 
- if err := waitForClosedPortUDP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil { - t.Fatalf(err.Error()) - } - svcInfo, exists := p.getServiceInfo(servicePortPortalName) - if !exists { - t.Fatalf("can't find serviceInfo for %s", servicePortPortalName) - } - testEchoUDP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String())) - waitForNumProxyLoops(t, p, 1) -} - -func TestProxyUpdatePublicIPs(t *testing.T) { - lb := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"} - lb.OnEndpointsAdd(&v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}}, - }}, - }) - - listenIP := "0.0.0.0" - p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) - if err != nil { - t.Fatal(err) - } - waitForNumProxyLoops(t, p, 0) - - servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP} - svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second) - if err != nil { - t.Fatalf("error adding new service: %#v", err) - } - testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String())) - waitForNumProxyLoops(t, p, 1) - - p.OnServiceAdd(&v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: v1.ServiceSpec{ - Ports: []v1.ServicePort{{ - Name: "p", - Port: int32(svcInfo.portal.port), - Protocol: "TCP", - }}, - ClusterIP: svcInfo.portal.ip, - ExternalIPs: []string{"0.0.0.0"}, - }, - }) - // Wait for the socket to actually get free. - if err := waitForClosedPortTCP(p, getPortNum(t, svcInfo.socket.Addr().String())); err != nil { - t.Fatalf(err.Error()) - } - svcInfo, exists := p.getServiceInfo(servicePortPortalName) - if !exists { - t.Fatalf("can't find serviceInfo for %s", servicePortPortalName) - } - testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String())) - // This is a bit async, but this should be sufficient. 
- time.Sleep(500 * time.Millisecond) - waitForNumProxyLoops(t, p, 1) -} - -func TestProxyUpdatePortal(t *testing.T) { - lb := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"} - endpoint := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{{ - Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: tcpServerPort}}, - }}, - } - lb.OnEndpointsAdd(endpoint) - - listenIP := "0.0.0.0" - p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) - if err != nil { - t.Fatal(err) - } - waitForNumProxyLoops(t, p, 0) - - servicePortPortalName := ServicePortPortalName{NamespacedName: types.NamespacedName{Namespace: service.Namespace, Name: service.Name}, Port: service.Port, PortalIPName: listenIP} - svcInfo, err := p.addServicePortPortal(servicePortPortalName, "TCP", listenIP, 0, time.Second) - if err != nil { - t.Fatalf("error adding new service: %#v", err) - } - testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String())) - waitForNumProxyLoops(t, p, 1) - - svcv0 := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{ - Name: "p", - Port: int32(svcInfo.portal.port), - Protocol: "TCP", - }}}, - } - - svcv1 := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: v1.ServiceSpec{ClusterIP: "", Ports: []v1.ServicePort{{ - Name: "p", - Port: int32(svcInfo.portal.port), - Protocol: "TCP", - }}}, - } - - p.OnServiceUpdate(svcv0, svcv1) - _, exists := p.getServiceInfo(servicePortPortalName) - if exists { - t.Fatalf("service with empty ClusterIP should not be included in the proxy") - } - - svcv2 := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: v1.ServiceSpec{ClusterIP: "None", Ports: []v1.ServicePort{{ - Name: "p", - Port: int32(getPortNum(t, svcInfo.socket.Addr().String())), - Protocol: "TCP", - }}}, - } - p.OnServiceUpdate(svcv1, svcv2) - _, exists = p.getServiceInfo(servicePortPortalName) - if exists { - t.Fatalf("service with 'None' as ClusterIP should not be included in the proxy") - } - - svcv3 := &v1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: v1.ServiceSpec{ClusterIP: listenIP, Ports: []v1.ServicePort{{ - Name: "p", - Port: int32(svcInfo.portal.port), - Protocol: "TCP", - }}}, - } - p.OnServiceUpdate(svcv2, svcv3) - lb.OnEndpointsAdd(endpoint) - svcInfo, exists = p.getServiceInfo(servicePortPortalName) - if !exists { - t.Fatalf("service with ClusterIP set not found in the proxy") - } - testEchoTCP(t, "127.0.0.1", getPortNum(t, svcInfo.socket.Addr().String())) - waitForNumProxyLoops(t, p, 1) -} - -func TestNoopEndpointSlice(t *testing.T) { - p := Proxier{} - p.OnEndpointSliceAdd(&discovery.EndpointSlice{}) - p.OnEndpointSliceUpdate(&discovery.EndpointSlice{}, &discovery.EndpointSlice{}) - p.OnEndpointSliceDelete(&discovery.EndpointSlice{}) - p.OnEndpointSlicesSynced() -} - -// TODO(justinsb): Add test for nodePort conflict detection, once we have nodePort wired in diff --git a/pkg/proxy/winuserspace/proxysocket.go b/pkg/proxy/winuserspace/proxysocket.go deleted file mode 100644 index 
a788894ce2b..00000000000
--- a/pkg/proxy/winuserspace/proxysocket.go
+++ /dev/null
@@ -1,313 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package winuserspace
-
-import (
-	"fmt"
-	"io"
-	"net"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/proxy"
-)
-
-// proxySocket is an abstraction over the TCP/UDP sockets which are proxied.
-type proxySocket interface {
-	// Addr gets the net.Addr for a proxySocket.
-	Addr() net.Addr
-	// Close stops the proxySocket from accepting incoming connections.
-	// Each implementation should comment on the impact of calling Close
-	// while sessions are active.
-	Close() error
-	// ProxyLoop proxies incoming connections for the specified service to the service endpoints.
-	ProxyLoop(service ServicePortPortalName, info *serviceInfo, proxier *Proxier)
-	// ListenPort returns the host port that the proxySocket is listening on
-	ListenPort() int
-}
-
-func newProxySocket(protocol v1.Protocol, ip net.IP, port int) (proxySocket, error) {
-	host := ""
-	if ip != nil {
-		host = ip.String()
-	}
-
-	switch strings.ToUpper(string(protocol)) {
-	case "TCP":
-		listener, err := net.Listen("tcp", net.JoinHostPort(host, strconv.Itoa(port)))
-		if err != nil {
-			return nil, err
-		}
-		return &tcpProxySocket{Listener: listener, port: port}, nil
-	case "UDP":
-		addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, strconv.Itoa(port)))
-		if err != nil {
-			return nil, err
-		}
-		conn, err := net.ListenUDP("udp", addr)
-		if err != nil {
-			return nil, err
-		}
-		return &udpProxySocket{UDPConn: conn, port: port}, nil
-	case "SCTP":
-		return nil, fmt.Errorf("SCTP is not supported for user space proxy")
-	}
-	return nil, fmt.Errorf("unknown protocol %q", protocol)
-}
-
-// endpointDialTimeout is the list of timeouts used, in order, when dialing a backend endpoint.
-var endpointDialTimeout = []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 2 * time.Second}
-
-// tcpProxySocket implements proxySocket. Close() is implemented by net.Listener. When Close() is called,
-// no new connections are allowed but existing connections are left untouched.
-type tcpProxySocket struct { - net.Listener - port int -} - -func (tcp *tcpProxySocket) ListenPort() int { - return tcp.port -} - -func tryConnect(service ServicePortPortalName, srcAddr net.Addr, protocol string, proxier *Proxier) (out net.Conn, err error) { - sessionAffinityReset := false - for _, dialTimeout := range endpointDialTimeout { - servicePortName := proxy.ServicePortName{ - NamespacedName: types.NamespacedName{ - Namespace: service.Namespace, - Name: service.Name, - }, - Port: service.Port, - } - endpoint, err := proxier.loadBalancer.NextEndpoint(servicePortName, srcAddr, sessionAffinityReset) - if err != nil { - klog.ErrorS(err, "Couldn't find an endpoint for service", "service", klog.KRef(service.Namespace, service.Name)) - return nil, err - } - klog.V(3).InfoS("Mapped service to endpoint", "service", klog.KRef(service.Namespace, service.Name), "endpoint", endpoint) - // TODO: This could spin up a new goroutine to make the outbound connection, - // and keep accepting inbound traffic. - outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout) - if err != nil { - if isTooManyFDsError(err) { - panic("Dial failed: " + err.Error()) - } - klog.ErrorS(err, "Dial failed") - sessionAffinityReset = true - continue - } - return outConn, nil - } - return nil, fmt.Errorf("failed to connect to an endpoint") -} - -func (tcp *tcpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serviceInfo, proxier *Proxier) { - for { - if !myInfo.isAlive() { - // The service port was closed or replaced. - return - } - // Block until a connection is made. - inConn, err := tcp.Accept() - if err != nil { - if isTooManyFDsError(err) { - panic("Accept failed: " + err.Error()) - } - - if isClosedError(err) { - return - } - if !myInfo.isAlive() { - // Then the service port was just closed so the accept failure is to be expected. - return - } - klog.ErrorS(err, "Accept failed") - continue - } - klog.V(3).InfoS("Accepted TCP connection from remote", "remoteAddress", inConn.RemoteAddr(), "localAddress", inConn.LocalAddr()) - outConn, err := tryConnect(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", proxier) - if err != nil { - klog.ErrorS(err, "Failed to connect to balancer") - inConn.Close() - continue - } - // Spin up an async copy loop. - go proxyTCP(inConn.(*net.TCPConn), outConn.(*net.TCPConn)) - } -} - -// proxyTCP proxies data bi-directionally between in and out. -func proxyTCP(in, out *net.TCPConn) { - var wg sync.WaitGroup - wg.Add(2) - klog.V(4).InfoS("Creating proxy between remote and local addresses", - "inRemoteAddress", in.RemoteAddr(), "inLocalAddress", in.LocalAddr(), "outLocalAddress", out.LocalAddr(), "outRemoteAddress", out.RemoteAddr()) - go copyBytes("from backend", in, out, &wg) - go copyBytes("to backend", out, in, &wg) - wg.Wait() -} - -func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) { - defer wg.Done() - klog.V(4).InfoS("Copying remote address bytes", "direction", direction, "sourceRemoteAddress", src.RemoteAddr(), "destinationRemoteAddress", dest.RemoteAddr()) - n, err := io.Copy(dest, src) - if err != nil { - if !isClosedError(err) { - klog.ErrorS(err, "I/O error occurred") - } - } - klog.V(4).InfoS("Copied remote address bytes", "bytes", n, "direction", direction, "sourceRemoteAddress", src.RemoteAddr(), "destinationRemoteAddress", dest.RemoteAddr()) - dest.Close() - src.Close() -} - -// udpProxySocket implements proxySocket. Close() is implemented by net.UDPConn. 
When Close() is called, -// no new connections are allowed and existing connections are broken. -// TODO: We could lame-duck this ourselves, if it becomes important. -type udpProxySocket struct { - *net.UDPConn - port int -} - -func (udp *udpProxySocket) ListenPort() int { - return udp.port -} - -func (udp *udpProxySocket) Addr() net.Addr { - return udp.LocalAddr() -} - -// Holds all the known UDP clients that have not timed out. -type clientCache struct { - mu sync.Mutex - clients map[string]net.Conn // addr string -> connection -} - -func newClientCache() *clientCache { - return &clientCache{clients: map[string]net.Conn{}} -} - -func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serviceInfo, proxier *Proxier) { - var buffer [4096]byte // 4KiB should be enough for most whole-packets - - for { - if !myInfo.isAlive() { - // The service port was closed or replaced. - break - } - - // Block until data arrives. - // TODO: Accumulate a histogram of n or something, to fine tune the buffer size. - n, cliAddr, err := udp.ReadFrom(buffer[0:]) - if err != nil { - if e, ok := err.(net.Error); ok { - if e.Temporary() { - klog.V(1).ErrorS(err, "ReadFrom had a temporary failure") - continue - } - } - klog.ErrorS(err, "ReadFrom failed, exiting ProxyLoop") - break - } - - // If this is a client we know already, reuse the connection and goroutine. - svrConn, err := udp.getBackendConn(myInfo.activeClients, cliAddr, proxier, service, myInfo.timeout) - if err != nil { - continue - } - // TODO: It would be nice to let the goroutine handle this write, but we don't - // really want to copy the buffer. We could do a pool of buffers or something. - _, err = svrConn.Write(buffer[0:n]) - if err != nil { - if !logTimeout(err) { - klog.ErrorS(err, "Write failed") - // TODO: Maybe tear down the goroutine for this client/server pair? - } - continue - } - err = svrConn.SetDeadline(time.Now().Add(myInfo.timeout)) - if err != nil { - klog.ErrorS(err, "SetDeadline failed") - continue - } - } -} - -func (udp *udpProxySocket) getBackendConn(activeClients *clientCache, cliAddr net.Addr, proxier *Proxier, service ServicePortPortalName, timeout time.Duration) (net.Conn, error) { - activeClients.mu.Lock() - defer activeClients.mu.Unlock() - - svrConn, found := activeClients.clients[cliAddr.String()] - if !found { - // TODO: This could spin up a new goroutine to make the outbound connection, - // and keep accepting inbound traffic. - klog.V(3).InfoS("New UDP connection from client", "address", cliAddr) - var err error - svrConn, err = tryConnect(service, cliAddr, "udp", proxier) - if err != nil { - return nil, err - } - if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil { - klog.ErrorS(err, "SetDeadline failed") - return nil, err - } - activeClients.clients[cliAddr.String()] = svrConn - go func(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, service ServicePortPortalName, timeout time.Duration) { - defer runtime.HandleCrash() - udp.proxyClient(cliAddr, svrConn, activeClients, service, timeout) - }(cliAddr, svrConn, activeClients, service, timeout) - } - return svrConn, nil -} - -// This function is expected to be called as a goroutine. 
-// TODO: Track and log bytes copied, like TCP -func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, service ServicePortPortalName, timeout time.Duration) { - defer svrConn.Close() - var buffer [4096]byte - for { - n, err := svrConn.Read(buffer[0:]) - if err != nil { - if !logTimeout(err) { - klog.ErrorS(err, "Read failed") - } - break - } - - err = svrConn.SetDeadline(time.Now().Add(timeout)) - if err != nil { - klog.ErrorS(err, "SetDeadline failed") - break - } - _, err = udp.WriteTo(buffer[0:n], cliAddr) - if err != nil { - if !logTimeout(err) { - klog.ErrorS(err, "WriteTo failed") - } - break - } - } - activeClients.mu.Lock() - delete(activeClients.clients, cliAddr.String()) - activeClients.mu.Unlock() -} diff --git a/pkg/proxy/winuserspace/roundrobin.go b/pkg/proxy/winuserspace/roundrobin.go deleted file mode 100644 index 041be3e0198..00000000000 --- a/pkg/proxy/winuserspace/roundrobin.go +++ /dev/null @@ -1,332 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package winuserspace - -import ( - "errors" - "fmt" - "net" - "sort" - "sync" - "time" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/proxy" - "k8s.io/kubernetes/pkg/proxy/util" - stringslices "k8s.io/utils/strings/slices" -) - -var ( - ErrMissingServiceEntry = errors.New("missing service entry") - ErrMissingEndpoints = errors.New("missing endpoints") -) - -type affinityState struct { - clientIP string - //clientProtocol api.Protocol //not yet used - //sessionCookie string //not yet used - endpoint string - lastUsed time.Time -} - -type affinityPolicy struct { - affinityType v1.ServiceAffinity - affinityMap map[string]*affinityState // map client IP -> affinity info - ttlSeconds int -} - -// LoadBalancerRR is a round-robin load balancer. -type LoadBalancerRR struct { - lock sync.RWMutex - services map[proxy.ServicePortName]*balancerState -} - -// Ensure this implements LoadBalancer. -var _ LoadBalancer = &LoadBalancerRR{} - -type balancerState struct { - endpoints []string // a list of "ip:port" style strings - index int // current index into endpoints - affinity affinityPolicy -} - -func newAffinityPolicy(affinityType v1.ServiceAffinity, ttlSeconds int) *affinityPolicy { - return &affinityPolicy{ - affinityType: affinityType, - affinityMap: make(map[string]*affinityState), - ttlSeconds: ttlSeconds, - } -} - -// NewLoadBalancerRR returns a new LoadBalancerRR. -func NewLoadBalancerRR() *LoadBalancerRR { - return &LoadBalancerRR{ - services: map[proxy.ServicePortName]*balancerState{}, - } -} - -func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) error { - klog.V(4).InfoS("LoadBalancerRR NewService", "servicePortName", svcPort) - lb.lock.Lock() - defer lb.lock.Unlock() - lb.newServiceInternal(svcPort, affinityType, ttlSeconds) - return nil -} - -// This assumes that lb.lock is already held. 
-func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) *balancerState {
-	if ttlSeconds == 0 {
-		ttlSeconds = int(v1.DefaultClientIPServiceAffinitySeconds) // default to 3 hours if not specified; should 0 mean unlimited instead?
-	}
-
-	if _, exists := lb.services[svcPort]; !exists {
-		lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlSeconds)}
-		klog.V(4).InfoS("LoadBalancerRR service did not exist, created", "servicePortName", svcPort)
-	} else if affinityType != "" {
-		lb.services[svcPort].affinity.affinityType = affinityType
-	}
-	return lb.services[svcPort]
-}
-
-func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) {
-	klog.V(4).InfoS("LoadBalancerRR DeleteService", "servicePortName", svcPort)
-	lb.lock.Lock()
-	defer lb.lock.Unlock()
-	delete(lb.services, svcPort)
-}
-
-// isSessionAffinity returns true if this service is using some form of session affinity.
-func isSessionAffinity(affinity *affinityPolicy) bool {
-	// Should never be empty string, but checking for it to be safe.
-	if affinity.affinityType == "" || affinity.affinityType == v1.ServiceAffinityNone {
-		return false
-	}
-	return true
-}
-
-// NextEndpoint returns a service endpoint.
-// The service endpoint is chosen using the round-robin algorithm.
-func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr net.Addr, sessionAffinityReset bool) (string, error) {
-	// Coarse locking is simple. We can get more fine-grained if/when we
-	// can prove it matters.
-	lb.lock.Lock()
-	defer lb.lock.Unlock()
-
-	state, exists := lb.services[svcPort]
-	if !exists || state == nil {
-		return "", ErrMissingServiceEntry
-	}
-	if len(state.endpoints) == 0 {
-		return "", ErrMissingEndpoints
-	}
-	klog.V(4).InfoS("NextEndpoint for service", "servicePortName", svcPort, "address", srcAddr, "endpoints", state.endpoints)
-	sessionAffinityEnabled := isSessionAffinity(&state.affinity)
-
-	var ipaddr string
-	if sessionAffinityEnabled {
-		// Caution: don't shadow ipaddr
-		var err error
-		ipaddr, _, err = net.SplitHostPort(srcAddr.String())
-		if err != nil {
-			return "", fmt.Errorf("malformed source address %q: %v", srcAddr.String(), err)
-		}
-		if !sessionAffinityReset {
-			sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
-			if exists && int(time.Since(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
-				// Affinity wins.
-				endpoint := sessionAffinity.endpoint
-				sessionAffinity.lastUsed = time.Now()
-				klog.V(4).InfoS("NextEndpoint for service from IP with sessionAffinity", "servicePortName", svcPort, "IP", ipaddr, "sessionAffinity", sessionAffinity, "endpoint", endpoint)
-				return endpoint, nil
-			}
-		}
-	}
-	// Take the next endpoint.
-	endpoint := state.endpoints[state.index]
-	state.index = (state.index + 1) % len(state.endpoints)
-
-	if sessionAffinityEnabled {
-		affinity := state.affinity.affinityMap[ipaddr]
-		if affinity == nil {
-			affinity = new(affinityState) //&affinityState{ipaddr, "TCP", "", endpoint, time.Now()}
-			state.affinity.affinityMap[ipaddr] = affinity
-		}
-		affinity.lastUsed = time.Now()
-		affinity.endpoint = endpoint
-		affinity.clientIP = ipaddr
-		klog.V(4).InfoS("Updated affinity key", "IP", ipaddr, "affinityState", state.affinity.affinityMap[ipaddr])
-	}
-
-	return endpoint, nil
-}
-
-// removeSessionAffinityByEndpoint removes any session affinity records associated with a particular endpoint (for example when a pod goes down).
-func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) {
-	for _, affinity := range state.affinity.affinityMap {
-		if affinity.endpoint == endpoint {
-			klog.V(4).InfoS("Removing client from affinityMap for service", "endpoint", affinity.endpoint, "servicePortName", svcPort)
-			delete(state.affinity.affinityMap, affinity.clientIP)
-		}
-	}
-}
-
-// updateAffinityMap loops through the valid endpoints and the endpoints associated with the load balancer,
-// then removes any session affinity records that are not in both lists.
-// This assumes the lb.lock is held.
-func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEndpoints []string) {
-	allEndpoints := map[string]int{}
-	for _, newEndpoint := range newEndpoints {
-		allEndpoints[newEndpoint] = 1
-	}
-	state, exists := lb.services[svcPort]
-	if !exists {
-		return
-	}
-	for _, existingEndpoint := range state.endpoints {
-		allEndpoints[existingEndpoint] = allEndpoints[existingEndpoint] + 1
-	}
-	for mKey, mVal := range allEndpoints {
-		if mVal == 1 {
-			klog.V(2).InfoS("Delete endpoint for service", "endpoint", mKey, "servicePortName", svcPort)
-			removeSessionAffinityByEndpoint(state, svcPort, mKey)
-		}
-	}
-}
-
-func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *v1.Endpoints) {
-	portsToEndpoints := util.BuildPortsToEndpointsMap(endpoints)
-
-	lb.lock.Lock()
-	defer lb.lock.Unlock()
-
-	for portname := range portsToEndpoints {
-		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
-		newEndpoints := portsToEndpoints[portname]
-		state, exists := lb.services[svcPort]
-
-		if !exists || state == nil || len(newEndpoints) > 0 {
-			klog.V(1).InfoS("LoadBalancerRR: Setting endpoints for service", "servicePortName", svcPort, "endpoints", newEndpoints)
-			lb.updateAffinityMap(svcPort, newEndpoints)
-			// OnEndpointsAdd can be called without NewService being called externally.
-			// To be safe we will call it here. A new service will only be created
-			// if one does not already exist. The affinity will be updated
-			// later, once NewService is called.
-			state = lb.newServiceInternal(svcPort, v1.ServiceAffinity(""), 0)
-			state.endpoints = util.ShuffleStrings(newEndpoints)
-
-			// Reset the round-robin index.
-			state.index = 0
-		}
-	}
-}
-
-func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) {
-	portsToEndpoints := util.BuildPortsToEndpointsMap(endpoints)
-	oldPortsToEndpoints := util.BuildPortsToEndpointsMap(oldEndpoints)
-	registeredEndpoints := make(map[proxy.ServicePortName]bool)
-
-	lb.lock.Lock()
-	defer lb.lock.Unlock()
-
-	for portname := range portsToEndpoints {
-		svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname}
-		newEndpoints := portsToEndpoints[portname]
-		state, exists := lb.services[svcPort]
-
-		curEndpoints := []string{}
-		if state != nil {
-			curEndpoints = state.endpoints
-		}
-
-		if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(stringslices.Clone(curEndpoints), newEndpoints) {
-			klog.V(1).InfoS("LoadBalancerRR: Setting endpoints for service", "servicePortName", svcPort, "endpoints", newEndpoints)
-			lb.updateAffinityMap(svcPort, newEndpoints)
-			// OnEndpointsUpdate can be called without NewService being called externally.
-			// To be safe we will call it here. A new service will only be created
-			// if one does not already exist.
The affinity will be updated - // later, once NewService is called. - state = lb.newServiceInternal(svcPort, v1.ServiceAffinity(""), 0) - state.endpoints = util.ShuffleStrings(newEndpoints) - - // Reset the round-robin index. - state.index = 0 - } - registeredEndpoints[svcPort] = true - } - - for portname := range oldPortsToEndpoints { - svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname} - if _, exists := registeredEndpoints[svcPort]; !exists { - klog.V(2).InfoS("LoadBalancerRR: Removing endpoints service", "servicePortName", svcPort) - // Reset but don't delete. - state := lb.services[svcPort] - state.endpoints = []string{} - state.index = 0 - state.affinity.affinityMap = map[string]*affinityState{} - } - } -} - -func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *v1.Endpoints) { - portsToEndpoints := util.BuildPortsToEndpointsMap(endpoints) - - lb.lock.Lock() - defer lb.lock.Unlock() - - for portname := range portsToEndpoints { - svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname} - klog.V(2).InfoS("LoadBalancerRR: Removing endpoints service", "servicePortName", svcPort) - // If the service is still around, reset but don't delete. - if state, ok := lb.services[svcPort]; ok { - state.endpoints = []string{} - state.index = 0 - state.affinity.affinityMap = map[string]*affinityState{} - } - } -} - -func (lb *LoadBalancerRR) OnEndpointsSynced() { -} - -// Tests whether two slices are equivalent. This sorts both slices in-place. -func slicesEquiv(lhs, rhs []string) bool { - if len(lhs) != len(rhs) { - return false - } - sort.Strings(lhs) - sort.Strings(rhs) - return stringslices.Equal(lhs, rhs) -} - -func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortName) { - lb.lock.Lock() - defer lb.lock.Unlock() - - state, exists := lb.services[svcPort] - if !exists { - return - } - for ip, affinity := range state.affinity.affinityMap { - if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds { - klog.V(4).InfoS("Removing client from affinityMap for service", "IP", affinity.clientIP, "servicePortName", svcPort) - delete(state.affinity.affinityMap, ip) - } - } -} diff --git a/pkg/proxy/winuserspace/roundrobin_test.go b/pkg/proxy/winuserspace/roundrobin_test.go deleted file mode 100644 index f6cce8cff8e..00000000000 --- a/pkg/proxy/winuserspace/roundrobin_test.go +++ /dev/null @@ -1,678 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package winuserspace
-
-import (
-	"net"
-	"testing"
-
-	"k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/kubernetes/pkg/proxy"
-)
-
-func TestLoadBalanceFailsWithNoEndpoints(t *testing.T) {
-	loadBalancer := NewLoadBalancerRR()
-	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "does-not-exist"}
-	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
-	if err == nil {
-		t.Errorf("Didn't fail with non-existent service")
-	}
-	if len(endpoint) != 0 {
-		t.Errorf("Got an endpoint")
-	}
-}
-
-func expectEndpoint(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
-	endpoint, err := loadBalancer.NextEndpoint(service, netaddr, false)
-	if err != nil {
-		t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
-	}
-	if endpoint != expected {
-		t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
-	}
-}
-
-func expectEndpointWithSessionAffinityReset(t *testing.T, loadBalancer *LoadBalancerRR, service proxy.ServicePortName, expected string, netaddr net.Addr) {
-	endpoint, err := loadBalancer.NextEndpoint(service, netaddr, true)
-	if err != nil {
-		t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
-	}
-	if endpoint != expected {
-		t.Errorf("Didn't get expected endpoint for service %s client %v, expected %s, got: %s", service, netaddr, expected, endpoint)
-	}
-}
-
-func TestLoadBalanceWorksWithSingleEndpoint(t *testing.T) {
-	loadBalancer := NewLoadBalancerRR()
-	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
-	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
-	if err == nil || len(endpoint) != 0 {
-		t.Errorf("Didn't fail with non-existent service")
-	}
-	endpoints := &v1.Endpoints{
-		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Subsets: []v1.EndpointSubset{{
-			Addresses: []v1.EndpointAddress{{IP: "endpoint1"}},
-			Ports: []v1.EndpointPort{{Name: "p", Port: 40}},
-		}},
-	}
-	loadBalancer.OnEndpointsAdd(endpoints)
-	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
-	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
-	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
-	expectEndpoint(t, loadBalancer, service, "endpoint1:40", nil)
-}
-
-func stringsInSlice(haystack []string, needles ...string) bool {
-	for _, needle := range needles {
-		found := false
-		for i := range haystack {
-			if haystack[i] == needle {
-				found = true
-				break
-			}
-		}
-		if !found {
-			return false
-		}
-	}
-	return true
-}
-
-func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) {
-	loadBalancer := NewLoadBalancerRR()
-	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"}
-	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
-	if err == nil || len(endpoint) != 0 {
-		t.Errorf("Didn't fail with non-existent service")
-	}
-	endpoints := &v1.Endpoints{
-		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Subsets: []v1.EndpointSubset{{
-			Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
-			Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "p", Port: 2}, {Name: "p", Port: 3}},
-		}},
-	}
-
loadBalancer.OnEndpointsAdd(endpoints) - - shuffledEndpoints := loadBalancer.services[service].endpoints - if !stringsInSlice(shuffledEndpoints, "endpoint:1", "endpoint:2", "endpoint:3") { - t.Errorf("did not find expected endpoints: %v", shuffledEndpoints) - } - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], nil) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], nil) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], nil) -} - -func TestLoadBalanceWorksWithMultipleEndpointsMultiplePorts(t *testing.T) { - loadBalancer := NewLoadBalancerRR() - serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"} - serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"} - endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - endpoints := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 2}}, - }, - { - Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 4}}, - }, - }, - } - loadBalancer.OnEndpointsAdd(endpoints) - - shuffledEndpoints := loadBalancer.services[serviceP].endpoints - if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:1", "endpoint3:3") { - t.Errorf("did not find expected endpoints: %v", shuffledEndpoints) - } - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil) - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil) - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil) - - shuffledEndpoints = loadBalancer.services[serviceQ].endpoints - if !stringsInSlice(shuffledEndpoints, "endpoint1:2", "endpoint2:2", "endpoint3:4") { - t.Errorf("did not find expected endpoints: %v", shuffledEndpoints) - } - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil) -} - -func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) { - loadBalancer := NewLoadBalancerRR() - serviceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"} - serviceQ := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "q"} - endpoint, err := loadBalancer.NextEndpoint(serviceP, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - endpointsv1 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 1}, {Name: "q", Port: 10}}, - }, - { - Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 2}, {Name: "q", Port: 20}}, - }, - { - Addresses: 
[]v1.EndpointAddress{{IP: "endpoint3"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 3}, {Name: "q", Port: 30}}, - }, - }, - } - loadBalancer.OnEndpointsAdd(endpointsv1) - - shuffledEndpoints := loadBalancer.services[serviceP].endpoints - if !stringsInSlice(shuffledEndpoints, "endpoint1:1", "endpoint2:2", "endpoint3:3") { - t.Errorf("did not find expected endpoints: %v", shuffledEndpoints) - } - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil) - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[2], nil) - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil) - - shuffledEndpoints = loadBalancer.services[serviceQ].endpoints - if !stringsInSlice(shuffledEndpoints, "endpoint1:10", "endpoint2:20", "endpoint3:30") { - t.Errorf("did not find expected endpoints: %v", shuffledEndpoints) - } - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[2], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil) - - // Then update the configuration with one fewer endpoints, make sure - // we start in the beginning again - endpointsv2 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint4"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 4}, {Name: "q", Port: 40}}, - }, - { - Addresses: []v1.EndpointAddress{{IP: "endpoint5"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 5}, {Name: "q", Port: 50}}, - }, - }, - } - loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2) - - shuffledEndpoints = loadBalancer.services[serviceP].endpoints - if !stringsInSlice(shuffledEndpoints, "endpoint4:4", "endpoint5:5") { - t.Errorf("did not find expected endpoints: %v", shuffledEndpoints) - } - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil) - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, serviceP, shuffledEndpoints[1], nil) - - shuffledEndpoints = loadBalancer.services[serviceQ].endpoints - if !stringsInSlice(shuffledEndpoints, "endpoint4:40", "endpoint5:50") { - t.Errorf("did not find expected endpoints: %v", shuffledEndpoints) - } - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[0], nil) - expectEndpoint(t, loadBalancer, serviceQ, shuffledEndpoints[1], nil) - - // Clear endpoints - endpointsv3 := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, Subsets: nil} - loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3) - - endpoint, err = loadBalancer.NextEndpoint(serviceP, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } -} - -func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) { - loadBalancer := NewLoadBalancerRR() - fooServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: "p"} - barServiceP := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: "p"} - endpoint, err := 
loadBalancer.NextEndpoint(fooServiceP, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - endpoints1 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: fooServiceP.Name, Namespace: fooServiceP.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint1"}, {IP: "endpoint2"}, {IP: "endpoint3"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 123}}, - }, - }, - } - endpoints2 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: barServiceP.Name, Namespace: barServiceP.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint4"}, {IP: "endpoint5"}, {IP: "endpoint6"}}, - Ports: []v1.EndpointPort{{Name: "p", Port: 456}}, - }, - }, - } - loadBalancer.OnEndpointsAdd(endpoints1) - loadBalancer.OnEndpointsAdd(endpoints2) - shuffledFooEndpoints := loadBalancer.services[fooServiceP].endpoints - expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil) - expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil) - expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[2], nil) - expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[0], nil) - expectEndpoint(t, loadBalancer, fooServiceP, shuffledFooEndpoints[1], nil) - - shuffledBarEndpoints := loadBalancer.services[barServiceP].endpoints - expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil) - expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil) - expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil) - expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil) - expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil) - - // Then update the configuration by removing foo - loadBalancer.OnEndpointsDelete(endpoints1) - endpoint, err = loadBalancer.NextEndpoint(fooServiceP, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - - // but bar is still there, and we continue RR from where we left off. 
- expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil) - expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[0], nil) - expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[1], nil) - expectEndpoint(t, loadBalancer, barServiceP, shuffledBarEndpoints[2], nil) -} - -func TestStickyLoadBalanceWorksWithNewServiceCalledFirst(t *testing.T) { - loadBalancer := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""} - endpoint, err := loadBalancer.NextEndpoint(service, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - - // Call NewService() before OnEndpointsUpdate() - loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds)) - endpoints := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{ - {Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}}, - {Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}}, - {Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, Ports: []v1.EndpointPort{{Port: 3}}}, - }, - } - loadBalancer.OnEndpointsAdd(endpoints) - - client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} - client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0} - client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0} - - ep1, err := loadBalancer.NextEndpoint(service, client1, false) - if err != nil { - t.Errorf("Didn't find a service for %s: %v", service, err) - } - expectEndpoint(t, loadBalancer, service, ep1, client1) - expectEndpoint(t, loadBalancer, service, ep1, client1) - expectEndpoint(t, loadBalancer, service, ep1, client1) - - ep2, err := loadBalancer.NextEndpoint(service, client2, false) - if err != nil { - t.Errorf("Didn't find a service for %s: %v", service, err) - } - expectEndpoint(t, loadBalancer, service, ep2, client2) - expectEndpoint(t, loadBalancer, service, ep2, client2) - expectEndpoint(t, loadBalancer, service, ep2, client2) - - ep3, err := loadBalancer.NextEndpoint(service, client3, false) - if err != nil { - t.Errorf("Didn't find a service for %s: %v", service, err) - } - expectEndpoint(t, loadBalancer, service, ep3, client3) - expectEndpoint(t, loadBalancer, service, ep3, client3) - expectEndpoint(t, loadBalancer, service, ep3, client3) - - expectEndpoint(t, loadBalancer, service, ep1, client1) - expectEndpoint(t, loadBalancer, service, ep2, client2) - expectEndpoint(t, loadBalancer, service, ep3, client3) - expectEndpoint(t, loadBalancer, service, ep1, client1) - expectEndpoint(t, loadBalancer, service, ep2, client2) - expectEndpoint(t, loadBalancer, service, ep3, client3) -} - -func TestStickyLoadBalanceWorksWithNewServiceCalledSecond(t *testing.T) { - loadBalancer := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""} - endpoint, err := loadBalancer.NextEndpoint(service, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - - // Call OnEndpointsUpdate() before NewService() - endpoints := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{ - {Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}}, - {Addresses: 
[]v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
-		},
-	}
-	loadBalancer.OnEndpointsAdd(endpoints)
-	loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
-
-	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
-	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
-	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
-
-	ep1, err := loadBalancer.NextEndpoint(service, client1, false)
-	if err != nil {
-		t.Errorf("Didn't find a service for %s: %v", service, err)
-	}
-	expectEndpoint(t, loadBalancer, service, ep1, client1)
-	expectEndpoint(t, loadBalancer, service, ep1, client1)
-	expectEndpoint(t, loadBalancer, service, ep1, client1)
-
-	ep2, err := loadBalancer.NextEndpoint(service, client2, false)
-	if err != nil {
-		t.Errorf("Didn't find a service for %s: %v", service, err)
-	}
-	expectEndpoint(t, loadBalancer, service, ep2, client2)
-	expectEndpoint(t, loadBalancer, service, ep2, client2)
-	expectEndpoint(t, loadBalancer, service, ep2, client2)
-
-	ep3, err := loadBalancer.NextEndpoint(service, client3, false)
-	if err != nil {
-		t.Errorf("Didn't find a service for %s: %v", service, err)
-	}
-	expectEndpoint(t, loadBalancer, service, ep3, client3)
-	expectEndpoint(t, loadBalancer, service, ep3, client3)
-	expectEndpoint(t, loadBalancer, service, ep3, client3)
-
-	expectEndpoint(t, loadBalancer, service, ep1, client1)
-	expectEndpoint(t, loadBalancer, service, ep2, client2)
-	expectEndpoint(t, loadBalancer, service, ep3, client3)
-	expectEndpoint(t, loadBalancer, service, ep1, client1)
-	expectEndpoint(t, loadBalancer, service, ep2, client2)
-	expectEndpoint(t, loadBalancer, service, ep3, client3)
-}
-
-func TestStickyLoadBalanceWorksWithMultipleEndpointsRemoveOne(t *testing.T) {
-	client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
-	client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
-	client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
-	client4 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 4), Port: 0}
-	client5 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 5), Port: 0}
-	client6 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 6), Port: 0}
-	loadBalancer := NewLoadBalancerRR()
-	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
-	endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
-	if err == nil || len(endpoint) != 0 {
-		t.Errorf("Didn't fail with non-existent service")
-	}
-
-	loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
-	endpointsv1 := &v1.Endpoints{
-		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-		Subsets: []v1.EndpointSubset{
-			{
-				Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
-				Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
-			},
-		},
-	}
-	loadBalancer.OnEndpointsAdd(endpointsv1)
-	shuffledEndpoints := loadBalancer.services[service].endpoints
-	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
-	client1Endpoint := shuffledEndpoints[0]
-	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
-	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
-	client2Endpoint := shuffledEndpoints[1]
-	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
-	expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3)
-	client3Endpoint := shuffledEndpoints[2]
-
-	endpointsv2 := &v1.Endpoints{
-
ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint"}}, - Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}}, - }, - }, - } - loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2) - shuffledEndpoints = loadBalancer.services[service].endpoints - if client1Endpoint == "endpoint:3" { - client1Endpoint = shuffledEndpoints[0] - } else if client2Endpoint == "endpoint:3" { - client2Endpoint = shuffledEndpoints[0] - } else if client3Endpoint == "endpoint:3" { - client3Endpoint = shuffledEndpoints[0] - } - expectEndpoint(t, loadBalancer, service, client1Endpoint, client1) - expectEndpoint(t, loadBalancer, service, client2Endpoint, client2) - expectEndpoint(t, loadBalancer, service, client3Endpoint, client3) - - endpointsv3 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint"}}, - Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 4}}, - }, - }, - } - loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3) - shuffledEndpoints = loadBalancer.services[service].endpoints - expectEndpoint(t, loadBalancer, service, client1Endpoint, client1) - expectEndpoint(t, loadBalancer, service, client2Endpoint, client2) - expectEndpoint(t, loadBalancer, service, client3Endpoint, client3) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client4) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client5) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client6) -} - -func TestStickyLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) { - client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0} - client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0} - client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0} - loadBalancer := NewLoadBalancerRR() - service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""} - endpoint, err := loadBalancer.NextEndpoint(service, nil, false) - if err == nil || len(endpoint) != 0 { - t.Errorf("Didn't fail with non-existent service") - } - - loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds)) - endpointsv1 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: []v1.EndpointAddress{{IP: "endpoint"}}, - Ports: []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}}, - }, - }, - } - loadBalancer.OnEndpointsAdd(endpointsv1) - shuffledEndpoints := loadBalancer.services[service].endpoints - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[2], client3) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1) - expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2) - // Then update the configuration with one fewer endpoints, make sure - // we start in the beginning again - endpointsv2 := &v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Subsets: []v1.EndpointSubset{ - { - Addresses: 
[]v1.EndpointAddress{{IP: "endpoint"}},
-                Ports:     []v1.EndpointPort{{Port: 4}, {Port: 5}},
-            },
-        },
-    }
-    loadBalancer.OnEndpointsUpdate(endpointsv1, endpointsv2)
-    shuffledEndpoints = loadBalancer.services[service].endpoints
-    expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
-    expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
-    expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
-    expectEndpoint(t, loadBalancer, service, shuffledEndpoints[0], client1)
-    expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
-    expectEndpoint(t, loadBalancer, service, shuffledEndpoints[1], client2)
-
-    // Clear endpoints
-    endpointsv3 := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, Subsets: nil}
-    loadBalancer.OnEndpointsUpdate(endpointsv2, endpointsv3)
-
-    endpoint, err = loadBalancer.NextEndpoint(service, nil, false)
-    if err == nil || len(endpoint) != 0 {
-        t.Errorf("Didn't fail with non-existent service")
-    }
-}
-
-func TestStickyLoadBalanceWorksWithServiceRemoval(t *testing.T) {
-    client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
-    client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
-    client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
-    loadBalancer := NewLoadBalancerRR()
-    fooService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
-    endpoint, err := loadBalancer.NextEndpoint(fooService, nil, false)
-    if err == nil || len(endpoint) != 0 {
-        t.Errorf("Didn't fail with non-existent service")
-    }
-    loadBalancer.NewService(fooService, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
-    endpoints1 := &v1.Endpoints{
-        ObjectMeta: metav1.ObjectMeta{Name: fooService.Name, Namespace: fooService.Namespace},
-        Subsets: []v1.EndpointSubset{
-            {
-                Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
-                Ports:     []v1.EndpointPort{{Port: 1}, {Port: 2}, {Port: 3}},
-            },
-        },
-    }
-    barService := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "bar"}, Port: ""}
-    loadBalancer.NewService(barService, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
-    endpoints2 := &v1.Endpoints{
-        ObjectMeta: metav1.ObjectMeta{Name: barService.Name, Namespace: barService.Namespace},
-        Subsets: []v1.EndpointSubset{
-            {
-                Addresses: []v1.EndpointAddress{{IP: "endpoint"}},
-                Ports:     []v1.EndpointPort{{Port: 4}, {Port: 5}},
-            },
-        },
-    }
-    loadBalancer.OnEndpointsAdd(endpoints1)
-    loadBalancer.OnEndpointsAdd(endpoints2)
-
-    shuffledFooEndpoints := loadBalancer.services[fooService].endpoints
-    expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
-    expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
-    expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
-    expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
-    expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[0], client1)
-    expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
-    expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[1], client2)
-    expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
-    expectEndpoint(t, loadBalancer, fooService, shuffledFooEndpoints[2], client3)
-
-    shuffledBarEndpoints := loadBalancer.services[barService].endpoints
-    expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
-    expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
-    expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
-    expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
-    expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
-    expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
-
-    // Then update the configuration by removing foo
-    loadBalancer.OnEndpointsDelete(endpoints1)
-    endpoint, err = loadBalancer.NextEndpoint(fooService, nil, false)
-    if err == nil || len(endpoint) != 0 {
-        t.Errorf("Didn't fail with non-existent service")
-    }
-
-    // but bar is still there, and we continue RR from where we left off.
-    shuffledBarEndpoints = loadBalancer.services[barService].endpoints
-    expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
-    expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
-    expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
-    expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[1], client2)
-    expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
-    expectEndpoint(t, loadBalancer, barService, shuffledBarEndpoints[0], client1)
-}
-
-func TestStickyLoadBalanceWorksWithEndpointFails(t *testing.T) {
-    loadBalancer := NewLoadBalancerRR()
-    service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "foo"}, Port: ""}
-    endpoint, err := loadBalancer.NextEndpoint(service, nil, false)
-    if err == nil || len(endpoint) != 0 {
-        t.Errorf("Didn't fail with non-existent service")
-    }
-
-    // Call NewService() before OnEndpointsUpdate()
-    loadBalancer.NewService(service, v1.ServiceAffinityClientIP, int(v1.DefaultClientIPServiceAffinitySeconds))
-    endpoints := &v1.Endpoints{
-        ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
-        Subsets: []v1.EndpointSubset{
-            {Addresses: []v1.EndpointAddress{{IP: "endpoint1"}}, Ports: []v1.EndpointPort{{Port: 1}}},
-            {Addresses: []v1.EndpointAddress{{IP: "endpoint2"}}, Ports: []v1.EndpointPort{{Port: 2}}},
-            {Addresses: []v1.EndpointAddress{{IP: "endpoint3"}}, Ports: []v1.EndpointPort{{Port: 3}}},
-        },
-    }
-    loadBalancer.OnEndpointsAdd(endpoints)
-
-    client1 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
-    client2 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 0}
-    client3 := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 3), Port: 0}
-
-    ep1, err := loadBalancer.NextEndpoint(service, client1, false)
-    if err != nil {
-        t.Errorf("Didn't find a service for %s: %v", service, err)
-    }
-
-    ep2, err := loadBalancer.NextEndpoint(service, client2, false)
-    if err != nil {
-        t.Errorf("Didn't find a service for %s: %v", service, err)
-    }
-
-    ep3, err := loadBalancer.NextEndpoint(service, client3, false)
-    if err != nil {
-        t.Errorf("Didn't find a service for %s: %v", service, err)
-    }
-
-    expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client1)
-    expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client1)
-    expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)
-
-    expectEndpoint(t, loadBalancer, service, ep2, client2)
-    expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
-    expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
-    expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep3, client1)
-    expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep1, client2)
-    expectEndpointWithSessionAffinityReset(t, loadBalancer, service, ep2, client3)
-}
diff --git a/pkg/proxy/winuserspace/types.go b/pkg/proxy/winuserspace/types.go
deleted file mode 100644
index cf31999704c..00000000000
--- a/pkg/proxy/winuserspace/types.go
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package winuserspace
-
-import (
-    "fmt"
-
-    "k8s.io/apimachinery/pkg/types"
-)
-
-// ServicePortPortalName carries a namespace + name + portname + portalip. This is the unique
-// identifier for a windows service port portal.
-type ServicePortPortalName struct {
-    types.NamespacedName
-    Port         string
-    PortalIPName string
-}
-
-func (spn ServicePortPortalName) String() string {
-    return fmt.Sprintf("%s:%s:%s", spn.NamespacedName.String(), spn.Port, spn.PortalIPName)
-}
diff --git a/pkg/util/netsh/OWNERS b/pkg/util/netsh/OWNERS
deleted file mode 100644
index 548730fa952..00000000000
--- a/pkg/util/netsh/OWNERS
+++ /dev/null
@@ -1,8 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-reviewers:
-  - sig-network-reviewers
-approvers:
-  - sig-network-approvers
-labels:
-  - sig/network
diff --git a/pkg/util/netsh/doc.go b/pkg/util/netsh/doc.go
deleted file mode 100644
index 529d1e8f1cf..00000000000
--- a/pkg/util/netsh/doc.go
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package netsh provides an interface and implementations for running Windows netsh commands.
-package netsh // import "k8s.io/kubernetes/pkg/util/netsh"
diff --git a/pkg/util/netsh/netsh.go b/pkg/util/netsh/netsh.go
deleted file mode 100644
index 2099ac7acdf..00000000000
--- a/pkg/util/netsh/netsh.go
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package netsh
-
-import (
-    "fmt"
-    "net"
-    "os"
-    "strings"
-    "time"
-
-    "k8s.io/klog/v2"
-    utilexec "k8s.io/utils/exec"
-)
-
-// Interface is an injectable interface for running netsh commands. Implementations must be goroutine-safe.
-type Interface interface {
-    // EnsurePortProxyRule checks if the specified redirect exists, if not creates it
-    EnsurePortProxyRule(args []string) (bool, error)
-    // DeletePortProxyRule deletes the specified portproxy rule. If the rule did not exist, return error.
-    DeletePortProxyRule(args []string) error
-    // EnsureIPAddress checks if the specified IP Address is added to vEthernet (HNSTransparent) interface, if not, add it. If the address existed, return true.
-    EnsureIPAddress(args []string, ip net.IP) (bool, error)
-    // DeleteIPAddress checks if the specified IP address is present and, if so, deletes it.
-    DeleteIPAddress(args []string) error
-    // Restore runs `netsh exec` to restore portproxy or addresses using a file.
-    // TODO Check if this is required, most likely not
-    Restore(args []string) error
-
-    // GetInterfaceToAddIP returns the interface name where Service IP needs to be added
-    // IP Address needs to be added for netsh portproxy to redirect traffic
-    // Reads Environment variable INTERFACE_TO_ADD_SERVICE_IP, if it is not defined then "vEthernet (HNSTransparent)" is returned
-    GetInterfaceToAddIP() string
-}
-
-const (
-    cmdNetsh string = "netsh"
-)
-
-// runner implements Interface in terms of exec("netsh").
-type runner struct {
-    exec utilexec.Interface
-}
-
-// New returns a new Interface which will exec netsh.
-func New(exec utilexec.Interface) Interface {
-    runner := &runner{
-        exec: exec,
-    }
-    return runner
-}
-
-// EnsurePortProxyRule checks if the specified redirect exists, if not creates it.
-func (runner *runner) EnsurePortProxyRule(args []string) (bool, error) {
-    klog.V(4).InfoS("Running netsh interface portproxy add v4tov4", "arguments", args)
-    out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput()
-
-    if err == nil {
-        return true, nil
-    }
-    if ee, ok := err.(utilexec.ExitError); ok {
-        // netsh uses exit(0) to indicate a success of the operation,
-        // as compared to a malformed commandline, for example.
-        if ee.Exited() && ee.ExitStatus() != 0 {
-            return false, nil
-        }
-    }
-    return false, fmt.Errorf("error checking portproxy rule: %v: %s", err, out)
-
-}
-
-// DeletePortProxyRule deletes the specified portproxy rule. If the rule did not exist, return error.
-func (runner *runner) DeletePortProxyRule(args []string) error {
-    klog.V(4).InfoS("Running netsh interface portproxy delete v4tov4", "arguments", args)
-    out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput()
-
-    if err == nil {
-        return nil
-    }
-    if ee, ok := err.(utilexec.ExitError); ok {
-        // netsh uses exit(0) to indicate a success of the operation,
-        // as compared to a malformed commandline, for example.
-        if ee.Exited() && ee.ExitStatus() == 0 {
-            return nil
-        }
-    }
-    return fmt.Errorf("error deleting portproxy rule: %v: %s", err, out)
-}
-
-// EnsureIPAddress checks if the specified IP Address is added to interface identified by Environment variable INTERFACE_TO_ADD_SERVICE_IP, if not, add it. If the address existed, return true.
-func (runner *runner) EnsureIPAddress(args []string, ip net.IP) (bool, error) {
-    // Check if the ip address exists
-    intName := runner.GetInterfaceToAddIP()
-    argsShowAddress := []string{
-        "interface", "ipv4", "show", "address",
-        "name=" + intName,
-    }
-
-    ipToCheck := ip.String()
-
-    exists, _ := checkIPExists(ipToCheck, argsShowAddress, runner)
-    if exists {
-        klog.V(4).InfoS("Not adding IP address, as it already exists", "IP", ipToCheck)
-        return true, nil
-    }
-
-    // IP Address is not already added, add it now
-    klog.V(4).InfoS("Running netsh interface IPv4 add address", "IP", args)
-    out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput()
-
-    if err == nil {
-        // Once the IP Address is added, it takes a bit to initialize and show up when querying for it
-        // Query all the IP addresses and see if the one we added is present
-        // PS: We are using netsh interface IPv4 show address here to query all the IP addresses, instead of
-        // querying net.InterfaceAddrs() as it returns the IP address as soon as it is added even though it is uninitialized
-        klog.V(3).InfoS("Waiting until IP is added to the network adapter", "IP", ipToCheck)
-        for {
-            if exists, _ := checkIPExists(ipToCheck, argsShowAddress, runner); exists {
-                return true, nil
-            }
-            time.Sleep(500 * time.Millisecond)
-        }
-    }
-    if ee, ok := err.(utilexec.ExitError); ok {
-        // netsh uses exit(0) to indicate a success of the operation,
-        // as compared to a malformed commandline, for example.
-        if ee.Exited() && ee.ExitStatus() != 0 {
-            return false, nil
-        }
-    }
-    return false, fmt.Errorf("error adding IPv4 address: %v: %s", err, out)
-}
-
-// DeleteIPAddress checks if the specified IP address is present and, if so, deletes it.
-func (runner *runner) DeleteIPAddress(args []string) error {
-    klog.V(4).InfoS("Running netsh interface IPv4 delete address", "IP", args)
-    out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput()
-
-    if err == nil {
-        return nil
-    }
-    if ee, ok := err.(utilexec.ExitError); ok {
-        // netsh uses exit(0) to indicate a success of the operation,
-        // as compared to a malformed commandline, for example.
-        if ee.Exited() && ee.ExitStatus() == 0 {
-            return nil
-        }
-    }
-    return fmt.Errorf("error deleting IPv4 address: %v: %s", err, out)
-}
-
-// GetInterfaceToAddIP returns the interface name where Service IP needs to be added
-// IP Address needs to be added for netsh portproxy to redirect traffic
-// Reads Environment variable INTERFACE_TO_ADD_SERVICE_IP, if it is not defined then "vEthernet (HNS Internal NIC)" is returned
-func (runner *runner) GetInterfaceToAddIP() string {
-    if iface := os.Getenv("INTERFACE_TO_ADD_SERVICE_IP"); len(iface) > 0 {
-        return iface
-    }
-    return "vEthernet (HNS Internal NIC)"
-}
-
-// Restore is part of Interface.
-func (runner *runner) Restore(args []string) error {
-    return nil
-}
-
-// checkIPExists checks if an IP address exists in 'netsh interface IPv4 show address' output
-func checkIPExists(ipToCheck string, args []string, runner *runner) (bool, error) {
-    ipAddress, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput()
-    if err != nil {
-        return false, err
-    }
-    ipAddressString := string(ipAddress[:])
-    klog.V(3).InfoS("Searching for IP in IP dump", "IP", ipToCheck, "IPDump", ipAddressString)
-    showAddressArray := strings.Split(ipAddressString, "\n")
-    for _, showAddress := range showAddressArray {
-        if strings.Contains(showAddress, "IP") {
-            ipFromNetsh := getIP(showAddress)
-            if ipFromNetsh == ipToCheck {
-                return true, nil
-            }
-        }
-    }
-
-    return false, nil
-}
-
-// getIP gets ip from showAddress (e.g. "IP Address: 10.96.0.4").
-func getIP(showAddress string) string {
-    list := strings.SplitN(showAddress, ":", 2)
-    if len(list) != 2 {
-        return ""
-    }
-    return strings.TrimSpace(list[1])
-}
diff --git a/pkg/util/netsh/netsh_test.go b/pkg/util/netsh/netsh_test.go
deleted file mode 100644
index 7b184a98043..00000000000
--- a/pkg/util/netsh/netsh_test.go
+++ /dev/null
@@ -1,483 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package netsh
-
-import (
-    "net"
-    "os"
-    "testing"
-
-    "k8s.io/utils/exec"
-    fakeexec "k8s.io/utils/exec/testing"
-
-    "errors"
-    "github.com/stretchr/testify/assert"
-)
-
-func fakeCommonRunner() *runner {
-    fakeCmd := fakeexec.FakeCmd{
-        CombinedOutputScript: []fakeexec.FakeAction{
-            // Success
-            func() ([]byte, []byte, error) {
-                return []byte{}, nil, nil
-            },
-            // utilexec.ExitError exists, and status is not 0
-            func() ([]byte, []byte, error) {
-                return nil, nil, &fakeexec.FakeExitError{Status: 1}
-            },
-            // utilexec.ExitError exists, and status is 0
-            func() ([]byte, []byte, error) {
-                return nil, nil, &fakeexec.FakeExitError{Status: 0}
-            },
-            // other error exists
-            func() ([]byte, []byte, error) {
-                return nil, nil, errors.New("not ExitError")
-            },
-        },
-    }
-
-    return &runner{
-        exec: &fakeexec.FakeExec{
-            CommandScript: []fakeexec.FakeCommandAction{
-                func(cmd string, args ...string) exec.Cmd {
-                    return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
-                },
-                func(cmd string, args ...string) exec.Cmd {
-                    return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
-                },
-                func(cmd string, args ...string) exec.Cmd {
-                    return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
-                },
-                func(cmd string, args ...string) exec.Cmd {
-                    return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
-                },
-            },
-        },
-    }
-}
-
-func TestEnsurePortProxyRule(t *testing.T) {
-    runner := fakeCommonRunner()
-
-    tests := []struct {
-        name           string
-        arguments      []string
-        expectedResult bool
-        expectedError  bool
-    }{
-        {"Success", []string{"ensure-port-proxy-rule"}, true, false},
-        {"utilexec.ExitError exists, and status is not 0", []string{"ensure-port-proxy-rule"}, false, false},
-        {"utilexec.ExitError exists, and status is 0", []string{"ensure-port-proxy-rule"}, false, true},
-        {"other error exists", []string{"ensure-port-proxy-rule"}, false, true},
-    }
-
-    for _, test := range tests {
-        result, err := runner.EnsurePortProxyRule(test.arguments)
-        if test.expectedError {
-            assert.Errorf(t, err, "Failed to test: %s", test.name)
-        } else {
-            if err != nil {
-                assert.NoErrorf(t, err, "Failed to test: %s", test.name)
-            } else {
-                assert.EqualValuesf(t, test.expectedResult, result, "Failed to test: %s", test.name)
-            }
-        }
-    }
-
-}
-
-func TestDeletePortProxyRule(t *testing.T) {
-    runner := fakeCommonRunner()
-
-    tests := []struct {
-        name          string
-        arguments     []string
-        expectedError bool
-    }{
-        {"Success", []string{"delete-port-proxy-rule"}, false},
-        {"utilexec.ExitError exists, and status is not 0", []string{"delete-port-proxy-rule"}, true},
-        {"utilexec.ExitError exists, and status is 0", []string{"delete-port-proxy-rule"}, false},
-        {"other error exists", []string{"delete-port-proxy-rule"}, true},
-    }
-
-    for _, test := range tests {
-        err := runner.DeletePortProxyRule(test.arguments)
-        if test.expectedError {
-            assert.Errorf(t, err, "Failed to test: %s", test.name)
-        } else {
-            assert.NoErrorf(t, err, "Failed to test: %s", test.name)
-        }
-    }
-}
-
-func TestEnsureIPAddress(t *testing.T) {
-    tests := []struct {
-        name           string
-        arguments      []string
-        ip             net.IP
-        fakeCmdAction  []fakeexec.FakeCommandAction
-        expectedError  bool
-        expectedResult bool
-    }{
-        {
-            "IP address exists",
-            []string{"delete-port-proxy-rule"},
-            net.IPv4(10, 10, 10, 20),
-            []fakeexec.FakeCommandAction{
-                func(cmd string, args ...string) exec.Cmd {
-                    return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
-                        CombinedOutputScript: []fakeexec.FakeAction{
-                            // IP address exists
-                            func() ([]byte, []byte, error) {
-                                return []byte("IP Address:10.10.10.10\nIP Address:10.10.10.20"), nil, nil
-                            },
-                        },
-                    }, cmd, args...)
-                },
-            },
-            false,
-            true,
-        },
-
-        {
-            "IP address not exists, but set successful(find it in the second time)",
-            []string{"ensure-ip-address"},
-            net.IPv4(10, 10, 10, 20),
-            []fakeexec.FakeCommandAction{
-                func(cmd string, args ...string) exec.Cmd {
-                    return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
-                        CombinedOutputScript: []fakeexec.FakeAction{
-                            // IP address not exists
-                            func() ([]byte, []byte, error) {
-                                return []byte("IP Address:10.10.10.10"), nil, nil
-                            },
-                        },
-                    }, cmd, args...)
-                },
-                func(cmd string, args ...string) exec.Cmd {
-                    return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
-                        CombinedOutputScript: []fakeexec.FakeAction{
-                            // Success to set ip
-                            func() ([]byte, []byte, error) {
-                                return []byte(""), nil, nil
-                            },
-                        },
-                    }, cmd, args...)
-                },
-                func(cmd string, args ...string) exec.Cmd {
-                    return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
-                        CombinedOutputScript: []fakeexec.FakeAction{
-                            // IP address still not exists
-                            func() ([]byte, []byte, error) {
-                                return []byte("IP Address:10.10.10.10"), nil, nil
-                            },
-                        },
-                    }, cmd, args...)
-                },
-                func(cmd string, args ...string) exec.Cmd {
-                    return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
-                        CombinedOutputScript: []fakeexec.FakeAction{
-                            // IP address exists
-                            func() ([]byte, []byte, error) {
-                                return []byte("IP Address:10.10.10.10\nIP Address:10.10.10.20"), nil, nil
-                            },
-                        },
-                    }, cmd, args...)
-                },
-            },
-            false,
-            true,
-        },
-        {
-            "IP address not exists, utilexec.ExitError exists, but status is not 0)",
-            []string{"ensure-ip-address"},
-            net.IPv4(10, 10, 10, 20),
-            []fakeexec.FakeCommandAction{
-                func(cmd string, args ...string) exec.Cmd {
-                    return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
-                        CombinedOutputScript: []fakeexec.FakeAction{
-                            // IP address not exists
-                            func() ([]byte, []byte, error) {
-                                return []byte("IP Address:10.10.10.10"), nil, nil
-                            },
-                        },
-                    }, cmd, args...)
-                },
-                func(cmd string, args ...string) exec.Cmd {
-                    return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
-                        CombinedOutputScript: []fakeexec.FakeAction{
-                            // Failed to set ip, utilexec.ExitError exists, and status is not 0
-                            func() ([]byte, []byte, error) {
-                                return nil, nil, &fakeexec.FakeExitError{Status: 1}
-                            },
-                        },
-                    }, cmd, args...)
-                },
-            },
-            false,
-            false,
-        },
-        {
-            "IP address not exists, utilexec.ExitError exists, and status is 0)",
-            []string{"ensure-ip-address"},
-            net.IPv4(10, 10, 10, 20),
-            []fakeexec.FakeCommandAction{
-                func(cmd string, args ...string) exec.Cmd {
-                    return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
-                        CombinedOutputScript: []fakeexec.FakeAction{
-                            // IP address not exists
-                            func() ([]byte, []byte, error) {
-                                return []byte("IP Address:10.10.10.10"), nil, nil
-                            },
-                        },
-                    }, cmd, args...)
-                },
-                func(cmd string, args ...string) exec.Cmd {
-                    return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
-                        CombinedOutputScript: []fakeexec.FakeAction{
-                            // Failed to set ip, utilexec.ExitError exists, and status is 0
-                            func() ([]byte, []byte, error) {
-                                return nil, nil, &fakeexec.FakeExitError{Status: 0}
-                            },
-                        },
-                    }, cmd, args...)
-                },
-            },
-            true,
-            false,
-        },
-        {
-            "IP address not exists, and error is not utilexec.ExitError)",
-            []string{"ensure-ip-address"},
-            net.IPv4(10, 10, 10, 20),
-            []fakeexec.FakeCommandAction{
-                func(cmd string, args ...string) exec.Cmd {
-                    return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
-                        CombinedOutputScript: []fakeexec.FakeAction{
-                            // IP address not exists
-                            func() ([]byte, []byte, error) {
-                                return []byte("IP Address:10.10.10.10"), nil, nil
-                            },
-                        },
-                    }, cmd, args...)
-                },
-                func(cmd string, args ...string) exec.Cmd {
-                    return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
-                        CombinedOutputScript: []fakeexec.FakeAction{
-                            // Failed to set ip, other error exists
-                            func() ([]byte, []byte, error) {
-                                return nil, nil, errors.New("not ExitError")
-                            },
-                        },
-                    }, cmd, args...)
-                },
-            },
-            true,
-            false,
-        },
-    }
-
-    for _, test := range tests {
-        runner := New(&fakeexec.FakeExec{CommandScript: test.fakeCmdAction})
-        result, err := runner.EnsureIPAddress(test.arguments, test.ip)
-        if test.expectedError {
-            assert.Errorf(t, err, "Failed to test: %s", test.name)
-        } else {
-            if err != nil {
-                assert.NoErrorf(t, err, "Failed to test: %s", test.name)
-            } else {
-                assert.EqualValuesf(t, test.expectedResult, result, "Failed to test: %s", test.name)
-            }
-        }
-    }
-}
-
-func TestDeleteIPAddress(t *testing.T) {
-    runner := fakeCommonRunner()
-
-    tests := []struct {
-        name          string
-        arguments     []string
-        expectedError bool
-    }{
-        {"Success", []string{"delete-ip-address"}, false},
-        {"utilexec.ExitError exists, and status is not 0", []string{"delete-ip-address"}, true},
-        {"utilexec.ExitError exists, and status is 0", []string{"delete-ip-address"}, false},
-        {"other error exists", []string{"delete-ip-address"}, true},
-    }
-
-    for _, test := range tests {
-        err := runner.DeleteIPAddress(test.arguments)
-        if test.expectedError {
-            assert.Errorf(t, err, "Failed to test: %s", test.name)
-        } else {
-            assert.NoErrorf(t, err, "Failed to test: %s", test.name)
-        }
-    }
-}
-
-func TestGetInterfaceToAddIP(t *testing.T) {
-    // backup env 'INTERFACE_TO_ADD_SERVICE_IP'
-    backupValue := os.Getenv("INTERFACE_TO_ADD_SERVICE_IP")
-    // recover env
-    defer os.Setenv("INTERFACE_TO_ADD_SERVICE_IP", backupValue)
-
-    tests := []struct {
-        name           string
-        envToBeSet     string
-        expectedResult string
-    }{
-        {"env_value_is_empty", "", "vEthernet (HNS Internal NIC)"},
-        {"env_value_is_not_empty", "eth0", "eth0"},
-    }
-
-    fakeExec := fakeexec.FakeExec{
-        CommandScript: []fakeexec.FakeCommandAction{},
-    }
-    netsh := New(&fakeExec)
-
-    for _, test := range tests {
-        os.Setenv("INTERFACE_TO_ADD_SERVICE_IP", test.envToBeSet)
-        result := netsh.GetInterfaceToAddIP()
-
-        assert.EqualValuesf(t, test.expectedResult, result, "Failed to test: %s", test.name)
-    }
-}
-
-func TestRestore(t *testing.T) {
-    runner := New(&fakeexec.FakeExec{
-        CommandScript: []fakeexec.FakeCommandAction{},
-    })
-
-    result := runner.Restore([]string{})
-    assert.NoErrorf(t, result, "The return value must be nil")
-}
-
-func TestCheckIPExists(t *testing.T) {
-    fakeCmd := fakeexec.FakeCmd{
-        CombinedOutputScript: []fakeexec.FakeAction{
-            // Error exists
-            func() ([]byte, []byte, error) {
-                return nil, nil, &fakeexec.FakeExitError{Status: 1}
-            },
-            // IP address string is empty
-            func() ([]byte, []byte, error) {
-                return []byte(""), nil, nil
-            },
-            // "IP Address:" field not exists
-            func() ([]byte, []byte, error) {
-                return []byte("10.10.10.10"), nil, nil
-            },
-            // IP not exists
-            func() ([]byte, []byte, error) {
-                return []byte("IP Address:10.10.10.10"), nil, nil
-            },
-            // IP exists
-            func() ([]byte, []byte, error) {
-                return []byte("IP Address:10.10.10.10\nIP Address:10.10.10.20"), nil, nil
-            },
-        },
-    }
-    fakeExec := fakeexec.FakeExec{
-        CommandScript: []fakeexec.FakeCommandAction{
-            func(cmd string, args ...string) exec.Cmd {
-                return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
-            },
-            func(cmd string, args ...string) exec.Cmd {
-                return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
-            },
-            func(cmd string, args ...string) exec.Cmd {
-                return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
-            },
-            func(cmd string, args ...string) exec.Cmd {
-                return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
-            },
-            func(cmd string, args ...string) exec.Cmd {
-                return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
-            },
-        },
-    }
-    fakeRunner := &runner{
-        exec: &fakeExec,
-    }
-
-    tests := []struct {
-        name           string
-        ipToCheck      string
-        arguments      []string
-        expectedError  bool
-        expectedResult bool
-    }{
-        {"Error exists", "10.10.10.20", []string{"check-IP-exists"}, true, false},
-        {"IP address string is empty", "10.10.10.20", []string{"check-IP-exists"}, false, false},
-        {"'IP Address:' field not exists", "10.10.10.20", []string{"check-IP-exists"}, false, false},
-        {"IP not exists", "10.10.10.20", []string{"check-IP-exists"}, false, false},
-        {"IP exists", "10.10.10.20", []string{"check-IP-exists"}, false, true},
-    }
-
-    for _, test := range tests {
-        result, err := checkIPExists(test.ipToCheck, test.arguments, fakeRunner)
-        if test.expectedError {
-            assert.Errorf(t, err, "Failed to test: %s", test.name)
-        } else {
-            assert.EqualValuesf(t, test.expectedResult, result, "Failed to test: %s", test.name)
-        }
-    }
-}
-
-func TestGetIP(t *testing.T) {
-    testcases := []struct {
-        name          string
-        showAddress   string
-        expectAddress string
-    }{
-        {
-            name:          "IP address displayed in Chinese",
-            showAddress:   "IP 地址: 10.96.0.2",
-            expectAddress: "10.96.0.2",
-        },
-        {
-            name:          "IP address displayed in English",
-            showAddress:   "IP Address: 10.96.0.3",
-            expectAddress: "10.96.0.3",
-        },
-        {
-            name:          "IP address without spaces",
-            showAddress:   "IP Address:10.96.0.4",
-            expectAddress: "10.96.0.4",
-        },
-        {
-            name:          "Only 'IP Address:' field exists",
-            showAddress:   "IP Address:",
-            expectAddress: "",
-        },
-        {
-            name:          "IP address without ':' separator",
-            showAddress:   "IP Address10.6.9.2",
-            expectAddress: "",
-        },
-    }
-
-    for _, tc := range testcases {
-        t.Run(tc.name, func(t *testing.T) {
-            address := getIP(tc.showAddress)
-            if address != tc.expectAddress {
-                t.Errorf("expected address=%q, got %q", tc.expectAddress, address)
-            }
-        })
-    }
-}
diff --git a/pkg/util/netsh/testing/fake.go b/pkg/util/netsh/testing/fake.go
deleted file mode 100644
index 19ca89888d7..00000000000
--- a/pkg/util/netsh/testing/fake.go
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package testing
-
-import (
-    "net"
-
-    "k8s.io/kubernetes/pkg/util/netsh"
-)
-
-// FakeNetsh is a no-op implementation of the netsh Interface
-type FakeNetsh struct {
-}
-
-// NewFake returns a fakenetsh no-op implementation of the netsh Interface
-func NewFake() *FakeNetsh {
-    return &FakeNetsh{}
-}
-
-// EnsurePortProxyRule function implementing the netsh interface and always returns true and nil without any error
-func (*FakeNetsh) EnsurePortProxyRule(args []string) (bool, error) {
-    // Do Nothing
-    return true, nil
-}
-
-// DeletePortProxyRule deletes the specified portproxy rule. If the rule did not exist, return error.
-func (*FakeNetsh) DeletePortProxyRule(args []string) error {
-    // Do Nothing
-    return nil
-}
-
-// EnsureIPAddress checks if the specified IP Address is added to vEthernet (HNSTransparent) interface, if not, add it. If the address existed, return true.
-func (*FakeNetsh) EnsureIPAddress(args []string, ip net.IP) (bool, error) {
-    return true, nil
-}
-
-// DeleteIPAddress checks if the specified IP address is present and, if so, deletes it.
-func (*FakeNetsh) DeleteIPAddress(args []string) error {
-    // Do Nothing
-    return nil
-}
-
-// Restore runs `netsh exec` to restore portproxy or addresses using a file.
-// TODO Check if this is required, most likely not
-func (*FakeNetsh) Restore(args []string) error {
-    // Do Nothing
-    return nil
-}
-
-// GetInterfaceToAddIP returns the interface name where Service IP needs to be added
-// IP Address needs to be added for netsh portproxy to redirect traffic
-// Reads Environment variable INTERFACE_TO_ADD_SERVICE_IP, if it is not defined then "vEthernet (HNSTransparent)" is returned
-func (*FakeNetsh) GetInterfaceToAddIP() string {
-    return "Interface 1"
-}
-
-var _ = netsh.Interface(&FakeNetsh{})
diff --git a/staging/src/k8s.io/kube-proxy/config/v1alpha1/types.go b/staging/src/k8s.io/kube-proxy/config/v1alpha1/types.go
index ba02859006a..72c3901b933 100644
--- a/staging/src/k8s.io/kube-proxy/config/v1alpha1/types.go
+++ b/staging/src/k8s.io/kube-proxy/config/v1alpha1/types.go
@@ -160,9 +160,6 @@ type KubeProxyConfiguration struct {
 	// portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
 	// in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
 	PortRange string `json:"portRange"`
-	// udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
-	// Must be greater than 0. Only applicable for proxyMode=userspace.
-	UDPIdleTimeout metav1.Duration `json:"udpIdleTimeout"`
 	// conntrack contains conntrack-related configuration options.
 	Conntrack KubeProxyConntrackConfiguration `json:"conntrack"`
 	// configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater
@@ -188,19 +185,13 @@ type KubeProxyConfiguration struct {
 
 // ProxyMode represents modes used by the Kubernetes proxy server.
 //
-// Currently, three modes of proxy are available in Linux platform: 'userspace' (older, going to be EOL), 'iptables'
-// (newer, faster), 'ipvs'(newest, better in performance and scalability).
+// Currently, two modes of proxy are available in Linux platform: 'iptables' and 'ipvs'.
+// One mode of proxy is available in Windows platform: 'kernelspace'.
 //
-// Two modes of proxy are available in Windows platform: 'userspace'(older, stable) and 'kernelspace' (newer, faster).
-//
-// In Linux platform, if proxy mode is blank, use the best-available proxy (currently iptables, but may change in the
-// future). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are
-// insufficient, this always falls back to the userspace proxy. IPVS mode will be enabled when proxy mode is set to 'ipvs',
-// and the fall back path is firstly iptables and then userspace.
-//
-// In Windows platform, if proxy mode is blank, use the best-available proxy (currently userspace, but may change in the
-// future). If winkernel proxy is selected, regardless of how, but the Windows kernel can't support this mode of proxy,
-// this always falls back to the userspace proxy.
+// If the proxy mode is unspecified, the best-available proxy mode will be used (currently this
+// is `iptables` on Linux and `kernelspace` on Windows). If the selected proxy mode cannot be
+// used (due to lack of kernel support, missing userspace components, etc) then kube-proxy
+// will exit with an error.
 type ProxyMode string
 
 // LocalMode represents modes to detect local traffic from the node
diff --git a/staging/src/k8s.io/kube-proxy/config/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/kube-proxy/config/v1alpha1/zz_generated.deepcopy.go
index c7c8b5f8e60..f5893982b1b 100644
--- a/staging/src/k8s.io/kube-proxy/config/v1alpha1/zz_generated.deepcopy.go
+++ b/staging/src/k8s.io/kube-proxy/config/v1alpha1/zz_generated.deepcopy.go
@@ -61,7 +61,6 @@ func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) {
 		*out = new(int32)
 		**out = **in
 	}
-	out.UDPIdleTimeout = in.UDPIdleTimeout
 	in.Conntrack.DeepCopyInto(&out.Conntrack)
 	out.ConfigSyncPeriod = in.ConfigSyncPeriod
 	if in.NodePortAddresses != nil {
diff --git a/test/e2e/framework/.import-restrictions b/test/e2e/framework/.import-restrictions
index d515c37f202..be280ae1d91 100644
--- a/test/e2e/framework/.import-restrictions
+++ b/test/e2e/framework/.import-restrictions
@@ -168,7 +168,6 @@ rules:
   - k8s.io/kubernetes/pkg/proxy/ipvs
   - k8s.io/kubernetes/pkg/proxy/metaproxier
   - k8s.io/kubernetes/pkg/proxy/metrics
-  - k8s.io/kubernetes/pkg/proxy/userspace
   - k8s.io/kubernetes/pkg/proxy/util
   - k8s.io/kubernetes/pkg/registry/core/service/allocator
   - k8s.io/kubernetes/pkg/registry/core/service/portallocator