From 8de0fc09aa4670f0e6ef1f17064aea2e8fa9f582 Mon Sep 17 00:00:00 2001 From: Dan Winship Date: Fri, 2 Feb 2024 09:18:27 -0500 Subject: [PATCH 1/5] Remove an unused type from kube-proxy config, move around some helpers --- pkg/proxy/apis/config/types.go | 56 ++++--------------- .../apis/config/zz_generated.deepcopy.go | 22 -------- 2 files changed, 11 insertions(+), 67 deletions(-) diff --git a/pkg/proxy/apis/config/types.go b/pkg/proxy/apis/config/types.go index cb4100dcc58..1ba5b711c67 100644 --- a/pkg/proxy/apis/config/types.go +++ b/pkg/proxy/apis/config/types.go @@ -17,10 +17,6 @@ limitations under the License. package config import ( - "fmt" - "sort" - "strings" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" componentbaseconfig "k8s.io/component-base/config" logsapi "k8s.io/component-base/logs/api/v1" @@ -265,17 +261,6 @@ const ( ProxyModeKernelspace ProxyMode = "kernelspace" ) -// LocalMode represents modes to detect local traffic from the node -type LocalMode string - -// Currently supported modes for LocalMode -const ( - LocalModeClusterCIDR LocalMode = "ClusterCIDR" - LocalModeNodeCIDR LocalMode = "NodeCIDR" - LocalModeBridgeInterface LocalMode = "BridgeInterface" - LocalModeInterfaceNamePrefix LocalMode = "InterfaceNamePrefix" -) - func (m *ProxyMode) Set(s string) error { *m = ProxyMode(s) return nil @@ -292,6 +277,17 @@ func (m *ProxyMode) Type() string { return "ProxyMode" } +// LocalMode represents modes to detect local traffic from the node +type LocalMode string + +// Currently supported modes for LocalMode +const ( + LocalModeClusterCIDR LocalMode = "ClusterCIDR" + LocalModeNodeCIDR LocalMode = "NodeCIDR" + LocalModeBridgeInterface LocalMode = "BridgeInterface" + LocalModeInterfaceNamePrefix LocalMode = "InterfaceNamePrefix" +) + func (m *LocalMode) Set(s string) error { *m = LocalMode(s) return nil @@ -307,33 +303,3 @@ func (m *LocalMode) String() string { func (m *LocalMode) Type() string { return "LocalMode" } - -type ConfigurationMap 
map[string]string - -func (m *ConfigurationMap) String() string { - pairs := []string{} - for k, v := range *m { - pairs = append(pairs, fmt.Sprintf("%s=%s", k, v)) - } - sort.Strings(pairs) - return strings.Join(pairs, ",") -} - -func (m *ConfigurationMap) Set(value string) error { - for _, s := range strings.Split(value, ",") { - if len(s) == 0 { - continue - } - arr := strings.SplitN(s, "=", 2) - if len(arr) == 2 { - (*m)[strings.TrimSpace(arr[0])] = strings.TrimSpace(arr[1]) - } else { - (*m)[strings.TrimSpace(arr[0])] = "" - } - } - return nil -} - -func (*ConfigurationMap) Type() string { - return "mapStringString" -} diff --git a/pkg/proxy/apis/config/zz_generated.deepcopy.go b/pkg/proxy/apis/config/zz_generated.deepcopy.go index 386cf2465f9..fb95d451c95 100644 --- a/pkg/proxy/apis/config/zz_generated.deepcopy.go +++ b/pkg/proxy/apis/config/zz_generated.deepcopy.go @@ -26,28 +26,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in ConfigurationMap) DeepCopyInto(out *ConfigurationMap) { - { - in := &in - *out = make(ConfigurationMap, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationMap. -func (in ConfigurationMap) DeepCopy() ConfigurationMap { - if in == nil { - return nil - } - out := new(ConfigurationMap) - in.DeepCopyInto(out) - return *out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DetectLocalConfiguration) DeepCopyInto(out *DetectLocalConfiguration) { *out = *in From 0b599aa8e33385a07a807dadf66ebf8cf7b77784 Mon Sep 17 00:00:00 2001 From: Dan Winship Date: Fri, 2 Feb 2024 09:43:39 -0500 Subject: [PATCH 2/5] Add `--nodeport-addresses primary` The behavior when you specify no --nodeport-addresses value in a dual-stack cluster is terrible and we can't fix it, for backward-compatibility reasons. Actually, the behavior when you specify no --nodeport-addresses value in a single-stack cluster isn't exactly awesome either... Allow specifying `--nodeport-addresses primary` to get the previously-nftables-backend-specific behavior of listening on only the node's primary IP or IPs. --- cmd/kube-proxy/app/server.go | 13 ++++++++++++- cmd/kube-proxy/app/server_linux.go | 4 ++++ pkg/generated/openapi/zz_generated.openapi.go | 2 +- pkg/proxy/apis/config/types.go | 12 +++++++++--- pkg/proxy/apis/config/validation/validation.go | 7 +++++++ pkg/proxy/apis/config/validation/validation_test.go | 9 +++++++++ pkg/proxy/nftables/proxier_test.go | 3 ++- .../src/k8s.io/kube-proxy/config/v1alpha1/types.go | 8 +++++--- 8 files changed, 49 insertions(+), 9 deletions(-) diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index 0857851d404..e2c84ca7af4 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -197,7 +197,7 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) { "This parameter is ignored if a config file is specified by --config.") fs.StringSliceVar(&o.config.NodePortAddresses, "nodeport-addresses", o.config.NodePortAddresses, - "A list of CIDR ranges that contain valid node IPs. If set, connections to NodePort services will only be accepted on node IPs in one of the indicated ranges. If unset, NodePort connections will be accepted on all local IPs. 
This parameter is ignored if a config file is specified by --config.") + "A list of CIDR ranges that contain valid node IPs, or alternatively, the single string 'primary'. If set to a list of CIDRs, connections to NodePort services will only be accepted on node IPs in one of the indicated ranges. If set to 'primary', NodePort services will only be accepted on the node's primary IP(s) according to the Node object. If unset, NodePort connections will be accepted on all local IPs. This parameter is ignored if a config file is specified by --config.") fs.Int32Var(o.config.OOMScoreAdj, "oom-score-adj", ptr.Deref(o.config.OOMScoreAdj, int32(qos.KubeProxyOOMScoreAdj)), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]. This parameter is ignored if a config file is specified by --config.") fs.Int32Var(o.config.Conntrack.MaxPerCore, "conntrack-max-per-core", *o.config.Conntrack.MaxPerCore, @@ -631,6 +631,17 @@ func newProxyServer(logger klog.Logger, config *kubeproxyconfig.KubeProxyConfigu rawNodeIPs := getNodeIPs(logger, s.Client, s.Hostname) s.PrimaryIPFamily, s.NodeIPs = detectNodeIPs(logger, rawNodeIPs, config.BindAddress) + if len(config.NodePortAddresses) == 1 && config.NodePortAddresses[0] == kubeproxyconfig.NodePortAddressesPrimary { + var nodePortAddresses []string + if nodeIP := s.NodeIPs[v1.IPv4Protocol]; nodeIP != nil && !nodeIP.IsLoopback() { + nodePortAddresses = append(nodePortAddresses, fmt.Sprintf("%s/32", nodeIP.String())) + } + if nodeIP := s.NodeIPs[v1.IPv6Protocol]; nodeIP != nil && !nodeIP.IsLoopback() { + nodePortAddresses = append(nodePortAddresses, fmt.Sprintf("%s/128", nodeIP.String())) + } + config.NodePortAddresses = nodePortAddresses + } + s.Broadcaster = events.NewBroadcaster(&events.EventSinkImpl{Interface: s.Client.EventsV1()}) s.Recorder = s.Broadcaster.NewRecorder(proxyconfigscheme.Scheme, "kube-proxy") diff --git a/cmd/kube-proxy/app/server_linux.go b/cmd/kube-proxy/app/server_linux.go index 
75ca44a9b73..866195e1bf0 100644 --- a/cmd/kube-proxy/app/server_linux.go +++ b/cmd/kube-proxy/app/server_linux.go @@ -70,6 +70,10 @@ func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfigur config.Mode = proxyconfigapi.ProxyModeIPTables } + if config.Mode == proxyconfigapi.ProxyModeNFTables && len(config.NodePortAddresses) == 0 { + config.NodePortAddresses = []string{proxyconfigapi.NodePortAddressesPrimary} + } + if config.DetectLocalMode == "" { o.logger.V(4).Info("Defaulting detect-local-mode", "localModeClusterCIDR", string(proxyconfigapi.LocalModeClusterCIDR)) config.DetectLocalMode = proxyconfigapi.LocalModeClusterCIDR diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index 1fc94fee217..8f6433c2844 100644 --- a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -59048,7 +59048,7 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyConfiguration(ref common.R }, "nodePortAddresses": { SchemaProps: spec.SchemaProps{ - Description: "nodePortAddresses is a list of CIDR ranges that contain valid node IPs. If set, connections to NodePort services will only be accepted on node IPs in one of the indicated ranges. If unset, NodePort connections will be accepted on all local IPs.", + Description: "nodePortAddresses is a list of CIDR ranges that contain valid node IPs, or alternatively, the single string 'primary'. If set to a list of CIDRs, connections to NodePort services will only be accepted on node IPs in one of the indicated ranges. If set to 'primary', NodePort services will only be accepted on the node's primary IPv4 and/or IPv6 address according to the Node object. 
If unset, NodePort connections will be accepted on all local IPs.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ diff --git a/pkg/proxy/apis/config/types.go b/pkg/proxy/apis/config/types.go index 1ba5b711c67..4e303936b8f 100644 --- a/pkg/proxy/apis/config/types.go +++ b/pkg/proxy/apis/config/types.go @@ -224,10 +224,12 @@ type KubeProxyConfiguration struct { // used.) ClusterCIDR string - // nodePortAddresses is a list of CIDR ranges that contain valid node IPs. If set, + // nodePortAddresses is a list of CIDR ranges that contain valid node IPs, or + // alternatively, the single string 'primary'. If set to a list of CIDRs, // connections to NodePort services will only be accepted on node IPs in one of - // the indicated ranges. If unset, NodePort connections will be accepted on all - // local IPs. + // the indicated ranges. If set to 'primary', NodePort services will only be + // accepted on the node's primary IPv4 and/or IPv6 address according to the Node + // object. If unset, NodePort connections will be accepted on all local IPs. NodePortAddresses []string // oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within @@ -303,3 +305,7 @@ func (m *LocalMode) String() string { func (m *LocalMode) Type() string { return "LocalMode" } + +// NodePortAddressesPrimary is a special value for NodePortAddresses indicating that it +// should only use the primary node IPs. 
+const NodePortAddressesPrimary string = "primary" diff --git a/pkg/proxy/apis/config/validation/validation.go b/pkg/proxy/apis/config/validation/validation.go index 1b31f5450f2..fb596aa779c 100644 --- a/pkg/proxy/apis/config/validation/validation.go +++ b/pkg/proxy/apis/config/validation/validation.go @@ -297,6 +297,13 @@ func validateKubeProxyNodePortAddress(nodePortAddresses []string, fldPath *field allErrs := field.ErrorList{} for i := range nodePortAddresses { + if nodePortAddresses[i] == kubeproxyconfig.NodePortAddressesPrimary { + if i != 0 || len(nodePortAddresses) != 1 { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), nodePortAddresses[i], "can't use both 'primary' and CIDRs")) + } + break + } + if _, _, err := netutils.ParseCIDRSloppy(nodePortAddresses[i]); err != nil { allErrs = append(allErrs, field.Invalid(fldPath.Index(i), nodePortAddresses[i], "must be a valid CIDR")) } diff --git a/pkg/proxy/apis/config/validation/validation_test.go b/pkg/proxy/apis/config/validation/validation_test.go index d9cc7283aa9..f3e07e16278 100644 --- a/pkg/proxy/apis/config/validation/validation_test.go +++ b/pkg/proxy/apis/config/validation/validation_test.go @@ -975,6 +975,7 @@ func TestValidateKubeProxyNodePortAddress(t *testing.T) { {[]string{"10.20.0.0/16", "100.200.0.0/16"}}, {[]string{"10.0.0.0/8"}}, {[]string{"2001:db8::/32"}}, + {[]string{kubeproxyconfig.NodePortAddressesPrimary}}, } for _, successCase := range successCases { @@ -1012,6 +1013,14 @@ func TestValidateKubeProxyNodePortAddress(t *testing.T) { addresses: []string{"::1/128", "2001:db8::/32", "2001:db8:xyz/64"}, expectedErrs: field.ErrorList{field.Invalid(newPath.Child("NodePortAddresses[2]"), "2001:db8:xyz/64", "must be a valid CIDR")}, }, + "invalid primary/CIDR mix 1": { + addresses: []string{"primary", "127.0.0.1/32"}, + expectedErrs: field.ErrorList{field.Invalid(newPath.Child("NodePortAddresses[0]"), "primary", "can't use both 'primary' and CIDRs")}, + }, + "invalid primary/CIDR mix 2": 
{ + addresses: []string{"127.0.0.1/32", "primary"}, + expectedErrs: field.ErrorList{field.Invalid(newPath.Child("NodePortAddresses[1]"), "primary", "can't use both 'primary' and CIDRs")}, + }, } for _, testCase := range testCases { diff --git a/pkg/proxy/nftables/proxier_test.go b/pkg/proxy/nftables/proxier_test.go index b5ad53dd2d7..1b67de4f75c 100644 --- a/pkg/proxy/nftables/proxier_test.go +++ b/pkg/proxy/nftables/proxier_test.go @@ -86,6 +86,7 @@ func NewFakeProxier(ipFamily v1.IPFamily) (*knftables.Fake, *Proxier) { serviceCIDRs = "fd00:10:96::/112" } detectLocal, _ := proxyutiliptables.NewDetectLocalByCIDR(podCIDR) + nodePortAddresses := []string{fmt.Sprintf("%s/32", testNodeIP), fmt.Sprintf("%s/128", testNodeIPv6)} networkInterfacer := proxyutiltest.NewFakeNetwork() itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0} @@ -125,7 +126,7 @@ func NewFakeProxier(ipFamily v1.IPFamily) (*knftables.Fake, *Proxier) { hostname: testHostname, serviceHealthServer: healthcheck.NewFakeServiceHealthServer(), nodeIP: nodeIP, - nodePortAddresses: proxyutil.NewNodePortAddresses(ipFamily, nil, nodeIP), + nodePortAddresses: proxyutil.NewNodePortAddresses(ipFamily, nodePortAddresses, nodeIP), networkInterfacer: networkInterfacer, staleChains: make(map[string]time.Time), serviceCIDRs: serviceCIDRs, diff --git a/staging/src/k8s.io/kube-proxy/config/v1alpha1/types.go b/staging/src/k8s.io/kube-proxy/config/v1alpha1/types.go index e4382574de3..c9a19e39b73 100644 --- a/staging/src/k8s.io/kube-proxy/config/v1alpha1/types.go +++ b/staging/src/k8s.io/kube-proxy/config/v1alpha1/types.go @@ -224,10 +224,12 @@ type KubeProxyConfiguration struct { // used.) ClusterCIDR string `json:"clusterCIDR"` - // nodePortAddresses is a list of CIDR ranges that contain valid node IPs. If set, + // nodePortAddresses is a list of CIDR ranges that contain valid node IPs, or + // alternatively, the single string 'primary'. 
If set to a list of CIDRs, // connections to NodePort services will only be accepted on node IPs in one of - // the indicated ranges. If unset, NodePort connections will be accepted on all - // local IPs. + // the indicated ranges. If set to 'primary', NodePort services will only be + // accepted on the node's primary IPv4 and/or IPv6 address according to the Node + // object. If unset, NodePort connections will be accepted on all local IPs. NodePortAddresses []string `json:"nodePortAddresses"` // oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within From fde1af55d2920c41a831066816c6c1c2dd1e3429 Mon Sep 17 00:00:00 2001 From: Dan Winship Date: Fri, 2 Feb 2024 09:43:39 -0500 Subject: [PATCH 3/5] Warn users with bad --nodeport-addresses If users don't pass any --nodeport-addresses, suggest they should pass `--nodeport-addresses primary`, to avoid accepting NodePort connections on all interfaces. If users pass a single-stack --nodeport-addresses in what looks like a dual-stack cluster, warn them that they probably ought to be passing a dual-stack --nodeport-addresses. 
--- cmd/kube-proxy/app/server.go | 57 +++++++++++--- cmd/kube-proxy/app/server_test.go | 127 ++++++++++++++++++------------ 2 files changed, 122 insertions(+), 62 deletions(-) diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index e2c84ca7af4..73489b3f18c 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -661,6 +661,11 @@ func newProxyServer(logger klog.Logger, config *kubeproxyconfig.KubeProxyConfigu return nil, err } + err = checkBadConfig(s) + if err != nil { + logger.Error(err, "Kube-proxy configuration may be incomplete or incorrect") + } + ipv4Supported, ipv6Supported, dualStackSupported, err := s.platformCheckSupported() if err != nil { return nil, err @@ -672,7 +677,7 @@ func newProxyServer(logger klog.Logger, config *kubeproxyconfig.KubeProxyConfigu logger.Info("kube-proxy running in single-stack mode", "ipFamily", s.PrimaryIPFamily) } - err, fatal := checkIPConfig(s, dualStackSupported) + err, fatal := checkBadIPConfig(s, dualStackSupported) if err != nil { if fatal { return nil, fmt.Errorf("kube-proxy configuration is incorrect: %v", err) @@ -688,8 +693,42 @@ func newProxyServer(logger klog.Logger, config *kubeproxyconfig.KubeProxyConfigu return s, nil } -// checkIPConfig confirms that s has proper configuration for its primary IP family. -func checkIPConfig(s *ProxyServer, dualStackSupported bool) (error, bool) { +// checkBadConfig checks for bad/deprecated configuration +func checkBadConfig(s *ProxyServer) error { + var errors []error + + // At this point we haven't seen any actual Services or EndpointSlices, so we + // don't really know if the cluster is expected to be single- or dual-stack. But + // we can at least take note of whether there is any explicitly-dual-stack + // configuration.
+ anyDualStackConfig := false + clusterCIDRs := strings.Split(s.Config.ClusterCIDR, ",") + for _, config := range [][]string{clusterCIDRs, s.Config.NodePortAddresses, s.Config.IPVS.ExcludeCIDRs, s.podCIDRs} { + if dual, _ := netutils.IsDualStackCIDRStrings(config); dual { + anyDualStackConfig = true + break + } + } + + // Warn if NodePortAddresses does not limit connections on all IP families that + // seem to be in use. + cidrsByFamily := proxyutil.MapCIDRsByIPFamily(s.Config.NodePortAddresses) + if len(s.Config.NodePortAddresses) == 0 { + errors = append(errors, fmt.Errorf("nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`")) + } else if anyDualStackConfig && len(cidrsByFamily[s.PrimaryIPFamily]) == len(s.Config.NodePortAddresses) { + errors = append(errors, fmt.Errorf("cluster appears to be dual-stack but nodePortAddresses contains only %s addresses; NodePort connections will be accepted on all local %s IPs", s.PrimaryIPFamily, proxyutil.OtherIPFamily(s.PrimaryIPFamily))) + } else if len(cidrsByFamily[s.PrimaryIPFamily]) == 0 { + errors = append(errors, fmt.Errorf("cluster appears to be %s-primary but nodePortAddresses contains only %s addresses; NodePort connections will be accepted on all local %s IPs", s.PrimaryIPFamily, proxyutil.OtherIPFamily(s.PrimaryIPFamily), s.PrimaryIPFamily)) + } + + return utilerrors.NewAggregate(errors) +} + +// checkBadIPConfig checks for bad configuration relative to s.PrimaryIPFamily. +// Historically, we did not check most of the config options, so we cannot retroactively +// make IP family mismatches in those options be fatal. When we add new options to check +// here, we should make problems with those options be fatal. 
+func checkBadIPConfig(s *ProxyServer, dualStackSupported bool) (err error, fatal bool) { var errors []error var badFamily netutils.IPFamily @@ -706,11 +745,6 @@ func checkIPConfig(s *ProxyServer, dualStackSupported bool) (error, bool) { clusterType = fmt.Sprintf("%s-only", s.PrimaryIPFamily) } - // Historically, we did not check most of the config options, so we cannot - // retroactively make IP family mismatches in those options be fatal. When we add - // new options to check here, we should make problems with those options be fatal. - fatal := false - if s.Config.ClusterCIDR != "" { clusterCIDRs := strings.Split(s.Config.ClusterCIDR, ",") if badCIDRs(clusterCIDRs, badFamily) { @@ -722,10 +756,6 @@ func checkIPConfig(s *ProxyServer, dualStackSupported bool) (error, bool) { } } - if badCIDRs(s.Config.NodePortAddresses, badFamily) { - errors = append(errors, fmt.Errorf("cluster is %s but nodePortAddresses contains only IPv%s addresses", clusterType, badFamily)) - } - if badCIDRs(s.podCIDRs, badFamily) { errors = append(errors, fmt.Errorf("cluster is %s but node.spec.podCIDRs contains only IPv%s addresses", clusterType, badFamily)) if s.Config.DetectLocalMode == kubeproxyconfig.LocalModeNodeCIDR { @@ -753,6 +783,9 @@ func checkIPConfig(s *ProxyServer, dualStackSupported bool) (error, bool) { } } + // Note that s.Config.NodePortAddresses gets checked as part of checkBadConfig() + // so it doesn't need to be checked here. 
+ return utilerrors.NewAggregate(errors), fatal } diff --git a/cmd/kube-proxy/app/server_test.go b/cmd/kube-proxy/app/server_test.go index 37559f10c1c..b8963dca8c5 100644 --- a/cmd/kube-proxy/app/server_test.go +++ b/cmd/kube-proxy/app/server_test.go @@ -849,7 +849,81 @@ func Test_detectNodeIPs(t *testing.T) { } } -func Test_checkIPConfig(t *testing.T) { +func Test_checkBadConfig(t *testing.T) { + cases := []struct { + name string + proxy *ProxyServer + err bool + }{ + { + name: "single-stack NodePortAddresses with single-stack config", + proxy: &ProxyServer{ + Config: &kubeproxyconfig.KubeProxyConfiguration{ + ClusterCIDR: "10.0.0.0/8", + NodePortAddresses: []string{"192.168.0.0/24"}, + }, + PrimaryIPFamily: v1.IPv4Protocol, + }, + err: false, + }, + { + name: "dual-stack NodePortAddresses with dual-stack config", + proxy: &ProxyServer{ + Config: &kubeproxyconfig.KubeProxyConfiguration{ + ClusterCIDR: "10.0.0.0/8,fd09::/64", + NodePortAddresses: []string{"192.168.0.0/24", "fd03::/64"}, + }, + PrimaryIPFamily: v1.IPv4Protocol, + }, + err: false, + }, + { + name: "empty NodePortAddresses", + proxy: &ProxyServer{ + Config: &kubeproxyconfig.KubeProxyConfiguration{ + NodePortAddresses: []string{}, + }, + PrimaryIPFamily: v1.IPv4Protocol, + }, + err: true, + }, + { + name: "single-stack NodePortAddresses with dual-stack config", + proxy: &ProxyServer{ + Config: &kubeproxyconfig.KubeProxyConfiguration{ + ClusterCIDR: "10.0.0.0/8,fd09::/64", + NodePortAddresses: []string{"192.168.0.0/24"}, + }, + PrimaryIPFamily: v1.IPv4Protocol, + }, + err: true, + }, + { + name: "wrong-single-stack NodePortAddresses", + proxy: &ProxyServer{ + Config: &kubeproxyconfig.KubeProxyConfiguration{ + ClusterCIDR: "fd09::/64", + NodePortAddresses: []string{"192.168.0.0/24"}, + }, + PrimaryIPFamily: v1.IPv6Protocol, + }, + err: true, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + err := checkBadConfig(c.proxy) + if err != nil && !c.err { + t.Errorf("unexpected 
error: %v", err) + } else if err == nil && c.err { + t.Errorf("unexpected lack of error") + } + }) + } +} + +func Test_checkBadIPConfig(t *testing.T) { cases := []struct { name string proxy *ProxyServer @@ -929,53 +1003,6 @@ func Test_checkIPConfig(t *testing.T) { dsFatal: false, }, - { - name: "ok single-stack nodePortAddresses", - proxy: &ProxyServer{ - Config: &kubeproxyconfig.KubeProxyConfiguration{ - NodePortAddresses: []string{"10.0.0.0/8", "192.168.0.0/24"}, - }, - PrimaryIPFamily: v1.IPv4Protocol, - }, - ssErr: false, - dsErr: false, - }, - { - name: "ok dual-stack nodePortAddresses", - proxy: &ProxyServer{ - Config: &kubeproxyconfig.KubeProxyConfiguration{ - NodePortAddresses: []string{"10.0.0.0/8", "fd01:2345::/64", "fd01:abcd::/64"}, - }, - PrimaryIPFamily: v1.IPv4Protocol, - }, - ssErr: false, - dsErr: false, - }, - { - name: "ok reversed dual-stack nodePortAddresses", - proxy: &ProxyServer{ - Config: &kubeproxyconfig.KubeProxyConfiguration{ - NodePortAddresses: []string{"fd01:2345::/64", "fd01:abcd::/64", "10.0.0.0/8"}, - }, - PrimaryIPFamily: v1.IPv4Protocol, - }, - ssErr: false, - dsErr: false, - }, - { - name: "wrong-family nodePortAddresses", - proxy: &ProxyServer{ - Config: &kubeproxyconfig.KubeProxyConfiguration{ - NodePortAddresses: []string{"10.0.0.0/8"}, - }, - PrimaryIPFamily: v1.IPv6Protocol, - }, - ssErr: true, - ssFatal: false, - dsErr: true, - dsFatal: false, - }, - { name: "ok single-stack node.spec.podCIDRs", proxy: &ProxyServer{ @@ -1133,7 +1160,7 @@ func Test_checkIPConfig(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - err, fatal := checkIPConfig(c.proxy, false) + err, fatal := checkBadIPConfig(c.proxy, false) if err != nil && !c.ssErr { t.Errorf("unexpected error in single-stack case: %v", err) } else if err == nil && c.ssErr { @@ -1142,7 +1169,7 @@ func Test_checkIPConfig(t *testing.T) { t.Errorf("expected fatal=%v, got %v", c.ssFatal, fatal) } - err, fatal = checkIPConfig(c.proxy, true) + err, fatal 
= checkBadIPConfig(c.proxy, true) if err != nil && !c.dsErr { t.Errorf("unexpected error in dual-stack case: %v", err) } else if err == nil && c.dsErr { From 19b3a9e19436328eaaa247ead8afbdd501a08e85 Mon Sep 17 00:00:00 2001 From: Dan Winship Date: Fri, 2 Feb 2024 09:44:40 -0500 Subject: [PATCH 4/5] (Mostly) Revert "change --nodeport-addresses behavior to default to primary node ip only" This reverts commit 8bccf4873bd9fd612aa340c024c47dbf4cb03fa0, except for the nftables unit test changes, since we still want the "new" results (not to mention the bugfixes), just for a different reason now. --- pkg/proxy/healthcheck/healthcheck_test.go | 4 +- pkg/proxy/iptables/proxier.go | 2 +- pkg/proxy/iptables/proxier_test.go | 6 +-- pkg/proxy/ipvs/proxier.go | 2 +- pkg/proxy/ipvs/proxier_test.go | 10 ++-- pkg/proxy/nftables/proxier.go | 4 +- pkg/proxy/nftables/proxier_test.go | 4 +- pkg/proxy/util/nodeport_addresses.go | 28 ++++------- pkg/proxy/util/nodeport_addresses_test.go | 60 ++--------------------- pkg/proxy/util/utils.go | 3 +- pkg/proxy/winkernel/proxier.go | 2 +- 11 files changed, 33 insertions(+), 92 deletions(-) diff --git a/pkg/proxy/healthcheck/healthcheck_test.go b/pkg/proxy/healthcheck/healthcheck_test.go index fc82049dcfe..c1c451ef8c2 100644 --- a/pkg/proxy/healthcheck/healthcheck_test.go +++ b/pkg/proxy/healthcheck/healthcheck_test.go @@ -150,7 +150,7 @@ func (fake fakeProxierHealthChecker) IsHealthy() bool { func TestServer(t *testing.T) { listener := newFakeListener() httpFactory := newFakeHTTPServerFactory() - nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{}, nil) + nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{}) proxyChecker := &fakeProxierHealthChecker{true} hcsi := newServiceHealthServer("hostname", nil, listener, httpFactory, nodePortAddresses, proxyChecker) @@ -664,7 +664,7 @@ func TestServerWithSelectiveListeningAddress(t *testing.T) { // limiting addresses to loop back. 
We don't want any cleverness here around getting IP for // machine nor testing ipv6 || ipv4. using loop back guarantees the test will work on any machine - nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"}, nil) + nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"}) hcsi := newServiceHealthServer("hostname", nil, listener, httpFactory, nodePortAddresses, proxyChecker) hcs := hcsi.(*server) diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index d0c427d5e41..2771801b69c 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -232,7 +232,7 @@ func NewProxier(ipFamily v1.IPFamily, nodePortAddressStrings []string, initOnly bool, ) (*Proxier, error) { - nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings, nil) + nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings) if !nodePortAddresses.ContainsIPv4Loopback() { localhostNodePorts = false diff --git a/pkg/proxy/iptables/proxier_test.go b/pkg/proxy/iptables/proxier_test.go index 45f75be1ae4..cb434c1263c 100644 --- a/pkg/proxy/iptables/proxier_test.go +++ b/pkg/proxy/iptables/proxier_test.go @@ -133,7 +133,7 @@ func NewFakeProxier(ipt utiliptables.Interface) *Proxier { natRules: proxyutil.NewLineBuffer(), nodeIP: netutils.ParseIPSloppy(testNodeIP), localhostNodePorts: true, - nodePortAddresses: proxyutil.NewNodePortAddresses(ipfamily, nil, nil), + nodePortAddresses: proxyutil.NewNodePortAddresses(ipfamily, nil), networkInterfacer: networkInterfacer, } p.setInitialized(true) @@ -2342,7 +2342,7 @@ func TestNodePorts(t *testing.T) { fp := NewFakeProxier(ipt) fp.localhostNodePorts = tc.localhostNodePorts if tc.nodePortAddresses != nil { - fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses, nil) + fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses) } 
makeServiceMap(fp, @@ -2490,7 +2490,7 @@ func TestNodePorts(t *testing.T) { func TestHealthCheckNodePort(t *testing.T) { ipt := iptablestest.NewFake() fp := NewFakeProxier(ipt) - fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"}, nil) + fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"}) svcIP := "172.30.0.42" svcPort := 80 diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 5c492eb7b66..daf28fee6be 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -359,7 +359,7 @@ func NewProxier(ipFamily v1.IPFamily, scheduler = defaultScheduler } - nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings, nil) + nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings) serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer) diff --git a/pkg/proxy/ipvs/proxier_test.go b/pkg/proxy/ipvs/proxier_test.go index 55cc280a831..33af6d72b61 100644 --- a/pkg/proxy/ipvs/proxier_test.go +++ b/pkg/proxy/ipvs/proxier_test.go @@ -158,7 +158,7 @@ func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset u filterRules: proxyutil.NewLineBuffer(), netlinkHandle: netlinkHandle, ipsetList: ipsetList, - nodePortAddresses: proxyutil.NewNodePortAddresses(ipFamily, nil, nil), + nodePortAddresses: proxyutil.NewNodePortAddresses(ipFamily, nil), networkInterfacer: proxyutiltest.NewFakeNetwork(), gracefuldeleteManager: NewGracefulTerminationManager(ipvs), ipFamily: ipFamily, @@ -945,7 +945,7 @@ func TestNodePortIPv4(t *testing.T) { ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) fp := NewFakeProxier(ipt, ipvs, ipset, test.nodeIPs, nil, v1.IPv4Protocol) - fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, test.nodePortAddresses, nil) + fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, 
test.nodePortAddresses) makeServiceMap(fp, test.services...) populateEndpointSlices(fp, test.endpoints...) @@ -1287,7 +1287,7 @@ func TestNodePortIPv6(t *testing.T) { ipvs := ipvstest.NewFake() ipset := ipsettest.NewFake(testIPSetVersion) fp := NewFakeProxier(ipt, ipvs, ipset, test.nodeIPs, nil, v1.IPv6Protocol) - fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv6Protocol, test.nodePortAddresses, nil) + fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv6Protocol, test.nodePortAddresses) makeServiceMap(fp, test.services...) populateEndpointSlices(fp, test.endpoints...) @@ -2040,7 +2040,7 @@ func TestOnlyLocalNodePorts(t *testing.T) { addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::"), Mask: net.CIDRMask(64, 128)}} fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf, addrs) fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1) - fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"}, nil) + fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"}) fp.syncProxyRules() @@ -2128,7 +2128,7 @@ func TestHealthCheckNodePort(t *testing.T) { addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::"), Mask: net.CIDRMask(64, 128)}} fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf, addrs) fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1) - fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"}, nil) + fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"}) fp.syncProxyRules() diff --git a/pkg/proxy/nftables/proxier.go b/pkg/proxy/nftables/proxier.go index 3e54e7ee4c9..490ea19d3ba 100644 --- a/pkg/proxy/nftables/proxier.go +++ b/pkg/proxy/nftables/proxier.go @@ -211,8 +211,6 @@ func NewProxier(ipFamily v1.IPFamily, nodePortAddressStrings []string, 
initOnly bool, ) (*Proxier, error) { - nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings, nodeIP) - if initOnly { klog.InfoS("System initialized and --init-only specified") return nil, nil @@ -223,6 +221,8 @@ func NewProxier(ipFamily v1.IPFamily, masqueradeMark := fmt.Sprintf("%#08x", masqueradeValue) klog.V(2).InfoS("Using nftables mark for masquerade", "ipFamily", ipFamily, "mark", masqueradeMark) + nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings) + serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer) var nftablesFamily knftables.Family diff --git a/pkg/proxy/nftables/proxier_test.go b/pkg/proxy/nftables/proxier_test.go index 1b67de4f75c..fcf67d05afc 100644 --- a/pkg/proxy/nftables/proxier_test.go +++ b/pkg/proxy/nftables/proxier_test.go @@ -126,7 +126,7 @@ func NewFakeProxier(ipFamily v1.IPFamily) (*knftables.Fake, *Proxier) { hostname: testHostname, serviceHealthServer: healthcheck.NewFakeServiceHealthServer(), nodeIP: nodeIP, - nodePortAddresses: proxyutil.NewNodePortAddresses(ipFamily, nodePortAddresses, nodeIP), + nodePortAddresses: proxyutil.NewNodePortAddresses(ipFamily, nodePortAddresses), networkInterfacer: networkInterfacer, staleChains: make(map[string]time.Time), serviceCIDRs: serviceCIDRs, @@ -952,7 +952,7 @@ func TestNodePorts(t *testing.T) { nodeIP = testNodeIPv6 } if tc.nodePortAddresses != nil { - fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses, netutils.ParseIPSloppy(nodeIP)) + fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses) } makeServiceMap(fp, diff --git a/pkg/proxy/util/nodeport_addresses.go b/pkg/proxy/util/nodeport_addresses.go index ab11d08917b..c5332a07958 100644 --- a/pkg/proxy/util/nodeport_addresses.go +++ b/pkg/proxy/util/nodeport_addresses.go @@ -20,7 +20,7 @@ import ( "fmt" "net" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" 
netutils "k8s.io/utils/net" ) @@ -37,12 +37,11 @@ type NodePortAddresses struct { var ipv4LoopbackStart = net.IPv4(127, 0, 0, 0) // NewNodePortAddresses takes an IP family and the `--nodeport-addresses` value (which is -// assumed to contain only valid CIDRs, potentially of both IP families) and the primary IP -// (which will be used as node port address when `--nodeport-addresses` is empty). -// It will return a NodePortAddresses object for the given family. If there are no CIDRs of -// the given family then the CIDR "0.0.0.0/0" or "::/0" will be added (even if there are -// CIDRs of the other family). -func NewNodePortAddresses(family v1.IPFamily, cidrStrings []string, primaryIP net.IP) *NodePortAddresses { +// assumed to contain only valid CIDRs, potentially of both IP families) and returns a +// NodePortAddresses object for the given family. If there are no CIDRs of the given +// family then the CIDR "0.0.0.0/0" or "::/0" will be added (even if there are CIDRs of +// the other family). 
+func NewNodePortAddresses(family v1.IPFamily, cidrStrings []string) *NodePortAddresses { npa := &NodePortAddresses{} // Filter CIDRs to correct family @@ -52,24 +51,17 @@ func NewNodePortAddresses(family v1.IPFamily, cidrStrings []string, primaryIP ne } } if len(npa.cidrStrings) == 0 { - if primaryIP == nil { - if family == v1.IPv4Protocol { - npa.cidrStrings = []string{IPv4ZeroCIDR} - } else { - npa.cidrStrings = []string{IPv6ZeroCIDR} - } + if family == v1.IPv4Protocol { + npa.cidrStrings = []string{IPv4ZeroCIDR} } else { - if family == v1.IPv4Protocol { - npa.cidrStrings = []string{fmt.Sprintf("%s/32", primaryIP.String())} - } else { - npa.cidrStrings = []string{fmt.Sprintf("%s/128", primaryIP.String())} - } + npa.cidrStrings = []string{IPv6ZeroCIDR} } } // Now parse for _, str := range npa.cidrStrings { _, cidr, _ := netutils.ParseCIDRSloppy(str) + if netutils.IsIPv4CIDR(cidr) { if cidr.IP.IsLoopback() || cidr.Contains(ipv4LoopbackStart) { npa.containsIPv4Loopback = true diff --git a/pkg/proxy/util/nodeport_addresses_test.go b/pkg/proxy/util/nodeport_addresses_test.go index fad92ef6a54..c66db1b024f 100644 --- a/pkg/proxy/util/nodeport_addresses_test.go +++ b/pkg/proxy/util/nodeport_addresses_test.go @@ -21,7 +21,7 @@ import ( "net" "testing" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" fake "k8s.io/kubernetes/pkg/proxy/util/testing" netutils "k8s.io/utils/net" @@ -60,8 +60,6 @@ func TestGetNodeIPs(t *testing.T) { cidrs []string itfAddrsPairs []InterfaceAddrsPair expected map[v1.IPFamily]expectation - // nodeIP will take effect when `--nodeport-addresses` is empty - nodeIP net.IP }{ { name: "IPv4 single", @@ -371,53 +369,6 @@ func TestGetNodeIPs(t *testing.T) { }, }, }, - { - name: "ipv4 with nodeIP", - itfAddrsPairs: []InterfaceAddrsPair{ - { - itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}, - addrs: []net.Addr{ - &net.IPNet{IP: netutils.ParseIPSloppy("1.2.3.4"), Mask: net.CIDRMask(30, 
32)}, - }, - }, - { - itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}, - addrs: []net.Addr{ - &net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)}, - }, - }, - }, - expected: map[v1.IPFamily]expectation{ - v1.IPv4Protocol: { - ips: sets.New[string]("1.2.3.4"), - }, - }, - nodeIP: netutils.ParseIPSloppy("1.2.3.4"), - }, - { - name: "ipv6 with nodeIP", - itfAddrsPairs: []InterfaceAddrsPair{ - { - itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}, - addrs: []net.Addr{ - &net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::1"), Mask: net.CIDRMask(64, 128)}, - }, - }, - { - itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}, - addrs: []net.Addr{ - &net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)}, - }, - }, - }, - expected: map[v1.IPFamily]expectation{ - v1.IPv6Protocol: { - matchAll: true, - ips: sets.New[string]("2001:db8::1", "::1"), - }, - }, - nodeIP: netutils.ParseIPSloppy("1.2.3.4"), - }, } for _, tc := range testCases { @@ -428,10 +379,7 @@ func TestGetNodeIPs(t *testing.T) { } for _, family := range []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol} { - if tc.nodeIP != nil && v1.IPFamily(fmt.Sprintf("IPv%s", netutils.IPFamilyOf(tc.nodeIP))) != family { - continue - } - npa := NewNodePortAddresses(family, tc.cidrs, tc.nodeIP) + npa := NewNodePortAddresses(family, tc.cidrs) if npa.MatchAll() != tc.expected[family].matchAll { t.Errorf("unexpected MatchAll(%s), expected: %v", family, tc.expected[family].matchAll) @@ -503,12 +451,12 @@ func TestContainsIPv4Loopback(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - npa := NewNodePortAddresses(v1.IPv4Protocol, tt.cidrStrings, nil) + npa := NewNodePortAddresses(v1.IPv4Protocol, tt.cidrStrings) if got := npa.ContainsIPv4Loopback(); got != tt.want { t.Errorf("IPv4 ContainsIPv4Loopback() = %v, want %v", got, tt.want) } // ContainsIPv4Loopback should always be 
false for family=IPv6 - npa = NewNodePortAddresses(v1.IPv6Protocol, tt.cidrStrings, nil) + npa = NewNodePortAddresses(v1.IPv6Protocol, tt.cidrStrings) if got := npa.ContainsIPv4Loopback(); got { t.Errorf("IPv6 ContainsIPv4Loopback() = %v, want %v", got, false) } diff --git a/pkg/proxy/util/utils.go b/pkg/proxy/util/utils.go index da232fef982..260894461d5 100644 --- a/pkg/proxy/util/utils.go +++ b/pkg/proxy/util/utils.go @@ -29,10 +29,11 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/events" utilsysctl "k8s.io/component-helpers/node/util/sysctl" - "k8s.io/klog/v2" helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" netutils "k8s.io/utils/net" + + "k8s.io/klog/v2" ) const ( diff --git a/pkg/proxy/winkernel/proxier.go b/pkg/proxy/winkernel/proxier.go index a09a693f5ff..a1f9bade5f2 100644 --- a/pkg/proxy/winkernel/proxier.go +++ b/pkg/proxy/winkernel/proxier.go @@ -666,7 +666,7 @@ func NewProxier( } // windows listens to all node addresses - nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nil, nil) + nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nil) serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer) var healthzPort int From 3ecd9332767bafc7e57704f5d30e6f2736e21358 Mon Sep 17 00:00:00 2001 From: Dan Winship Date: Fri, 2 Feb 2024 16:05:50 -0500 Subject: [PATCH 5/5] fix/simplify an nftables unit test The nodeport-ips value is part of the baseline, which wouldn't change no matter what Services or EndpointSlices we added/removed. 
--- pkg/proxy/nftables/proxier_test.go | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/pkg/proxy/nftables/proxier_test.go b/pkg/proxy/nftables/proxier_test.go index fcf67d05afc..a4754cb2154 100644 --- a/pkg/proxy/nftables/proxier_test.go +++ b/pkg/proxy/nftables/proxier_test.go @@ -3986,6 +3986,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) { add rule ip kube-proxy services ip daddr @nodeport-ips meta l4proto . th dport vmap @service-nodeports add set ip kube-proxy cluster-ips { type ipv4_addr ; comment "Active ClusterIPs" ; } add set ip kube-proxy nodeport-ips { type ipv4_addr ; comment "IPs that accept NodePort traffic" ; } + add element ip kube-proxy nodeport-ips { 192.168.0.2 } add rule ip kube-proxy service-endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services add map ip kube-proxy firewall-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "destinations that are subject to LoadBalancerSourceRanges" ; } @@ -4058,7 +4059,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) { expected := baseRules + dedent.Dedent(` add element ip kube-proxy cluster-ips { 172.30.0.41 } add element ip kube-proxy cluster-ips { 172.30.0.42 } - add element ip kube-proxy nodeport-ips { 192.168.0.2 } add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 } add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 } @@ -4111,7 +4111,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) { add element ip kube-proxy cluster-ips { 172.30.0.41 } add element ip kube-proxy cluster-ips { 172.30.0.42 } add element ip kube-proxy cluster-ips { 172.30.0.43 } - add element ip kube-proxy nodeport-ips { 192.168.0.2 } add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 } add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 
8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 } add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 } @@ -4145,7 +4144,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) { expected = baseRules + dedent.Dedent(` add element ip kube-proxy cluster-ips { 172.30.0.41 } add element ip kube-proxy cluster-ips { 172.30.0.43 } - add element ip kube-proxy nodeport-ips { 192.168.0.2 } add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 } add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 } @@ -4174,7 +4172,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) { expected = baseRules + dedent.Dedent(` add element ip kube-proxy cluster-ips { 172.30.0.41 } add element ip kube-proxy cluster-ips { 172.30.0.43 } - add element ip kube-proxy nodeport-ips { 192.168.0.2 } add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 } add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 } @@ -4211,7 +4208,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) { add element ip kube-proxy cluster-ips { 172.30.0.41 } add element ip kube-proxy cluster-ips { 172.30.0.43 } add element ip kube-proxy cluster-ips { 172.30.0.44 } - add element ip kube-proxy nodeport-ips { 192.168.0.2 } add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 } add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 } @@ -4251,7 +4247,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) { add element ip kube-proxy cluster-ips { 172.30.0.41 } add element ip kube-proxy cluster-ips { 172.30.0.43 } add element ip kube-proxy cluster-ips { 172.30.0.44 } - add element ip kube-proxy nodeport-ips { 192.168.0.2 } add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 
80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 } add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 } add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 } @@ -4290,7 +4285,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) { add element ip kube-proxy cluster-ips { 172.30.0.41 } add element ip kube-proxy cluster-ips { 172.30.0.43 } add element ip kube-proxy cluster-ips { 172.30.0.44 } - add element ip kube-proxy nodeport-ips { 192.168.0.2 } add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 } add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 } add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 } @@ -4332,7 +4326,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) { add element ip kube-proxy cluster-ips { 172.30.0.41 } add element ip kube-proxy cluster-ips { 172.30.0.43 } add element ip kube-proxy cluster-ips { 172.30.0.44 } - add element ip kube-proxy nodeport-ips { 192.168.0.2 } add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 } add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 } add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 } @@ -4372,7 +4365,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) { add element ip kube-proxy cluster-ips { 172.30.0.41 } add element ip kube-proxy cluster-ips { 172.30.0.43 } add element ip kube-proxy cluster-ips { 172.30.0.44 } - add element ip kube-proxy nodeport-ips { 192.168.0.2 } add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 } add element ip kube-proxy no-endpoint-services { 172.30.0.43 . tcp . 
80 comment "ns3/svc3:p80" : goto reject-chain } add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 } @@ -4408,7 +4400,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) { add element ip kube-proxy cluster-ips { 172.30.0.41 } add element ip kube-proxy cluster-ips { 172.30.0.43 } add element ip kube-proxy cluster-ips { 172.30.0.44 } - add element ip kube-proxy nodeport-ips { 192.168.0.2 } add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 } add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 } add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }