Merge pull request #123105 from danwinship/nodeport-addresses-primary

Add `--nodeport-addresses primary`, warn on empty `--nodeport-addresses`
Kubernetes Prow Robot 2024-04-18 08:49:21 -07:00 committed by GitHub
commit b2c32fb015
20 changed files with 214 additions and 238 deletions

View File

@@ -197,7 +197,7 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) {
"This parameter is ignored if a config file is specified by --config.")
fs.StringSliceVar(&o.config.NodePortAddresses, "nodeport-addresses", o.config.NodePortAddresses,
"A list of CIDR ranges that contain valid node IPs. If set, connections to NodePort services will only be accepted on node IPs in one of the indicated ranges. If unset, NodePort connections will be accepted on all local IPs. This parameter is ignored if a config file is specified by --config.")
"A list of CIDR ranges that contain valid node IPs, or alternatively, the single string 'primary'. If set to a list of CIDRs, connections to NodePort services will only be accepted on node IPs in one of the indicated ranges. If set to 'primary', NodePort services will only be accepted on the node's primary IP(s) according to the Node object. If unset, NodePort connections will be accepted on all local IPs. This parameter is ignored if a config file is specified by --config.")
fs.Int32Var(o.config.OOMScoreAdj, "oom-score-adj", ptr.Deref(o.config.OOMScoreAdj, int32(qos.KubeProxyOOMScoreAdj)), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]. This parameter is ignored if a config file is specified by --config.")
fs.Int32Var(o.config.Conntrack.MaxPerCore, "conntrack-max-per-core", *o.config.Conntrack.MaxPerCore,
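For example, either form below is accepted on the command line (the CIDR form takes a comma-separated list; mixing 'primary' with CIDRs is rejected by the validation added later in this PR):

kube-proxy --nodeport-addresses primary
kube-proxy --nodeport-addresses 10.0.0.0/8,2001:db8::/64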
@@ -631,6 +631,17 @@ func newProxyServer(logger klog.Logger, config *kubeproxyconfig.KubeProxyConfigu
rawNodeIPs := getNodeIPs(logger, s.Client, s.Hostname)
s.PrimaryIPFamily, s.NodeIPs = detectNodeIPs(logger, rawNodeIPs, config.BindAddress)
if len(config.NodePortAddresses) == 1 && config.NodePortAddresses[0] == kubeproxyconfig.NodePortAddressesPrimary {
var nodePortAddresses []string
if nodeIP := s.NodeIPs[v1.IPv4Protocol]; nodeIP != nil && !nodeIP.IsLoopback() {
nodePortAddresses = append(nodePortAddresses, fmt.Sprintf("%s/32", nodeIP.String()))
}
if nodeIP := s.NodeIPs[v1.IPv6Protocol]; nodeIP != nil && !nodeIP.IsLoopback() {
nodePortAddresses = append(nodePortAddresses, fmt.Sprintf("%s/128", nodeIP.String()))
}
config.NodePortAddresses = nodePortAddresses
}
s.Broadcaster = events.NewBroadcaster(&events.EventSinkImpl{Interface: s.Client.EventsV1()})
s.Recorder = s.Broadcaster.NewRecorder(proxyconfigscheme.Scheme, "kube-proxy")
@@ -650,6 +661,11 @@ func newProxyServer(logger klog.Logger, config *kubeproxyconfig.KubeProxyConfigu
return nil, err
}
err = checkBadConfig(s)
if err != nil {
logger.Error(err, "Kube-proxy configuration may be incomplete or incorrect")
}
ipv4Supported, ipv6Supported, dualStackSupported, err := s.platformCheckSupported()
if err != nil {
return nil, err
@@ -661,7 +677,7 @@ func newProxyServer(logger klog.Logger, config *kubeproxyconfig.KubeProxyConfigu
logger.Info("kube-proxy running in single-stack mode", "ipFamily", s.PrimaryIPFamily)
}
err, fatal := checkIPConfig(s, dualStackSupported)
err, fatal := checkBadIPConfig(s, dualStackSupported)
if err != nil {
if fatal {
return nil, fmt.Errorf("kube-proxy configuration is incorrect: %v", err)
@@ -677,8 +693,42 @@ func newProxyServer(logger klog.Logger, config *kubeproxyconfig.KubeProxyConfigu
return s, nil
}
// checkIPConfig confirms that s has proper configuration for its primary IP family.
func checkIPConfig(s *ProxyServer, dualStackSupported bool) (error, bool) {
// checkBadConfig checks for bad/deprecated configuration
func checkBadConfig(s *ProxyServer) error {
var errors []error
// At this point we haven't seen any actual Services or EndpointSlices, so we
// don't really know if the cluster is expected to be single- or dual-stack. But
// we can at least take note of whether there is any explicitly-dual-stack
// configuration.
anyDualStackConfig := false
clusterCIDRs := strings.Split(s.Config.ClusterCIDR, ",")
for _, config := range [][]string{clusterCIDRs, s.Config.NodePortAddresses, s.Config.IPVS.ExcludeCIDRs, s.podCIDRs} {
if dual, _ := netutils.IsDualStackCIDRStrings(config); dual {
anyDualStackConfig = true
break
}
}
// Warn if NodePortAddresses does not limit connections on all IP families that
// seem to be in use.
cidrsByFamily := proxyutil.MapCIDRsByIPFamily(s.Config.NodePortAddresses)
if len(s.Config.NodePortAddresses) == 0 {
errors = append(errors, fmt.Errorf("nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`"))
} else if anyDualStackConfig && len(cidrsByFamily[s.PrimaryIPFamily]) == len(s.Config.NodePortAddresses) {
errors = append(errors, fmt.Errorf("cluster appears to be dual-stack but nodePortAddresses contains only %s addresses; NodePort connections will be accepted on all local %s IPs", s.PrimaryIPFamily, proxyutil.OtherIPFamily(s.PrimaryIPFamily)))
} else if len(cidrsByFamily[s.PrimaryIPFamily]) == 0 {
errors = append(errors, fmt.Errorf("cluster appears to be %s-primary but nodePortAddresses contains only %s addresses; NodePort connections will be accepted on all local %s IPs", s.PrimaryIPFamily, proxyutil.OtherIPFamily(s.PrimaryIPFamily), s.PrimaryIPFamily))
}
return utilerrors.NewAggregate(errors)
}
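// Worked example (editor's illustration, not part of this change): with
// ClusterCIDR "10.0.0.0/8,fd09::/64" and NodePortAddresses ["192.168.0.0/24"]
// on an IPv4-primary node, anyDualStackConfig is true and every configured
// CIDR is IPv4, so the middle branch above fires: "cluster appears to be
// dual-stack but nodePortAddresses contains only IPv4 addresses; NodePort
// connections will be accepted on all local IPv6 IPs".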
// checkBadIPConfig checks for bad configuration relative to s.PrimaryIPFamily.
// Historically, we did not check most of the config options, so we cannot retroactively
// make IP family mismatches in those options be fatal. When we add new options to check
// here, we should make problems with those options be fatal.
func checkBadIPConfig(s *ProxyServer, dualStackSupported bool) (err error, fatal bool) {
var errors []error
var badFamily netutils.IPFamily
@@ -695,11 +745,6 @@ func checkIPConfig(s *ProxyServer, dualStackSupported bool) (error, bool) {
clusterType = fmt.Sprintf("%s-only", s.PrimaryIPFamily)
}
// Historically, we did not check most of the config options, so we cannot
// retroactively make IP family mismatches in those options be fatal. When we add
// new options to check here, we should make problems with those options be fatal.
fatal := false
if s.Config.ClusterCIDR != "" {
clusterCIDRs := strings.Split(s.Config.ClusterCIDR, ",")
if badCIDRs(clusterCIDRs, badFamily) {
@@ -711,10 +756,6 @@ func checkIPConfig(s *ProxyServer, dualStackSupported bool) (error, bool) {
}
}
if badCIDRs(s.Config.NodePortAddresses, badFamily) {
errors = append(errors, fmt.Errorf("cluster is %s but nodePortAddresses contains only IPv%s addresses", clusterType, badFamily))
}
if badCIDRs(s.podCIDRs, badFamily) {
errors = append(errors, fmt.Errorf("cluster is %s but node.spec.podCIDRs contains only IPv%s addresses", clusterType, badFamily))
if s.Config.DetectLocalMode == kubeproxyconfig.LocalModeNodeCIDR {
@@ -742,6 +783,9 @@ func checkIPConfig(s *ProxyServer, dualStackSupported bool) (error, bool) {
}
}
// Note that s.Config.NodePortAddresses gets checked as part of checkBadConfig()
// so it doesn't need to be checked here.
return utilerrors.NewAggregate(errors), fatal
}
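
As a standalone illustration of the 'primary' expansion in newProxyServer above, here is a minimal sketch; the helper name and the plain string map keys are hypothetical (the real code works on s.NodeIPs, keyed by v1.IPFamily):

package main

import (
	"fmt"
	"net"
)

// primaryToCIDRs mirrors the expansion above: each non-loopback primary node
// IP becomes a single-address CIDR, /32 for IPv4 and /128 for IPv6.
func primaryToCIDRs(nodeIPs map[string]net.IP) []string {
	var cidrs []string
	if ip := nodeIPs["IPv4"]; ip != nil && !ip.IsLoopback() {
		cidrs = append(cidrs, fmt.Sprintf("%s/32", ip))
	}
	if ip := nodeIPs["IPv6"]; ip != nil && !ip.IsLoopback() {
		cidrs = append(cidrs, fmt.Sprintf("%s/128", ip))
	}
	return cidrs
}

func main() {
	nodeIPs := map[string]net.IP{
		"IPv4": net.ParseIP("192.168.0.2"),
		"IPv6": net.ParseIP("fd00::2"),
	}
	fmt.Println(primaryToCIDRs(nodeIPs)) // [192.168.0.2/32 fd00::2/128]
}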

View File

@@ -70,6 +70,10 @@ func (o *Options) platformApplyDefaults(config *proxyconfigapi.KubeProxyConfigur
config.Mode = proxyconfigapi.ProxyModeIPTables
}
if config.Mode == proxyconfigapi.ProxyModeNFTables && len(config.NodePortAddresses) == 0 {
config.NodePortAddresses = []string{proxyconfigapi.NodePortAddressesPrimary}
}
if config.DetectLocalMode == "" {
o.logger.V(4).Info("Defaulting detect-local-mode", "localModeClusterCIDR", string(proxyconfigapi.LocalModeClusterCIDR))
config.DetectLocalMode = proxyconfigapi.LocalModeClusterCIDR
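
In effect, an nftables-mode configuration that leaves nodePortAddresses empty now behaves as if it had been written as follows (a sketch using the types from this PR):

config := proxyconfigapi.KubeProxyConfiguration{
	Mode:              proxyconfigapi.ProxyModeNFTables,
	NodePortAddresses: []string{proxyconfigapi.NodePortAddressesPrimary},
}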

View File

@@ -849,7 +849,81 @@ func Test_detectNodeIPs(t *testing.T) {
}
}
func Test_checkIPConfig(t *testing.T) {
func Test_checkBadConfig(t *testing.T) {
cases := []struct {
name string
proxy *ProxyServer
err bool
}{
{
name: "single-stack NodePortAddresses with single-stack config",
proxy: &ProxyServer{
Config: &kubeproxyconfig.KubeProxyConfiguration{
ClusterCIDR: "10.0.0.0/8",
NodePortAddresses: []string{"192.168.0.0/24"},
},
PrimaryIPFamily: v1.IPv4Protocol,
},
err: false,
},
{
name: "dual-stack NodePortAddresses with dual-stack config",
proxy: &ProxyServer{
Config: &kubeproxyconfig.KubeProxyConfiguration{
ClusterCIDR: "10.0.0.0/8,fd09::/64",
NodePortAddresses: []string{"192.168.0.0/24", "fd03::/64"},
},
PrimaryIPFamily: v1.IPv4Protocol,
},
err: false,
},
{
name: "empty NodePortAddresses",
proxy: &ProxyServer{
Config: &kubeproxyconfig.KubeProxyConfiguration{
NodePortAddresses: []string{},
},
PrimaryIPFamily: v1.IPv4Protocol,
},
err: true,
},
{
name: "single-stack NodePortAddresses with dual-stack config",
proxy: &ProxyServer{
Config: &kubeproxyconfig.KubeProxyConfiguration{
ClusterCIDR: "10.0.0.0/8,fd09::/64",
NodePortAddresses: []string{"192.168.0.0/24"},
},
PrimaryIPFamily: v1.IPv4Protocol,
},
err: true,
},
{
name: "wrong-single-stack NodePortAddresses",
proxy: &ProxyServer{
Config: &kubeproxyconfig.KubeProxyConfiguration{
ClusterCIDR: "fd09::/64",
NodePortAddresses: []string{"192.168.0.0/24"},
},
PrimaryIPFamily: v1.IPv6Protocol,
},
err: true,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
err := checkBadConfig(c.proxy)
if err != nil && !c.err {
t.Errorf("unexpected error: %v", err)
} else if err == nil && c.err {
t.Errorf("unexpected lack of error")
}
})
}
}
func Test_checkBadIPConfig(t *testing.T) {
cases := []struct {
name string
proxy *ProxyServer
@@ -929,53 +1003,6 @@ func Test_checkIPConfig(t *testing.T) {
dsFatal: false,
},
{
name: "ok single-stack nodePortAddresses",
proxy: &ProxyServer{
Config: &kubeproxyconfig.KubeProxyConfiguration{
NodePortAddresses: []string{"10.0.0.0/8", "192.168.0.0/24"},
},
PrimaryIPFamily: v1.IPv4Protocol,
},
ssErr: false,
dsErr: false,
},
{
name: "ok dual-stack nodePortAddresses",
proxy: &ProxyServer{
Config: &kubeproxyconfig.KubeProxyConfiguration{
NodePortAddresses: []string{"10.0.0.0/8", "fd01:2345::/64", "fd01:abcd::/64"},
},
PrimaryIPFamily: v1.IPv4Protocol,
},
ssErr: false,
dsErr: false,
},
{
name: "ok reversed dual-stack nodePortAddresses",
proxy: &ProxyServer{
Config: &kubeproxyconfig.KubeProxyConfiguration{
NodePortAddresses: []string{"fd01:2345::/64", "fd01:abcd::/64", "10.0.0.0/8"},
},
PrimaryIPFamily: v1.IPv4Protocol,
},
ssErr: false,
dsErr: false,
},
{
name: "wrong-family nodePortAddresses",
proxy: &ProxyServer{
Config: &kubeproxyconfig.KubeProxyConfiguration{
NodePortAddresses: []string{"10.0.0.0/8"},
},
PrimaryIPFamily: v1.IPv6Protocol,
},
ssErr: true,
ssFatal: false,
dsErr: true,
dsFatal: false,
},
{
name: "ok single-stack node.spec.podCIDRs",
proxy: &ProxyServer{
@@ -1133,7 +1160,7 @@ func Test_checkIPConfig(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
err, fatal := checkIPConfig(c.proxy, false)
err, fatal := checkBadIPConfig(c.proxy, false)
if err != nil && !c.ssErr {
t.Errorf("unexpected error in single-stack case: %v", err)
} else if err == nil && c.ssErr {
@@ -1142,7 +1169,7 @@ func Test_checkIPConfig(t *testing.T) {
t.Errorf("expected fatal=%v, got %v", c.ssFatal, fatal)
}
err, fatal = checkIPConfig(c.proxy, true)
err, fatal = checkBadIPConfig(c.proxy, true)
if err != nil && !c.dsErr {
t.Errorf("unexpected error in dual-stack case: %v", err)
} else if err == nil && c.dsErr {

View File

@@ -59048,7 +59048,7 @@ func schema_k8sio_kube_proxy_config_v1alpha1_KubeProxyConfiguration(ref common.R
},
"nodePortAddresses": {
SchemaProps: spec.SchemaProps{
Description: "nodePortAddresses is a list of CIDR ranges that contain valid node IPs. If set, connections to NodePort services will only be accepted on node IPs in one of the indicated ranges. If unset, NodePort connections will be accepted on all local IPs.",
Description: "nodePortAddresses is a list of CIDR ranges that contain valid node IPs, or alternatively, the single string 'primary'. If set to a list of CIDRs, connections to NodePort services will only be accepted on node IPs in one of the indicated ranges. If set to 'primary', NodePort services will only be accepted on the node's primary IPv4 and/or IPv6 address according to the Node object. If unset, NodePort connections will be accepted on all local IPs.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{

View File

@@ -17,10 +17,6 @@ limitations under the License.
package config
import (
"fmt"
"sort"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
componentbaseconfig "k8s.io/component-base/config"
logsapi "k8s.io/component-base/logs/api/v1"
@@ -228,10 +224,12 @@ type KubeProxyConfiguration struct {
// used.)
ClusterCIDR string
// nodePortAddresses is a list of CIDR ranges that contain valid node IPs. If set,
// nodePortAddresses is a list of CIDR ranges that contain valid node IPs, or
// alternatively, the single string 'primary'. If set to a list of CIDRs,
// connections to NodePort services will only be accepted on node IPs in one of
// the indicated ranges. If unset, NodePort connections will be accepted on all
// local IPs.
// the indicated ranges. If set to 'primary', connections to NodePort services
// will only be accepted on the node's primary IPv4 and/or IPv6 address according
// to the Node object. If unset, NodePort connections will be accepted on all
// local IPs.
NodePortAddresses []string
// oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within
@@ -265,17 +263,6 @@ const (
ProxyModeKernelspace ProxyMode = "kernelspace"
)
// LocalMode represents modes to detect local traffic from the node
type LocalMode string
// Currently supported modes for LocalMode
const (
LocalModeClusterCIDR LocalMode = "ClusterCIDR"
LocalModeNodeCIDR LocalMode = "NodeCIDR"
LocalModeBridgeInterface LocalMode = "BridgeInterface"
LocalModeInterfaceNamePrefix LocalMode = "InterfaceNamePrefix"
)
func (m *ProxyMode) Set(s string) error {
*m = ProxyMode(s)
return nil
@@ -292,6 +279,17 @@ func (m *ProxyMode) Type() string {
return "ProxyMode"
}
// LocalMode represents modes to detect local traffic from the node
type LocalMode string
// Currently supported modes for LocalMode
const (
LocalModeClusterCIDR LocalMode = "ClusterCIDR"
LocalModeNodeCIDR LocalMode = "NodeCIDR"
LocalModeBridgeInterface LocalMode = "BridgeInterface"
LocalModeInterfaceNamePrefix LocalMode = "InterfaceNamePrefix"
)
func (m *LocalMode) Set(s string) error {
*m = LocalMode(s)
return nil
@@ -308,32 +306,6 @@ func (m *LocalMode) Type() string {
return "LocalMode"
}
type ConfigurationMap map[string]string
func (m *ConfigurationMap) String() string {
pairs := []string{}
for k, v := range *m {
pairs = append(pairs, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(pairs)
return strings.Join(pairs, ",")
}
func (m *ConfigurationMap) Set(value string) error {
for _, s := range strings.Split(value, ",") {
if len(s) == 0 {
continue
}
arr := strings.SplitN(s, "=", 2)
if len(arr) == 2 {
(*m)[strings.TrimSpace(arr[0])] = strings.TrimSpace(arr[1])
} else {
(*m)[strings.TrimSpace(arr[0])] = ""
}
}
return nil
}
func (*ConfigurationMap) Type() string {
return "mapStringString"
}
// NodePortAddressesPrimary is a special value for NodePortAddresses indicating that it
// should only use the primary node IPs.
const NodePortAddressesPrimary string = "primary"

View File

@@ -297,6 +297,13 @@ func validateKubeProxyNodePortAddress(nodePortAddresses []string, fldPath *field
allErrs := field.ErrorList{}
for i := range nodePortAddresses {
if nodePortAddresses[i] == kubeproxyconfig.NodePortAddressesPrimary {
if i != 0 || len(nodePortAddresses) != 1 {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i), nodePortAddresses[i], "can't use both 'primary' and CIDRs"))
}
break
}
if _, _, err := netutils.ParseCIDRSloppy(nodePortAddresses[i]); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i), nodePortAddresses[i], "must be a valid CIDR"))
}
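// Representative inputs under the new rule (editor's sketch; the exact test
// cases follow in the next file):
//   accepted: []string{"primary"}
//   accepted: []string{"10.0.0.0/8", "2001:db8::/32"}
//   rejected: []string{"primary", "127.0.0.1/32"} // can't use both 'primary' and CIDRs
//   rejected: []string{"10.0.0.0"}                // must be a valid CIDR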

View File

@@ -975,6 +975,7 @@ func TestValidateKubeProxyNodePortAddress(t *testing.T) {
{[]string{"10.20.0.0/16", "100.200.0.0/16"}},
{[]string{"10.0.0.0/8"}},
{[]string{"2001:db8::/32"}},
{[]string{kubeproxyconfig.NodePortAddressesPrimary}},
}
for _, successCase := range successCases {
@@ -1012,6 +1013,14 @@ func TestValidateKubeProxyNodePortAddress(t *testing.T) {
addresses: []string{"::1/128", "2001:db8::/32", "2001:db8:xyz/64"},
expectedErrs: field.ErrorList{field.Invalid(newPath.Child("NodePortAddresses[2]"), "2001:db8:xyz/64", "must be a valid CIDR")},
},
"invalid primary/CIDR mix 1": {
addresses: []string{"primary", "127.0.0.1/32"},
expectedErrs: field.ErrorList{field.Invalid(newPath.Child("NodePortAddresses[0]"), "primary", "can't use both 'primary' and CIDRs")},
},
"invalid primary/CIDR mix 2": {
addresses: []string{"127.0.0.1/32", "primary"},
expectedErrs: field.ErrorList{field.Invalid(newPath.Child("NodePortAddresses[1]"), "primary", "can't use both 'primary' and CIDRs")},
},
}
for _, testCase := range testCases {

View File

@@ -26,28 +26,6 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ConfigurationMap) DeepCopyInto(out *ConfigurationMap) {
{
in := &in
*out = make(ConfigurationMap, len(*in))
for key, val := range *in {
(*out)[key] = val
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationMap.
func (in ConfigurationMap) DeepCopy() ConfigurationMap {
if in == nil {
return nil
}
out := new(ConfigurationMap)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DetectLocalConfiguration) DeepCopyInto(out *DetectLocalConfiguration) {
*out = *in

View File

@@ -150,7 +150,7 @@ func (fake fakeProxierHealthChecker) IsHealthy() bool {
func TestServer(t *testing.T) {
listener := newFakeListener()
httpFactory := newFakeHTTPServerFactory()
nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{}, nil)
nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{})
proxyChecker := &fakeProxierHealthChecker{true}
hcsi := newServiceHealthServer("hostname", nil, listener, httpFactory, nodePortAddresses, proxyChecker)
@@ -664,7 +664,7 @@ func TestServerWithSelectiveListeningAddress(t *testing.T) {
limiting addresses to loopback. We don't want any cleverness here around getting the IP for the
machine nor testing ipv6 || ipv4; using loopback guarantees the test will work on any machine
nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"}, nil)
nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"})
hcsi := newServiceHealthServer("hostname", nil, listener, httpFactory, nodePortAddresses, proxyChecker)
hcs := hcsi.(*server)

View File

@@ -232,7 +232,7 @@ func NewProxier(ipFamily v1.IPFamily,
nodePortAddressStrings []string,
initOnly bool,
) (*Proxier, error) {
nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings, nil)
nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings)
if !nodePortAddresses.ContainsIPv4Loopback() {
localhostNodePorts = false

View File

@@ -133,7 +133,7 @@ func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
natRules: proxyutil.NewLineBuffer(),
nodeIP: netutils.ParseIPSloppy(testNodeIP),
localhostNodePorts: true,
nodePortAddresses: proxyutil.NewNodePortAddresses(ipfamily, nil, nil),
nodePortAddresses: proxyutil.NewNodePortAddresses(ipfamily, nil),
networkInterfacer: networkInterfacer,
}
p.setInitialized(true)
@@ -2342,7 +2342,7 @@ func TestNodePorts(t *testing.T) {
fp := NewFakeProxier(ipt)
fp.localhostNodePorts = tc.localhostNodePorts
if tc.nodePortAddresses != nil {
fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses, nil)
fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses)
}
makeServiceMap(fp,
@@ -2490,7 +2490,7 @@ func TestNodePorts(t *testing.T) {
func TestHealthCheckNodePort(t *testing.T) {
ipt := iptablestest.NewFake()
fp := NewFakeProxier(ipt)
fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"}, nil)
fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"})
svcIP := "172.30.0.42"
svcPort := 80

View File

@@ -359,7 +359,7 @@ func NewProxier(ipFamily v1.IPFamily,
scheduler = defaultScheduler
}
nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings, nil)
nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings)
serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer)

View File

@@ -158,7 +158,7 @@ func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset u
filterRules: proxyutil.NewLineBuffer(),
netlinkHandle: netlinkHandle,
ipsetList: ipsetList,
nodePortAddresses: proxyutil.NewNodePortAddresses(ipFamily, nil, nil),
nodePortAddresses: proxyutil.NewNodePortAddresses(ipFamily, nil),
networkInterfacer: proxyutiltest.NewFakeNetwork(),
gracefuldeleteManager: NewGracefulTerminationManager(ipvs),
ipFamily: ipFamily,
@@ -945,7 +945,7 @@ func TestNodePortIPv4(t *testing.T) {
ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, test.nodeIPs, nil, v1.IPv4Protocol)
fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, test.nodePortAddresses, nil)
fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, test.nodePortAddresses)
makeServiceMap(fp, test.services...)
populateEndpointSlices(fp, test.endpoints...)
@@ -1287,7 +1287,7 @@ func TestNodePortIPv6(t *testing.T) {
ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion)
fp := NewFakeProxier(ipt, ipvs, ipset, test.nodeIPs, nil, v1.IPv6Protocol)
fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv6Protocol, test.nodePortAddresses, nil)
fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv6Protocol, test.nodePortAddresses)
makeServiceMap(fp, test.services...)
populateEndpointSlices(fp, test.endpoints...)
@@ -2040,7 +2040,7 @@ func TestOnlyLocalNodePorts(t *testing.T) {
addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::"), Mask: net.CIDRMask(64, 128)}}
fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"}, nil)
fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"})
fp.syncProxyRules()
@@ -2128,7 +2128,7 @@ func TestHealthCheckNodePort(t *testing.T) {
addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::"), Mask: net.CIDRMask(64, 128)}}
fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"}, nil)
fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"})
fp.syncProxyRules()

View File

@@ -211,8 +211,6 @@ func NewProxier(ipFamily v1.IPFamily,
nodePortAddressStrings []string,
initOnly bool,
) (*Proxier, error) {
nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings, nodeIP)
if initOnly {
klog.InfoS("System initialized and --init-only specified")
return nil, nil
@@ -223,6 +221,8 @@ func NewProxier(ipFamily v1.IPFamily,
masqueradeMark := fmt.Sprintf("%#08x", masqueradeValue)
klog.V(2).InfoS("Using nftables mark for masquerade", "ipFamily", ipFamily, "mark", masqueradeMark)
nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings)
serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer)
var nftablesFamily knftables.Family

View File

@@ -86,6 +86,7 @@ func NewFakeProxier(ipFamily v1.IPFamily) (*knftables.Fake, *Proxier) {
serviceCIDRs = "fd00:10:96::/112"
}
detectLocal, _ := proxyutiliptables.NewDetectLocalByCIDR(podCIDR)
nodePortAddresses := []string{fmt.Sprintf("%s/32", testNodeIP), fmt.Sprintf("%s/128", testNodeIPv6)}
networkInterfacer := proxyutiltest.NewFakeNetwork()
itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
@@ -125,7 +126,7 @@ func NewFakeProxier(ipFamily v1.IPFamily) (*knftables.Fake, *Proxier) {
hostname: testHostname,
serviceHealthServer: healthcheck.NewFakeServiceHealthServer(),
nodeIP: nodeIP,
nodePortAddresses: proxyutil.NewNodePortAddresses(ipFamily, nil, nodeIP),
nodePortAddresses: proxyutil.NewNodePortAddresses(ipFamily, nodePortAddresses),
networkInterfacer: networkInterfacer,
staleChains: make(map[string]time.Time),
serviceCIDRs: serviceCIDRs,
@@ -951,7 +952,7 @@ func TestNodePorts(t *testing.T) {
nodeIP = testNodeIPv6
}
if tc.nodePortAddresses != nil {
fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses, netutils.ParseIPSloppy(nodeIP))
fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses)
}
makeServiceMap(fp,
@@ -3985,6 +3986,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
add rule ip kube-proxy services ip daddr @nodeport-ips meta l4proto . th dport vmap @service-nodeports
add set ip kube-proxy cluster-ips { type ipv4_addr ; comment "Active ClusterIPs" ; }
add set ip kube-proxy nodeport-ips { type ipv4_addr ; comment "IPs that accept NodePort traffic" ; }
add element ip kube-proxy nodeport-ips { 192.168.0.2 }
add rule ip kube-proxy service-endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services
add map ip kube-proxy firewall-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "destinations that are subject to LoadBalancerSourceRanges" ; }
@@ -4057,7 +4059,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
expected := baseRules + dedent.Dedent(`
add element ip kube-proxy cluster-ips { 172.30.0.41 }
add element ip kube-proxy cluster-ips { 172.30.0.42 }
add element ip kube-proxy nodeport-ips { 192.168.0.2 }
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 }
@@ -4110,7 +4111,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
add element ip kube-proxy cluster-ips { 172.30.0.41 }
add element ip kube-proxy cluster-ips { 172.30.0.42 }
add element ip kube-proxy cluster-ips { 172.30.0.43 }
add element ip kube-proxy nodeport-ips { 192.168.0.2 }
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 }
add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
@@ -4144,7 +4144,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
expected = baseRules + dedent.Dedent(`
add element ip kube-proxy cluster-ips { 172.30.0.41 }
add element ip kube-proxy cluster-ips { 172.30.0.43 }
add element ip kube-proxy nodeport-ips { 192.168.0.2 }
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
@@ -4173,7 +4172,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
expected = baseRules + dedent.Dedent(`
add element ip kube-proxy cluster-ips { 172.30.0.41 }
add element ip kube-proxy cluster-ips { 172.30.0.43 }
add element ip kube-proxy nodeport-ips { 192.168.0.2 }
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
@@ -4210,7 +4208,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
add element ip kube-proxy cluster-ips { 172.30.0.41 }
add element ip kube-proxy cluster-ips { 172.30.0.43 }
add element ip kube-proxy cluster-ips { 172.30.0.44 }
add element ip kube-proxy nodeport-ips { 192.168.0.2 }
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
@@ -4250,7 +4247,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
add element ip kube-proxy cluster-ips { 172.30.0.41 }
add element ip kube-proxy cluster-ips { 172.30.0.43 }
add element ip kube-proxy cluster-ips { 172.30.0.44 }
add element ip kube-proxy nodeport-ips { 192.168.0.2 }
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
@@ -4289,7 +4285,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
add element ip kube-proxy cluster-ips { 172.30.0.41 }
add element ip kube-proxy cluster-ips { 172.30.0.43 }
add element ip kube-proxy cluster-ips { 172.30.0.44 }
add element ip kube-proxy nodeport-ips { 192.168.0.2 }
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
@@ -4331,7 +4326,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
add element ip kube-proxy cluster-ips { 172.30.0.41 }
add element ip kube-proxy cluster-ips { 172.30.0.43 }
add element ip kube-proxy cluster-ips { 172.30.0.44 }
add element ip kube-proxy nodeport-ips { 192.168.0.2 }
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
@@ -4371,7 +4365,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
add element ip kube-proxy cluster-ips { 172.30.0.41 }
add element ip kube-proxy cluster-ips { 172.30.0.43 }
add element ip kube-proxy cluster-ips { 172.30.0.44 }
add element ip kube-proxy nodeport-ips { 192.168.0.2 }
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy no-endpoint-services { 172.30.0.43 . tcp . 80 comment "ns3/svc3:p80" : goto reject-chain }
add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
@@ -4407,7 +4400,6 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
add element ip kube-proxy cluster-ips { 172.30.0.41 }
add element ip kube-proxy cluster-ips { 172.30.0.43 }
add element ip kube-proxy cluster-ips { 172.30.0.44 }
add element ip kube-proxy nodeport-ips { 192.168.0.2 }
add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }

View File

@@ -20,7 +20,7 @@ import (
"fmt"
"net"
v1 "k8s.io/api/core/v1"
"k8s.io/api/core/v1"
netutils "k8s.io/utils/net"
)
@@ -37,12 +37,11 @@ type NodePortAddresses struct {
var ipv4LoopbackStart = net.IPv4(127, 0, 0, 0)
// NewNodePortAddresses takes an IP family and the `--nodeport-addresses` value (which is
// assumed to contain only valid CIDRs, potentially of both IP families) and the primary IP
// (which will be used as node port address when `--nodeport-addresses` is empty).
// It will return a NodePortAddresses object for the given family. If there are no CIDRs of
// the given family then the CIDR "0.0.0.0/0" or "::/0" will be added (even if there are
// CIDRs of the other family).
func NewNodePortAddresses(family v1.IPFamily, cidrStrings []string, primaryIP net.IP) *NodePortAddresses {
// assumed to contain only valid CIDRs, potentially of both IP families) and returns a
// NodePortAddresses object for the given family. If there are no CIDRs of the given
// family then the CIDR "0.0.0.0/0" or "::/0" will be added (even if there are CIDRs of
// the other family).
func NewNodePortAddresses(family v1.IPFamily, cidrStrings []string) *NodePortAddresses {
npa := &NodePortAddresses{}
// Filter CIDRs to correct family
@@ -52,24 +51,17 @@ func NewNodePortAddresses(family v1.IPFamily, cidrStrings []string, primaryIP ne
}
}
if len(npa.cidrStrings) == 0 {
if primaryIP == nil {
if family == v1.IPv4Protocol {
npa.cidrStrings = []string{IPv4ZeroCIDR}
} else {
npa.cidrStrings = []string{IPv6ZeroCIDR}
}
if family == v1.IPv4Protocol {
npa.cidrStrings = []string{IPv4ZeroCIDR}
} else {
if family == v1.IPv4Protocol {
npa.cidrStrings = []string{fmt.Sprintf("%s/32", primaryIP.String())}
} else {
npa.cidrStrings = []string{fmt.Sprintf("%s/128", primaryIP.String())}
}
npa.cidrStrings = []string{IPv6ZeroCIDR}
}
}
// Now parse
for _, str := range npa.cidrStrings {
_, cidr, _ := netutils.ParseCIDRSloppy(str)
if netutils.IsIPv4CIDR(cidr) {
if cidr.IP.IsLoopback() || cidr.Contains(ipv4LoopbackStart) {
npa.containsIPv4Loopback = true
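
A usage sketch of the new two-argument form (the CIDR values are hypothetical):

npa := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"192.168.0.0/24", "fd01::/64"})
// The IPv4 instance keeps only 192.168.0.0/24; the IPv6 CIDR is filtered out.

npa = proxyutil.NewNodePortAddresses(v1.IPv6Protocol, nil)
// With no CIDRs of the requested family, the zero CIDR "::/0" is used (match
// all), which is why the Windows proxier below can simply pass nil.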

View File

@@ -21,7 +21,7 @@ import (
"net"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
fake "k8s.io/kubernetes/pkg/proxy/util/testing"
netutils "k8s.io/utils/net"
@@ -60,8 +60,6 @@ func TestGetNodeIPs(t *testing.T) {
cidrs []string
itfAddrsPairs []InterfaceAddrsPair
expected map[v1.IPFamily]expectation
// nodeIP will take effect when `--nodeport-addresses` is empty
nodeIP net.IP
}{
{
name: "IPv4 single",
@@ -371,53 +369,6 @@ func TestGetNodeIPs(t *testing.T) {
},
},
},
{
name: "ipv4 with nodeIP",
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{
&net.IPNet{IP: netutils.ParseIPSloppy("1.2.3.4"), Mask: net.CIDRMask(30, 32)},
},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{
&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)},
},
},
},
expected: map[v1.IPFamily]expectation{
v1.IPv4Protocol: {
ips: sets.New[string]("1.2.3.4"),
},
},
nodeIP: netutils.ParseIPSloppy("1.2.3.4"),
},
{
name: "ipv6 with nodeIP",
itfAddrsPairs: []InterfaceAddrsPair{
{
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{
&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::1"), Mask: net.CIDRMask(64, 128)},
},
},
{
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{
&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)},
},
},
},
expected: map[v1.IPFamily]expectation{
v1.IPv6Protocol: {
matchAll: true,
ips: sets.New[string]("2001:db8::1", "::1"),
},
},
nodeIP: netutils.ParseIPSloppy("1.2.3.4"),
},
}
for _, tc := range testCases {
@@ -428,10 +379,7 @@ func TestGetNodeIPs(t *testing.T) {
}
for _, family := range []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol} {
if tc.nodeIP != nil && v1.IPFamily(fmt.Sprintf("IPv%s", netutils.IPFamilyOf(tc.nodeIP))) != family {
continue
}
npa := NewNodePortAddresses(family, tc.cidrs, tc.nodeIP)
npa := NewNodePortAddresses(family, tc.cidrs)
if npa.MatchAll() != tc.expected[family].matchAll {
t.Errorf("unexpected MatchAll(%s), expected: %v", family, tc.expected[family].matchAll)
@@ -503,12 +451,12 @@ func TestContainsIPv4Loopback(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
npa := NewNodePortAddresses(v1.IPv4Protocol, tt.cidrStrings, nil)
npa := NewNodePortAddresses(v1.IPv4Protocol, tt.cidrStrings)
if got := npa.ContainsIPv4Loopback(); got != tt.want {
t.Errorf("IPv4 ContainsIPv4Loopback() = %v, want %v", got, tt.want)
}
// ContainsIPv4Loopback should always be false for family=IPv6
npa = NewNodePortAddresses(v1.IPv6Protocol, tt.cidrStrings, nil)
npa = NewNodePortAddresses(v1.IPv6Protocol, tt.cidrStrings)
if got := npa.ContainsIPv4Loopback(); got {
t.Errorf("IPv6 ContainsIPv4Loopback() = %v, want %v", got, false)
}

View File

@@ -29,10 +29,11 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/events"
utilsysctl "k8s.io/component-helpers/node/util/sysctl"
"k8s.io/klog/v2"
helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
netutils "k8s.io/utils/net"
"k8s.io/klog/v2"
)
const (

View File

@@ -666,7 +666,7 @@ func NewProxier(
}
// windows listens to all node addresses
nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nil, nil)
nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nil)
serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer)
var healthzPort int

View File

@@ -224,10 +224,12 @@ type KubeProxyConfiguration struct {
// used.)
ClusterCIDR string `json:"clusterCIDR"`
// nodePortAddresses is a list of CIDR ranges that contain valid node IPs. If set,
// nodePortAddresses is a list of CIDR ranges that contain valid node IPs, or
// alternatively, the single string 'primary'. If set to a list of CIDRs,
// connections to NodePort services will only be accepted on node IPs in one of
// the indicated ranges. If unset, NodePort connections will be accepted on all
// local IPs.
// the indicated ranges. If set to 'primary', connections to NodePort services
// will only be accepted on the node's primary IPv4 and/or IPv6 address according
// to the Node object. If unset, NodePort connections will be accepted on all
// local IPs.
NodePortAddresses []string `json:"nodePortAddresses"`
// oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within