Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-03 17:30:00 +00:00)

commit 8bccf4873b
parent 1ad143177c

    change --nodeport-addresses behavior to default to primary node ip only
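In short: when `--nodeport-addresses` is left empty, kube-proxy previously fell back to the zero CIDR ("0.0.0.0/0" or "::/0") and accepted NodePort traffic on every local IP. With this change, the shared helper can be given the node's primary IP and falls back to that single address instead. Only the nftables backend passes the primary IP; the iptables, IPVS, and Windows backends pass nil and keep the old match-all fallback. A sketch of the signature change, with names as they appear in the diff below:

    // Before
    func NewNodePortAddresses(family v1.IPFamily, cidrStrings []string) *NodePortAddresses

    // After: primaryIP may be nil, which preserves the old zero-CIDR fallback
    func NewNodePortAddresses(family v1.IPFamily, cidrStrings []string, primaryIP net.IP) *NodePortAddresses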
@@ -150,7 +150,7 @@ func (fake fakeProxierHealthChecker) IsHealthy() bool {
 func TestServer(t *testing.T) {
     listener := newFakeListener()
     httpFactory := newFakeHTTPServerFactory()
-    nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{})
+    nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{}, nil)
     proxyChecker := &fakeProxierHealthChecker{true}

     hcsi := newServiceHealthServer("hostname", nil, listener, httpFactory, nodePortAddresses, proxyChecker)
@@ -664,7 +664,7 @@ func TestServerWithSelectiveListeningAddress(t *testing.T) {

     // limiting addresses to loop back. We don't want any cleverness here around getting IP for
     // machine nor testing ipv6 || ipv4. using loop back guarantees the test will work on any machine
-    nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"})
+    nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"}, nil)

     hcsi := newServiceHealthServer("hostname", nil, listener, httpFactory, nodePortAddresses, proxyChecker)
     hcs := hcsi.(*server)
@@ -237,7 +237,7 @@ func NewProxier(ipFamily v1.IPFamily,
     nodePortAddressStrings []string,
     initOnly bool,
 ) (*Proxier, error) {
-    nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings)
+    nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings, nil)

     if !nodePortAddresses.ContainsIPv4Loopback() {
         localhostNodePorts = false
@@ -133,7 +133,7 @@ func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
         natRules:           proxyutil.NewLineBuffer(),
         nodeIP:             netutils.ParseIPSloppy(testNodeIP),
         localhostNodePorts: true,
-        nodePortAddresses:  proxyutil.NewNodePortAddresses(ipfamily, nil),
+        nodePortAddresses:  proxyutil.NewNodePortAddresses(ipfamily, nil, nil),
         networkInterfacer:  networkInterfacer,
     }
     p.setInitialized(true)
@@ -2342,7 +2342,7 @@ func TestNodePorts(t *testing.T) {
             fp := NewFakeProxier(ipt)
             fp.localhostNodePorts = tc.localhostNodePorts
             if tc.nodePortAddresses != nil {
-                fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses)
+                fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses, nil)
             }

             makeServiceMap(fp,
@@ -2490,7 +2490,7 @@ func TestNodePorts(t *testing.T) {
 func TestHealthCheckNodePort(t *testing.T) {
     ipt := iptablestest.NewFake()
     fp := NewFakeProxier(ipt)
-    fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"})
+    fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"}, nil)

     svcIP := "172.30.0.42"
     svcPort := 80
@@ -413,7 +413,7 @@ func NewProxier(ipFamily v1.IPFamily,
         scheduler = defaultScheduler
     }

-    nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings)
+    nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings, nil)

     serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer)

@@ -158,7 +158,7 @@ func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset u
         filterRules:           proxyutil.NewLineBuffer(),
         netlinkHandle:         netlinkHandle,
         ipsetList:             ipsetList,
-        nodePortAddresses:     proxyutil.NewNodePortAddresses(ipFamily, nil),
+        nodePortAddresses:     proxyutil.NewNodePortAddresses(ipFamily, nil, nil),
         networkInterfacer:     proxyutiltest.NewFakeNetwork(),
         gracefuldeleteManager: NewGracefulTerminationManager(ipvs),
         ipFamily:              ipFamily,
@@ -945,7 +945,7 @@ func TestNodePortIPv4(t *testing.T) {
             ipvs := ipvstest.NewFake()
             ipset := ipsettest.NewFake(testIPSetVersion)
             fp := NewFakeProxier(ipt, ipvs, ipset, test.nodeIPs, nil, v1.IPv4Protocol)
-            fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, test.nodePortAddresses)
+            fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, test.nodePortAddresses, nil)

             makeServiceMap(fp, test.services...)
             populateEndpointSlices(fp, test.endpoints...)
@@ -1287,7 +1287,7 @@ func TestNodePortIPv6(t *testing.T) {
             ipvs := ipvstest.NewFake()
             ipset := ipsettest.NewFake(testIPSetVersion)
             fp := NewFakeProxier(ipt, ipvs, ipset, test.nodeIPs, nil, v1.IPv6Protocol)
-            fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv6Protocol, test.nodePortAddresses)
+            fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv6Protocol, test.nodePortAddresses, nil)

             makeServiceMap(fp, test.services...)
             populateEndpointSlices(fp, test.endpoints...)
@@ -2040,7 +2040,7 @@ func TestOnlyLocalNodePorts(t *testing.T) {
     addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::"), Mask: net.CIDRMask(64, 128)}}
     fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
     fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
-    fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"})
+    fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"}, nil)

     fp.syncProxyRules()

@@ -2128,7 +2128,7 @@ func TestHealthCheckNodePort(t *testing.T) {
     addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::"), Mask: net.CIDRMask(64, 128)}}
     fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
     fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
-    fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"})
+    fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"}, nil)

     fp.syncProxyRules()

@@ -223,7 +223,7 @@ func NewProxier(ipFamily v1.IPFamily,
     nodePortAddressStrings []string,
     initOnly bool,
 ) (*Proxier, error) {
-    nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings)
+    nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings, nodeIP)

     if initOnly {
         klog.InfoS("System initialized and --init-only specified")
@@ -106,6 +106,12 @@ func NewFakeProxier(ipFamily v1.IPFamily) (*knftables.Fake, *Proxier) {

     nft := knftables.NewFake(nftablesFamily, kubeProxyTable)

+    var nodeIP net.IP
+    if ipFamily == v1.IPv4Protocol {
+        nodeIP = netutils.ParseIPSloppy(testNodeIP)
+    } else {
+        nodeIP = netutils.ParseIPSloppy(testNodeIPv6)
+    }
     p := &Proxier{
         ipFamily:   ipFamily,
         svcPortMap: make(proxy.ServicePortMap),
@@ -118,8 +124,8 @@ func NewFakeProxier(ipFamily v1.IPFamily) (*knftables.Fake, *Proxier) {
         localDetector:       detectLocal,
         hostname:            testHostname,
         serviceHealthServer: healthcheck.NewFakeServiceHealthServer(),
-        nodeIP:              netutils.ParseIPSloppy(testNodeIP),
-        nodePortAddresses:   proxyutil.NewNodePortAddresses(ipFamily, nil),
+        nodeIP:              nodeIP,
+        nodePortAddresses:   proxyutil.NewNodePortAddresses(ipFamily, nil, nodeIP),
         networkInterfacer:   networkInterfacer,
         staleChains:         make(map[string]time.Time),
         serviceCIDRs:        serviceCIDRs,
@@ -304,6 +310,8 @@ func TestOverallNFTablesRules(t *testing.T) {
         add rule ip kube-proxy masquerading mark set mark xor 0x4000
         add rule ip kube-proxy masquerading masquerade fully-random
         add chain ip kube-proxy services
+        add chain ip kube-proxy service-endpoints-check
+        add rule ip kube-proxy service-endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services
         add chain ip kube-proxy filter-prerouting { type filter hook prerouting priority -110 ; }
         add rule ip kube-proxy filter-prerouting ct state new jump firewall-check
         add chain ip kube-proxy filter-forward { type filter hook forward priority -110 ; }
@@ -323,12 +331,15 @@ func TestOverallNFTablesRules(t *testing.T) {
         add rule ip kube-proxy nat-postrouting jump masquerading
         add chain ip kube-proxy nat-prerouting { type nat hook prerouting priority -100 ; }
         add rule ip kube-proxy nat-prerouting jump services
+        add chain ip kube-proxy nodeport-endpoints-check
+        add rule ip kube-proxy nodeport-endpoints-check ip daddr @nodeport-ips meta l4proto . th dport vmap @no-endpoint-nodeports

         add set ip kube-proxy cluster-ips { type ipv4_addr ; comment "Active ClusterIPs" ; }
         add chain ip kube-proxy cluster-ips-check
         add rule ip kube-proxy cluster-ips-check ip daddr @cluster-ips reject comment "Reject traffic to invalid ports of ClusterIPs"
         add rule ip kube-proxy cluster-ips-check ip daddr { 172.30.0.0/16 } drop comment "Drop traffic to unallocated ClusterIPs"

+        add set ip kube-proxy nodeport-ips { type ipv4_addr ; comment "IPs that accept NodePort traffic" ; }
         add map ip kube-proxy firewall-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "destinations that are subject to LoadBalancerSourceRanges" ; }
         add chain ip kube-proxy firewall-check
         add rule ip kube-proxy firewall-check ip daddr . meta l4proto . th dport vmap @firewall-ips

@@ -339,15 +350,11 @@ func TestOverallNFTablesRules(t *testing.T) {
         add map ip kube-proxy no-endpoint-services { type ipv4_addr . inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to services with no endpoints" ; }
         add map ip kube-proxy no-endpoint-nodeports { type inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to service nodeports with no endpoints" ; }

-        add chain ip kube-proxy nodeport-endpoints-check
-        add rule ip kube-proxy nodeport-endpoints-check ip daddr != 127.0.0.0/8 meta l4proto . th dport vmap @no-endpoint-nodeports
-        add chain ip kube-proxy service-endpoints-check
-        add rule ip kube-proxy service-endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services
-
         add map ip kube-proxy service-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "ClusterIP, ExternalIP and LoadBalancer IP traffic" ; }
         add map ip kube-proxy service-nodeports { type inet_proto . inet_service : verdict ; comment "NodePort traffic" ; }
         add rule ip kube-proxy services ip daddr . meta l4proto . th dport vmap @service-ips
-        add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 meta l4proto . th dport vmap @service-nodeports
+        add rule ip kube-proxy services ip daddr @nodeport-ips meta l4proto . th dport vmap @service-nodeports
+        add element ip kube-proxy nodeport-ips { 192.168.0.2 }

         # svc1
         add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
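The nftables mechanics, in brief: the NodePort match no longer relies on an address-type lookup ("fib daddr type local", minus loopback) but on an explicit nodeport-ips set that the proxier populates. A condensed before/after sketch, reusing the rule text from the expectations above (192.168.0.2 is the test node IP):

    # before: every local, non-loopback address accepted NodePort traffic
    add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 meta l4proto . th dport vmap @service-nodeports

    # after: only members of the nodeport-ips set do
    add set ip kube-proxy nodeport-ips { type ipv4_addr ; comment "IPs that accept NodePort traffic" ; }
    add element ip kube-proxy nodeport-ips { 192.168.0.2 }
    add rule ip kube-proxy services ip daddr @nodeport-ips meta l4proto . th dport vmap @service-nodeports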
@@ -880,7 +887,7 @@ func TestLoadBalancer(t *testing.T) {
 }

 // TestNodePorts tests NodePort services under various combinations of the
-// --nodeport-addresses and --localhost-nodeports flags.
+// --nodeport-addresses flags.
 func TestNodePorts(t *testing.T) {
     testCases := []struct {
         name string
@@ -891,10 +898,6 @@ func TestNodePorts(t *testing.T) {
         // allowAltNodeIP is true if we expect NodePort traffic on the alternate
         // node IP to be accepted
         allowAltNodeIP bool
-
-        // expectFirewall is true if we expect firewall to be filled in with
-        // an anti-martian-packet rule
-        expectFirewall bool
     }{
         {
             name: "ipv4",
@@ -902,8 +905,7 @@ func TestNodePorts(t *testing.T) {
             family:            v1.IPv4Protocol,
             nodePortAddresses: nil,

-            allowAltNodeIP: true,
-            expectFirewall: true,
+            allowAltNodeIP: false,
         },
         {
             name: "ipv4, multiple nodeport-addresses",
@@ -912,7 +914,6 @@ func TestNodePorts(t *testing.T) {
             nodePortAddresses: []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64"},

             allowAltNodeIP: true,
-            expectFirewall: false,
         },
         {
             name: "ipv6",
@@ -920,17 +921,15 @@ func TestNodePorts(t *testing.T) {
             family:            v1.IPv6Protocol,
             nodePortAddresses: nil,

-            allowAltNodeIP: true,
-            expectFirewall: false,
+            allowAltNodeIP: false,
         },
         {
             name: "ipv6, multiple nodeport-addresses",

             family:            v1.IPv6Protocol,
-            nodePortAddresses: []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64"},
+            nodePortAddresses: []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64", "2001:db8:1::2/128"},

-            allowAltNodeIP: false,
-            expectFirewall: false,
+            allowAltNodeIP: true,
         },
     }

@@ -939,17 +938,20 @@ func TestNodePorts(t *testing.T) {
             nft, fp := NewFakeProxier(tc.family)

             var svcIP, epIP1, epIP2 string
+            var nodeIP string
             if tc.family == v1.IPv4Protocol {
                 svcIP = "172.30.0.41"
                 epIP1 = "10.180.0.1"
                 epIP2 = "10.180.2.1"
+                nodeIP = testNodeIP
             } else {
                 svcIP = "fd00:172:30::41"
                 epIP1 = "fd00:10:180::1"
                 epIP2 = "fd00:10:180::2:1"
+                nodeIP = testNodeIPv6
             }
             if tc.nodePortAddresses != nil {
-                fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses)
+                fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses, netutils.ParseIPSloppy(nodeIP))
             }

             makeServiceMap(fp,
@@ -989,16 +991,14 @@ func TestNodePorts(t *testing.T) {

             fp.syncProxyRules()

-            var podIP, externalClientIP, nodeIP, altNodeIP string
+            var podIP, externalClientIP, altNodeIP string
             if tc.family == v1.IPv4Protocol {
                 podIP = "10.0.0.2"
                 externalClientIP = testExternalClient
-                nodeIP = testNodeIP
                 altNodeIP = testNodeIPAlt
             } else {
                 podIP = "fd00:10::2"
                 externalClientIP = "2600:5200::1"
-                nodeIP = testNodeIPv6
                 altNodeIP = testNodeIPv6Alt
             }
             output := net.JoinHostPort(epIP1, "80") + ", " + net.JoinHostPort(epIP2, "80")
@@ -1031,8 +1031,6 @@ func TestNodePorts(t *testing.T) {
                 },
             })

-            // NodePort on altNodeIP should be allowed, unless
-            // nodePortAddressess excludes altNodeIP
             if tc.allowAltNodeIP {
                 runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
                     {
@@ -3981,13 +3979,13 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
         add rule ip kube-proxy nat-output jump services
         add rule ip kube-proxy nat-postrouting jump masquerading
         add rule ip kube-proxy nat-prerouting jump services
-        add rule ip kube-proxy nodeport-endpoints-check ip daddr != 127.0.0.0/8 meta l4proto . th dport vmap @no-endpoint-nodeports
+        add rule ip kube-proxy nodeport-endpoints-check ip daddr @nodeport-ips meta l4proto . th dport vmap @no-endpoint-nodeports
         add rule ip kube-proxy reject-chain reject
         add rule ip kube-proxy services ip daddr . meta l4proto . th dport vmap @service-ips
-        add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 meta l4proto . th dport vmap @service-nodeports
-        add rule ip kube-proxy service-endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services
-
+        add rule ip kube-proxy services ip daddr @nodeport-ips meta l4proto . th dport vmap @service-nodeports
         add set ip kube-proxy cluster-ips { type ipv4_addr ; comment "Active ClusterIPs" ; }
+        add set ip kube-proxy nodeport-ips { type ipv4_addr ; comment "IPs that accept NodePort traffic" ; }
+        add rule ip kube-proxy service-endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services

         add map ip kube-proxy firewall-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "destinations that are subject to LoadBalancerSourceRanges" ; }
         add map ip kube-proxy no-endpoint-nodeports { type inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to service nodeports with no endpoints" ; }
@@ -4059,6 +4057,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
     expected := baseRules + dedent.Dedent(`
         add element ip kube-proxy cluster-ips { 172.30.0.41 }
         add element ip kube-proxy cluster-ips { 172.30.0.42 }
+        add element ip kube-proxy nodeport-ips { 192.168.0.2 }
         add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
         add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 }

@@ -4111,6 +4110,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
         add element ip kube-proxy cluster-ips { 172.30.0.41 }
         add element ip kube-proxy cluster-ips { 172.30.0.42 }
         add element ip kube-proxy cluster-ips { 172.30.0.43 }
+        add element ip kube-proxy nodeport-ips { 192.168.0.2 }
         add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
         add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 }
         add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
@@ -4144,6 +4144,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
     expected = baseRules + dedent.Dedent(`
         add element ip kube-proxy cluster-ips { 172.30.0.41 }
         add element ip kube-proxy cluster-ips { 172.30.0.43 }
+        add element ip kube-proxy nodeport-ips { 192.168.0.2 }
         add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
         add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }

@@ -4172,6 +4173,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
     expected = baseRules + dedent.Dedent(`
         add element ip kube-proxy cluster-ips { 172.30.0.41 }
         add element ip kube-proxy cluster-ips { 172.30.0.43 }
+        add element ip kube-proxy nodeport-ips { 192.168.0.2 }
         add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
         add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }

@@ -4208,6 +4210,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
         add element ip kube-proxy cluster-ips { 172.30.0.41 }
         add element ip kube-proxy cluster-ips { 172.30.0.43 }
         add element ip kube-proxy cluster-ips { 172.30.0.44 }
+        add element ip kube-proxy nodeport-ips { 192.168.0.2 }
         add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
         add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }

@@ -4247,6 +4250,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
         add element ip kube-proxy cluster-ips { 172.30.0.41 }
         add element ip kube-proxy cluster-ips { 172.30.0.43 }
         add element ip kube-proxy cluster-ips { 172.30.0.44 }
+        add element ip kube-proxy nodeport-ips { 192.168.0.2 }
         add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
         add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
         add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
@@ -4285,6 +4289,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
         add element ip kube-proxy cluster-ips { 172.30.0.41 }
         add element ip kube-proxy cluster-ips { 172.30.0.43 }
         add element ip kube-proxy cluster-ips { 172.30.0.44 }
+        add element ip kube-proxy nodeport-ips { 192.168.0.2 }
         add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
         add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
         add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
@@ -4326,6 +4331,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
         add element ip kube-proxy cluster-ips { 172.30.0.41 }
         add element ip kube-proxy cluster-ips { 172.30.0.43 }
         add element ip kube-proxy cluster-ips { 172.30.0.44 }
+        add element ip kube-proxy nodeport-ips { 192.168.0.2 }
         add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
         add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
         add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
@@ -4365,6 +4371,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
         add element ip kube-proxy cluster-ips { 172.30.0.41 }
         add element ip kube-proxy cluster-ips { 172.30.0.43 }
         add element ip kube-proxy cluster-ips { 172.30.0.44 }
+        add element ip kube-proxy nodeport-ips { 192.168.0.2 }
         add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
         add element ip kube-proxy no-endpoint-services { 172.30.0.43 . tcp . 80 comment "ns3/svc3:p80" : goto reject-chain }
         add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
@@ -4400,6 +4407,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
         add element ip kube-proxy cluster-ips { 172.30.0.41 }
         add element ip kube-proxy cluster-ips { 172.30.0.43 }
         add element ip kube-proxy cluster-ips { 172.30.0.44 }
+        add element ip kube-proxy nodeport-ips { 192.168.0.2 }
         add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
         add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
         add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
@@ -20,7 +20,7 @@ import (
     "fmt"
     "net"

-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     netutils "k8s.io/utils/net"
 )

@@ -37,11 +37,12 @@ type NodePortAddresses struct {
 var ipv4LoopbackStart = net.IPv4(127, 0, 0, 0)

 // NewNodePortAddresses takes an IP family and the `--nodeport-addresses` value (which is
-// assumed to contain only valid CIDRs, potentially of both IP families) and returns a
-// NodePortAddresses object for the given family. If there are no CIDRs of the given
-// family then the CIDR "0.0.0.0/0" or "::/0" will be added (even if there are CIDRs of
-// the other family).
-func NewNodePortAddresses(family v1.IPFamily, cidrStrings []string) *NodePortAddresses {
+// assumed to contain only valid CIDRs, potentially of both IP families) and the primary IP
+// (which will be used as node port address when `--nodeport-addresses` is empty).
+// It will return a NodePortAddresses object for the given family. If there are no CIDRs of
+// the given family then the CIDR "0.0.0.0/0" or "::/0" will be added (even if there are
+// CIDRs of the other family).
+func NewNodePortAddresses(family v1.IPFamily, cidrStrings []string, primaryIP net.IP) *NodePortAddresses {
     npa := &NodePortAddresses{}

     // Filter CIDRs to correct family
@@ -51,17 +52,24 @@ func NewNodePortAddresses(family v1.IPFamily, cidrStrings []string) *NodePortAddresses {
         }
     }
     if len(npa.cidrStrings) == 0 {
-        if family == v1.IPv4Protocol {
-            npa.cidrStrings = []string{IPv4ZeroCIDR}
+        if primaryIP == nil {
+            if family == v1.IPv4Protocol {
+                npa.cidrStrings = []string{IPv4ZeroCIDR}
+            } else {
+                npa.cidrStrings = []string{IPv6ZeroCIDR}
+            }
         } else {
-            npa.cidrStrings = []string{IPv6ZeroCIDR}
+            if family == v1.IPv4Protocol {
+                npa.cidrStrings = []string{fmt.Sprintf("%s/32", primaryIP.String())}
+            } else {
+                npa.cidrStrings = []string{fmt.Sprintf("%s/128", primaryIP.String())}
+            }
         }
     }

     // Now parse
     for _, str := range npa.cidrStrings {
         _, cidr, _ := netutils.ParseCIDRSloppy(str)

         if netutils.IsIPv4CIDR(cidr) {
             if cidr.IP.IsLoopback() || cidr.Contains(ipv4LoopbackStart) {
                 npa.containsIPv4Loopback = true
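To make the new fallback concrete, a hedged sketch of the three cases the function now distinguishes (illustrative values; cidrStrings is the unexported field populated above):

    nodeIP := netutils.ParseIPSloppy("192.168.0.2")

    // Empty --nodeport-addresses plus a primary IP: NodePorts on that IP only.
    npa := NewNodePortAddresses(v1.IPv4Protocol, nil, nodeIP) // cidrStrings: ["192.168.0.2/32"]

    // Empty --nodeport-addresses and no primary IP: the old match-everything fallback.
    npa = NewNodePortAddresses(v1.IPv4Protocol, nil, nil) // cidrStrings: ["0.0.0.0/0"]

    // Explicit CIDRs always win; the primary IP is then ignored.
    npa = NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"}, nodeIP) // cidrStrings: ["100.101.102.0/24"]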
@@ -21,7 +21,7 @@ import (
     "net"
     "testing"

-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/util/sets"
     fake "k8s.io/kubernetes/pkg/proxy/util/testing"
     netutils "k8s.io/utils/net"
@@ -60,6 +60,8 @@ func TestGetNodeIPs(t *testing.T) {
         cidrs         []string
         itfAddrsPairs []InterfaceAddrsPair
         expected      map[v1.IPFamily]expectation
+        // nodeIP will take effect when `--nodeport-addresses` is empty
+        nodeIP net.IP
     }{
         {
             name: "IPv4 single",
@@ -369,6 +371,53 @@ func TestGetNodeIPs(t *testing.T) {
                 },
             },
         },
+        {
+            name: "ipv4 with nodeIP",
+            itfAddrsPairs: []InterfaceAddrsPair{
+                {
+                    itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
+                    addrs: []net.Addr{
+                        &net.IPNet{IP: netutils.ParseIPSloppy("1.2.3.4"), Mask: net.CIDRMask(30, 32)},
+                    },
+                },
+                {
+                    itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
+                    addrs: []net.Addr{
+                        &net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)},
+                    },
+                },
+            },
+            expected: map[v1.IPFamily]expectation{
+                v1.IPv4Protocol: {
+                    ips: sets.New[string]("1.2.3.4"),
+                },
+            },
+            nodeIP: netutils.ParseIPSloppy("1.2.3.4"),
+        },
+        {
+            name: "ipv6 with nodeIP",
+            itfAddrsPairs: []InterfaceAddrsPair{
+                {
+                    itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
+                    addrs: []net.Addr{
+                        &net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::1"), Mask: net.CIDRMask(64, 128)},
+                    },
+                },
+                {
+                    itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
+                    addrs: []net.Addr{
+                        &net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)},
+                    },
+                },
+            },
+            expected: map[v1.IPFamily]expectation{
+                v1.IPv6Protocol: {
+                    matchAll: true,
+                    ips:      sets.New[string]("2001:db8::1", "::1"),
+                },
+            },
+            nodeIP: netutils.ParseIPSloppy("1.2.3.4"),
+        },
     }

     for _, tc := range testCases {
@@ -379,7 +428,10 @@ func TestGetNodeIPs(t *testing.T) {
         }

         for _, family := range []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol} {
-            npa := NewNodePortAddresses(family, tc.cidrs)
+            if tc.nodeIP != nil && v1.IPFamily(fmt.Sprintf("IPv%s", netutils.IPFamilyOf(tc.nodeIP))) != family {
+                continue
+            }
+            npa := NewNodePortAddresses(family, tc.cidrs, tc.nodeIP)

             if npa.MatchAll() != tc.expected[family].matchAll {
                 t.Errorf("unexpected MatchAll(%s), expected: %v", family, tc.expected[family].matchAll)
@@ -451,12 +503,12 @@ func TestContainsIPv4Loopback(t *testing.T) {
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            npa := NewNodePortAddresses(v1.IPv4Protocol, tt.cidrStrings)
+            npa := NewNodePortAddresses(v1.IPv4Protocol, tt.cidrStrings, nil)
             if got := npa.ContainsIPv4Loopback(); got != tt.want {
                 t.Errorf("IPv4 ContainsIPv4Loopback() = %v, want %v", got, tt.want)
             }
             // ContainsIPv4Loopback should always be false for family=IPv6
-            npa = NewNodePortAddresses(v1.IPv6Protocol, tt.cidrStrings)
+            npa = NewNodePortAddresses(v1.IPv6Protocol, tt.cidrStrings, nil)
             if got := npa.ContainsIPv4Loopback(); got {
                 t.Errorf("IPv6 ContainsIPv4Loopback() = %v, want %v", got, false)
             }
@@ -30,11 +30,10 @@ import (
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/client-go/tools/events"
     utilsysctl "k8s.io/component-helpers/node/util/sysctl"
+    "k8s.io/klog/v2"
     helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
     "k8s.io/kubernetes/pkg/features"
     netutils "k8s.io/utils/net"
-
-    "k8s.io/klog/v2"
 )

 const (
|
@ -672,7 +672,7 @@ func NewProxier(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// windows listens to all node addresses
|
// windows listens to all node addresses
|
||||||
nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nil)
|
nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nil, nil)
|
||||||
serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer)
|
serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer)
|
||||||
|
|
||||||
hcnImpl := newHcnImpl()
|
hcnImpl := newHcnImpl()
|
||||||