Merge pull request #104368 from aojea/ruleguard

golang 1.17 fails to parse IPs with leading zeros
Authored by Kubernetes Prow Robot on 2021-08-20 07:59:24 -07:00, committed by GitHub
commit b0bc8adbc2
171 changed files with 1237 additions and 991 deletions
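
Context for the hunks below: Go 1.17's net.ParseIP and net.ParseCIDR reject IPv4 components with leading zeros, so this change set swaps them for the lenient ParseIPSloppy / ParseCIDRSloppy helpers from k8s.io/utils/net (imported as netutils). A minimal sketch of the behaviour difference, not part of this commit and using made-up sample addresses:

package main

import (
	"fmt"
	"net"

	netutils "k8s.io/utils/net"
)

func main() {
	// Since Go 1.17, net.ParseIP rejects IPv4 components with leading zeros
	// and returns nil for this input.
	fmt.Println(net.ParseIP("192.168.010.1"))

	// ParseIPSloppy keeps the pre-1.17 lenient behaviour: leading zeros are
	// read as decimal, so this prints 192.168.10.1.
	fmt.Println(netutils.ParseIPSloppy("192.168.010.1"))

	// The same applies to CIDRs: net.ParseCIDR errors on Go 1.17+,
	// while ParseCIDRSloppy still parses the range.
	if _, _, err := net.ParseCIDR("10.096.0.0/12"); err != nil {
		fmt.Println("net.ParseCIDR:", err)
	}
	_, cidr, _ := netutils.ParseCIDRSloppy("10.096.0.0/12")
	fmt.Println("ParseCIDRSloppy:", cidr) // 10.96.0.0/12
}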


@@ -39,6 +39,7 @@ import (
 "github.com/blang/semver"
 "k8s.io/klog/v2"
+netutils "k8s.io/utils/net"
 )
 var (
@@ -307,7 +308,7 @@ func getOrCreateTestCertFiles(certFileName, keyFileName string, spec TestCertSpe
 func parseIPList(ips []string) []net.IP {
 var netIPs []net.IP
 for _, ip := range ips {
-netIPs = append(netIPs, net.ParseIP(ip))
+netIPs = append(netIPs, netutils.ParseIPSloppy(ip))
 }
 return netIPs
 }
@@ -335,7 +336,7 @@ func generateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS
 IsCA: true,
 }
-if ip := net.ParseIP(host); ip != nil {
+if ip := netutils.ParseIPSloppy(host); ip != nil {
 template.IPAddresses = append(template.IPAddresses, ip)
 } else {
 template.DNSNames = append(template.DNSNames, host)


@@ -96,14 +96,14 @@ func startNodeIpamController(ccmConfig *cloudcontrollerconfig.CompletedConfig, n
 // service cidr processing
 if len(strings.TrimSpace(nodeIPAMConfig.ServiceCIDR)) != 0 {
-_, serviceCIDR, err = net.ParseCIDR(nodeIPAMConfig.ServiceCIDR)
+_, serviceCIDR, err = netutils.ParseCIDRSloppy(nodeIPAMConfig.ServiceCIDR)
 if err != nil {
 klog.Warningf("Unsuccessful parsing of service CIDR %v: %v", nodeIPAMConfig.ServiceCIDR, err)
 }
 }
 if len(strings.TrimSpace(nodeIPAMConfig.SecondaryServiceCIDR)) != 0 {
-_, secondaryServiceCIDR, err = net.ParseCIDR(nodeIPAMConfig.SecondaryServiceCIDR)
+_, secondaryServiceCIDR, err = netutils.ParseCIDRSloppy(nodeIPAMConfig.SecondaryServiceCIDR)
 if err != nil {
 klog.Warningf("Unsuccessful parsing of service CIDR %v: %v", nodeIPAMConfig.SecondaryServiceCIDR, err)
 }


@@ -39,6 +39,7 @@ import (
 "k8s.io/kubernetes/pkg/controlplane/reconcilers"
 kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options"
 kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
+netutils "k8s.io/utils/net"
 )
 func TestAddFlags(t *testing.T) {
@@ -124,12 +125,12 @@ func TestAddFlags(t *testing.T) {
 // This is a snapshot of expected options parsed by args.
 expected := &ServerRunOptions{
 ServiceNodePortRange: kubeoptions.DefaultServiceNodePortRange,
-ServiceClusterIPRanges: (&net.IPNet{IP: net.ParseIP("192.168.128.0"), Mask: net.CIDRMask(17, 32)}).String(),
+ServiceClusterIPRanges: (&net.IPNet{IP: netutils.ParseIPSloppy("192.168.128.0"), Mask: net.CIDRMask(17, 32)}).String(),
 MasterCount: 5,
 EndpointReconcilerType: string(reconcilers.LeaseEndpointReconcilerType),
 AllowPrivileged: false,
 GenericServerRunOptions: &apiserveroptions.ServerRunOptions{
-AdvertiseAddress: net.ParseIP("192.168.10.10"),
+AdvertiseAddress: netutils.ParseIPSloppy("192.168.10.10"),
 CorsAllowedOriginList: []string{"10.10.10.100", "10.10.10.200"},
 MaxRequestsInFlight: 400,
 MaxMutatingRequestsInFlight: 200,
@@ -175,7 +176,7 @@ func TestAddFlags(t *testing.T) {
 DefaultWatchCacheSize: 100,
 },
 SecureServing: (&apiserveroptions.SecureServingOptions{
-BindAddress: net.ParseIP("192.168.10.20"),
+BindAddress: netutils.ParseIPSloppy("192.168.10.20"),
 BindPort: 6443,
 ServerCert: apiserveroptions.GeneratableKeyCert{
 CertDirectory: "/var/run/kubernetes",


@@ -23,6 +23,7 @@ import (
 utilfeature "k8s.io/apiserver/pkg/util/feature"
 featuregatetesting "k8s.io/component-base/featuregate/testing"
 "k8s.io/kubernetes/pkg/features"
+netutils "k8s.io/utils/net"
 )
 func makeOptionsWithCIDRs(serviceCIDR string, secondaryServiceCIDR string) *ServerRunOptions {
@@ -33,14 +34,14 @@ func makeOptionsWithCIDRs(serviceCIDR string, secondaryServiceCIDR string) *Serv
 var primaryCIDR, secondaryCIDR net.IPNet
 if len(serviceCIDR) > 0 {
-_, cidr, _ := net.ParseCIDR(serviceCIDR)
+_, cidr, _ := netutils.ParseCIDRSloppy(serviceCIDR)
 if cidr != nil {
 primaryCIDR = *(cidr)
 }
 }
 if len(secondaryServiceCIDR) > 0 {
-_, cidr, _ := net.ParseCIDR(secondaryServiceCIDR)
+_, cidr, _ := netutils.ParseCIDRSloppy(secondaryServiceCIDR)
 if cidr != nil {
 secondaryCIDR = *(cidr)
 }
@@ -151,7 +152,7 @@ func TestClusterServiceIPRange(t *testing.T) {
 }
 func getIPnetFromCIDR(cidr string) *net.IPNet {
-_, ipnet, _ := net.ParseCIDR(cidr)
+_, ipnet, _ := netutils.ParseCIDRSloppy(cidr)
 return ipnet
 }


@@ -61,6 +61,7 @@ import (
 "k8s.io/klog/v2"
 aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver"
 aggregatorscheme "k8s.io/kube-aggregator/pkg/apiserver/scheme"
+netutils "k8s.io/utils/net"
 "k8s.io/kubernetes/cmd/kube-apiserver/app/options"
 "k8s.io/kubernetes/pkg/api/legacyscheme"
@@ -670,7 +671,7 @@ func getServiceIPAndRanges(serviceClusterIPRanges string) (net.IP, net.IPNet, ne
 return apiServerServiceIP, primaryServiceIPRange, net.IPNet{}, nil
 }
-_, primaryServiceClusterCIDR, err := net.ParseCIDR(serviceClusterIPRangeList[0])
+_, primaryServiceClusterCIDR, err := netutils.ParseCIDRSloppy(serviceClusterIPRangeList[0])
 if err != nil {
 return net.IP{}, net.IPNet{}, net.IPNet{}, fmt.Errorf("service-cluster-ip-range[0] is not a valid cidr")
 }
@@ -683,7 +684,7 @@ func getServiceIPAndRanges(serviceClusterIPRanges string) (net.IP, net.IPNet, ne
 // user provided at least two entries
 // note: validation asserts that the list is max of two dual stack entries
 if len(serviceClusterIPRangeList) > 1 {
-_, secondaryServiceClusterCIDR, err := net.ParseCIDR(serviceClusterIPRangeList[1])
+_, secondaryServiceClusterCIDR, err := netutils.ParseCIDRSloppy(serviceClusterIPRangeList[1])
 if err != nil {
 return net.IP{}, net.IPNet{}, net.IPNet{}, fmt.Errorf("service-cluster-ip-range[1] is not an ip net")
 }


@@ -127,14 +127,14 @@ func startNodeIpamController(ctx ControllerContext) (http.Handler, bool, error)
 // service cidr processing
 if len(strings.TrimSpace(ctx.ComponentConfig.NodeIPAMController.ServiceCIDR)) != 0 {
-_, serviceCIDR, err = net.ParseCIDR(ctx.ComponentConfig.NodeIPAMController.ServiceCIDR)
+_, serviceCIDR, err = netutils.ParseCIDRSloppy(ctx.ComponentConfig.NodeIPAMController.ServiceCIDR)
 if err != nil {
 klog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.ComponentConfig.NodeIPAMController.ServiceCIDR, err)
 }
 }
 if len(strings.TrimSpace(ctx.ComponentConfig.NodeIPAMController.SecondaryServiceCIDR)) != 0 {
-_, secondaryServiceCIDR, err = net.ParseCIDR(ctx.ComponentConfig.NodeIPAMController.SecondaryServiceCIDR)
+_, secondaryServiceCIDR, err = netutils.ParseCIDRSloppy(ctx.ComponentConfig.NodeIPAMController.SecondaryServiceCIDR)
 if err != nil {
 klog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.ComponentConfig.NodeIPAMController.SecondaryServiceCIDR, err)
 }


@@ -45,6 +45,7 @@ import (
 kubectrlmgrconfigscheme "k8s.io/kubernetes/pkg/controller/apis/config/scheme"
 "k8s.io/kubernetes/pkg/controller/garbagecollector"
 garbagecollectorconfig "k8s.io/kubernetes/pkg/controller/garbagecollector/config"
+netutils "k8s.io/utils/net"
 // add the kubernetes feature gates
 _ "k8s.io/kubernetes/pkg/features"
@@ -427,7 +428,7 @@ func (s KubeControllerManagerOptions) Config(allControllers []string, disabledBy
 return nil, err
 }
-if err := s.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost", nil, []net.IP{net.ParseIP("127.0.0.1")}); err != nil {
+if err := s.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost", nil, []net.IP{netutils.ParseIPSloppy("127.0.0.1")}); err != nil {
 return nil, fmt.Errorf("error creating self-signed certificates: %v", err)
 }


@@ -17,7 +17,6 @@ limitations under the License.
 package options
 import (
-"net"
 "reflect"
 "sort"
 "testing"
@@ -61,6 +60,7 @@ import (
 attachdetachconfig "k8s.io/kubernetes/pkg/controller/volume/attachdetach/config"
 ephemeralvolumeconfig "k8s.io/kubernetes/pkg/controller/volume/ephemeral/config"
 persistentvolumeconfig "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/config"
+netutils "k8s.io/utils/net"
 )
 var args = []string{
@@ -403,7 +403,7 @@ func TestAddFlags(t *testing.T) {
 },
 SecureServing: (&apiserveroptions.SecureServingOptions{
 BindPort: 10001,
-BindAddress: net.ParseIP("192.168.4.21"),
+BindAddress: netutils.ParseIPSloppy("192.168.4.21"),
 ServerCert: apiserveroptions.GeneratableKeyCert{
 CertDirectory: "/a/b/c",
 PairName: "kube-controller-manager",


@@ -86,7 +86,7 @@ import (
 utilipvs "k8s.io/kubernetes/pkg/util/ipvs"
 "k8s.io/kubernetes/pkg/util/oom"
 "k8s.io/utils/exec"
-utilsnet "k8s.io/utils/net"
+netutils "k8s.io/utils/net"
 utilpointer "k8s.io/utils/pointer"
 )
@@ -836,13 +836,13 @@ func (s *ProxyServer) CleanupAndExit() error {
 // 2. the primary IP from the Node object, if set
 // 3. if no IP is found it defaults to 127.0.0.1 and IPv4
 func detectNodeIP(client clientset.Interface, hostname, bindAddress string) net.IP {
-nodeIP := net.ParseIP(bindAddress)
+nodeIP := netutils.ParseIPSloppy(bindAddress)
 if nodeIP.IsUnspecified() {
 nodeIP = utilnode.GetNodeIP(client, hostname)
 }
 if nodeIP == nil {
 klog.V(0).Infof("can't determine this node's IP, assuming 127.0.0.1; if this is incorrect, please set the --bind-address flag")
-nodeIP = net.ParseIP("127.0.0.1")
+nodeIP = netutils.ParseIPSloppy("127.0.0.1")
 }
 return nodeIP
 }
@@ -853,8 +853,8 @@ func detectNodeIP(client clientset.Interface, hostname, bindAddress string) net.
 func nodeIPTuple(bindAddress string) [2]net.IP {
 nodes := [2]net.IP{net.IPv4zero, net.IPv6zero}
-adr := net.ParseIP(bindAddress)
-if utilsnet.IsIPv6(adr) {
+adr := netutils.ParseIPSloppy(bindAddress)
+if netutils.IsIPv6(adr) {
 nodes[1] = adr
 } else {
 nodes[0] = adr


@@ -24,7 +24,6 @@ import (
 "context"
 "errors"
 "fmt"
-"net"
 goruntime "runtime"
 "strings"
 "time"
@@ -65,7 +64,7 @@ import (
 utilnode "k8s.io/kubernetes/pkg/util/node"
 utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
 "k8s.io/utils/exec"
-utilsnet "k8s.io/utils/net"
+netutils "k8s.io/utils/net"
 "k8s.io/klog/v2"
 )
@@ -177,7 +176,7 @@ func newProxyServer(
 klog.V(2).InfoS("DetectLocalMode", "LocalMode", string(detectLocalMode))
 primaryProtocol := utiliptables.ProtocolIPv4
-if utilsnet.IsIPv6(nodeIP) {
+if netutils.IsIPv6(nodeIP) {
 primaryProtocol = utiliptables.ProtocolIPv6
 }
 iptInterface = utiliptables.New(execer, primaryProtocol)
@@ -350,7 +349,7 @@ func newProxyServer(
 // TODO this has side effects that should only happen when Run() is invoked.
 proxier, err = userspace.NewProxier(
 userspace.NewLoadBalancerRR(),
-net.ParseIP(config.BindAddress),
+netutils.ParseIPSloppy(config.BindAddress),
 iptInterface,
 execer,
 *utilnet.ParsePortRangeOrDie(config.PortRange),
@@ -504,7 +503,7 @@ func getDualStackLocalDetectorTuple(mode proxyconfigapi.LocalMode, config *proxy
 }
 // localDetectors, like ipt, need to be of the order [IPv4, IPv6], but PodCIDRs is setup so that PodCIDRs[0] == PodCIDR.
 // so have to handle the case where PodCIDR can be IPv6 and set that to localDetectors[1]
-if utilsnet.IsIPv6CIDRString(nodeInfo.Spec.PodCIDR) {
+if netutils.IsIPv6CIDRString(nodeInfo.Spec.PodCIDR) {
 localDetectors[1], err = proxyutiliptables.NewDetectLocalByCIDR(nodeInfo.Spec.PodCIDR, ipt[1])
 if err != nil {
 return localDetectors, err
@@ -538,7 +537,7 @@ func cidrTuple(cidrList string) [2]string {
 foundIPv6 := false
 for _, cidr := range strings.Split(cidrList, ",") {
-if utilsnet.IsIPv6CIDRString(cidr) && !foundIPv6 {
+if netutils.IsIPv6CIDRString(cidr) && !foundIPv6 {
 cidrs[1] = cidr
 foundIPv6 = true
 } else if !foundIPv4 {


@@ -26,6 +26,7 @@ import (
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+netutils "k8s.io/utils/net"
 clientsetfake "k8s.io/client-go/kubernetes/fake"
@@ -232,21 +233,21 @@ func Test_detectNodeIP(t *testing.T) {
 nodeInfo: makeNodeWithAddresses("", "", ""),
 hostname: "fakeHost",
 bindAddress: "10.0.0.1",
-expectedIP: net.ParseIP("10.0.0.1"),
+expectedIP: netutils.ParseIPSloppy("10.0.0.1"),
 },
 {
 name: "Bind address IPv6 unicast address and no Node object",
 nodeInfo: makeNodeWithAddresses("", "", ""),
 hostname: "fakeHost",
 bindAddress: "fd00:4321::2",
-expectedIP: net.ParseIP("fd00:4321::2"),
+expectedIP: netutils.ParseIPSloppy("fd00:4321::2"),
 },
 {
 name: "No Valid IP found",
 nodeInfo: makeNodeWithAddresses("", "", ""),
 hostname: "fakeHost",
 bindAddress: "",
-expectedIP: net.ParseIP("127.0.0.1"),
+expectedIP: netutils.ParseIPSloppy("127.0.0.1"),
 },
 // Disabled because the GetNodeIP method has a backoff retry mechanism
 // and the test takes more than 30 seconds
@@ -256,63 +257,63 @@ func Test_detectNodeIP(t *testing.T) {
 // nodeInfo: makeNodeWithAddresses("", "", ""),
 // hostname: "fakeHost",
 // bindAddress: "0.0.0.0",
-// expectedIP: net.ParseIP("127.0.0.1"),
+// expectedIP: net.IP{127,0,0,1),
 // },
 {
 name: "Bind address 0.0.0.0 and node with IPv4 InternalIP set",
 nodeInfo: makeNodeWithAddresses("fakeHost", "192.168.1.1", "90.90.90.90"),
 hostname: "fakeHost",
 bindAddress: "0.0.0.0",
-expectedIP: net.ParseIP("192.168.1.1"),
+expectedIP: netutils.ParseIPSloppy("192.168.1.1"),
 },
 {
 name: "Bind address :: and node with IPv4 InternalIP set",
 nodeInfo: makeNodeWithAddresses("fakeHost", "192.168.1.1", "90.90.90.90"),
 hostname: "fakeHost",
 bindAddress: "::",
-expectedIP: net.ParseIP("192.168.1.1"),
+expectedIP: netutils.ParseIPSloppy("192.168.1.1"),
 },
 {
 name: "Bind address 0.0.0.0 and node with IPv6 InternalIP set",
 nodeInfo: makeNodeWithAddresses("fakeHost", "fd00:1234::1", "2001:db8::2"),
 hostname: "fakeHost",
 bindAddress: "0.0.0.0",
-expectedIP: net.ParseIP("fd00:1234::1"),
+expectedIP: netutils.ParseIPSloppy("fd00:1234::1"),
 },
 {
 name: "Bind address :: and node with IPv6 InternalIP set",
 nodeInfo: makeNodeWithAddresses("fakeHost", "fd00:1234::1", "2001:db8::2"),
 hostname: "fakeHost",
 bindAddress: "::",
-expectedIP: net.ParseIP("fd00:1234::1"),
+expectedIP: netutils.ParseIPSloppy("fd00:1234::1"),
 },
 {
 name: "Bind address 0.0.0.0 and node with only IPv4 ExternalIP set",
 nodeInfo: makeNodeWithAddresses("fakeHost", "", "90.90.90.90"),
 hostname: "fakeHost",
 bindAddress: "0.0.0.0",
-expectedIP: net.ParseIP("90.90.90.90"),
+expectedIP: netutils.ParseIPSloppy("90.90.90.90"),
 },
 {
 name: "Bind address :: and node with only IPv4 ExternalIP set",
 nodeInfo: makeNodeWithAddresses("fakeHost", "", "90.90.90.90"),
 hostname: "fakeHost",
 bindAddress: "::",
-expectedIP: net.ParseIP("90.90.90.90"),
+expectedIP: netutils.ParseIPSloppy("90.90.90.90"),
 },
 {
 name: "Bind address 0.0.0.0 and node with only IPv6 ExternalIP set",
 nodeInfo: makeNodeWithAddresses("fakeHost", "", "2001:db8::2"),
 hostname: "fakeHost",
 bindAddress: "0.0.0.0",
-expectedIP: net.ParseIP("2001:db8::2"),
+expectedIP: netutils.ParseIPSloppy("2001:db8::2"),
 },
 {
 name: "Bind address :: and node with only IPv6 ExternalIP set",
 nodeInfo: makeNodeWithAddresses("fakeHost", "", "2001:db8::2"),
 hostname: "fakeHost",
 bindAddress: "::",
-expectedIP: net.ParseIP("2001:db8::2"),
+expectedIP: netutils.ParseIPSloppy("2001:db8::2"),
 },
 }
 for _, c := range cases {


@@ -23,7 +23,6 @@ package app
 import (
 "errors"
 "fmt"
-"net"
 goruntime "runtime"
 // Enable pprof HTTP handlers.
@@ -45,6 +44,7 @@ import (
 utilnetsh "k8s.io/kubernetes/pkg/util/netsh"
 utilnode "k8s.io/kubernetes/pkg/util/node"
 "k8s.io/utils/exec"
+netutils "k8s.io/utils/net"
 )
 // NewProxyServer returns a new ProxyServer.
@@ -148,7 +148,7 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi
 proxier, err = winuserspace.NewProxier(
 winuserspace.NewLoadBalancerRR(),
-net.ParseIP(config.BindAddress),
+netutils.ParseIPSloppy(config.BindAddress),
 netshInterface,
 *utilnet.ParsePortRangeOrDie(config.PortRange),
 // TODO @pires replace below with default values, if applicable


@@ -26,6 +26,7 @@ import (
 apiserveroptions "k8s.io/apiserver/pkg/server/options"
 schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
 kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
+netutils "k8s.io/utils/net"
 )
 // CombinedInsecureServingOptions sets up to two insecure listeners for healthz and metrics. The flags
@@ -78,11 +79,11 @@ func (o *CombinedInsecureServingOptions) ApplyTo(c *schedulerappconfig.Config, c
 if o.Healthz != nil {
 o.Healthz.BindPort = o.BindPort
-o.Healthz.BindAddress = net.ParseIP(o.BindAddress)
+o.Healthz.BindAddress = netutils.ParseIPSloppy(o.BindAddress)
 }
 if o.Metrics != nil {
 o.Metrics.BindPort = o.BindPort
-o.Metrics.BindAddress = net.ParseIP(o.BindAddress)
+o.Metrics.BindAddress = netutils.ParseIPSloppy(o.BindAddress)
 }
 return o.applyTo(c, componentConfig)
@@ -125,7 +126,7 @@ func updateDeprecatedInsecureServingOptionsFromAddress(is *apiserveroptions.Depr
 } else {
 // In the previous `validate` process, we can ensure that the `addr` is legal, so ignore the error
 host, portInt, _ := splitHostIntPort(addr)
-is.BindAddress = net.ParseIP(host)
+is.BindAddress = netutils.ParseIPSloppy(host)
 is.BindPort = portInt
 }
 }
@@ -142,7 +143,7 @@ func (o *CombinedInsecureServingOptions) Validate() []error {
 errors = append(errors, fmt.Errorf("--port %v must be between 0 and 65535, inclusive. 0 for turning off insecure (HTTP) port", o.BindPort))
 }
-if len(o.BindAddress) > 0 && net.ParseIP(o.BindAddress) == nil {
+if len(o.BindAddress) > 0 && netutils.ParseIPSloppy(o.BindAddress) == nil {
 errors = append(errors, fmt.Errorf("--address %v is an invalid IP address", o.BindAddress))
 }


@@ -45,6 +45,7 @@ import (
 kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 "k8s.io/kubernetes/pkg/scheduler/apis/config/latest"
 "k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
+netutils "k8s.io/utils/net"
 )
 // Options has all the params needed to run a Scheduler
@@ -286,7 +287,7 @@ func (o *Options) Validate() []error {
 // Config return a scheduler config object
 func (o *Options) Config() (*schedulerappconfig.Config, error) {
 if o.SecureServing != nil {
-if err := o.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost", nil, []net.IP{net.ParseIP("127.0.0.1")}); err != nil {
+if err := o.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost", nil, []net.IP{netutils.ParseIPSloppy("127.0.0.1")}); err != nil {
 return nil, fmt.Errorf("error creating self-signed certificates: %v", err)
 }
 }


@@ -20,6 +20,8 @@ import (
 "net"
 "strconv"
+netutils "k8s.io/utils/net"
 "github.com/pkg/errors"
 )
@@ -29,7 +31,7 @@ func APIEndpointFromString(apiEndpoint string) (APIEndpoint, error) {
 if err != nil {
 return APIEndpoint{}, errors.Wrapf(err, "invalid advertise address endpoint: %s", apiEndpoint)
 }
-if net.ParseIP(apiEndpointHost) == nil {
+if netutils.ParseIPSloppy(apiEndpointHost) == nil {
 return APIEndpoint{}, errors.Errorf("invalid API endpoint IP: %s", apiEndpointHost)
 }
 apiEndpointPort, err := net.LookupPort("tcp", apiEndpointPortStr)


@@ -34,7 +34,7 @@ import (
 bootstrapapi "k8s.io/cluster-bootstrap/token/api"
 bootstraputil "k8s.io/cluster-bootstrap/token/util"
 "k8s.io/klog/v2"
-utilnet "k8s.io/utils/net"
+netutils "k8s.io/utils/net"
 bootstraptokenv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/bootstraptoken/v1"
 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
@@ -319,7 +319,7 @@ func ValidateCertSANs(altnames []string, fldPath *field.Path) field.ErrorList {
 for _, altname := range altnames {
 if errs := validation.IsDNS1123Subdomain(altname); len(errs) != 0 {
 if errs2 := validation.IsWildcardDNS1123Subdomain(altname); len(errs2) != 0 {
-if net.ParseIP(altname) == nil {
+if netutils.ParseIPSloppy(altname) == nil {
 allErrs = append(allErrs, field.Invalid(fldPath, altname, fmt.Sprintf("altname is not a valid IP address, DNS label or a DNS label with subdomain wildcards: %s; %s", strings.Join(errs, "; "), strings.Join(errs2, "; "))))
 }
 }
@@ -350,7 +350,7 @@ func ValidateURLs(urls []string, requireHTTPS bool, fldPath *field.Path) field.E
 // ValidateIPFromString validates ip address
 func ValidateIPFromString(ipaddr string, fldPath *field.Path) field.ErrorList {
 allErrs := field.ErrorList{}
-if net.ParseIP(ipaddr) == nil {
+if netutils.ParseIPSloppy(ipaddr) == nil {
 allErrs = append(allErrs, field.Invalid(fldPath, ipaddr, "ip address is not valid"))
 }
 return allErrs
@@ -377,7 +377,7 @@ func ValidateHostPort(endpoint string, fldPath *field.Path) field.ErrorList {
 // ValidateIPNetFromString validates network portion of ip address
 func ValidateIPNetFromString(subnetStr string, minAddrs int64, isDualStack bool, fldPath *field.Path) field.ErrorList {
 allErrs := field.ErrorList{}
-subnets, err := utilnet.ParseCIDRs(strings.Split(subnetStr, ","))
+subnets, err := netutils.ParseCIDRs(strings.Split(subnetStr, ","))
 if err != nil {
 allErrs = append(allErrs, field.Invalid(fldPath, subnetStr, "couldn't parse subnet"))
 return allErrs
@@ -388,7 +388,7 @@ func ValidateIPNetFromString(subnetStr string, minAddrs int64, isDualStack bool,
 allErrs = append(allErrs, field.Invalid(fldPath, subnetStr, "expected one (IPv4 or IPv6) CIDR or two CIDRs from each family for dual-stack networking"))
 // if DualStack and there are 2 CIDRs validate if there is at least one of each IP family
 case isDualStack && len(subnets) == 2:
-areDualStackCIDRs, err := utilnet.IsDualStackCIDRs(subnets)
+areDualStackCIDRs, err := netutils.IsDualStackCIDRs(subnets)
 if err != nil {
 allErrs = append(allErrs, field.Invalid(fldPath, subnetStr, err.Error()))
 } else if !areDualStackCIDRs {
@@ -400,13 +400,13 @@ func ValidateIPNetFromString(subnetStr string, minAddrs int64, isDualStack bool,
 }
 // validate the subnet/s
 for _, s := range subnets {
-numAddresses := utilnet.RangeSize(s)
+numAddresses := netutils.RangeSize(s)
 if numAddresses < minAddrs {
 allErrs = append(allErrs, field.Invalid(fldPath, s.String(), fmt.Sprintf("subnet with %d address(es) is too small, the minimum is %d", numAddresses, minAddrs)))
 }
 // Warn when the subnet is in site-local range - i.e. contains addresses that belong to fec0::/10
-_, siteLocalNet, _ := net.ParseCIDR("fec0::/10")
+_, siteLocalNet, _ := netutils.ParseCIDRSloppy("fec0::/10")
 if siteLocalNet.Contains(s.IP) || s.Contains(siteLocalNet.IP) {
 klog.Warningf("the subnet %v contains IPv6 site-local addresses that belong to fec0::/10 which has been deprecated by rfc3879", s)
 }
@@ -422,7 +422,7 @@ func ValidateIPNetFromString(subnetStr string, minAddrs int64, isDualStack bool,
 func ValidateServiceSubnetSize(subnetStr string, fldPath *field.Path) field.ErrorList {
 allErrs := field.ErrorList{}
 // subnets were already validated
-subnets, _ := utilnet.ParseCIDRs(strings.Split(subnetStr, ","))
+subnets, _ := netutils.ParseCIDRs(strings.Split(subnetStr, ","))
 for _, serviceSubnet := range subnets {
 ones, bits := serviceSubnet.Mask.Size()
 if bits-ones > constants.MaximumBitsForServiceSubnet {
@@ -437,13 +437,13 @@ func ValidateServiceSubnetSize(subnetStr string, fldPath *field.Path) field.Erro
 func ValidatePodSubnetNodeMask(subnetStr string, c *kubeadm.ClusterConfiguration, fldPath *field.Path) field.ErrorList {
 allErrs := field.ErrorList{}
 // subnets were already validated
-subnets, _ := utilnet.ParseCIDRs(strings.Split(subnetStr, ","))
+subnets, _ := netutils.ParseCIDRs(strings.Split(subnetStr, ","))
 for _, podSubnet := range subnets {
 // obtain podSubnet mask
 mask := podSubnet.Mask
 maskSize, _ := mask.Size()
 // obtain node-cidr-mask
-nodeMask, err := getClusterNodeMask(c, utilnet.IsIPv6(podSubnet.IP))
+nodeMask, err := getClusterNodeMask(c, netutils.IsIPv6(podSubnet.IP))
 if err != nil {
 allErrs = append(allErrs, field.Invalid(fldPath, podSubnet.String(), err.Error()))
 continue


@@ -17,10 +17,9 @@ limitations under the License.
 package componentconfigs
 import (
-"net"
 clientset "k8s.io/client-go/kubernetes"
 kubeproxyconfig "k8s.io/kube-proxy/config/v1alpha1"
+netutils "k8s.io/utils/net"
 kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
 kubeadmapiv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
@@ -76,7 +75,7 @@ func (kp *kubeProxyConfig) Unmarshal(docmap kubeadmapi.DocumentMap) error {
 }
 func kubeProxyDefaultBindAddress(localAdvertiseAddress string) string {
-ip := net.ParseIP(localAdvertiseAddress)
+ip := netutils.ParseIPSloppy(localAdvertiseAddress)
 if ip.To4() != nil {
 return kubeadmapiv1.DefaultProxyBindAddressv4
 }


@@ -34,7 +34,7 @@ import (
 apimachineryversion "k8s.io/apimachinery/pkg/version"
 bootstrapapi "k8s.io/cluster-bootstrap/token/api"
 componentversion "k8s.io/component-base/version"
-utilnet "k8s.io/utils/net"
+netutils "k8s.io/utils/net"
 )
 const (
@@ -635,7 +635,7 @@ func GetDNSIP(svcSubnetList string, isDualStack bool) (net.IP, error) {
 }
 // Selects the 10th IP in service subnet CIDR range as dnsIP
-dnsIP, err := utilnet.GetIndexedIP(svcSubnetCIDR, 10)
+dnsIP, err := netutils.GetIndexedIP(svcSubnetCIDR, 10)
 if err != nil {
 return nil, errors.Wrap(err, "unable to get internal Kubernetes Service IP from the given service CIDR")
 }
@@ -649,7 +649,7 @@ func GetKubernetesServiceCIDR(svcSubnetList string, isDualStack bool) (*net.IPNe
 // The default service address family for the cluster is the address family of the first
 // service cluster IP range configured via the `--service-cluster-ip-range` flag
 // of the kube-controller-manager and kube-apiserver.
-svcSubnets, err := utilnet.ParseCIDRs(strings.Split(svcSubnetList, ","))
+svcSubnets, err := netutils.ParseCIDRs(strings.Split(svcSubnetList, ","))
 if err != nil {
 return nil, errors.Wrapf(err, "unable to parse ServiceSubnet %v", svcSubnetList)
 }
@@ -659,7 +659,7 @@ func GetKubernetesServiceCIDR(svcSubnetList string, isDualStack bool) (*net.IPNe
 return svcSubnets[0], nil
 }
 // internal IP address for the API server
-_, svcSubnet, err := net.ParseCIDR(svcSubnetList)
+_, svcSubnet, err := netutils.ParseCIDRSloppy(svcSubnetList)
 if err != nil {
 return nil, errors.Wrapf(err, "unable to parse ServiceSubnet %v", svcSubnetList)
 }
@@ -672,7 +672,7 @@ func GetAPIServerVirtualIP(svcSubnetList string, isDualStack bool) (net.IP, erro
 if err != nil {
 return nil, errors.Wrap(err, "unable to get internal Kubernetes Service IP from the given service CIDR")
 }
-internalAPIServerVirtualIP, err := utilnet.GetIndexedIP(svcSubnet, 1)
+internalAPIServerVirtualIP, err := netutils.GetIndexedIP(svcSubnet, 1)
 if err != nil {
 return nil, errors.Wrapf(err, "unable to get the first IP address from the given CIDR: %s", svcSubnet.String())
 }


@@ -27,6 +27,7 @@ import (
 "time"
 certutil "k8s.io/client-go/util/cert"
+netutils "k8s.io/utils/net"
 kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
 certtestutil "k8s.io/kubernetes/cmd/kubeadm/app/util/certs"
@@ -46,7 +47,7 @@ var (
 CommonName: "test-common-name",
 Organization: []string{"sig-cluster-lifecycle"},
 AltNames: certutil.AltNames{
-IPs: []net.IP{net.ParseIP("10.100.0.1")},
+IPs: []net.IP{netutils.ParseIPSloppy("10.100.0.1")},
 DNSNames: []string{"test-domain.space"},
 },
 Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
@@ -234,7 +235,7 @@ func TestCertToConfig(t *testing.T) {
 CommonName: "test-common-name",
 Organization: []string{"sig-cluster-lifecycle"},
 AltNames: certutil.AltNames{
-IPs: []net.IP{net.ParseIP("10.100.0.1")},
+IPs: []net.IP{netutils.ParseIPSloppy("10.100.0.1")},
 DNSNames: []string{"test-domain.space"},
 },
 Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
@@ -247,7 +248,7 @@ func TestCertToConfig(t *testing.T) {
 },
 ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
 DNSNames: []string{"test-domain.space"},
-IPAddresses: []net.IP{net.ParseIP("10.100.0.1")},
+IPAddresses: []net.IP{netutils.ParseIPSloppy("10.100.0.1")},
 }
 cfg := certToConfig(cert)


@@ -27,6 +27,7 @@ import (
 "k8s.io/client-go/tools/clientcmd"
 certutil "k8s.io/client-go/util/cert"
 "k8s.io/client-go/util/keyutil"
+netutils "k8s.io/utils/net"
 kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
@@ -161,7 +162,7 @@ func writeTestKubeconfig(t *testing.T, dir, name string, caCert *x509.Certificat
 Organization: []string{"sig-cluster-lifecycle"},
 Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
 AltNames: certutil.AltNames{
-IPs: []net.IP{net.ParseIP("10.100.0.1")},
+IPs: []net.IP{netutils.ParseIPSloppy("10.100.0.1")},
 DNSNames: []string{"test-domain.space"},
 },
 },


@@ -46,7 +46,7 @@ import (
 "k8s.io/klog/v2"
 system "k8s.io/system-validators/validators"
 utilsexec "k8s.io/utils/exec"
-utilsnet "k8s.io/utils/net"
+netutils "k8s.io/utils/net"
 kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
 "k8s.io/kubernetes/cmd/kubeadm/app/constants"
@@ -432,7 +432,7 @@ func (hst HTTPProxyCheck) Name() string {
 func (hst HTTPProxyCheck) Check() (warnings, errorList []error) {
 klog.V(1).Infoln("validating if the connectivity type is via proxy or direct")
 u := &url.URL{Scheme: hst.Proto, Host: hst.Host}
-if utilsnet.IsIPv6String(hst.Host) {
+if netutils.IsIPv6String(hst.Host) {
 u.Host = net.JoinHostPort(hst.Host, "1234")
 }
@@ -474,12 +474,12 @@ func (subnet HTTPProxyCIDRCheck) Check() (warnings, errorList []error) {
 return nil, nil
 }
-_, cidr, err := net.ParseCIDR(subnet.CIDR)
+_, cidr, err := netutils.ParseCIDRSloppy(subnet.CIDR)
 if err != nil {
 return nil, []error{errors.Wrapf(err, "error parsing CIDR %q", subnet.CIDR)}
 }
-testIP, err := utilsnet.GetIndexedIP(cidr, 1)
+testIP, err := netutils.GetIndexedIP(cidr, 1)
 if err != nil {
 return nil, []error{errors.Wrapf(err, "unable to get first IP address from the given CIDR (%s)", cidr.String())}
 }
@@ -941,8 +941,8 @@ func RunInitNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.InitConfigura
 checks = addCommonChecks(execer, cfg.KubernetesVersion, &cfg.NodeRegistration, checks)
 // Check if Bridge-netfilter and IPv6 relevant flags are set
-if ip := net.ParseIP(cfg.LocalAPIEndpoint.AdvertiseAddress); ip != nil {
-if utilsnet.IsIPv6(ip) {
+if ip := netutils.ParseIPSloppy(cfg.LocalAPIEndpoint.AdvertiseAddress); ip != nil {
+if netutils.IsIPv6(ip) {
 checks = append(checks,
 FileContentCheck{Path: bridgenf6, Content: []byte{'1'}},
 FileContentCheck{Path: ipv6DefaultForwarding, Content: []byte{'1'}},
@@ -1006,8 +1006,8 @@ func RunJoinNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.JoinConfigura
 checks = append(checks,
 HTTPProxyCheck{Proto: "https", Host: ipstr},
 )
-if ip := net.ParseIP(ipstr); ip != nil {
-if utilsnet.IsIPv6(ip) {
+if ip := netutils.ParseIPSloppy(ipstr); ip != nil {
+if netutils.IsIPv6(ip) {
 addIPv6Checks = true
 }
 }


@@ -17,18 +17,17 @@ limitations under the License.
 package apiclient
 import (
-"net"
 "strings"
 "github.com/pkg/errors"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/util/intstr"
 core "k8s.io/client-go/testing"
-utilnet "k8s.io/utils/net"
+netutils "k8s.io/utils/net"
 "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 )
@@ -88,12 +87,12 @@ func (idr *InitDryRunGetter) handleKubernetesService(action core.GetAction) (boo
 return false, nil, nil
 }
-_, svcSubnet, err := net.ParseCIDR(idr.serviceSubnet)
+_, svcSubnet, err := netutils.ParseCIDRSloppy(idr.serviceSubnet)
 if err != nil {
 return true, nil, errors.Wrapf(err, "error parsing CIDR %q", idr.serviceSubnet)
 }
-internalAPIServerVirtualIP, err := utilnet.GetIndexedIP(svcSubnet, 1)
+internalAPIServerVirtualIP, err := netutils.GetIndexedIP(svcSubnet, 1)
 if err != nil {
 return true, nil, errors.Wrapf(err, "unable to get first IP address from the given CIDR (%s)", svcSubnet.String())
 }


@@ -31,6 +31,7 @@ import (
 apimachineryversion "k8s.io/apimachinery/pkg/version"
 componentversion "k8s.io/component-base/version"
 "k8s.io/klog/v2"
+netutils "k8s.io/utils/net"
 kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
 kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
@@ -139,7 +140,7 @@ func LowercaseSANs(sans []string) {
 // VerifyAPIServerBindAddress can be used to verify if a bind address for the API Server is 0.0.0.0,
 // in which case this address is not valid and should not be used.
 func VerifyAPIServerBindAddress(address string) error {
-ip := net.ParseIP(address)
+ip := netutils.ParseIPSloppy(address)
 if ip == nil {
 return errors.Errorf("cannot parse IP address: %s", address)
 }
@@ -164,7 +165,7 @@ func ChooseAPIServerBindAddress(bindAddress net.IP) (net.IP, error) {
 if err != nil {
 if netutil.IsNoRoutesError(err) {
 klog.Warningf("WARNING: could not obtain a bind address for the API Server: %v; using: %s", err, constants.DefaultAPIServerBindAddress)
-defaultIP := net.ParseIP(constants.DefaultAPIServerBindAddress)
+defaultIP := netutils.ParseIPSloppy(constants.DefaultAPIServerBindAddress)
 if defaultIP == nil {
 return nil, errors.Errorf("cannot parse default IP address: %s", constants.DefaultAPIServerBindAddress)
 }


@@ -31,6 +31,7 @@ import (
 netutil "k8s.io/apimachinery/pkg/util/net"
 bootstraputil "k8s.io/cluster-bootstrap/token/util"
 "k8s.io/klog/v2"
+netutils "k8s.io/utils/net"
 bootstraptokenv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/bootstraptoken/v1"
 kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
@@ -122,7 +123,7 @@ func SetNodeRegistrationDynamicDefaults(cfg *kubeadmapi.NodeRegistrationOptions,
 // SetAPIEndpointDynamicDefaults checks and sets configuration values for the APIEndpoint object
 func SetAPIEndpointDynamicDefaults(cfg *kubeadmapi.APIEndpoint) error {
 // validate cfg.API.AdvertiseAddress.
-addressIP := net.ParseIP(cfg.AdvertiseAddress)
+addressIP := netutils.ParseIPSloppy(cfg.AdvertiseAddress)
 if addressIP == nil && cfg.AdvertiseAddress != "" {
 return errors.Errorf("couldn't use \"%s\" as \"apiserver-advertise-address\", must be ipv4 or ipv6 address", cfg.AdvertiseAddress)
 }


@@ -25,7 +25,7 @@ import (
 "github.com/pkg/errors"
 "k8s.io/apimachinery/pkg/util/validation"
-utilsnet "k8s.io/utils/net"
+netutils "k8s.io/utils/net"
 kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
 )
@@ -100,7 +100,7 @@ func ParseHostPort(hostport string) (string, string, error) {
 }
 // if host is a valid IP, returns it
-if ip := net.ParseIP(host); ip != nil {
+if ip := netutils.ParseIPSloppy(host); ip != nil {
 return host, port, nil
 }
@@ -115,7 +115,7 @@ func ParseHostPort(hostport string) (string, string, error) {
 // ParsePort parses a string representing a TCP port.
 // If the string is not a valid representation of a TCP port, ParsePort returns an error.
 func ParsePort(port string) (int, error) {
-portInt, err := utilsnet.ParsePort(port, true)
+portInt, err := netutils.ParsePort(port, true)
 if err == nil && (1 <= portInt && portInt <= 65535) {
 return portInt, nil
 }
@@ -133,7 +133,7 @@ func parseAPIEndpoint(localEndpoint *kubeadmapi.APIEndpoint) (net.IP, string, er
 }
 // parse the AdvertiseAddress
-var ip = net.ParseIP(localEndpoint.AdvertiseAddress)
+var ip = netutils.ParseIPSloppy(localEndpoint.AdvertiseAddress)
 if ip == nil {
 return nil, "", errors.Errorf("invalid value `%s` given for api.advertiseAddress", localEndpoint.AdvertiseAddress)
 }


@ -41,6 +41,7 @@ import (
"k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation"
certutil "k8s.io/client-go/util/cert" certutil "k8s.io/client-go/util/cert"
"k8s.io/client-go/util/keyutil" "k8s.io/client-go/util/keyutil"
netutils "k8s.io/utils/net"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
@ -417,7 +418,7 @@ func pathForCSR(pkiPath, name string) string {
// GetAPIServerAltNames builds an AltNames object for to be used when generating apiserver certificate // GetAPIServerAltNames builds an AltNames object for to be used when generating apiserver certificate
func GetAPIServerAltNames(cfg *kubeadmapi.InitConfiguration) (*certutil.AltNames, error) { func GetAPIServerAltNames(cfg *kubeadmapi.InitConfiguration) (*certutil.AltNames, error) {
// advertise address // advertise address
advertiseAddress := net.ParseIP(cfg.LocalAPIEndpoint.AdvertiseAddress) advertiseAddress := netutils.ParseIPSloppy(cfg.LocalAPIEndpoint.AdvertiseAddress)
if advertiseAddress == nil { if advertiseAddress == nil {
return nil, errors.Errorf("error parsing LocalAPIEndpoint AdvertiseAddress %v: is not a valid textual representation of an IP address", return nil, errors.Errorf("error parsing LocalAPIEndpoint AdvertiseAddress %v: is not a valid textual representation of an IP address",
cfg.LocalAPIEndpoint.AdvertiseAddress) cfg.LocalAPIEndpoint.AdvertiseAddress)
@ -446,7 +447,7 @@ func GetAPIServerAltNames(cfg *kubeadmapi.InitConfiguration) (*certutil.AltNames
// add cluster controlPlaneEndpoint if present (dns or ip) // add cluster controlPlaneEndpoint if present (dns or ip)
if len(cfg.ControlPlaneEndpoint) > 0 { if len(cfg.ControlPlaneEndpoint) > 0 {
if host, _, err := kubeadmutil.ParseHostPort(cfg.ControlPlaneEndpoint); err == nil { if host, _, err := kubeadmutil.ParseHostPort(cfg.ControlPlaneEndpoint); err == nil {
if ip := net.ParseIP(host); ip != nil { if ip := netutils.ParseIPSloppy(host); ip != nil {
altNames.IPs = append(altNames.IPs, ip) altNames.IPs = append(altNames.IPs, ip)
} else { } else {
altNames.DNSNames = append(altNames.DNSNames, host) altNames.DNSNames = append(altNames.DNSNames, host)
@ -478,7 +479,7 @@ func GetEtcdPeerAltNames(cfg *kubeadmapi.InitConfiguration) (*certutil.AltNames,
// getAltNames builds an AltNames object with the cfg and certName. // getAltNames builds an AltNames object with the cfg and certName.
func getAltNames(cfg *kubeadmapi.InitConfiguration, certName string) (*certutil.AltNames, error) { func getAltNames(cfg *kubeadmapi.InitConfiguration, certName string) (*certutil.AltNames, error) {
// advertise address // advertise address
advertiseAddress := net.ParseIP(cfg.LocalAPIEndpoint.AdvertiseAddress) advertiseAddress := netutils.ParseIPSloppy(cfg.LocalAPIEndpoint.AdvertiseAddress)
if advertiseAddress == nil { if advertiseAddress == nil {
return nil, errors.Errorf("error parsing LocalAPIEndpoint AdvertiseAddress %v: is not a valid textual representation of an IP address", return nil, errors.Errorf("error parsing LocalAPIEndpoint AdvertiseAddress %v: is not a valid textual representation of an IP address",
cfg.LocalAPIEndpoint.AdvertiseAddress) cfg.LocalAPIEndpoint.AdvertiseAddress)
@ -508,7 +509,7 @@ func getAltNames(cfg *kubeadmapi.InitConfiguration, certName string) (*certutil.
// certName is used to print user-facing warnings and should be the name of the cert the altNames will be used for // certName is used to print user-facing warnings and should be the name of the cert the altNames will be used for
func appendSANsToAltNames(altNames *certutil.AltNames, SANs []string, certName string) { func appendSANsToAltNames(altNames *certutil.AltNames, SANs []string, certName string) {
for _, altname := range SANs { for _, altname := range SANs {
if ip := net.ParseIP(altname); ip != nil { if ip := netutils.ParseIPSloppy(altname); ip != nil {
altNames.IPs = append(altNames.IPs, ip) altNames.IPs = append(altNames.IPs, ip)
} else if len(validation.IsDNS1123Subdomain(altname)) == 0 { } else if len(validation.IsDNS1123Subdomain(altname)) == 0 {
altNames.DNSNames = append(altNames.DNSNames, altname) altNames.DNSNames = append(altNames.DNSNames, altname)

View File

@ -30,6 +30,7 @@ import (
"testing" "testing"
certutil "k8s.io/client-go/util/cert" certutil "k8s.io/client-go/util/cert"
netutils "k8s.io/utils/net"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
) )
@ -633,7 +634,7 @@ func TestGetAPIServerAltNames(t *testing.T) {
for _, IPAddress := range rt.expectedIPAddresses { for _, IPAddress := range rt.expectedIPAddresses {
found := false found := false
for _, val := range altNames.IPs { for _, val := range altNames.IPs {
if val.Equal(net.ParseIP(IPAddress)) { if val.Equal(netutils.ParseIPSloppy(IPAddress)) {
found = true found = true
break break
} }
@ -698,7 +699,7 @@ func TestGetEtcdAltNames(t *testing.T) {
t.Run(IPAddress, func(t *testing.T) { t.Run(IPAddress, func(t *testing.T) {
found := false found := false
for _, val := range altNames.IPs { for _, val := range altNames.IPs {
if val.Equal(net.ParseIP(IPAddress)) { if val.Equal(netutils.ParseIPSloppy(IPAddress)) {
found = true found = true
break break
} }
@ -757,7 +758,7 @@ func TestGetEtcdPeerAltNames(t *testing.T) {
for _, IPAddress := range expectedIPAddresses { for _, IPAddress := range expectedIPAddresses {
found := false found := false
for _, val := range altNames.IPs { for _, val := range altNames.IPs {
if val.Equal(net.ParseIP(IPAddress)) { if val.Equal(netutils.ParseIPSloppy(IPAddress)) {
found = true found = true
break break
} }

View File

@ -103,7 +103,7 @@ import (
"k8s.io/kubernetes/pkg/volume/util/hostutil" "k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath" "k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/utils/exec" "k8s.io/utils/exec"
utilnet "k8s.io/utils/net" netutils "k8s.io/utils/net"
) )
const ( const (
@ -1122,7 +1122,7 @@ func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencie
var nodeIPs []net.IP var nodeIPs []net.IP
if kubeServer.NodeIP != "" { if kubeServer.NodeIP != "" {
for _, ip := range strings.Split(kubeServer.NodeIP, ",") { for _, ip := range strings.Split(kubeServer.NodeIP, ",") {
parsedNodeIP := net.ParseIP(strings.TrimSpace(ip)) parsedNodeIP := netutils.ParseIPSloppy(strings.TrimSpace(ip))
if parsedNodeIP == nil { if parsedNodeIP == nil {
klog.InfoS("Could not parse --node-ip ignoring", "IP", ip) klog.InfoS("Could not parse --node-ip ignoring", "IP", ip)
} else { } else {
@ -1132,7 +1132,7 @@ func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencie
} }
if !utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) && len(nodeIPs) > 1 { if !utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) && len(nodeIPs) > 1 {
return fmt.Errorf("dual-stack --node-ip %q not supported in a single-stack cluster", kubeServer.NodeIP) return fmt.Errorf("dual-stack --node-ip %q not supported in a single-stack cluster", kubeServer.NodeIP)
} else if len(nodeIPs) > 2 || (len(nodeIPs) == 2 && utilnet.IsIPv6(nodeIPs[0]) == utilnet.IsIPv6(nodeIPs[1])) { } else if len(nodeIPs) > 2 || (len(nodeIPs) == 2 && netutils.IsIPv6(nodeIPs[0]) == netutils.IsIPv6(nodeIPs[1])) {
return fmt.Errorf("bad --node-ip %q; must contain either a single IP or a dual-stack pair of IPs", kubeServer.NodeIP) return fmt.Errorf("bad --node-ip %q; must contain either a single IP or a dual-stack pair of IPs", kubeServer.NodeIP)
} else if len(nodeIPs) == 2 && kubeServer.CloudProvider != "" { } else if len(nodeIPs) == 2 && kubeServer.CloudProvider != "" {
return fmt.Errorf("dual-stack --node-ip %q not supported when using a cloud provider", kubeServer.NodeIP) return fmt.Errorf("dual-stack --node-ip %q not supported when using a cloud provider", kubeServer.NodeIP)
@ -1224,7 +1224,7 @@ func startKubelet(k kubelet.Bootstrap, podCfg *config.PodConfig, kubeCfg *kubele
go k.ListenAndServe(kubeCfg, kubeDeps.TLSOptions, kubeDeps.Auth) go k.ListenAndServe(kubeCfg, kubeDeps.TLSOptions, kubeDeps.Auth)
} }
if kubeCfg.ReadOnlyPort > 0 { if kubeCfg.ReadOnlyPort > 0 {
go k.ListenAndServeReadOnly(net.ParseIP(kubeCfg.Address), uint(kubeCfg.ReadOnlyPort)) go k.ListenAndServeReadOnly(netutils.ParseIPSloppy(kubeCfg.Address), uint(kubeCfg.ReadOnlyPort))
} }
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPodResources) { if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPodResources) {
go k.ListenAndServePodResources() go k.ListenAndServePodResources()

View File

@ -3,6 +3,7 @@ module k8s.io/kubernetes/hack/tools
go 1.16 go 1.16
require ( require (
github.com/aojea/sloppy-netparser v0.0.0-20210819225411-1b3bd8b3b975
github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c
github.com/client9/misspell v0.3.4 github.com/client9/misspell v0.3.4
github.com/golangci/golangci-lint v1.41.1 github.com/golangci/golangci-lint v1.41.1

View File

@ -66,6 +66,8 @@ github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/aojea/sloppy-netparser v0.0.0-20210819225411-1b3bd8b3b975 h1:3bpBhtHNVCpJiyO1r7w0BjGhQPPk2eD1ZsVAVS5vmiE=
github.com/aojea/sloppy-netparser v0.0.0-20210819225411-1b3bd8b3b975/go.mod h1:VP81Qd6FKAazakPswOou8ULXGU/j5QH0VcGPzehHx3s=
github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
@ -187,6 +189,7 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
@ -637,8 +640,9 @@ github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4l
github.com/sourcegraph/go-diff v0.6.1 h1:hmA1LzxW0n1c3Q4YbrFgg4P99GSnebYa3x8gr0HZqLQ= github.com/sourcegraph/go-diff v0.6.1 h1:hmA1LzxW0n1c3Q4YbrFgg4P99GSnebYa3x8gr0HZqLQ=
github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
@ -1014,8 +1018,9 @@ golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.3 h1:L69ShwSZEyCsLKoAxDKeMvLDZkumEe8gXUZAjab0tX8=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1157,6 +1162,8 @@ honnef.co/go/tools v0.2.0 h1:ws8AfbgTX3oIczLPNPCu5166oBg9ST2vNs0rcht+mDE=
honnef.co/go/tools v0.2.0/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= honnef.co/go/tools v0.2.0/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY=
k8s.io/klog/hack/tools v0.0.0-20210303110520-14dec3377f55 h1:dLsq+jacIVLNk1Jmh5RFmlTiD5kIwjYN5hh8udCyeDc= k8s.io/klog/hack/tools v0.0.0-20210303110520-14dec3377f55 h1:dLsq+jacIVLNk1Jmh5RFmlTiD5kIwjYN5hh8udCyeDc=
k8s.io/klog/hack/tools v0.0.0-20210303110520-14dec3377f55/go.mod h1:peYvfmhJdUiWTjdEpxAPkauLKX+lwVMfcSIMynAWZ14= k8s.io/klog/hack/tools v0.0.0-20210303110520-14dec3377f55/go.mod h1:peYvfmhJdUiWTjdEpxAPkauLKX+lwVMfcSIMynAWZ14=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
mvdan.cc/gofumpt v0.1.1 h1:bi/1aS/5W00E2ny5q65w9SnKpWEF/UIOqDYBILpo9rA= mvdan.cc/gofumpt v0.1.1 h1:bi/1aS/5W00E2ny5q65w9SnKpWEF/UIOqDYBILpo9rA=
mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48=
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=

View File

@ -20,6 +20,7 @@ package tools
import ( import (
// linting tools // linting tools
_ "github.com/aojea/sloppy-netparser"
_ "github.com/client9/misspell/cmd/misspell" _ "github.com/client9/misspell/cmd/misspell"
_ "github.com/golangci/golangci-lint/cmd/golangci-lint" _ "github.com/golangci/golangci-lint/cmd/golangci-lint"
_ "github.com/google/go-flow-levee/cmd/levee" _ "github.com/google/go-flow-levee/cmd/levee"

66
hack/update-netparse-cve.sh Executable file
View File

@ -0,0 +1,66 @@
#!/usr/bin/env bash
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script replaces the "net" stdlib IP and CIDR parsers
# with the ones forked in k8s.io/utils/net to parse IP addresses
# because of the compatibility break introduced in golang 1.17
# Reference: #100895
# Usage: `hack/update-netparse-cve.sh`.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
source "${KUBE_ROOT}/hack/lib/util.sh"
kube::golang::verify_go_version
# Ensure that we find the binaries we build before anything else.
export GOBIN="${KUBE_OUTPUT_BINPATH}"
PATH="${GOBIN}:${PATH}"
# Explicitly opt into go modules, even though we're inside a GOPATH directory
export GO111MODULE=on
# Install sloppy-netparser
echo 'installing net parser converter'
pushd "${KUBE_ROOT}/hack/tools" >/dev/null
go install github.com/aojea/sloppy-netparser
popd >/dev/null
cd "${KUBE_ROOT}"
find_files() {
find . -not \( \
\( \
-wholename './output' \
-o -wholename './.git' \
-o -wholename './_output' \
-o -wholename './_gopath' \
-o -wholename './release' \
-o -wholename './target' \
-o -wholename '*/third_party/*' \
-o -wholename '*/vendor/*' \
-o -wholename './staging/src/k8s.io/client-go/*vendor/*' \
\) -prune \
\) -name '*.go'
}
# replace net.ParseIP() and net.ParseCIDR()
find_files | xargs sloppy-netparser
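To make the mechanical rewrite concrete, here is a hand-written sketch of the kind of edit sloppy-netparser applies; the isIP helper is made up, but the pattern matches the changes visible throughout this diff:

```go
// Before the rewrite the import block used only the standard "net" package
// and the function body read: return net.ParseIP(addr) != nil
package main

import (
	"fmt"

	netutils "k8s.io/utils/net" // import added by the rewrite
)

func isIP(addr string) bool {
	return netutils.ParseIPSloppy(addr) != nil // was: net.ParseIP(addr)
}

func main() {
	fmt.Println(isIP("127.0.0.1"), isIP("not-an-ip")) // true false
}
```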

76
hack/verify-netparse-cve.sh Executable file
View File

@ -0,0 +1,76 @@
#!/usr/bin/env bash
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script checks if the "net" stdlib IP and CIDR parsers are used
# instead of the ones forked in k8s.io/utils/net to parse IP addresses
# because of the compatibility break introduced in golang 1.17
# Reference: #100895
# Usage: `hack/verify-netparse-cve.sh`.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
cd "${KUBE_ROOT}"
rc=0
find_files() {
find . -not \( \
\( \
-wholename './output' \
-o -wholename './.git' \
-o -wholename './_output' \
-o -wholename './_gopath' \
-o -wholename './release' \
-o -wholename './target' \
-o -wholename '*/third_party/*' \
-o -wholename '*/vendor/*' \
-o -wholename './staging/src/k8s.io/client-go/*vendor/*' \
\) -prune \
\) -name '*.go'
}
# find files using net.ParseIP()
netparseip_matches=$(find_files | xargs grep -nE "net.ParseIP\(.*\)" 2>/dev/null) || true
if [[ -n "${netparseip_matches}" ]]; then
echo "net.ParseIP reject leading zeros in the dot-decimal notation of IPv4 addresses since golang 1.17:" >&2
echo "${netparseip_matches}" >&2
echo >&2
echo "Use k8s.io/utils/net ParseIPSloppy() to parse IP addresses. Kubernetes #100895" >&2
echo >&2
echo "Run ./hack/update-netparse-cve.sh" >&2
echo >&2
rc=1
fi
# find files using net.ParseCIDR()
netparsecidrs_matches=$(find_files | xargs grep -nE "net.ParseCIDR\(.*\)" 2>/dev/null) || true
if [[ -n "${netparsecidrs_matches}" ]]; then
echo "net.ParseCIDR reject leading zeros in the dot-decimal notation of IPv4 addresses since golang 1.17:" >&2
echo "${netparsecidrs_matches}" >&2
echo >&2
echo "Use k8s.io/utils/net ParseCIDRSloppy() to parse network CIDRs. Kubernetes #100895" >&2
echo >&2
echo "Run ./hack/update-netparse-cve.sh" >&2
echo >&2
rc=1
fi
exit $rc
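The behavior difference that both scripts guard against can be seen in a few lines of Go. This is a minimal sketch, assuming a module that depends on k8s.io/utils/net and a Go 1.17+ toolchain:

```go
package main

import (
	"fmt"
	"net"

	netutils "k8s.io/utils/net"
)

func main() {
	addr := "010.1.2.3" // dot-decimal IPv4 with a leading zero

	// With Go 1.17 and newer the standard library rejects the leading zero.
	fmt.Println(net.ParseIP(addr)) // <nil>

	// The forked parser keeps the pre-1.17 behavior and still accepts it.
	fmt.Println(netutils.ParseIPSloppy(addr)) // 10.1.2.3
}
```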

View File

@ -3112,7 +3112,7 @@ func validatePodDNSConfig(dnsConfig *core.PodDNSConfig, dnsPolicy *core.DNSPolic
allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers"), dnsConfig.Nameservers, fmt.Sprintf("must not have more than %v nameservers", MaxDNSNameservers))) allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers"), dnsConfig.Nameservers, fmt.Sprintf("must not have more than %v nameservers", MaxDNSNameservers)))
} }
for i, ns := range dnsConfig.Nameservers { for i, ns := range dnsConfig.Nameservers {
if ip := net.ParseIP(ns); ip == nil { if ip := netutils.ParseIPSloppy(ns); ip == nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers").Index(i), ns, "must be valid IP address")) allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers").Index(i), ns, "must be valid IP address"))
} }
} }
@ -3246,7 +3246,7 @@ func validateOnlyAddedTolerations(newTolerations []core.Toleration, oldToleratio
func ValidateHostAliases(hostAliases []core.HostAlias, fldPath *field.Path) field.ErrorList { func ValidateHostAliases(hostAliases []core.HostAlias, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{} allErrs := field.ErrorList{}
for _, hostAlias := range hostAliases { for _, hostAlias := range hostAliases {
if ip := net.ParseIP(hostAlias.IP); ip == nil { if ip := netutils.ParseIPSloppy(hostAlias.IP); ip == nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), hostAlias.IP, "must be valid IP address")) allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), hostAlias.IP, "must be valid IP address"))
} }
for _, hostname := range hostAlias.Hostnames { for _, hostname := range hostAlias.Hostnames {
@ -5840,7 +5840,7 @@ func validateEndpointAddress(address *core.EndpointAddress, fldPath *field.Path)
// - https://www.iana.org/assignments/ipv6-multicast-addresses/ipv6-multicast-addresses.xhtml // - https://www.iana.org/assignments/ipv6-multicast-addresses/ipv6-multicast-addresses.xhtml
func ValidateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList { func ValidateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{} allErrs := field.ErrorList{}
ip := net.ParseIP(ipAddress) ip := netutils.ParseIPSloppy(ipAddress)
if ip == nil { if ip == nil {
allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "must be a valid IP address")) allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "must be a valid IP address"))
return allErrs return allErrs
@ -6160,7 +6160,7 @@ func ValidateLoadBalancerStatus(status *core.LoadBalancerStatus, fldPath *field.
for i, ingress := range status.Ingress { for i, ingress := range status.Ingress {
idxPath := fldPath.Child("ingress").Index(i) idxPath := fldPath.Child("ingress").Index(i)
if len(ingress.IP) > 0 { if len(ingress.IP) > 0 {
if isIP := (net.ParseIP(ingress.IP) != nil); !isIP { if isIP := (netutils.ParseIPSloppy(ingress.IP) != nil); !isIP {
allErrs = append(allErrs, field.Invalid(idxPath.Child("ip"), ingress.IP, "must be a valid IP address")) allErrs = append(allErrs, field.Invalid(idxPath.Child("ip"), ingress.IP, "must be a valid IP address"))
} }
} }
@ -6168,7 +6168,7 @@ func ValidateLoadBalancerStatus(status *core.LoadBalancerStatus, fldPath *field.
for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) { for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg)) allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg))
} }
if isIP := (net.ParseIP(ingress.Hostname) != nil); isIP { if isIP := (netutils.ParseIPSloppy(ingress.Hostname) != nil); isIP {
allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address")) allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address"))
} }
} }
@ -6198,7 +6198,7 @@ func validateVolumeNodeAffinity(nodeAffinity *core.VolumeNodeAffinity, fldPath *
// ValidateCIDR validates whether a CIDR matches the conventions expected by net.ParseCIDR // ValidateCIDR validates whether a CIDR matches the conventions expected by net.ParseCIDR
func ValidateCIDR(cidr string) (*net.IPNet, error) { func ValidateCIDR(cidr string) (*net.IPNet, error) {
_, net, err := net.ParseCIDR(cidr) _, net, err := netutils.ParseCIDRSloppy(cidr)
if err != nil { if err != nil {
return nil, err return nil, err
} }
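The same consideration applies to CIDRs: ValidateCIDR now goes through the forked parser so that address parts with leading zeros keep validating as they did before Go 1.17. A standalone sketch of the difference (example code, not part of this change):

```go
package main

import (
	"fmt"
	"net"

	netutils "k8s.io/utils/net"
)

func main() {
	cidr := "010.0.0.0/8" // CIDR whose address part carries a leading zero

	// Go 1.17+ standard library: parsing fails with a non-nil error.
	_, _, stdErr := net.ParseCIDR(cidr)
	fmt.Println(stdErr)

	// Forked parser: still accepted, as the pre-1.17 stdlib would have done.
	_, ipnet, err := netutils.ParseCIDRSloppy(cidr)
	fmt.Println(ipnet, err) // 10.0.0.0/8 <nil>
}
```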

View File

@ -18,7 +18,6 @@ package validation
import ( import (
"fmt" "fmt"
"net"
"strings" "strings"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
@ -33,6 +32,7 @@ import (
apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/apis/networking" "k8s.io/kubernetes/pkg/apis/networking"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
netutils "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer" utilpointer "k8s.io/utils/pointer"
) )
@ -327,7 +327,7 @@ func validateIngressRules(ingressRules []networking.IngressRule, fldPath *field.
for i, ih := range ingressRules { for i, ih := range ingressRules {
wildcardHost := false wildcardHost := false
if len(ih.Host) > 0 { if len(ih.Host) > 0 {
if isIP := (net.ParseIP(ih.Host) != nil); isIP { if isIP := (netutils.ParseIPSloppy(ih.Host) != nil); isIP {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, "must be a DNS name, not an IP address")) allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, "must be a DNS name, not an IP address"))
} }
// TODO: Ports and ips are allowed in the host part of a url // TODO: Ports and ips are allowed in the host part of a url

View File

@ -18,7 +18,6 @@ package endpointslicemirroring
import ( import (
"fmt" "fmt"
"net"
"strings" "strings"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
@ -30,6 +29,7 @@ import (
"k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/kubernetes/pkg/apis/discovery/validation" "k8s.io/kubernetes/pkg/apis/discovery/validation"
endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint" endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
netutils "k8s.io/utils/net"
) )
// addrTypePortMapKey is used to uniquely identify groups of endpoint ports and // addrTypePortMapKey is used to uniquely identify groups of endpoint ports and
@ -50,7 +50,7 @@ func (pk addrTypePortMapKey) addressType() discovery.AddressType {
} }
func getAddressType(address string) *discovery.AddressType { func getAddressType(address string) *discovery.AddressType {
ip := net.ParseIP(address) ip := netutils.ParseIPSloppy(address)
if ip == nil { if ip == nil {
return nil return nil
} }

View File

@ -25,6 +25,7 @@ import (
"net" "net"
"k8s.io/klog/v2" "k8s.io/klog/v2"
netutils "k8s.io/utils/net"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -80,7 +81,7 @@ func (a *adapter) Alias(ctx context.Context, node *v1.Node) (*net.IPNet, error)
klog.Warningf("Node %q has more than one alias assigned (%v), defaulting to the first", node.Name, cidrs) klog.Warningf("Node %q has more than one alias assigned (%v), defaulting to the first", node.Name, cidrs)
} }
_, cidrRange, err := net.ParseCIDR(cidrs[0]) _, cidrRange, err := netutils.ParseCIDRSloppy(cidrs[0])
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -24,6 +24,7 @@ import (
"k8s.io/component-base/metrics/testutil" "k8s.io/component-base/metrics/testutil"
"k8s.io/klog/v2" "k8s.io/klog/v2"
netutils "k8s.io/utils/net"
) )
func TestCIDRSetFullyAllocated(t *testing.T) { func TestCIDRSetFullyAllocated(t *testing.T) {
@ -47,7 +48,7 @@ func TestCIDRSetFullyAllocated(t *testing.T) {
}, },
} }
for _, tc := range cases { for _, tc := range cases {
_, clusterCIDR, _ := net.ParseCIDR(tc.clusterCIDRStr) _, clusterCIDR, _ := netutils.ParseCIDRSloppy(tc.clusterCIDRStr)
a, err := NewCIDRSet(clusterCIDR, tc.subNetMaskSize) a, err := NewCIDRSet(clusterCIDR, tc.subNetMaskSize)
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v for %v", err, tc.description) t.Fatalf("unexpected error: %v for %v", err, tc.description)
@ -198,7 +199,7 @@ func TestIndexToCIDRBlock(t *testing.T) {
}, },
} }
for _, tc := range cases { for _, tc := range cases {
_, clusterCIDR, _ := net.ParseCIDR(tc.clusterCIDRStr) _, clusterCIDR, _ := netutils.ParseCIDRSloppy(tc.clusterCIDRStr)
a, err := NewCIDRSet(clusterCIDR, tc.subnetMaskSize) a, err := NewCIDRSet(clusterCIDR, tc.subnetMaskSize)
if err != nil { if err != nil {
t.Fatalf("error for %v ", tc.description) t.Fatalf("error for %v ", tc.description)
@ -225,7 +226,7 @@ func TestCIDRSet_RandomishAllocation(t *testing.T) {
}, },
} }
for _, tc := range cases { for _, tc := range cases {
_, clusterCIDR, _ := net.ParseCIDR(tc.clusterCIDRStr) _, clusterCIDR, _ := netutils.ParseCIDRSloppy(tc.clusterCIDRStr)
a, err := NewCIDRSet(clusterCIDR, 24) a, err := NewCIDRSet(clusterCIDR, 24)
if err != nil { if err != nil {
t.Fatalf("Error allocating CIDRSet for %v", tc.description) t.Fatalf("Error allocating CIDRSet for %v", tc.description)
@ -286,7 +287,7 @@ func TestCIDRSet_AllocationOccupied(t *testing.T) {
}, },
} }
for _, tc := range cases { for _, tc := range cases {
_, clusterCIDR, _ := net.ParseCIDR(tc.clusterCIDRStr) _, clusterCIDR, _ := netutils.ParseCIDRSloppy(tc.clusterCIDRStr)
a, err := NewCIDRSet(clusterCIDR, 24) a, err := NewCIDRSet(clusterCIDR, 24)
if err != nil { if err != nil {
t.Fatalf("Error allocating CIDRSet for %v", tc.description) t.Fatalf("Error allocating CIDRSet for %v", tc.description)
@ -399,7 +400,7 @@ func TestDoubleOccupyRelease(t *testing.T) {
// operations have been executed. // operations have been executed.
numAllocatable24s := (1 << 8) - 3 numAllocatable24s := (1 << 8) - 3
_, clusterCIDR, _ := net.ParseCIDR(clusterCIDRStr) _, clusterCIDR, _ := netutils.ParseCIDRSloppy(clusterCIDRStr)
a, err := NewCIDRSet(clusterCIDR, 24) a, err := NewCIDRSet(clusterCIDR, 24)
if err != nil { if err != nil {
t.Fatalf("Error allocating CIDRSet") t.Fatalf("Error allocating CIDRSet")
@ -407,7 +408,7 @@ func TestDoubleOccupyRelease(t *testing.T) {
// Execute the operations // Execute the operations
for _, op := range operations { for _, op := range operations {
_, cidr, _ := net.ParseCIDR(op.cidrStr) _, cidr, _ := netutils.ParseCIDRSloppy(op.cidrStr)
switch op.operation { switch op.operation {
case "occupy": case "occupy":
a.Occupy(cidr) a.Occupy(cidr)
@ -557,7 +558,7 @@ func TestGetBitforCIDR(t *testing.T) {
} }
for _, tc := range cases { for _, tc := range cases {
_, clusterCIDR, err := net.ParseCIDR(tc.clusterCIDRStr) _, clusterCIDR, err := netutils.ParseCIDRSloppy(tc.clusterCIDRStr)
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v for %v", err, tc.description) t.Fatalf("unexpected error: %v for %v", err, tc.description)
} }
@ -566,7 +567,7 @@ func TestGetBitforCIDR(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Error allocating CIDRSet for %v", tc.description) t.Fatalf("Error allocating CIDRSet for %v", tc.description)
} }
_, subnetCIDR, err := net.ParseCIDR(tc.subNetCIDRStr) _, subnetCIDR, err := netutils.ParseCIDRSloppy(tc.subNetCIDRStr)
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v for %v", err, tc.description) t.Fatalf("unexpected error: %v for %v", err, tc.description)
} }
@ -727,7 +728,7 @@ func TestOccupy(t *testing.T) {
} }
for _, tc := range cases { for _, tc := range cases {
_, clusterCIDR, err := net.ParseCIDR(tc.clusterCIDRStr) _, clusterCIDR, err := netutils.ParseCIDRSloppy(tc.clusterCIDRStr)
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v for %v", err, tc.description) t.Fatalf("unexpected error: %v for %v", err, tc.description)
} }
@ -737,7 +738,7 @@ func TestOccupy(t *testing.T) {
t.Fatalf("Error allocating CIDRSet for %v", tc.description) t.Fatalf("Error allocating CIDRSet for %v", tc.description)
} }
_, subnetCIDR, err := net.ParseCIDR(tc.subNetCIDRStr) _, subnetCIDR, err := netutils.ParseCIDRSloppy(tc.subNetCIDRStr)
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v for %v", err, tc.description) t.Fatalf("unexpected error: %v for %v", err, tc.description)
} }
@ -796,7 +797,7 @@ func TestCIDRSetv6(t *testing.T) {
} }
for _, tc := range cases { for _, tc := range cases {
t.Run(tc.description, func(t *testing.T) { t.Run(tc.description, func(t *testing.T) {
_, clusterCIDR, _ := net.ParseCIDR(tc.clusterCIDRStr) _, clusterCIDR, _ := netutils.ParseCIDRSloppy(tc.clusterCIDRStr)
a, err := NewCIDRSet(clusterCIDR, tc.subNetMaskSize) a, err := NewCIDRSet(clusterCIDR, tc.subNetMaskSize)
if gotErr := err != nil; gotErr != tc.expectErr { if gotErr := err != nil; gotErr != tc.expectErr {
t.Fatalf("NewCIDRSet(%v, %v) = %v, %v; gotErr = %t, want %t", clusterCIDR, tc.subNetMaskSize, a, err, gotErr, tc.expectErr) t.Fatalf("NewCIDRSet(%v, %v) = %v, %v; gotErr = %t, want %t", clusterCIDR, tc.subNetMaskSize, a, err, gotErr, tc.expectErr)
@ -834,7 +835,7 @@ func TestCIDRSetv6(t *testing.T) {
func TestCidrSetMetrics(t *testing.T) { func TestCidrSetMetrics(t *testing.T) {
cidr := "10.0.0.0/16" cidr := "10.0.0.0/16"
_, clusterCIDR, _ := net.ParseCIDR(cidr) _, clusterCIDR, _ := netutils.ParseCIDRSloppy(cidr)
// We have 256 free cidrs // We have 256 free cidrs
a, err := NewCIDRSet(clusterCIDR, 24) a, err := NewCIDRSet(clusterCIDR, 24)
if err != nil { if err != nil {
@ -880,7 +881,7 @@ func TestCidrSetMetrics(t *testing.T) {
func TestCidrSetMetricsHistogram(t *testing.T) { func TestCidrSetMetricsHistogram(t *testing.T) {
cidr := "10.0.0.0/16" cidr := "10.0.0.0/16"
_, clusterCIDR, _ := net.ParseCIDR(cidr) _, clusterCIDR, _ := netutils.ParseCIDRSloppy(cidr)
// We have 256 free cidrs // We have 256 free cidrs
a, err := NewCIDRSet(clusterCIDR, 24) a, err := NewCIDRSet(clusterCIDR, 24)
if err != nil { if err != nil {
@ -890,7 +891,7 @@ func TestCidrSetMetricsHistogram(t *testing.T) {
// Allocate half of the range // Allocate half of the range
// Occupy does not update the nextCandidate // Occupy does not update the nextCandidate
_, halfClusterCIDR, _ := net.ParseCIDR("10.0.0.0/17") _, halfClusterCIDR, _ := netutils.ParseCIDRSloppy("10.0.0.0/17")
a.Occupy(halfClusterCIDR) a.Occupy(halfClusterCIDR)
em := testMetrics{ em := testMetrics{
usage: 0.5, usage: 0.5,
@ -917,7 +918,7 @@ func TestCidrSetMetricsHistogram(t *testing.T) {
func TestCidrSetMetricsDual(t *testing.T) { func TestCidrSetMetricsDual(t *testing.T) {
// create IPv4 cidrSet // create IPv4 cidrSet
cidrIPv4 := "10.0.0.0/16" cidrIPv4 := "10.0.0.0/16"
_, clusterCIDRv4, _ := net.ParseCIDR(cidrIPv4) _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy(cidrIPv4)
a, err := NewCIDRSet(clusterCIDRv4, 24) a, err := NewCIDRSet(clusterCIDRv4, 24)
if err != nil { if err != nil {
t.Fatalf("unexpected error creating CidrSet: %v", err) t.Fatalf("unexpected error creating CidrSet: %v", err)
@ -925,7 +926,7 @@ func TestCidrSetMetricsDual(t *testing.T) {
clearMetrics(map[string]string{"clusterCIDR": cidrIPv4}) clearMetrics(map[string]string{"clusterCIDR": cidrIPv4})
// create IPv6 cidrSet // create IPv6 cidrSet
cidrIPv6 := "2001:db8::/48" cidrIPv6 := "2001:db8::/48"
_, clusterCIDRv6, _ := net.ParseCIDR(cidrIPv6) _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy(cidrIPv6)
b, err := NewCIDRSet(clusterCIDRv6, 64) b, err := NewCIDRSet(clusterCIDRv6, 64)
if err != nil { if err != nil {
t.Fatalf("unexpected error creating CidrSet: %v", err) t.Fatalf("unexpected error creating CidrSet: %v", err)
@ -1012,7 +1013,7 @@ func expectMetrics(t *testing.T, label string, em testMetrics) {
// Benchmarks // Benchmarks
func benchmarkAllocateAllIPv6(cidr string, subnetMaskSize int, b *testing.B) { func benchmarkAllocateAllIPv6(cidr string, subnetMaskSize int, b *testing.B) {
_, clusterCIDR, _ := net.ParseCIDR(cidr) _, clusterCIDR, _ := netutils.ParseCIDRSloppy(cidr)
a, _ := NewCIDRSet(clusterCIDR, subnetMaskSize) a, _ := NewCIDRSet(clusterCIDR, subnetMaskSize)
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
// Allocate the whole range + 1 // Allocate the whole range + 1

View File

@ -318,7 +318,7 @@ func needPodCIDRsUpdate(node *v1.Node, podCIDRs []*net.IPNet) (bool, error) {
if node.Spec.PodCIDR == "" { if node.Spec.PodCIDR == "" {
return true, nil return true, nil
} }
_, nodePodCIDR, err := net.ParseCIDR(node.Spec.PodCIDR) _, nodePodCIDR, err := netutils.ParseCIDRSloppy(node.Spec.PodCIDR)
if err != nil { if err != nil {
klog.ErrorS(err, "Found invalid node.Spec.PodCIDR", "node.Spec.PodCIDR", node.Spec.PodCIDR) klog.ErrorS(err, "Found invalid node.Spec.PodCIDR", "node.Spec.PodCIDR", node.Spec.PodCIDR)
// We will try to overwrite with new CIDR(s) // We will try to overwrite with new CIDR(s)

View File

@ -25,8 +25,9 @@ import (
"time" "time"
"k8s.io/klog/v2" "k8s.io/klog/v2"
netutils "k8s.io/utils/net"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
informers "k8s.io/client-go/informers/core/v1" informers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
@ -119,7 +120,7 @@ func (c *Controller) Start(nodeInformer informers.NodeInformer) error {
} }
for _, node := range nodes.Items { for _, node := range nodes.Items {
if node.Spec.PodCIDR != "" { if node.Spec.PodCIDR != "" {
_, cidrRange, err := net.ParseCIDR(node.Spec.PodCIDR) _, cidrRange, err := netutils.ParseCIDRSloppy(node.Spec.PodCIDR)
if err == nil { if err == nil {
c.set.Occupy(cidrRange) c.set.Occupy(cidrRange)
klog.V(3).Infof("Occupying CIDR for node %q (%v)", node.Name, node.Spec.PodCIDR) klog.V(3).Infof("Occupying CIDR for node %q (%v)", node.Name, node.Spec.PodCIDR)

View File

@ -21,8 +21,9 @@ import (
"net" "net"
"sync" "sync"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/klog/v2" "k8s.io/klog/v2"
netutils "k8s.io/utils/net"
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
@ -224,7 +225,7 @@ func (r *rangeAllocator) occupyCIDRs(node *v1.Node) error {
return nil return nil
} }
for idx, cidr := range node.Spec.PodCIDRs { for idx, cidr := range node.Spec.PodCIDRs {
_, podCIDR, err := net.ParseCIDR(cidr) _, podCIDR, err := netutils.ParseCIDRSloppy(cidr)
if err != nil { if err != nil {
return fmt.Errorf("failed to parse node %s, CIDR %s", node.Name, node.Spec.PodCIDR) return fmt.Errorf("failed to parse node %s, CIDR %s", node.Name, node.Spec.PodCIDR)
} }
@ -286,7 +287,7 @@ func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
} }
for idx, cidr := range node.Spec.PodCIDRs { for idx, cidr := range node.Spec.PodCIDRs {
_, podCIDR, err := net.ParseCIDR(cidr) _, podCIDR, err := netutils.ParseCIDRSloppy(cidr)
if err != nil { if err != nil {
return fmt.Errorf("failed to parse CIDR %s on Node %v: %v", cidr, node.Name, err) return fmt.Errorf("failed to parse CIDR %s on Node %v: %v", cidr, node.Name, err)
} }

View File

@ -30,6 +30,7 @@ import (
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/testutil" "k8s.io/kubernetes/pkg/controller/testutil"
netutils "k8s.io/utils/net"
) )
const testNodePollInterval = 10 * time.Millisecond const testNodePollInterval = 10 * time.Millisecond
@ -86,7 +87,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
return []*net.IPNet{clusterCIDRv4} return []*net.IPNet{clusterCIDRv4}
}(), }(),
ServiceCIDR: nil, ServiceCIDR: nil,
@ -111,8 +112,8 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8") _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/8")
return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
}(), }(),
ServiceCIDR: nil, ServiceCIDR: nil,
@ -140,7 +141,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
return []*net.IPNet{clusterCIDRv4} return []*net.IPNet{clusterCIDRv4}
}(), }(),
ServiceCIDR: nil, ServiceCIDR: nil,
@ -168,8 +169,8 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8") _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/8")
return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
}(), }(),
ServiceCIDR: nil, ServiceCIDR: nil,
@ -198,7 +199,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
return []*net.IPNet{clusterCIDRv4} return []*net.IPNet{clusterCIDRv4}
}(), }(),
ServiceCIDR: nil, ServiceCIDR: nil,
@ -227,7 +228,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
return []*net.IPNet{clusterCIDRv4} return []*net.IPNet{clusterCIDRv4}
}(), }(),
ServiceCIDR: nil, ServiceCIDR: nil,
@ -256,8 +257,8 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8") _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/8")
return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
}(), }(),
ServiceCIDR: nil, ServiceCIDR: nil,
@ -286,8 +287,8 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("10.10.0.0/16") _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.10.0.0/16")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/8") _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/8")
return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
}(), }(),
ServiceCIDR: nil, ServiceCIDR: nil,
@ -341,7 +342,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24") _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/24")
return []*net.IPNet{clusterCIDR} return []*net.IPNet{clusterCIDR}
}(), }(),
ServiceCIDR: nil, ServiceCIDR: nil,
@ -366,11 +367,11 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24") _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/24")
return []*net.IPNet{clusterCIDR} return []*net.IPNet{clusterCIDR}
}(), }(),
ServiceCIDR: func() *net.IPNet { ServiceCIDR: func() *net.IPNet {
_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26") _, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26")
return serviceCIDR return serviceCIDR
}(), }(),
SecondaryServiceCIDR: nil, SecondaryServiceCIDR: nil,
@ -395,11 +396,11 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/24") _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/24")
return []*net.IPNet{clusterCIDR} return []*net.IPNet{clusterCIDR}
}(), }(),
ServiceCIDR: func() *net.IPNet { ServiceCIDR: func() *net.IPNet {
_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26") _, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26")
return serviceCIDR return serviceCIDR
}(), }(),
SecondaryServiceCIDR: nil, SecondaryServiceCIDR: nil,
@ -426,12 +427,12 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("127.123.234.0/8") _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("127.123.234.0/8")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/84") _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/84")
return []*net.IPNet{clusterCIDRv4, clusterCIDRv6} return []*net.IPNet{clusterCIDRv4, clusterCIDRv6}
}(), }(),
ServiceCIDR: func() *net.IPNet { ServiceCIDR: func() *net.IPNet {
_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26") _, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26")
return serviceCIDR return serviceCIDR
}(), }(),
SecondaryServiceCIDR: nil, SecondaryServiceCIDR: nil,
@ -452,12 +453,12 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("127.123.234.0/8") _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("127.123.234.0/8")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/84") _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/84")
return []*net.IPNet{clusterCIDRv6, clusterCIDRv4} return []*net.IPNet{clusterCIDRv6, clusterCIDRv4}
}(), }(),
ServiceCIDR: func() *net.IPNet { ServiceCIDR: func() *net.IPNet {
_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26") _, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26")
return serviceCIDR return serviceCIDR
}(), }(),
SecondaryServiceCIDR: nil, SecondaryServiceCIDR: nil,
@ -478,13 +479,13 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDRv4, _ := net.ParseCIDR("127.123.234.0/8") _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("127.123.234.0/8")
_, clusterCIDRv6, _ := net.ParseCIDR("ace:cab:deca::/84") _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("ace:cab:deca::/84")
_, clusterCIDRv4_2, _ := net.ParseCIDR("10.0.0.0/8") _, clusterCIDRv4_2, _ := netutils.ParseCIDRSloppy("10.0.0.0/8")
return []*net.IPNet{clusterCIDRv4, clusterCIDRv6, clusterCIDRv4_2} return []*net.IPNet{clusterCIDRv4, clusterCIDRv6, clusterCIDRv4_2}
}(), }(),
ServiceCIDR: func() *net.IPNet { ServiceCIDR: func() *net.IPNet {
_, serviceCIDR, _ := net.ParseCIDR("127.123.234.0/26") _, serviceCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/26")
return serviceCIDR return serviceCIDR
}(), }(),
SecondaryServiceCIDR: nil, SecondaryServiceCIDR: nil,
@ -521,7 +522,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("10.10.0.0/22") _, clusterCIDR, _ := netutils.ParseCIDRSloppy("10.10.0.0/22")
return []*net.IPNet{clusterCIDR} return []*net.IPNet{clusterCIDR}
}(), }(),
ServiceCIDR: nil, ServiceCIDR: nil,
@ -557,7 +558,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
// pre allocate the cidrs as per the test // pre allocate the cidrs as per the test
for idx, allocatedList := range tc.allocatedCIDRs { for idx, allocatedList := range tc.allocatedCIDRs {
for _, allocated := range allocatedList { for _, allocated := range allocatedList {
_, cidr, err := net.ParseCIDR(allocated) _, cidr, err := netutils.ParseCIDRSloppy(allocated)
if err != nil { if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err) t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
} }
@ -623,7 +624,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28") _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/28")
return []*net.IPNet{clusterCIDR} return []*net.IPNet{clusterCIDR}
}(), }(),
ServiceCIDR: nil, ServiceCIDR: nil,
@ -654,7 +655,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
// this is a bit of white box testing // this is a bit of white box testing
for setIdx, allocatedList := range tc.allocatedCIDRs { for setIdx, allocatedList := range tc.allocatedCIDRs {
for _, allocated := range allocatedList { for _, allocated := range allocatedList {
_, cidr, err := net.ParseCIDR(allocated) _, cidr, err := netutils.ParseCIDRSloppy(allocated)
if err != nil { if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, cidr, err) t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, cidr, err)
} }
@ -727,7 +728,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28") _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/28")
return []*net.IPNet{clusterCIDR} return []*net.IPNet{clusterCIDR}
}(), }(),
ServiceCIDR: nil, ServiceCIDR: nil,
@ -759,7 +760,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
}, },
allocatorParams: CIDRAllocatorParams{ allocatorParams: CIDRAllocatorParams{
ClusterCIDRs: func() []*net.IPNet { ClusterCIDRs: func() []*net.IPNet {
_, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/28") _, clusterCIDR, _ := netutils.ParseCIDRSloppy("127.123.234.0/28")
return []*net.IPNet{clusterCIDR} return []*net.IPNet{clusterCIDR}
}(), }(),
ServiceCIDR: nil, ServiceCIDR: nil,
@ -796,7 +797,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
// this is a bit of white box testing // this is a bit of white box testing
for setIdx, allocatedList := range tc.allocatedCIDRs { for setIdx, allocatedList := range tc.allocatedCIDRs {
for _, allocated := range allocatedList { for _, allocated := range allocatedList {
_, cidr, err := net.ParseCIDR(allocated) _, cidr, err := netutils.ParseCIDRSloppy(allocated)
if err != nil { if err != nil {
t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err) t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
} }

View File

@ -23,8 +23,9 @@ import (
"time" "time"
"k8s.io/klog/v2" "k8s.io/klog/v2"
netutils "k8s.io/utils/net"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset" "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
) )
@ -281,7 +282,7 @@ func (op *updateOp) updateAliasFromNode(ctx context.Context, sync *NodeSync, nod
return fmt.Errorf("cannot sync to cloud in mode %q", sync.mode) return fmt.Errorf("cannot sync to cloud in mode %q", sync.mode)
} }
_, aliasRange, err := net.ParseCIDR(node.Spec.PodCIDR) _, aliasRange, err := netutils.ParseCIDRSloppy(node.Spec.PodCIDR)
if err != nil { if err != nil {
klog.Errorf("Could not parse PodCIDR (%q) for node %q: %v", klog.Errorf("Could not parse PodCIDR (%q) for node %q: %v",
node.Spec.PodCIDR, node.Name, err) node.Spec.PodCIDR, node.Name, err)
@ -364,7 +365,7 @@ func (op *deleteOp) run(sync *NodeSync) error {
return nil return nil
} }
_, cidrRange, err := net.ParseCIDR(op.node.Spec.PodCIDR) _, cidrRange, err := netutils.ParseCIDRSloppy(op.node.Spec.PodCIDR)
if err != nil { if err != nil {
klog.Errorf("Deleted node %q has an invalid podCIDR %q: %v", klog.Errorf("Deleted node %q has an invalid podCIDR %q: %v",
op.node.Name, op.node.Spec.PodCIDR, err) op.node.Name, op.node.Spec.PodCIDR, err)

View File

@ -28,12 +28,13 @@ import (
"k8s.io/klog/v2" "k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset" "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test" "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"
netutils "k8s.io/utils/net"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
) )
var ( var (
_, clusterCIDRRange, _ = net.ParseCIDR("10.1.0.0/16") _, clusterCIDRRange, _ = netutils.ParseCIDRSloppy("10.1.0.0/16")
) )
type fakeEvent struct { type fakeEvent struct {

View File

@ -18,12 +18,14 @@ package test
import ( import (
"net" "net"
netutils "k8s.io/utils/net"
) )
// MustParseCIDR returns the CIDR range parsed from s or panics if the string // MustParseCIDR returns the CIDR range parsed from s or panics if the string
// cannot be parsed. // cannot be parsed.
func MustParseCIDR(s string) *net.IPNet { func MustParseCIDR(s string) *net.IPNet {
_, ret, err := net.ParseCIDR(s) _, ret, err := netutils.ParseCIDRSloppy(s)
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@ -89,8 +89,8 @@ func TestNewNodeIpamControllerWithCIDRMasks(t *testing.T) {
} { } {
t.Run(tc.desc, func(t *testing.T) { t.Run(tc.desc, func(t *testing.T) {
clusterCidrs, _ := netutils.ParseCIDRs(strings.Split(tc.clusterCIDR, ",")) clusterCidrs, _ := netutils.ParseCIDRs(strings.Split(tc.clusterCIDR, ","))
_, serviceCIDRIpNet, _ := net.ParseCIDR(tc.serviceCIDR) _, serviceCIDRIpNet, _ := netutils.ParseCIDRSloppy(tc.serviceCIDR)
_, secondaryServiceCIDRIpNet, _ := net.ParseCIDR(tc.secondaryServiceCIDR) _, secondaryServiceCIDRIpNet, _ := netutils.ParseCIDRSloppy(tc.secondaryServiceCIDR)
if os.Getenv("EXIT_ON_FATAL") == "1" { if os.Getenv("EXIT_ON_FATAL") == "1" {
// This is the subprocess which runs the actual code. // This is the subprocess which runs the actual code.

View File

@ -17,7 +17,6 @@ limitations under the License.
package controlplane package controlplane
import ( import (
"net"
"reflect" "reflect"
"testing" "testing"
@ -28,6 +27,7 @@ import (
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing" core "k8s.io/client-go/testing"
"k8s.io/kubernetes/pkg/controlplane/reconcilers" "k8s.io/kubernetes/pkg/controlplane/reconcilers"
netutils "k8s.io/utils/net"
) )
func TestReconcileEndpoints(t *testing.T) { func TestReconcileEndpoints(t *testing.T) {
@ -401,7 +401,7 @@ func TestReconcileEndpoints(t *testing.T) {
} }
epAdapter := reconcilers.NewEndpointsAdapter(fakeClient.CoreV1(), nil) epAdapter := reconcilers.NewEndpointsAdapter(fakeClient.CoreV1(), nil)
reconciler := reconcilers.NewMasterCountEndpointReconciler(test.additionalMasters+1, epAdapter) reconciler := reconcilers.NewMasterCountEndpointReconciler(test.additionalMasters+1, epAdapter)
err := reconciler.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, true) err := reconciler.ReconcileEndpoints(test.serviceName, netutils.ParseIPSloppy(test.ip), test.endpointPorts, true)
if err != nil { if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err) t.Errorf("case %q: unexpected error: %v", test.testName, err)
} }
@ -520,7 +520,7 @@ func TestReconcileEndpoints(t *testing.T) {
} }
epAdapter := reconcilers.NewEndpointsAdapter(fakeClient.CoreV1(), nil) epAdapter := reconcilers.NewEndpointsAdapter(fakeClient.CoreV1(), nil)
reconciler := reconcilers.NewMasterCountEndpointReconciler(test.additionalMasters+1, epAdapter) reconciler := reconcilers.NewMasterCountEndpointReconciler(test.additionalMasters+1, epAdapter)
err := reconciler.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, false) err := reconciler.ReconcileEndpoints(test.serviceName, netutils.ParseIPSloppy(test.ip), test.endpointPorts, false)
if err != nil { if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err) t.Errorf("case %q: unexpected error: %v", test.testName, err)
} }
@ -585,7 +585,7 @@ func TestEmptySubsets(t *testing.T) {
endpointPorts := []corev1.EndpointPort{ endpointPorts := []corev1.EndpointPort{
{Name: "foo", Port: 8080, Protocol: "TCP"}, {Name: "foo", Port: 8080, Protocol: "TCP"},
} }
err := reconciler.RemoveEndpoints("foo", net.ParseIP("1.2.3.4"), endpointPorts) err := reconciler.RemoveEndpoints("foo", netutils.ParseIPSloppy("1.2.3.4"), endpointPorts)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -631,7 +631,7 @@ func TestCreateOrUpdateMasterService(t *testing.T) {
master := Controller{} master := Controller{}
fakeClient := fake.NewSimpleClientset() fakeClient := fake.NewSimpleClientset()
master.ServiceClient = fakeClient.CoreV1() master.ServiceClient = fakeClient.CoreV1()
master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, false) master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, netutils.ParseIPSloppy("1.2.3.4"), test.servicePorts, test.serviceType, false)
creates := []core.CreateAction{} creates := []core.CreateAction{}
for _, action := range fakeClient.Actions() { for _, action := range fakeClient.Actions() {
if action.GetVerb() == "create" { if action.GetVerb() == "create" {
@ -913,7 +913,7 @@ func TestCreateOrUpdateMasterService(t *testing.T) {
master := Controller{} master := Controller{}
fakeClient := fake.NewSimpleClientset(test.service) fakeClient := fake.NewSimpleClientset(test.service)
master.ServiceClient = fakeClient.CoreV1() master.ServiceClient = fakeClient.CoreV1()
err := master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, true) err := master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, netutils.ParseIPSloppy("1.2.3.4"), test.servicePorts, test.serviceType, true)
if err != nil { if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err) t.Errorf("case %q: unexpected error: %v", test.testName, err)
} }
@ -972,7 +972,7 @@ func TestCreateOrUpdateMasterService(t *testing.T) {
master := Controller{} master := Controller{}
fakeClient := fake.NewSimpleClientset(test.service) fakeClient := fake.NewSimpleClientset(test.service)
master.ServiceClient = fakeClient.CoreV1() master.ServiceClient = fakeClient.CoreV1()
err := master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, false) err := master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, netutils.ParseIPSloppy("1.2.3.4"), test.servicePorts, test.serviceType, false)
if err != nil { if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err) t.Errorf("case %q: unexpected error: %v", test.testName, err)
} }

View File

@ -57,6 +57,7 @@ import (
certificatesrest "k8s.io/kubernetes/pkg/registry/certificates/rest" certificatesrest "k8s.io/kubernetes/pkg/registry/certificates/rest"
corerest "k8s.io/kubernetes/pkg/registry/core/rest" corerest "k8s.io/kubernetes/pkg/registry/core/rest"
"k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/registry/registrytest"
netutils "k8s.io/utils/net"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@ -72,7 +73,7 @@ func setUp(t *testing.T) (*etcd3testing.EtcdTestServer, Config, *assert.Assertio
APIServerServicePort: 443, APIServerServicePort: 443,
MasterCount: 1, MasterCount: 1,
EndpointReconcilerType: reconcilers.MasterCountReconcilerType, EndpointReconcilerType: reconcilers.MasterCountReconcilerType,
ServiceIPRange: net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(24, 32)}, ServiceIPRange: net.IPNet{IP: netutils.ParseIPSloppy("10.0.0.0"), Mask: net.CIDRMask(24, 32)},
}, },
} }
@ -101,7 +102,7 @@ func setUp(t *testing.T) (*etcd3testing.EtcdTestServer, Config, *assert.Assertio
config.GenericConfig.Version = &kubeVersion config.GenericConfig.Version = &kubeVersion
config.ExtraConfig.StorageFactory = storageFactory config.ExtraConfig.StorageFactory = storageFactory
config.GenericConfig.LoopbackClientConfig = &restclient.Config{APIPath: "/api", ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}} config.GenericConfig.LoopbackClientConfig = &restclient.Config{APIPath: "/api", ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs}}
config.GenericConfig.PublicAddress = net.ParseIP("192.168.10.4") config.GenericConfig.PublicAddress = netutils.ParseIPSloppy("192.168.10.4")
config.GenericConfig.LegacyAPIGroupPrefixes = sets.NewString("/api") config.GenericConfig.LegacyAPIGroupPrefixes = sets.NewString("/api")
config.ExtraConfig.KubeletClientConfig = kubeletclient.KubeletClientConfig{Port: 10250} config.ExtraConfig.KubeletClientConfig = kubeletclient.KubeletClientConfig{Port: 10250}
config.ExtraConfig.ProxyTransport = utilnet.SetTransportDefaults(&http.Transport{ config.ExtraConfig.ProxyTransport = utilnet.SetTransportDefaults(&http.Transport{

View File

@ -23,7 +23,6 @@ https://github.com/openshift/origin/blob/bb340c5dd5ff72718be86fb194dedc0faed7f4c
import ( import (
"context" "context"
"net"
"reflect" "reflect"
"testing" "testing"
@ -31,6 +30,7 @@ import (
discoveryv1 "k8s.io/api/discovery/v1" discoveryv1 "k8s.io/api/discovery/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
netutils "k8s.io/utils/net"
) )
type fakeLeases struct { type fakeLeases struct {
@ -459,7 +459,7 @@ func TestLeaseEndpointReconciler(t *testing.T) {
epAdapter := EndpointsAdapter{endpointClient: clientset.CoreV1()} epAdapter := EndpointsAdapter{endpointClient: clientset.CoreV1()}
r := NewLeaseEndpointReconciler(epAdapter, fakeLeases) r := NewLeaseEndpointReconciler(epAdapter, fakeLeases)
err := r.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, true) err := r.ReconcileEndpoints(test.serviceName, netutils.ParseIPSloppy(test.ip), test.endpointPorts, true)
if err != nil { if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err) t.Errorf("case %q: unexpected error: %v", test.testName, err)
} }
@ -560,7 +560,7 @@ func TestLeaseEndpointReconciler(t *testing.T) {
} }
epAdapter := EndpointsAdapter{endpointClient: clientset.CoreV1()} epAdapter := EndpointsAdapter{endpointClient: clientset.CoreV1()}
r := NewLeaseEndpointReconciler(epAdapter, fakeLeases) r := NewLeaseEndpointReconciler(epAdapter, fakeLeases)
err := r.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, false) err := r.ReconcileEndpoints(test.serviceName, netutils.ParseIPSloppy(test.ip), test.endpointPorts, false)
if err != nil { if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err) t.Errorf("case %q: unexpected error: %v", test.testName, err)
} }
@ -680,7 +680,7 @@ func TestLeaseRemoveEndpoints(t *testing.T) {
} }
epAdapter := EndpointsAdapter{endpointClient: clientset.CoreV1()} epAdapter := EndpointsAdapter{endpointClient: clientset.CoreV1()}
r := NewLeaseEndpointReconciler(epAdapter, fakeLeases) r := NewLeaseEndpointReconciler(epAdapter, fakeLeases)
err := r.RemoveEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts) err := r.RemoveEndpoints(test.serviceName, netutils.ParseIPSloppy(test.ip), test.endpointPorts)
if err != nil { if err != nil {
t.Errorf("case %q: unexpected error: %v", test.testName, err) t.Errorf("case %q: unexpected error: %v", test.testName, err)
} }

View File

@ -20,13 +20,14 @@ import (
"net" "net"
utilnet "k8s.io/apimachinery/pkg/util/net" utilnet "k8s.io/apimachinery/pkg/util/net"
netutils "k8s.io/utils/net"
) )
// DefaultServiceNodePortRange is the default port range for NodePort services. // DefaultServiceNodePortRange is the default port range for NodePort services.
var DefaultServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768} var DefaultServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768}
// DefaultServiceIPCIDR is a CIDR notation of IP range from which to allocate service cluster IPs // DefaultServiceIPCIDR is a CIDR notation of IP range from which to allocate service cluster IPs
var DefaultServiceIPCIDR = net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(24, 32)} var DefaultServiceIPCIDR = net.IPNet{IP: netutils.ParseIPSloppy("10.0.0.0"), Mask: net.CIDRMask(24, 32)}
// DefaultEtcdPathPrefix is the default key prefix of etcd for API Server // DefaultEtcdPathPrefix is the default key prefix of etcd for API Server
const DefaultEtcdPathPrefix = "/registry" const DefaultEtcdPathPrefix = "/registry"

View File

@ -18,16 +18,15 @@ limitations under the License.
package options package options
import ( import (
"net"
genericoptions "k8s.io/apiserver/pkg/server/options" genericoptions "k8s.io/apiserver/pkg/server/options"
netutils "k8s.io/utils/net"
) )
// NewSecureServingOptions gives default values for the kube-apiserver which are not the options wanted by // NewSecureServingOptions gives default values for the kube-apiserver which are not the options wanted by
// "normal" API servers running on the platform // "normal" API servers running on the platform
func NewSecureServingOptions() *genericoptions.SecureServingOptionsWithLoopback { func NewSecureServingOptions() *genericoptions.SecureServingOptionsWithLoopback {
o := genericoptions.SecureServingOptions{ o := genericoptions.SecureServingOptions{
BindAddress: net.ParseIP("0.0.0.0"), BindAddress: netutils.ParseIPSloppy("0.0.0.0"),
BindPort: 6443, BindPort: 6443,
Required: true, Required: true,
ServerCert: genericoptions.GeneratableKeyCert{ ServerCert: genericoptions.GeneratableKeyCert{

View File

@ -35,6 +35,7 @@ import (
"k8s.io/component-base/metrics/legacyregistry" "k8s.io/component-base/metrics/legacyregistry"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/metrics" "k8s.io/kubernetes/pkg/kubelet/metrics"
netutils "k8s.io/utils/net"
) )
// NewKubeletServerCertificateManager creates a certificate manager for the kubelet when retrieving a server certificate // NewKubeletServerCertificateManager creates a certificate manager for the kubelet when retrieving a server certificate
@ -159,13 +160,13 @@ func addressesToHostnamesAndIPs(addresses []v1.NodeAddress) (dnsNames []string,
switch address.Type { switch address.Type {
case v1.NodeHostName: case v1.NodeHostName:
if ip := net.ParseIP(address.Address); ip != nil { if ip := netutils.ParseIPSloppy(address.Address); ip != nil {
seenIPs[address.Address] = true seenIPs[address.Address] = true
} else { } else {
seenDNSNames[address.Address] = true seenDNSNames[address.Address] = true
} }
case v1.NodeExternalIP, v1.NodeInternalIP: case v1.NodeExternalIP, v1.NodeInternalIP:
if ip := net.ParseIP(address.Address); ip != nil { if ip := netutils.ParseIPSloppy(address.Address); ip != nil {
seenIPs[address.Address] = true seenIPs[address.Address] = true
} }
case v1.NodeExternalDNS, v1.NodeInternalDNS: case v1.NodeExternalDNS, v1.NodeInternalDNS:
@ -177,7 +178,7 @@ func addressesToHostnamesAndIPs(addresses []v1.NodeAddress) (dnsNames []string,
dnsNames = append(dnsNames, dnsName) dnsNames = append(dnsNames, dnsName)
} }
for ip := range seenIPs { for ip := range seenIPs {
ips = append(ips, net.ParseIP(ip)) ips = append(ips, netutils.ParseIPSloppy(ip))
} }
// return in stable order // return in stable order
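As a rough sketch of the classification pattern in this hunk (anything that parses as an IP becomes an IP SAN, everything else a DNS name); the helper name splitAddresses and the sample inputs are illustrative, not part of the change.

package main

import (
	"fmt"
	"net"

	netutils "k8s.io/utils/net"
)

// splitAddresses classifies raw node address strings: values that parse as an
// IP are collected as IPs, all other values are kept as DNS names.
func splitAddresses(addrs []string) (dnsNames []string, ips []net.IP) {
	for _, a := range addrs {
		if ip := netutils.ParseIPSloppy(a); ip != nil {
			ips = append(ips, ip)
		} else {
			dnsNames = append(dnsNames, a)
		}
	}
	return dnsNames, ips
}

func main() {
	dns, ips := splitAddresses([]string{"node-1.example", "10.1.1.1"})
	fmt.Println(dns, ips) // [node-1.example] [10.1.1.1]
}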

View File

@ -21,7 +21,8 @@ import (
"reflect" "reflect"
"testing" "testing"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
netutils "k8s.io/utils/net"
) )
func TestAddressesToHostnamesAndIPs(t *testing.T) { func TestAddressesToHostnamesAndIPs(t *testing.T) {
@ -62,7 +63,7 @@ func TestAddressesToHostnamesAndIPs(t *testing.T) {
{Type: v1.NodeExternalIP, Address: "1.1.1.1"}, {Type: v1.NodeExternalIP, Address: "1.1.1.1"},
}, },
wantDNSNames: []string{"hostname"}, wantDNSNames: []string{"hostname"},
wantIPs: []net.IP{net.ParseIP("1.1.1.1")}, wantIPs: []net.IP{netutils.ParseIPSloppy("1.1.1.1")},
}, },
{ {
name: "order values", name: "order values",
@ -75,7 +76,7 @@ func TestAddressesToHostnamesAndIPs(t *testing.T) {
{Type: v1.NodeInternalIP, Address: "3.3.3.3"}, {Type: v1.NodeInternalIP, Address: "3.3.3.3"},
}, },
wantDNSNames: []string{"hostname-1", "hostname-2", "hostname-3"}, wantDNSNames: []string{"hostname-1", "hostname-2", "hostname-3"},
wantIPs: []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("2.2.2.2"), net.ParseIP("3.3.3.3")}, wantIPs: []net.IP{netutils.ParseIPSloppy("1.1.1.1"), netutils.ParseIPSloppy("2.2.2.2"), netutils.ParseIPSloppy("3.3.3.3")},
}, },
{ {
name: "handle IP and DNS hostnames", name: "handle IP and DNS hostnames",
@ -84,7 +85,7 @@ func TestAddressesToHostnamesAndIPs(t *testing.T) {
{Type: v1.NodeHostName, Address: "1.1.1.1"}, {Type: v1.NodeHostName, Address: "1.1.1.1"},
}, },
wantDNSNames: []string{"hostname"}, wantDNSNames: []string{"hostname"},
wantIPs: []net.IP{net.ParseIP("1.1.1.1")}, wantIPs: []net.IP{netutils.ParseIPSloppy("1.1.1.1")},
}, },
} }
for _, tt := range tests { for _, tt := range tests {

View File

@ -21,12 +21,12 @@ package hostport
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"net"
"strings" "strings"
"time" "time"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
utiliptables "k8s.io/kubernetes/pkg/util/iptables" utiliptables "k8s.io/kubernetes/pkg/util/iptables"
netutils "k8s.io/utils/net"
) )
type fakeChain struct { type fakeChain struct {
@ -192,7 +192,7 @@ func normalizeRule(rule string) (string, error) {
arg := remaining[:end] arg := remaining[:end]
// Normalize un-prefixed IP addresses like iptables does // Normalize un-prefixed IP addresses like iptables does
if net.ParseIP(arg) != nil { if netutils.ParseIPSloppy(arg) != nil {
arg += "/32" arg += "/32"
} }
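A standalone sketch of the normalization idea in this test helper, assuming only the illustrative function name normalizeArg: a bare IPv4 match address is widened to the /32 form that iptables prints, while already-prefixed arguments pass through.

package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

// normalizeArg appends "/32" to a bare address so it compares equal to the
// prefixed form printed by iptables; the fixtures above only use IPv4 here.
func normalizeArg(arg string) string {
	if netutils.ParseIPSloppy(arg) != nil {
		arg += "/32"
	}
	return arg
}

func main() {
	fmt.Println(normalizeArg("10.1.1.2"))    // 10.1.1.2/32
	fmt.Println(normalizeArg("10.1.1.0/24")) // 10.1.1.0/24
}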

View File

@ -20,7 +20,6 @@ package hostport
import ( import (
"bytes" "bytes"
"net"
"strings" "strings"
"testing" "testing"
@ -28,6 +27,7 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
utiliptables "k8s.io/kubernetes/pkg/util/iptables" utiliptables "k8s.io/kubernetes/pkg/util/iptables"
"k8s.io/utils/exec" "k8s.io/utils/exec"
netutils "k8s.io/utils/net"
) )
func TestOpenCloseHostports(t *testing.T) { func TestOpenCloseHostports(t *testing.T) {
@ -249,7 +249,7 @@ func TestHostportManager(t *testing.T) {
mapping: &PodPortMapping{ mapping: &PodPortMapping{
Name: "pod1", Name: "pod1",
Namespace: "ns1", Namespace: "ns1",
IP: net.ParseIP("10.1.1.2"), IP: netutils.ParseIPSloppy("10.1.1.2"),
HostNetwork: false, HostNetwork: false,
PortMappings: []*PortMapping{ PortMappings: []*PortMapping{
{ {
@ -276,7 +276,7 @@ func TestHostportManager(t *testing.T) {
mapping: &PodPortMapping{ mapping: &PodPortMapping{
Name: "pod2", Name: "pod2",
Namespace: "ns1", Namespace: "ns1",
IP: net.ParseIP("10.1.1.3"), IP: netutils.ParseIPSloppy("10.1.1.3"),
HostNetwork: false, HostNetwork: false,
PortMappings: []*PortMapping{ PortMappings: []*PortMapping{
{ {
@ -303,7 +303,7 @@ func TestHostportManager(t *testing.T) {
mapping: &PodPortMapping{ mapping: &PodPortMapping{
Name: "pod3", Name: "pod3",
Namespace: "ns1", Namespace: "ns1",
IP: net.ParseIP("10.1.1.4"), IP: netutils.ParseIPSloppy("10.1.1.4"),
HostNetwork: false, HostNetwork: false,
PortMappings: []*PortMapping{ PortMappings: []*PortMapping{
{ {
@ -320,7 +320,7 @@ func TestHostportManager(t *testing.T) {
mapping: &PodPortMapping{ mapping: &PodPortMapping{
Name: "pod3", Name: "pod3",
Namespace: "ns1", Namespace: "ns1",
IP: net.ParseIP("192.168.12.12"), IP: netutils.ParseIPSloppy("192.168.12.12"),
HostNetwork: false, HostNetwork: false,
PortMappings: []*PortMapping{ PortMappings: []*PortMapping{
{ {
@ -337,7 +337,7 @@ func TestHostportManager(t *testing.T) {
mapping: &PodPortMapping{ mapping: &PodPortMapping{
Name: "pod4", Name: "pod4",
Namespace: "ns1", Namespace: "ns1",
IP: net.ParseIP("2001:beef::2"), IP: netutils.ParseIPSloppy("2001:beef::2"),
HostNetwork: false, HostNetwork: false,
PortMappings: []*PortMapping{ PortMappings: []*PortMapping{
{ {
@ -356,7 +356,7 @@ func TestHostportManager(t *testing.T) {
mapping: &PodPortMapping{ mapping: &PodPortMapping{
Name: "pod5", Name: "pod5",
Namespace: "ns5", Namespace: "ns5",
IP: net.ParseIP("10.1.1.5"), IP: netutils.ParseIPSloppy("10.1.1.5"),
HostNetwork: false, HostNetwork: false,
PortMappings: []*PortMapping{ PortMappings: []*PortMapping{
{ {
@ -380,7 +380,7 @@ func TestHostportManager(t *testing.T) {
mapping: &PodPortMapping{ mapping: &PodPortMapping{
Name: "pod6", Name: "pod6",
Namespace: "ns1", Namespace: "ns1",
IP: net.ParseIP("10.1.1.2"), IP: netutils.ParseIPSloppy("10.1.1.2"),
HostNetwork: false, HostNetwork: false,
PortMappings: []*PortMapping{ PortMappings: []*PortMapping{
{ {
@ -555,7 +555,7 @@ func TestHostportManagerIPv6(t *testing.T) {
mapping: &PodPortMapping{ mapping: &PodPortMapping{
Name: "pod1", Name: "pod1",
Namespace: "ns1", Namespace: "ns1",
IP: net.ParseIP("2001:beef::2"), IP: netutils.ParseIPSloppy("2001:beef::2"),
HostNetwork: false, HostNetwork: false,
PortMappings: []*PortMapping{ PortMappings: []*PortMapping{
{ {
@ -581,7 +581,7 @@ func TestHostportManagerIPv6(t *testing.T) {
mapping: &PodPortMapping{ mapping: &PodPortMapping{
Name: "pod2", Name: "pod2",
Namespace: "ns1", Namespace: "ns1",
IP: net.ParseIP("2001:beef::3"), IP: netutils.ParseIPSloppy("2001:beef::3"),
HostNetwork: false, HostNetwork: false,
PortMappings: []*PortMapping{ PortMappings: []*PortMapping{
{ {
@ -607,7 +607,7 @@ func TestHostportManagerIPv6(t *testing.T) {
mapping: &PodPortMapping{ mapping: &PodPortMapping{
Name: "pod3", Name: "pod3",
Namespace: "ns1", Namespace: "ns1",
IP: net.ParseIP("2001:beef::4"), IP: netutils.ParseIPSloppy("2001:beef::4"),
HostNetwork: false, HostNetwork: false,
PortMappings: []*PortMapping{ PortMappings: []*PortMapping{
{ {
@ -623,7 +623,7 @@ func TestHostportManagerIPv6(t *testing.T) {
mapping: &PodPortMapping{ mapping: &PodPortMapping{
Name: "pod4", Name: "pod4",
Namespace: "ns2", Namespace: "ns2",
IP: net.ParseIP("192.168.2.2"), IP: netutils.ParseIPSloppy("192.168.2.2"),
HostNetwork: false, HostNetwork: false,
PortMappings: []*PortMapping{ PortMappings: []*PortMapping{
{ {

View File

@ -259,7 +259,7 @@ func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interf
} }
for idx, currentPodCIDR := range podCIDRs { for idx, currentPodCIDR := range podCIDRs {
_, cidr, err := net.ParseCIDR(currentPodCIDR) _, cidr, err := netutils.ParseCIDRSloppy(currentPodCIDR)
if nil != err { if nil != err {
klog.InfoS("Failed to generate CNI network config with cidr at the index", "podCIDR", currentPodCIDR, "index", idx, "err", err) klog.InfoS("Failed to generate CNI network config with cidr at the index", "podCIDR", currentPodCIDR, "index", idx, "err", err)
return return
@ -451,7 +451,7 @@ func (plugin *kubenetNetworkPlugin) addPortMapping(id kubecontainer.ContainerID,
Namespace: namespace, Namespace: namespace,
Name: name, Name: name,
PortMappings: portMappings, PortMappings: portMappings,
IP: net.ParseIP(ip), IP: netutils.ParseIPSloppy(ip),
HostNetwork: false, HostNetwork: false,
} }
if netutils.IsIPv6(pm.IP) { if netutils.IsIPv6(pm.IP) {
@ -635,7 +635,7 @@ func (plugin *kubenetNetworkPlugin) getNetworkStatus(id kubecontainer.ContainerI
ips := make([]net.IP, 0, len(iplist)) ips := make([]net.IP, 0, len(iplist))
for _, ip := range iplist { for _, ip := range iplist {
ips = append(ips, net.ParseIP(ip)) ips = append(ips, netutils.ParseIPSloppy(ip))
} }
return &network.PodNetworkStatus{ return &network.PodNetworkStatus{

View File

@ -40,6 +40,7 @@ import (
sysctltest "k8s.io/kubernetes/pkg/util/sysctl/testing" sysctltest "k8s.io/kubernetes/pkg/util/sysctl/testing"
"k8s.io/utils/exec" "k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing" fakeexec "k8s.io/utils/exec/testing"
netutils "k8s.io/utils/net"
) )
// test it fulfills the NetworkPlugin interface // test it fulfills the NetworkPlugin interface
@ -337,7 +338,7 @@ func TestGetRoutesConfig(t *testing.T) {
} { } {
var cidrs []*net.IPNet var cidrs []*net.IPNet
for _, c := range test.cidrs { for _, c := range test.cidrs {
_, cidr, err := net.ParseCIDR(c) _, cidr, err := netutils.ParseCIDRSloppy(c)
assert.NoError(t, err) assert.NoError(t, err)
cidrs = append(cidrs, cidr) cidrs = append(cidrs, cidr)
} }
@ -378,7 +379,7 @@ func TestGetRangesConfig(t *testing.T) {
} { } {
var cidrs []*net.IPNet var cidrs []*net.IPNet
for _, c := range test.cidrs { for _, c := range test.cidrs {
_, cidr, err := net.ParseCIDR(c) _, cidr, err := netutils.ParseCIDRSloppy(c)
assert.NoError(t, err) assert.NoError(t, err)
cidrs = append(cidrs, cidr) cidrs = append(cidrs, cidr)
} }

View File

@ -36,6 +36,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics" "k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
utilexec "k8s.io/utils/exec" utilexec "k8s.io/utils/exec"
netutils "k8s.io/utils/net"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
kubefeatures "k8s.io/kubernetes/pkg/features" kubefeatures "k8s.io/kubernetes/pkg/features"
@ -248,7 +249,7 @@ func getOnePodIP(execer utilexec.Interface, nsenterPath, netnsPath, interfaceNam
if len(fields) < 4 { if len(fields) < 4 {
return nil, fmt.Errorf("unexpected address output %s ", lines[0]) return nil, fmt.Errorf("unexpected address output %s ", lines[0])
} }
ip, _, err := net.ParseCIDR(fields[3]) ip, _, err := netutils.ParseCIDRSloppy(fields[3])
if err != nil { if err != nil {
return nil, fmt.Errorf("CNI failed to parse ip from output %s due to %v", output, err) return nil, fmt.Errorf("CNI failed to parse ip from output %s due to %v", output, err)
} }
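A minimal sketch of extracting a pod IP from an `ip addr`-style address field as the hunk above does; the sample field value is made up for illustration.

package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

func main() {
	// ParseCIDRSloppy returns both the address and the network, so the address
	// part of a field like "10.1.1.2/24" can be taken directly.
	field := "10.1.1.2/24"
	ip, _, err := netutils.ParseCIDRSloppy(field)
	if err != nil {
		fmt.Printf("failed to parse ip from %q: %v\n", field, err)
		return
	}
	fmt.Println(ip) // 10.1.1.2
}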

View File

@ -20,7 +20,6 @@ package testing
import ( import (
"fmt" "fmt"
"net"
"sync" "sync"
"testing" "testing"
@ -29,6 +28,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network" "k8s.io/kubernetes/pkg/kubelet/dockershim/network"
sysctltest "k8s.io/kubernetes/pkg/util/sysctl/testing" sysctltest "k8s.io/kubernetes/pkg/util/sysctl/testing"
netutils "k8s.io/utils/net"
"github.com/golang/mock/gomock" "github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -96,7 +96,7 @@ func TestPluginManager(t *testing.T) {
containerID := kubecontainer.ContainerID{ID: podName} containerID := kubecontainer.ContainerID{ID: podName}
fnp.EXPECT().SetUpPod("", podName, containerID).Return(nil).Times(4) fnp.EXPECT().SetUpPod("", podName, containerID).Return(nil).Times(4)
fnp.EXPECT().GetPodNetworkStatus("", podName, containerID).Return(&network.PodNetworkStatus{IP: net.ParseIP("1.2.3.4")}, nil).Times(4) fnp.EXPECT().GetPodNetworkStatus("", podName, containerID).Return(&network.PodNetworkStatus{IP: netutils.ParseIPSloppy("1.2.3.4")}, nil).Times(4)
fnp.EXPECT().TearDownPod("", podName, containerID).Return(nil).Times(4) fnp.EXPECT().TearDownPod("", podName, containerID).Return(nil).Times(4)
for x := 0; x < 4; x++ { for x := 0; x < 4; x++ {
@ -173,7 +173,7 @@ func (p *hookableFakeNetworkPlugin) TearDownPod(string, string, kubecontainer.Co
} }
func (p *hookableFakeNetworkPlugin) GetPodNetworkStatus(string, string, kubecontainer.ContainerID) (*network.PodNetworkStatus, error) { func (p *hookableFakeNetworkPlugin) GetPodNetworkStatus(string, string, kubecontainer.ContainerID) (*network.PodNetworkStatus, error) {
return &network.PodNetworkStatus{IP: net.ParseIP("10.1.2.3")}, nil return &network.PodNetworkStatus{IP: netutils.ParseIPSloppy("10.1.2.3")}, nil
} }
func (p *hookableFakeNetworkPlugin) Status() error { func (p *hookableFakeNetworkPlugin) Status() error {

View File

@ -38,6 +38,7 @@ import (
libcontaineruserns "github.com/opencontainers/runc/libcontainer/userns" libcontaineruserns "github.com/opencontainers/runc/libcontainer/userns"
"k8s.io/mount-utils" "k8s.io/mount-utils"
"k8s.io/utils/integer" "k8s.io/utils/integer"
netutils "k8s.io/utils/net"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -505,7 +506,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
clusterDNS := make([]net.IP, 0, len(kubeCfg.ClusterDNS)) clusterDNS := make([]net.IP, 0, len(kubeCfg.ClusterDNS))
for _, ipEntry := range kubeCfg.ClusterDNS { for _, ipEntry := range kubeCfg.ClusterDNS {
ip := net.ParseIP(ipEntry) ip := netutils.ParseIPSloppy(ipEntry)
if ip == nil { if ip == nil {
klog.InfoS("Invalid clusterDNS IP", "IP", ipEntry) klog.InfoS("Invalid clusterDNS IP", "IP", ipEntry)
} else { } else {

View File

@ -58,6 +58,7 @@ import (
kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager" kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
taintutil "k8s.io/kubernetes/pkg/util/taints" taintutil "k8s.io/kubernetes/pkg/util/taints"
"k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util"
netutils "k8s.io/utils/net"
) )
const ( const (
@ -2484,7 +2485,7 @@ func TestValidateNodeIPParam(t *testing.T) {
tests = append(tests, successTest) tests = append(tests, successTest)
} }
for _, test := range tests { for _, test := range tests {
err := validateNodeIP(net.ParseIP(test.nodeIP)) err := validateNodeIP(netutils.ParseIPSloppy(test.nodeIP))
if test.success { if test.success {
assert.NoError(t, err, "test %s", test.testName) assert.NoError(t, err, "test %s", test.testName)
} else { } else {

View File

@ -42,6 +42,7 @@ import (
core "k8s.io/client-go/testing" core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
featuregatetesting "k8s.io/component-base/featuregate/testing" featuregatetesting "k8s.io/component-base/featuregate/testing"
netutils "k8s.io/utils/net"
// TODO: remove this import if // TODO: remove this import if
// api.Registry.GroupOrDie(v1.GroupName).GroupVersions[0].String() is changed // api.Registry.GroupOrDie(v1.GroupName).GroupVersions[0].String() is changed
@ -3426,7 +3427,7 @@ func TestGenerateAPIPodStatusPodIPs(t *testing.T) {
defer testKubelet.Cleanup() defer testKubelet.Cleanup()
kl := testKubelet.kubelet kl := testKubelet.kubelet
if tc.nodeIP != "" { if tc.nodeIP != "" {
kl.nodeIPs = []net.IP{net.ParseIP(tc.nodeIP)} kl.nodeIPs = []net.IP{netutils.ParseIPSloppy(tc.nodeIP)}
} }
pod := podWithUIDNameNs("12345", "test-pod", "test-namespace") pod := podWithUIDNameNs("12345", "test-pod", "test-namespace")
@ -3530,7 +3531,7 @@ func TestSortPodIPs(t *testing.T) {
defer testKubelet.Cleanup() defer testKubelet.Cleanup()
kl := testKubelet.kubelet kl := testKubelet.kubelet
if tc.nodeIP != "" { if tc.nodeIP != "" {
kl.nodeIPs = []net.IP{net.ParseIP(tc.nodeIP)} kl.nodeIPs = []net.IP{netutils.ParseIPSloppy(tc.nodeIP)}
} }
podIPs := kl.sortPodIPs(tc.podIPs) podIPs := kl.sortPodIPs(tc.podIPs)

View File

@ -18,7 +18,6 @@ package kuberuntime
import ( import (
"fmt" "fmt"
"net"
"net/url" "net/url"
"runtime" "runtime"
"sort" "sort"
@ -33,6 +32,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/kubelet/util/format"
netutils "k8s.io/utils/net"
) )
// createPodSandbox creates a pod sandbox and returns (podSandBoxID, message, error). // createPodSandbox creates a pod sandbox and returns (podSandBoxID, message, error).
@ -298,7 +298,7 @@ func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName
// pick primary IP // pick primary IP
if len(podSandbox.Network.Ip) != 0 { if len(podSandbox.Network.Ip) != 0 {
if net.ParseIP(podSandbox.Network.Ip) == nil { if netutils.ParseIPSloppy(podSandbox.Network.Ip) == nil {
klog.InfoS("Pod Sandbox reported an unparseable primary IP", "pod", klog.KRef(podNamespace, podName), "IP", podSandbox.Network.Ip) klog.InfoS("Pod Sandbox reported an unparseable primary IP", "pod", klog.KRef(podNamespace, podName), "IP", podSandbox.Network.Ip)
return nil return nil
} }
@ -307,7 +307,7 @@ func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName
// pick additional ips, if cri reported them // pick additional ips, if cri reported them
for _, podIP := range podSandbox.Network.AdditionalIps { for _, podIP := range podSandbox.Network.AdditionalIps {
if nil == net.ParseIP(podIP.Ip) { if nil == netutils.ParseIPSloppy(podIP.Ip) {
klog.InfoS("Pod Sandbox reported an unparseable additional IP", "pod", klog.KRef(podNamespace, podName), "IP", podIP.Ip) klog.InfoS("Pod Sandbox reported an unparseable additional IP", "pod", klog.KRef(podNamespace, podName), "IP", podIP.Ip)
return nil return nil
} }

View File

@ -35,6 +35,7 @@ import (
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
netutils "k8s.io/utils/net"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -350,7 +351,7 @@ func TestGetPodDNSType(t *testing.T) {
} }
testClusterDNSDomain := "TEST" testClusterDNSDomain := "TEST"
clusterNS := "203.0.113.1" clusterNS := "203.0.113.1"
testClusterDNS := []net.IP{net.ParseIP(clusterNS)} testClusterDNS := []net.IP{netutils.ParseIPSloppy(clusterNS)}
configurer := NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "") configurer := NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
@ -477,7 +478,7 @@ func testGetPodDNS(t *testing.T) {
} }
clusterNS := "203.0.113.1" clusterNS := "203.0.113.1"
testClusterDNSDomain := "kubernetes.io" testClusterDNSDomain := "kubernetes.io"
testClusterDNS := []net.IP{net.ParseIP(clusterNS)} testClusterDNS := []net.IP{netutils.ParseIPSloppy(clusterNS)}
configurer := NewConfigurer(recorder, nodeRef, nil, testClusterDNS, testClusterDNSDomain, "") configurer := NewConfigurer(recorder, nodeRef, nil, testClusterDNS, testClusterDNSDomain, "")
@ -606,7 +607,7 @@ func TestGetPodDNSCustom(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
configurer := NewConfigurer(recorder, nodeRef, nil, []net.IP{net.ParseIP(testClusterNameserver)}, testClusterDNSDomain, tmpfile.Name()) configurer := NewConfigurer(recorder, nodeRef, nil, []net.IP{netutils.ParseIPSloppy(testClusterNameserver)}, testClusterDNSDomain, tmpfile.Name())
testCases := []struct { testCases := []struct {
desc string desc string

View File

@ -42,6 +42,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
netutils "k8s.io/utils/net"
"k8s.io/klog/v2" "k8s.io/klog/v2"
) )
@ -149,13 +150,13 @@ func NodeAddress(nodeIPs []net.IP, // typically Kubelet.nodeIPs
// prefer addresses of the matching family // prefer addresses of the matching family
sortedAddresses := make([]v1.NodeAddress, 0, len(cloudNodeAddresses)) sortedAddresses := make([]v1.NodeAddress, 0, len(cloudNodeAddresses))
for _, nodeAddress := range cloudNodeAddresses { for _, nodeAddress := range cloudNodeAddresses {
ip := net.ParseIP(nodeAddress.Address) ip := netutils.ParseIPSloppy(nodeAddress.Address)
if ip == nil || isPreferredIPFamily(ip) { if ip == nil || isPreferredIPFamily(ip) {
sortedAddresses = append(sortedAddresses, nodeAddress) sortedAddresses = append(sortedAddresses, nodeAddress)
} }
} }
for _, nodeAddress := range cloudNodeAddresses { for _, nodeAddress := range cloudNodeAddresses {
ip := net.ParseIP(nodeAddress.Address) ip := netutils.ParseIPSloppy(nodeAddress.Address)
if ip != nil && !isPreferredIPFamily(ip) { if ip != nil && !isPreferredIPFamily(ip) {
sortedAddresses = append(sortedAddresses, nodeAddress) sortedAddresses = append(sortedAddresses, nodeAddress)
} }
@ -219,7 +220,7 @@ func NodeAddress(nodeIPs []net.IP, // typically Kubelet.nodeIPs
// unless nodeIP is "::", in which case it is reversed. // unless nodeIP is "::", in which case it is reversed.
if nodeIPSpecified { if nodeIPSpecified {
ipAddr = nodeIP ipAddr = nodeIP
} else if addr := net.ParseIP(hostname); addr != nil { } else if addr := netutils.ParseIPSloppy(hostname); addr != nil {
ipAddr = addr ipAddr = addr
} else { } else {
var addrs []net.IP var addrs []net.IP
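To make the two-pass ordering in the NodeAddress hunk above easier to follow, here is a sketch over plain strings; sortByPreferredFamily and the sample values are illustrative only.

package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

// sortByPreferredFamily keeps the original relative order but lists addresses
// of the preferred family (and non-IP values) first, then the other family.
func sortByPreferredFamily(addrs []string, preferIPv6 bool) []string {
	out := make([]string, 0, len(addrs))
	for _, a := range addrs {
		ip := netutils.ParseIPSloppy(a)
		if ip == nil || netutils.IsIPv6(ip) == preferIPv6 {
			out = append(out, a)
		}
	}
	for _, a := range addrs {
		ip := netutils.ParseIPSloppy(a)
		if ip != nil && netutils.IsIPv6(ip) != preferIPv6 {
			out = append(out, a)
		}
	}
	return out
}

func main() {
	addrs := []string{"fc01:1234::5678", "10.1.1.1", "node-1"}
	fmt.Println(sortByPreferredFamily(addrs, false)) // [10.1.1.1 node-1 fc01:1234::5678]
}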

View File

@ -27,7 +27,7 @@ import (
cadvisorapiv1 "github.com/google/cadvisor/info/v1" cadvisorapiv1 "github.com/google/cadvisor/info/v1"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality" apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -44,6 +44,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils" "k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing" volumetest "k8s.io/kubernetes/pkg/volume/testing"
netutils "k8s.io/utils/net"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -66,7 +67,7 @@ func TestNodeAddress(t *testing.T) {
}{ }{
{ {
name: "A single InternalIP", name: "A single InternalIP",
nodeIP: net.ParseIP("10.1.1.1"), nodeIP: netutils.ParseIPSloppy("10.1.1.1"),
nodeAddresses: []v1.NodeAddress{ nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"}, {Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname}, {Type: v1.NodeHostName, Address: testKubeletHostname},
@ -79,7 +80,7 @@ func TestNodeAddress(t *testing.T) {
}, },
{ {
name: "NodeIP is external", name: "NodeIP is external",
nodeIP: net.ParseIP("55.55.55.55"), nodeIP: netutils.ParseIPSloppy("55.55.55.55"),
nodeAddresses: []v1.NodeAddress{ nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"}, {Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeExternalIP, Address: "55.55.55.55"}, {Type: v1.NodeExternalIP, Address: "55.55.55.55"},
@ -95,7 +96,7 @@ func TestNodeAddress(t *testing.T) {
{ {
// Accommodating #45201 and #49202 // Accommodating #45201 and #49202
name: "InternalIP and ExternalIP are the same", name: "InternalIP and ExternalIP are the same",
nodeIP: net.ParseIP("55.55.55.55"), nodeIP: netutils.ParseIPSloppy("55.55.55.55"),
nodeAddresses: []v1.NodeAddress{ nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "44.44.44.44"}, {Type: v1.NodeInternalIP, Address: "44.44.44.44"},
{Type: v1.NodeExternalIP, Address: "44.44.44.44"}, {Type: v1.NodeExternalIP, Address: "44.44.44.44"},
@ -112,7 +113,7 @@ func TestNodeAddress(t *testing.T) {
}, },
{ {
name: "An Internal/ExternalIP, an Internal/ExternalDNS", name: "An Internal/ExternalIP, an Internal/ExternalDNS",
nodeIP: net.ParseIP("10.1.1.1"), nodeIP: netutils.ParseIPSloppy("10.1.1.1"),
nodeAddresses: []v1.NodeAddress{ nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"}, {Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeExternalIP, Address: "55.55.55.55"}, {Type: v1.NodeExternalIP, Address: "55.55.55.55"},
@ -131,7 +132,7 @@ func TestNodeAddress(t *testing.T) {
}, },
{ {
name: "An Internal with multiple internal IPs", name: "An Internal with multiple internal IPs",
nodeIP: net.ParseIP("10.1.1.1"), nodeIP: netutils.ParseIPSloppy("10.1.1.1"),
nodeAddresses: []v1.NodeAddress{ nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"}, {Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeInternalIP, Address: "10.2.2.2"}, {Type: v1.NodeInternalIP, Address: "10.2.2.2"},
@ -148,7 +149,7 @@ func TestNodeAddress(t *testing.T) {
}, },
{ {
name: "An InternalIP that isn't valid: should error", name: "An InternalIP that isn't valid: should error",
nodeIP: net.ParseIP("10.2.2.2"), nodeIP: netutils.ParseIPSloppy("10.2.2.2"),
nodeAddresses: []v1.NodeAddress{ nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"}, {Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeExternalIP, Address: "55.55.55.55"}, {Type: v1.NodeExternalIP, Address: "55.55.55.55"},
@ -181,7 +182,7 @@ func TestNodeAddress(t *testing.T) {
}, },
{ {
name: "cloud reports hostname, nodeIP is set, no override", name: "cloud reports hostname, nodeIP is set, no override",
nodeIP: net.ParseIP("10.1.1.1"), nodeIP: netutils.ParseIPSloppy("10.1.1.1"),
nodeAddresses: []v1.NodeAddress{ nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"}, {Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeExternalIP, Address: "55.55.55.55"}, {Type: v1.NodeExternalIP, Address: "55.55.55.55"},
@ -211,7 +212,7 @@ func TestNodeAddress(t *testing.T) {
}, },
{ {
name: "cloud provider is external", name: "cloud provider is external",
nodeIP: net.ParseIP("10.0.0.1"), nodeIP: netutils.ParseIPSloppy("10.0.0.1"),
nodeAddresses: []v1.NodeAddress{}, nodeAddresses: []v1.NodeAddress{},
externalCloudProvider: true, externalCloudProvider: true,
expectedAddresses: []v1.NodeAddress{ expectedAddresses: []v1.NodeAddress{
@ -250,7 +251,7 @@ func TestNodeAddress(t *testing.T) {
}, },
{ {
name: "cloud doesn't report hostname, nodeIP is set, no override, detected hostname match", name: "cloud doesn't report hostname, nodeIP is set, no override, detected hostname match",
nodeIP: net.ParseIP("10.1.1.1"), nodeIP: netutils.ParseIPSloppy("10.1.1.1"),
nodeAddresses: []v1.NodeAddress{ nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"}, {Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeExternalIP, Address: "55.55.55.55"}, {Type: v1.NodeExternalIP, Address: "55.55.55.55"},
@ -266,7 +267,7 @@ func TestNodeAddress(t *testing.T) {
}, },
{ {
name: "cloud doesn't report hostname, nodeIP is set, no override, detected hostname match with same type as nodeIP", name: "cloud doesn't report hostname, nodeIP is set, no override, detected hostname match with same type as nodeIP",
nodeIP: net.ParseIP("10.1.1.1"), nodeIP: netutils.ParseIPSloppy("10.1.1.1"),
nodeAddresses: []v1.NodeAddress{ nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"}, {Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeInternalIP, Address: testKubeletHostname}, // cloud-reported address value matches detected hostname {Type: v1.NodeInternalIP, Address: testKubeletHostname}, // cloud-reported address value matches detected hostname
@ -323,7 +324,7 @@ func TestNodeAddress(t *testing.T) {
}, },
{ {
name: "Dual-stack cloud, IPv4 first, request IPv4", name: "Dual-stack cloud, IPv4 first, request IPv4",
nodeIP: net.ParseIP("0.0.0.0"), nodeIP: netutils.ParseIPSloppy("0.0.0.0"),
nodeAddresses: []v1.NodeAddress{ nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"}, {Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeInternalIP, Address: "fc01:1234::5678"}, {Type: v1.NodeInternalIP, Address: "fc01:1234::5678"},
@ -338,7 +339,7 @@ func TestNodeAddress(t *testing.T) {
}, },
{ {
name: "Dual-stack cloud, IPv6 first, request IPv4", name: "Dual-stack cloud, IPv6 first, request IPv4",
nodeIP: net.ParseIP("0.0.0.0"), nodeIP: netutils.ParseIPSloppy("0.0.0.0"),
nodeAddresses: []v1.NodeAddress{ nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "fc01:1234::5678"}, {Type: v1.NodeInternalIP, Address: "fc01:1234::5678"},
{Type: v1.NodeInternalIP, Address: "10.1.1.1"}, {Type: v1.NodeInternalIP, Address: "10.1.1.1"},
@ -353,7 +354,7 @@ func TestNodeAddress(t *testing.T) {
}, },
{ {
name: "Dual-stack cloud, IPv4 first, request IPv6", name: "Dual-stack cloud, IPv4 first, request IPv6",
nodeIP: net.ParseIP("::"), nodeIP: netutils.ParseIPSloppy("::"),
nodeAddresses: []v1.NodeAddress{ nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"}, {Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeInternalIP, Address: "fc01:1234::5678"}, {Type: v1.NodeInternalIP, Address: "fc01:1234::5678"},
@ -368,7 +369,7 @@ func TestNodeAddress(t *testing.T) {
}, },
{ {
name: "Dual-stack cloud, IPv6 first, request IPv6", name: "Dual-stack cloud, IPv6 first, request IPv6",
nodeIP: net.ParseIP("::"), nodeIP: netutils.ParseIPSloppy("::"),
nodeAddresses: []v1.NodeAddress{ nodeAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "fc01:1234::5678"}, {Type: v1.NodeInternalIP, Address: "fc01:1234::5678"},
{Type: v1.NodeInternalIP, Address: "10.1.1.1"}, {Type: v1.NodeInternalIP, Address: "10.1.1.1"},
@ -448,7 +449,7 @@ func TestNodeAddress_NoCloudProvider(t *testing.T) {
}{ }{
{ {
name: "Single --node-ip", name: "Single --node-ip",
nodeIPs: []net.IP{net.ParseIP("10.1.1.1")}, nodeIPs: []net.IP{netutils.ParseIPSloppy("10.1.1.1")},
expectedAddresses: []v1.NodeAddress{ expectedAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"}, {Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname}, {Type: v1.NodeHostName, Address: testKubeletHostname},
@ -456,7 +457,7 @@ func TestNodeAddress_NoCloudProvider(t *testing.T) {
}, },
{ {
name: "Dual --node-ips", name: "Dual --node-ips",
nodeIPs: []net.IP{net.ParseIP("10.1.1.1"), net.ParseIP("fd01::1234")}, nodeIPs: []net.IP{netutils.ParseIPSloppy("10.1.1.1"), netutils.ParseIPSloppy("fd01::1234")},
expectedAddresses: []v1.NodeAddress{ expectedAddresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "10.1.1.1"}, {Type: v1.NodeInternalIP, Address: "10.1.1.1"},
{Type: v1.NodeInternalIP, Address: "fd01::1234"}, {Type: v1.NodeInternalIP, Address: "fd01::1234"},

View File

@ -41,6 +41,7 @@ import (
"k8s.io/klog/v2" "k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/metrics/collectors" "k8s.io/kubernetes/pkg/kubelet/metrics/collectors"
"k8s.io/utils/clock" "k8s.io/utils/clock"
netutils "k8s.io/utils/net"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -144,7 +145,7 @@ func ListenAndServeKubeletServer(
tlsOptions *TLSOptions, tlsOptions *TLSOptions,
auth AuthInterface) { auth AuthInterface) {
address := net.ParseIP(kubeCfg.Address) address := netutils.ParseIPSloppy(kubeCfg.Address)
port := uint(kubeCfg.Port) port := uint(kubeCfg.Port)
klog.InfoS("Starting to listen", "address", address, "port", port) klog.InfoS("Starting to listen", "address", address, "port", port)
handler := NewServer(host, resourceAnalyzer, auth, kubeCfg) handler := NewServer(host, resourceAnalyzer, auth, kubeCfg)

View File

@ -18,7 +18,6 @@ package kubemark
import ( import (
"fmt" "fmt"
"net"
"time" "time"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
@ -35,6 +34,7 @@ import (
utilnode "k8s.io/kubernetes/pkg/util/node" utilnode "k8s.io/kubernetes/pkg/util/node"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
utilexec "k8s.io/utils/exec" utilexec "k8s.io/utils/exec"
netutils "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer" utilpointer "k8s.io/utils/pointer"
"k8s.io/klog/v2" "k8s.io/klog/v2"
@ -83,7 +83,7 @@ func NewHollowProxyOrDie(
nodeIP := utilnode.GetNodeIP(client, nodeName) nodeIP := utilnode.GetNodeIP(client, nodeName)
if nodeIP == nil { if nodeIP == nil {
klog.V(0).Infof("can't determine this node's IP, assuming 127.0.0.1") klog.V(0).Infof("can't determine this node's IP, assuming 127.0.0.1")
nodeIP = net.ParseIP("127.0.0.1") nodeIP = netutils.ParseIPSloppy("127.0.0.1")
} }
// Real proxier with fake iptables, sysctl, etc underneath it. // Real proxier with fake iptables, sysctl, etc underneath it.
//var err error //var err error

View File

@ -18,7 +18,6 @@ package v1alpha1
import ( import (
"fmt" "fmt"
"net"
"time" "time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -28,6 +27,7 @@ import (
"k8s.io/kubernetes/pkg/cluster/ports" "k8s.io/kubernetes/pkg/cluster/ports"
"k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/kubelet/qos"
proxyutil "k8s.io/kubernetes/pkg/proxy/util" proxyutil "k8s.io/kubernetes/pkg/proxy/util"
netutils "k8s.io/utils/net"
"k8s.io/utils/pointer" "k8s.io/utils/pointer"
) )
@ -131,7 +131,7 @@ func SetDefaults_KubeProxyConfiguration(obj *kubeproxyconfigv1alpha1.KubeProxyCo
// based on the given bind address. IPv6 addresses are enclosed in square // based on the given bind address. IPv6 addresses are enclosed in square
// brackets for appending port. // brackets for appending port.
func getDefaultAddresses(bindAddress string) (defaultHealthzAddress, defaultMetricsAddress string) { func getDefaultAddresses(bindAddress string) (defaultHealthzAddress, defaultMetricsAddress string) {
if net.ParseIP(bindAddress).To4() != nil { if netutils.ParseIPSloppy(bindAddress).To4() != nil {
return "0.0.0.0", "127.0.0.1" return "0.0.0.0", "127.0.0.1"
} }
return "[::]", "[::1]" return "[::]", "[::1]"

View File

@ -66,7 +66,7 @@ func Validate(config *kubeproxyconfig.KubeProxyConfiguration) field.ErrorList {
allErrs = append(allErrs, field.Invalid(newPath.Child("ConfigSyncPeriod"), config.ConfigSyncPeriod, "must be greater than 0")) allErrs = append(allErrs, field.Invalid(newPath.Child("ConfigSyncPeriod"), config.ConfigSyncPeriod, "must be greater than 0"))
} }
if net.ParseIP(config.BindAddress) == nil { if netutils.ParseIPSloppy(config.BindAddress) == nil {
allErrs = append(allErrs, field.Invalid(newPath.Child("BindAddress"), config.BindAddress, "not a valid textual representation of an IP address")) allErrs = append(allErrs, field.Invalid(newPath.Child("BindAddress"), config.BindAddress, "not a valid textual representation of an IP address"))
} }
@ -94,7 +94,7 @@ func Validate(config *kubeproxyconfig.KubeProxyConfiguration) field.ErrorList {
allErrs = append(allErrs, field.Invalid(newPath.Child("ClusterCIDR"), config.ClusterCIDR, "only one CIDR allowed (e.g. 10.100.0.0/16 or fde4:8dba:82e1::/48)")) allErrs = append(allErrs, field.Invalid(newPath.Child("ClusterCIDR"), config.ClusterCIDR, "only one CIDR allowed (e.g. 10.100.0.0/16 or fde4:8dba:82e1::/48)"))
// if we are here means that len(cidrs) == 1, we need to validate it // if we are here means that len(cidrs) == 1, we need to validate it
default: default:
if _, _, err := net.ParseCIDR(config.ClusterCIDR); err != nil { if _, _, err := netutils.ParseCIDRSloppy(config.ClusterCIDR); err != nil {
allErrs = append(allErrs, field.Invalid(newPath.Child("ClusterCIDR"), config.ClusterCIDR, "must be a valid CIDR block (e.g. 10.100.0.0/16 or fde4:8dba:82e1::/48)")) allErrs = append(allErrs, field.Invalid(newPath.Child("ClusterCIDR"), config.ClusterCIDR, "must be a valid CIDR block (e.g. 10.100.0.0/16 or fde4:8dba:82e1::/48)"))
} }
} }
@ -228,7 +228,7 @@ func validateHostPort(input string, fldPath *field.Path) field.ErrorList {
return allErrs return allErrs
} }
if ip := net.ParseIP(hostIP); ip == nil { if ip := netutils.ParseIPSloppy(hostIP); ip == nil {
allErrs = append(allErrs, field.Invalid(fldPath, hostIP, "must be a valid IP")) allErrs = append(allErrs, field.Invalid(fldPath, hostIP, "must be a valid IP"))
} }
@ -275,7 +275,7 @@ func validateKubeProxyNodePortAddress(nodePortAddresses []string, fldPath *field
allErrs := field.ErrorList{} allErrs := field.ErrorList{}
for i := range nodePortAddresses { for i := range nodePortAddresses {
if _, _, err := net.ParseCIDR(nodePortAddresses[i]); err != nil { if _, _, err := netutils.ParseCIDRSloppy(nodePortAddresses[i]); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i), nodePortAddresses[i], "must be a valid CIDR")) allErrs = append(allErrs, field.Invalid(fldPath.Index(i), nodePortAddresses[i], "must be a valid CIDR"))
} }
} }
@ -305,7 +305,7 @@ func validateIPVSExcludeCIDRs(excludeCIDRs []string, fldPath *field.Path) field.
allErrs := field.ErrorList{} allErrs := field.ErrorList{}
for i := range excludeCIDRs { for i := range excludeCIDRs {
if _, _, err := net.ParseCIDR(excludeCIDRs[i]); err != nil { if _, _, err := netutils.ParseCIDRSloppy(excludeCIDRs[i]); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i), excludeCIDRs[i], "must be a valid CIDR")) allErrs = append(allErrs, field.Invalid(fldPath.Index(i), excludeCIDRs[i], "must be a valid CIDR"))
} }
} }
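The validation hunks above all follow the same parse-and-report shape; here is a simplified sketch that returns plain errors instead of a field.ErrorList, with an illustrative function name.

package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

// validateCIDRList checks that every entry parses as a CIDR and reports each
// failure together with its index in the input slice.
func validateCIDRList(cidrs []string) []error {
	var errs []error
	for i, c := range cidrs {
		if _, _, err := netutils.ParseCIDRSloppy(c); err != nil {
			errs = append(errs, fmt.Errorf("entry %d (%q): must be a valid CIDR", i, c))
		}
	}
	return errs
}

func main() {
	fmt.Println(validateCIDRList([]string{"10.100.0.0/16", "not-a-cidr"}))
}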

View File

@ -53,7 +53,7 @@ import (
utiliptables "k8s.io/kubernetes/pkg/util/iptables" utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
utilexec "k8s.io/utils/exec" utilexec "k8s.io/utils/exec"
utilnet "k8s.io/utils/net" netutils "k8s.io/utils/net"
) )
const ( const (
@ -189,7 +189,7 @@ type Proxier struct {
mu sync.Mutex // protects the following fields mu sync.Mutex // protects the following fields
serviceMap proxy.ServiceMap serviceMap proxy.ServiceMap
endpointsMap proxy.EndpointsMap endpointsMap proxy.EndpointsMap
portsMap map[utilnet.LocalPort]utilnet.Closeable portsMap map[netutils.LocalPort]netutils.Closeable
nodeLabels map[string]string nodeLabels map[string]string
// endpointSlicesSynced, and servicesSynced are set to true // endpointSlicesSynced, and servicesSynced are set to true
// when corresponding objects are synced after startup. This is used to avoid // when corresponding objects are synced after startup. This is used to avoid
@ -208,7 +208,7 @@ type Proxier struct {
localDetector proxyutiliptables.LocalTrafficDetector localDetector proxyutiliptables.LocalTrafficDetector
hostname string hostname string
nodeIP net.IP nodeIP net.IP
portMapper utilnet.PortOpener portMapper netutils.PortOpener
recorder events.EventRecorder recorder events.EventRecorder
serviceHealthServer healthcheck.ServiceHealthServer serviceHealthServer healthcheck.ServiceHealthServer
@ -295,7 +295,7 @@ func NewProxier(ipt utiliptables.Interface,
} }
proxier := &Proxier{ proxier := &Proxier{
portsMap: make(map[utilnet.LocalPort]utilnet.Closeable), portsMap: make(map[netutils.LocalPort]netutils.Closeable),
serviceMap: make(proxy.ServiceMap), serviceMap: make(proxy.ServiceMap),
serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, ipFamily, recorder, nil), serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, ipFamily, recorder, nil),
endpointsMap: make(proxy.EndpointsMap), endpointsMap: make(proxy.EndpointsMap),
@ -308,7 +308,7 @@ func NewProxier(ipt utiliptables.Interface,
localDetector: localDetector, localDetector: localDetector,
hostname: hostname, hostname: hostname,
nodeIP: nodeIP, nodeIP: nodeIP,
portMapper: &utilnet.ListenPortOpener, portMapper: &netutils.ListenPortOpener,
recorder: recorder, recorder: recorder,
serviceHealthServer: serviceHealthServer, serviceHealthServer: serviceHealthServer,
healthzServer: healthzServer, healthzServer: healthzServer,
@ -966,7 +966,7 @@ func (proxier *Proxier) syncProxyRules() {
activeNATChains := map[utiliptables.Chain]bool{} // use a map as a set activeNATChains := map[utiliptables.Chain]bool{} // use a map as a set
// Accumulate the set of local ports that we will be holding open once this update is complete // Accumulate the set of local ports that we will be holding open once this update is complete
replacementPortsMap := map[utilnet.LocalPort]utilnet.Closeable{} replacementPortsMap := map[netutils.LocalPort]netutils.Closeable{}
// We are creating those slices ones here to avoid memory reallocations // We are creating those slices ones here to avoid memory reallocations
// in every loop. Note that reuse the memory, instead of doing: // in every loop. Note that reuse the memory, instead of doing:
@ -1006,10 +1006,10 @@ func (proxier *Proxier) syncProxyRules() {
klog.ErrorS(nil, "Failed to cast serviceInfo", "svcName", svcName.String()) klog.ErrorS(nil, "Failed to cast serviceInfo", "svcName", svcName.String())
continue continue
} }
isIPv6 := utilnet.IsIPv6(svcInfo.ClusterIP()) isIPv6 := netutils.IsIPv6(svcInfo.ClusterIP())
localPortIPFamily := utilnet.IPv4 localPortIPFamily := netutils.IPv4
if isIPv6 { if isIPv6 {
localPortIPFamily = utilnet.IPv6 localPortIPFamily = netutils.IPv6
} }
protocol := strings.ToLower(string(svcInfo.Protocol())) protocol := strings.ToLower(string(svcInfo.Protocol()))
svcNameString := svcInfo.serviceNameString svcNameString := svcInfo.serviceNameString
@ -1082,13 +1082,13 @@ func (proxier *Proxier) syncProxyRules() {
// If the "external" IP happens to be an IP that is local to this // If the "external" IP happens to be an IP that is local to this
// machine, hold the local port open so no other process can open it // machine, hold the local port open so no other process can open it
// (because the socket might open but it would never work). // (because the socket might open but it would never work).
if (svcInfo.Protocol() != v1.ProtocolSCTP) && localAddrSet.Has(net.ParseIP(externalIP)) { if (svcInfo.Protocol() != v1.ProtocolSCTP) && localAddrSet.Has(netutils.ParseIPSloppy(externalIP)) {
lp := utilnet.LocalPort{ lp := netutils.LocalPort{
Description: "externalIP for " + svcNameString, Description: "externalIP for " + svcNameString,
IP: externalIP, IP: externalIP,
IPFamily: localPortIPFamily, IPFamily: localPortIPFamily,
Port: svcInfo.Port(), Port: svcInfo.Port(),
Protocol: utilnet.Protocol(svcInfo.Protocol()), Protocol: netutils.Protocol(svcInfo.Protocol()),
} }
if proxier.portsMap[lp] != nil { if proxier.portsMap[lp] != nil {
klog.V(4).InfoS("Port was open before and is still needed", "port", lp.String()) klog.V(4).InfoS("Port was open before and is still needed", "port", lp.String())
@ -1117,7 +1117,7 @@ func (proxier *Proxier) syncProxyRules() {
args = append(args[:0], args = append(args[:0],
"-m", "comment", "--comment", fmt.Sprintf(`"%s external IP"`, svcNameString), "-m", "comment", "--comment", fmt.Sprintf(`"%s external IP"`, svcNameString),
"-m", protocol, "-p", protocol, "-m", protocol, "-p", protocol,
"-d", utilproxy.ToCIDR(net.ParseIP(externalIP)), "-d", utilproxy.ToCIDR(netutils.ParseIPSloppy(externalIP)),
"--dport", strconv.Itoa(svcInfo.Port()), "--dport", strconv.Itoa(svcInfo.Port()),
) )
@ -1144,7 +1144,7 @@ func (proxier *Proxier) syncProxyRules() {
"-A", string(kubeExternalServicesChain), "-A", string(kubeExternalServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString), "-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
"-m", protocol, "-p", protocol, "-m", protocol, "-p", protocol,
"-d", utilproxy.ToCIDR(net.ParseIP(externalIP)), "-d", utilproxy.ToCIDR(netutils.ParseIPSloppy(externalIP)),
"--dport", strconv.Itoa(svcInfo.Port()), "--dport", strconv.Itoa(svcInfo.Port()),
"-j", "REJECT", "-j", "REJECT",
) )
@ -1171,7 +1171,7 @@ func (proxier *Proxier) syncProxyRules() {
"-A", string(kubeServicesChain), "-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString), "-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString),
"-m", protocol, "-p", protocol, "-m", protocol, "-p", protocol,
"-d", utilproxy.ToCIDR(net.ParseIP(ingress)), "-d", utilproxy.ToCIDR(netutils.ParseIPSloppy(ingress)),
"--dport", strconv.Itoa(svcInfo.Port()), "--dport", strconv.Itoa(svcInfo.Port()),
) )
// jump to service firewall chain // jump to service firewall chain
@ -1199,7 +1199,7 @@ func (proxier *Proxier) syncProxyRules() {
allowFromNode := false allowFromNode := false
for _, src := range svcInfo.LoadBalancerSourceRanges() { for _, src := range svcInfo.LoadBalancerSourceRanges() {
utilproxy.WriteLine(proxier.natRules, append(args, "-s", src, "-j", string(chosenChain))...) utilproxy.WriteLine(proxier.natRules, append(args, "-s", src, "-j", string(chosenChain))...)
_, cidr, err := net.ParseCIDR(src) _, cidr, err := netutils.ParseCIDRSloppy(src)
if err != nil { if err != nil {
klog.ErrorS(err, "Error parsing CIDR in LoadBalancerSourceRanges, dropping it", "cidr", cidr) klog.ErrorS(err, "Error parsing CIDR in LoadBalancerSourceRanges, dropping it", "cidr", cidr)
} else if cidr.Contains(proxier.nodeIP) { } else if cidr.Contains(proxier.nodeIP) {
@ -1210,7 +1210,7 @@ func (proxier *Proxier) syncProxyRules() {
// loadbalancer's backend hosts. In this case, request will not hit the loadbalancer but loop back directly. // loadbalancer's backend hosts. In this case, request will not hit the loadbalancer but loop back directly.
// Need to add the following rule to allow request on host. // Need to add the following rule to allow request on host.
if allowFromNode { if allowFromNode {
utilproxy.WriteLine(proxier.natRules, append(args, "-s", utilproxy.ToCIDR(net.ParseIP(ingress)), "-j", string(chosenChain))...) utilproxy.WriteLine(proxier.natRules, append(args, "-s", utilproxy.ToCIDR(netutils.ParseIPSloppy(ingress)), "-j", string(chosenChain))...)
} }
} }
@ -1223,7 +1223,7 @@ func (proxier *Proxier) syncProxyRules() {
"-A", string(kubeExternalServicesChain), "-A", string(kubeExternalServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString), "-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
"-m", protocol, "-p", protocol, "-m", protocol, "-p", protocol,
"-d", utilproxy.ToCIDR(net.ParseIP(ingress)), "-d", utilproxy.ToCIDR(netutils.ParseIPSloppy(ingress)),
"--dport", strconv.Itoa(svcInfo.Port()), "--dport", strconv.Itoa(svcInfo.Port()),
"-j", "REJECT", "-j", "REJECT",
) )
@ -1241,14 +1241,14 @@ func (proxier *Proxier) syncProxyRules() {
continue continue
} }
lps := make([]utilnet.LocalPort, 0) lps := make([]netutils.LocalPort, 0)
for address := range nodeAddresses { for address := range nodeAddresses {
lp := utilnet.LocalPort{ lp := netutils.LocalPort{
Description: "nodePort for " + svcNameString, Description: "nodePort for " + svcNameString,
IP: address, IP: address,
IPFamily: localPortIPFamily, IPFamily: localPortIPFamily,
Port: svcInfo.NodePort(), Port: svcInfo.NodePort(),
Protocol: utilnet.Protocol(svcInfo.Protocol()), Protocol: netutils.Protocol(svcInfo.Protocol()),
} }
if utilproxy.IsZeroCIDR(address) { if utilproxy.IsZeroCIDR(address) {
// Empty IP address means all // Empty IP address means all
@ -1441,7 +1441,7 @@ func (proxier *Proxier) syncProxyRules() {
args = proxier.appendServiceCommentLocked(args, svcNameString) args = proxier.appendServiceCommentLocked(args, svcNameString)
// Handle traffic that loops back to the originator with SNAT. // Handle traffic that loops back to the originator with SNAT.
utilproxy.WriteLine(proxier.natRules, append(args, utilproxy.WriteLine(proxier.natRules, append(args,
"-s", utilproxy.ToCIDR(net.ParseIP(epIP)), "-s", utilproxy.ToCIDR(netutils.ParseIPSloppy(epIP)),
"-j", string(KubeMarkMasqChain))...) "-j", string(KubeMarkMasqChain))...)
// Update client-affinity lists. // Update client-affinity lists.
if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP { if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
@ -1564,7 +1564,7 @@ func (proxier *Proxier) syncProxyRules() {
break break
} }
// Ignore IP addresses with incorrect version // Ignore IP addresses with incorrect version
if isIPv6 && !utilnet.IsIPv6String(address) || !isIPv6 && utilnet.IsIPv6String(address) { if isIPv6 && !netutils.IsIPv6String(address) || !isIPv6 && netutils.IsIPv6String(address) {
klog.ErrorS(nil, "IP has incorrect IP version", "ip", address) klog.ErrorS(nil, "IP has incorrect IP version", "ip", address)
continue continue
} }
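
All of the iptables rule construction above follows one mechanical substitution: addresses and CIDRs that used to go through the standard library are now parsed with the sloppy variants in k8s.io/utils/net, which keep accepting IPv4 written with leading zeros. A minimal sketch of the behavioral difference, not part of this commit, assuming the package is imported as netutils and using made-up example addresses:

package main

import (
    "fmt"
    "net"

    netutils "k8s.io/utils/net"
)

func main() {
    // Dot-decimal IPv4 with leading zeros: rejected by net.ParseIP from
    // Go 1.17 on, still parsed (octets read as decimal) by the sloppy helper.
    fmt.Println(net.ParseIP("010.020.030.041"))            // <nil> on Go >= 1.17
    fmt.Println(netutils.ParseIPSloppy("010.020.030.041")) // 10.20.30.41

    // The same split exists for CIDR notation.
    _, cidr, err := netutils.ParseCIDRSloppy("010.020.030.0/24")
    fmt.Println(cidr, err) // 10.20.30.0/24 <nil>
}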



@ -51,7 +51,7 @@ import (
iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing" iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing"
"k8s.io/utils/exec" "k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing" fakeexec "k8s.io/utils/exec/testing"
utilnet "k8s.io/utils/net" netutils "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer" utilpointer "k8s.io/utils/pointer"
) )
@ -285,7 +285,7 @@ func TestDeleteEndpointConnectionsIPv4(t *testing.T) {
var expExecs int var expExecs int
if conntrack.IsClearConntrackNeeded(tc.protocol) { if conntrack.IsClearConntrackNeeded(tc.protocol) {
isIPv6 := func(ip string) bool { isIPv6 := func(ip string) bool {
netIP := net.ParseIP(ip) netIP := netutils.ParseIPSloppy(ip)
return netIP.To4() == nil return netIP.To4() == nil
} }
endpointIP := utilproxy.IPPart(tc.endpoint) endpointIP := utilproxy.IPPart(tc.endpoint)
@ -428,7 +428,7 @@ func TestDeleteEndpointConnectionsIPv6(t *testing.T) {
var expExecs int var expExecs int
if conntrack.IsClearConntrackNeeded(tc.protocol) { if conntrack.IsClearConntrackNeeded(tc.protocol) {
isIPv6 := func(ip string) bool { isIPv6 := func(ip string) bool {
netIP := net.ParseIP(ip) netIP := netutils.ParseIPSloppy(ip)
return netIP.To4() == nil return netIP.To4() == nil
} }
endpointIP := utilproxy.IPPart(tc.endpoint) endpointIP := utilproxy.IPPart(tc.endpoint)
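
The conntrack test cases above only need the family of an endpoint IP, and the change is limited to which parser the inline helper calls. A standalone version of that check, written here as an illustration rather than a quote of the test file; the nil guard is an addition, the in-test helper assumes valid input:

package main

import (
    "fmt"

    netutils "k8s.io/utils/net"
)

// isIPv6 parses the address with the sloppy parser and treats anything
// without an IPv4 form as IPv6, mirroring the helper in the tests above.
func isIPv6(ip string) bool {
    netIP := netutils.ParseIPSloppy(ip)
    return netIP != nil && netIP.To4() == nil
}

func main() {
    fmt.Println(isIPv6("10.0.0.1"))    // false
    fmt.Println(isIPv6("2001:db8::1")) // true
}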
@ -471,12 +471,12 @@ func (f *fakeCloseable) Close() error {
// fakePortOpener implements portOpener. // fakePortOpener implements portOpener.
type fakePortOpener struct { type fakePortOpener struct {
openPorts []*utilnet.LocalPort openPorts []*netutils.LocalPort
} }
// OpenLocalPort fakes out the listen() and bind() used by syncProxyRules // OpenLocalPort fakes out the listen() and bind() used by syncProxyRules
// to lock a local port. // to lock a local port.
func (f *fakePortOpener) OpenLocalPort(lp *utilnet.LocalPort) (utilnet.Closeable, error) { func (f *fakePortOpener) OpenLocalPort(lp *netutils.LocalPort) (netutils.Closeable, error) {
f.openPorts = append(f.openPorts, lp) f.openPorts = append(f.openPorts, lp)
return &fakeCloseable{}, nil return &fakeCloseable{}, nil
} }
@ -501,8 +501,8 @@ func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
masqueradeMark: "0x4000", masqueradeMark: "0x4000",
localDetector: detectLocal, localDetector: detectLocal,
hostname: testHostname, hostname: testHostname,
portsMap: make(map[utilnet.LocalPort]utilnet.Closeable), portsMap: make(map[netutils.LocalPort]netutils.Closeable),
portMapper: &fakePortOpener{[]*utilnet.LocalPort{}}, portMapper: &fakePortOpener{[]*netutils.LocalPort{}},
serviceHealthServer: healthcheck.NewFakeServiceHealthServer(), serviceHealthServer: healthcheck.NewFakeServiceHealthServer(),
precomputedProbabilities: make([]string, 0, 1001), precomputedProbabilities: make([]string, 0, 1001),
iptablesData: bytes.NewBuffer(nil), iptablesData: bytes.NewBuffer(nil),
@ -1123,9 +1123,9 @@ func TestNodePort(t *testing.T) {
) )
itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0} itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
addrs := []net.Addr{&net.IPNet{IP: net.ParseIP("127.0.0.1"), Mask: net.CIDRMask(16, 32)}} addrs := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(16, 32)}}
itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0} itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0}
addrs1 := []net.Addr{&net.IPNet{IP: net.ParseIP("::1/128"), Mask: net.CIDRMask(128, 128)}} addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("::1/128"), Mask: net.CIDRMask(128, 128)}}
fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs) fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1) fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
fp.nodePortAddresses = []string{} fp.nodePortAddresses = []string{}
@ -1175,9 +1175,9 @@ func TestHealthCheckNodePort(t *testing.T) {
) )
itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0} itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
addrs := []net.Addr{&net.IPNet{IP: net.ParseIP("127.0.0.1"), Mask: net.CIDRMask(16, 32)}} addrs := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(16, 32)}}
itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0} itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0}
addrs1 := []net.Addr{&net.IPNet{IP: net.ParseIP("::1"), Mask: net.CIDRMask(128, 128)}} addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)}}
fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs) fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1) fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
fp.nodePortAddresses = []string{"127.0.0.1/16"} fp.nodePortAddresses = []string{"127.0.0.1/16"}
@ -1615,7 +1615,7 @@ func onlyLocalNodePorts(t *testing.T, fp *Proxier, ipt *iptablestest.FakeIPTable
) )
itf := net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0} itf := net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}
addrs := []net.Addr{&net.IPNet{IP: net.ParseIP("10.20.30.51"), Mask: net.CIDRMask(24, 32)}} addrs := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("10.20.30.51"), Mask: net.CIDRMask(24, 32)}}
fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs) fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
fp.nodePortAddresses = []string{"10.20.30.0/24"} fp.nodePortAddresses = []string{"10.20.30.0/24"}


@ -17,12 +17,12 @@ limitations under the License.
package ipvs package ipvs
import ( import (
"net"
"reflect" "reflect"
"testing" "testing"
utilipvs "k8s.io/kubernetes/pkg/util/ipvs" utilipvs "k8s.io/kubernetes/pkg/util/ipvs"
utilipvstest "k8s.io/kubernetes/pkg/util/ipvs/testing" utilipvstest "k8s.io/kubernetes/pkg/util/ipvs/testing"
netutils "k8s.io/utils/net"
) )
func Test_GracefulDeleteRS(t *testing.T) { func Test_GracefulDeleteRS(t *testing.T) {
@ -37,12 +37,12 @@ func Test_GracefulDeleteRS(t *testing.T) {
{ {
name: "graceful delete, no connections results in deleting the real server immediatetly", name: "graceful delete, no connections results in deleting the real server immediatetly",
vs: &utilipvs.VirtualServer{ vs: &utilipvs.VirtualServer{
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp", Protocol: "tcp",
Port: uint16(80), Port: uint16(80),
}, },
rs: &utilipvs.RealServer{ rs: &utilipvs.RealServer{
Address: net.ParseIP("10.0.0.1"), Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 100, Weight: 100,
ActiveConn: 0, ActiveConn: 0,
@ -55,7 +55,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80, Port: 80,
Protocol: "tcp", Protocol: "tcp",
}: { }: {
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp", Protocol: "tcp",
Port: uint16(80), Port: uint16(80),
}, },
@ -67,7 +67,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Protocol: "tcp", Protocol: "tcp",
}: { }: {
{ {
Address: net.ParseIP("10.0.0.1"), Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 100, Weight: 100,
ActiveConn: 0, ActiveConn: 0,
@ -83,7 +83,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80, Port: 80,
Protocol: "tcp", Protocol: "tcp",
}: { }: {
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp", Protocol: "tcp",
Port: uint16(80), Port: uint16(80),
}, },
@ -101,12 +101,12 @@ func Test_GracefulDeleteRS(t *testing.T) {
{ {
name: "graceful delete, real server has active connections, weight should be 0 but don't delete", name: "graceful delete, real server has active connections, weight should be 0 but don't delete",
vs: &utilipvs.VirtualServer{ vs: &utilipvs.VirtualServer{
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp", Protocol: "tcp",
Port: uint16(80), Port: uint16(80),
}, },
rs: &utilipvs.RealServer{ rs: &utilipvs.RealServer{
Address: net.ParseIP("10.0.0.1"), Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 100, Weight: 100,
ActiveConn: 10, ActiveConn: 10,
@ -119,7 +119,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80, Port: 80,
Protocol: "tcp", Protocol: "tcp",
}: { }: {
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp", Protocol: "tcp",
Port: uint16(80), Port: uint16(80),
}, },
@ -131,7 +131,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Protocol: "tcp", Protocol: "tcp",
}: { }: {
{ {
Address: net.ParseIP("10.0.0.1"), Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 100, Weight: 100,
ActiveConn: 10, ActiveConn: 10,
@ -147,7 +147,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80, Port: 80,
Protocol: "tcp", Protocol: "tcp",
}: { }: {
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp", Protocol: "tcp",
Port: uint16(80), Port: uint16(80),
}, },
@ -159,7 +159,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Protocol: "tcp", Protocol: "tcp",
}: { }: {
{ {
Address: net.ParseIP("10.0.0.1"), Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 0, Weight: 0,
ActiveConn: 10, ActiveConn: 10,
@ -173,12 +173,12 @@ func Test_GracefulDeleteRS(t *testing.T) {
{ {
name: "graceful delete, real server has in-active connections, weight should be 0 but don't delete", name: "graceful delete, real server has in-active connections, weight should be 0 but don't delete",
vs: &utilipvs.VirtualServer{ vs: &utilipvs.VirtualServer{
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp", Protocol: "tcp",
Port: uint16(80), Port: uint16(80),
}, },
rs: &utilipvs.RealServer{ rs: &utilipvs.RealServer{
Address: net.ParseIP("10.0.0.1"), Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 100, Weight: 100,
ActiveConn: 0, ActiveConn: 0,
@ -191,7 +191,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80, Port: 80,
Protocol: "tcp", Protocol: "tcp",
}: { }: {
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp", Protocol: "tcp",
Port: uint16(80), Port: uint16(80),
}, },
@ -203,7 +203,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Protocol: "tcp", Protocol: "tcp",
}: { }: {
{ {
Address: net.ParseIP("10.0.0.1"), Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 100, Weight: 100,
ActiveConn: 0, ActiveConn: 0,
@ -219,7 +219,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80, Port: 80,
Protocol: "tcp", Protocol: "tcp",
}: { }: {
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp", Protocol: "tcp",
Port: uint16(80), Port: uint16(80),
}, },
@ -231,7 +231,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Protocol: "tcp", Protocol: "tcp",
}: { }: {
{ {
Address: net.ParseIP("10.0.0.1"), Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 0, Weight: 0,
ActiveConn: 0, ActiveConn: 0,
@ -245,12 +245,12 @@ func Test_GracefulDeleteRS(t *testing.T) {
{ {
name: "graceful delete, real server has connections, but udp connections are deleted immediately", name: "graceful delete, real server has connections, but udp connections are deleted immediately",
vs: &utilipvs.VirtualServer{ vs: &utilipvs.VirtualServer{
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "udp", Protocol: "udp",
Port: uint16(80), Port: uint16(80),
}, },
rs: &utilipvs.RealServer{ rs: &utilipvs.RealServer{
Address: net.ParseIP("10.0.0.1"), Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 100, Weight: 100,
ActiveConn: 10, ActiveConn: 10,
@ -263,7 +263,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80, Port: 80,
Protocol: "udp", Protocol: "udp",
}: { }: {
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "udp", Protocol: "udp",
Port: uint16(80), Port: uint16(80),
}, },
@ -275,7 +275,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Protocol: "udp", Protocol: "udp",
}: { }: {
{ {
Address: net.ParseIP("10.0.0.1"), Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 100, Weight: 100,
ActiveConn: 10, ActiveConn: 10,
@ -291,7 +291,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80, Port: 80,
Protocol: "udp", Protocol: "udp",
}: { }: {
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "udp", Protocol: "udp",
Port: uint16(80), Port: uint16(80),
}, },
@ -309,12 +309,12 @@ func Test_GracefulDeleteRS(t *testing.T) {
{ {
name: "graceful delete, real server mismatch should be no-op", name: "graceful delete, real server mismatch should be no-op",
vs: &utilipvs.VirtualServer{ vs: &utilipvs.VirtualServer{
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp", Protocol: "tcp",
Port: uint16(80), Port: uint16(80),
}, },
rs: &utilipvs.RealServer{ rs: &utilipvs.RealServer{
Address: net.ParseIP("10.0.0.1"), Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(81), // port mismatched Port: uint16(81), // port mismatched
Weight: 100, Weight: 100,
ActiveConn: 0, ActiveConn: 0,
@ -327,7 +327,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80, Port: 80,
Protocol: "tcp", Protocol: "tcp",
}: { }: {
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp", Protocol: "tcp",
Port: uint16(80), Port: uint16(80),
}, },
@ -339,7 +339,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Protocol: "tcp", Protocol: "tcp",
}: { }: {
{ {
Address: net.ParseIP("10.0.0.1"), Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 100, Weight: 100,
ActiveConn: 0, ActiveConn: 0,
@ -355,7 +355,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Port: 80, Port: 80,
Protocol: "tcp", Protocol: "tcp",
}: { }: {
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: "tcp", Protocol: "tcp",
Port: uint16(80), Port: uint16(80),
}, },
@ -367,7 +367,7 @@ func Test_GracefulDeleteRS(t *testing.T) {
Protocol: "tcp", Protocol: "tcp",
}: { }: {
{ {
Address: net.ParseIP("10.0.0.1"), Address: netutils.ParseIPSloppy("10.0.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 100, Weight: 100,
ActiveConn: 0, ActiveConn: 0,
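
The graceful-termination cases encode the expected semantics in their fixtures: a real server with remaining connections has its weight set to 0 but is kept, UDP destinations are removed immediately, and a mismatched real server is a no-op. Each fixture builds the IPVS types directly, so the parser swap touches every Address field. A hedged sketch of that construction, using only the field names visible in the diff (utilipvs is k8s.io/kubernetes/pkg/util/ipvs, netutils is k8s.io/utils/net):

// Sketch only, mirroring the fixture shape above.
func gracefulDeleteFixture() (*utilipvs.VirtualServer, *utilipvs.RealServer) {
    vs := &utilipvs.VirtualServer{
        Address:  netutils.ParseIPSloppy("1.1.1.1"),
        Protocol: "tcp",
        Port:     uint16(80),
    }
    rs := &utilipvs.RealServer{
        Address:    netutils.ParseIPSloppy("10.0.0.1"),
        Port:       uint16(80),
        Weight:     100,
        ActiveConn: 10, // active connections keep the entry alive at weight 0
    }
    return vs, rs
}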


@ -20,9 +20,9 @@ package ipvs
import ( import (
"fmt" "fmt"
"net"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
netutils "k8s.io/utils/net"
"github.com/vishvananda/netlink" "github.com/vishvananda/netlink"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
@ -44,7 +44,7 @@ func (h *netlinkHandle) EnsureAddressBind(address, devName string) (exist bool,
if err != nil { if err != nil {
return false, fmt.Errorf("error get interface: %s, err: %v", devName, err) return false, fmt.Errorf("error get interface: %s, err: %v", devName, err)
} }
addr := net.ParseIP(address) addr := netutils.ParseIPSloppy(address)
if addr == nil { if addr == nil {
return false, fmt.Errorf("error parse ip address: %s", address) return false, fmt.Errorf("error parse ip address: %s", address)
} }
@ -64,7 +64,7 @@ func (h *netlinkHandle) UnbindAddress(address, devName string) error {
if err != nil { if err != nil {
return fmt.Errorf("error get interface: %s, err: %v", devName, err) return fmt.Errorf("error get interface: %s, err: %v", devName, err)
} }
addr := net.ParseIP(address) addr := netutils.ParseIPSloppy(address)
if addr == nil { if addr == nil {
return fmt.Errorf("error parse ip address: %s", address) return fmt.Errorf("error parse ip address: %s", address)
} }


@ -34,7 +34,7 @@ import (
"k8s.io/klog/v2" "k8s.io/klog/v2"
utilexec "k8s.io/utils/exec" utilexec "k8s.io/utils/exec"
utilnet "k8s.io/utils/net" netutils "k8s.io/utils/net"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
discovery "k8s.io/api/discovery/v1" discovery "k8s.io/api/discovery/v1"
@ -222,7 +222,7 @@ type Proxier struct {
mu sync.Mutex // protects the following fields mu sync.Mutex // protects the following fields
serviceMap proxy.ServiceMap serviceMap proxy.ServiceMap
endpointsMap proxy.EndpointsMap endpointsMap proxy.EndpointsMap
portsMap map[utilnet.LocalPort]utilnet.Closeable portsMap map[netutils.LocalPort]netutils.Closeable
nodeLabels map[string]string nodeLabels map[string]string
// endpointSlicesSynced, and servicesSynced are set to true when // endpointSlicesSynced, and servicesSynced are set to true when
// corresponding objects are synced after startup. This is used to avoid updating // corresponding objects are synced after startup. This is used to avoid updating
@ -248,7 +248,7 @@ type Proxier struct {
localDetector proxyutiliptables.LocalTrafficDetector localDetector proxyutiliptables.LocalTrafficDetector
hostname string hostname string
nodeIP net.IP nodeIP net.IP
portMapper utilnet.PortOpener portMapper netutils.PortOpener
recorder events.EventRecorder recorder events.EventRecorder
serviceHealthServer healthcheck.ServiceHealthServer serviceHealthServer healthcheck.ServiceHealthServer
@ -312,7 +312,7 @@ func (r *realIPGetter) NodeIPs() (ips []net.IP, err error) {
} }
// translate ip string to IP // translate ip string to IP
for _, ipStr := range nodeAddress.UnsortedList() { for _, ipStr := range nodeAddress.UnsortedList() {
a := net.ParseIP(ipStr) a := netutils.ParseIPSloppy(ipStr)
if a.IsLoopback() { if a.IsLoopback() {
continue continue
} }
@ -451,11 +451,11 @@ func NewProxier(ipt utiliptables.Interface,
} }
// excludeCIDRs has been validated before, here we just parse it to IPNet list // excludeCIDRs has been validated before, here we just parse it to IPNet list
parsedExcludeCIDRs, _ := utilnet.ParseCIDRs(excludeCIDRs) parsedExcludeCIDRs, _ := netutils.ParseCIDRs(excludeCIDRs)
proxier := &Proxier{ proxier := &Proxier{
ipFamily: ipFamily, ipFamily: ipFamily,
portsMap: make(map[utilnet.LocalPort]utilnet.Closeable), portsMap: make(map[netutils.LocalPort]netutils.Closeable),
serviceMap: make(proxy.ServiceMap), serviceMap: make(proxy.ServiceMap),
serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, ipFamily, recorder, nil), serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, ipFamily, recorder, nil),
endpointsMap: make(proxy.EndpointsMap), endpointsMap: make(proxy.EndpointsMap),
@ -470,7 +470,7 @@ func NewProxier(ipt utiliptables.Interface,
localDetector: localDetector, localDetector: localDetector,
hostname: hostname, hostname: hostname,
nodeIP: nodeIP, nodeIP: nodeIP,
portMapper: &utilnet.ListenPortOpener, portMapper: &netutils.ListenPortOpener,
recorder: recorder, recorder: recorder,
serviceHealthServer: serviceHealthServer, serviceHealthServer: serviceHealthServer,
healthzServer: healthzServer, healthzServer: healthzServer,
@ -558,7 +558,7 @@ func NewDualStackProxier(
func filterCIDRs(wantIPv6 bool, cidrs []string) []string { func filterCIDRs(wantIPv6 bool, cidrs []string) []string {
var filteredCIDRs []string var filteredCIDRs []string
for _, cidr := range cidrs { for _, cidr := range cidrs {
if utilnet.IsIPv6CIDRString(cidr) == wantIPv6 { if netutils.IsIPv6CIDRString(cidr) == wantIPv6 {
filteredCIDRs = append(filteredCIDRs, cidr) filteredCIDRs = append(filteredCIDRs, cidr)
} }
} }
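
filterCIDRs is the dual-stack half of the same cleanup: CIDR strings are classified with netutils.IsIPv6CIDRString so each proxier keeps only the ranges of its own family. A short runnable example of that filtering, with made-up ranges:

package main

import (
    "fmt"

    netutils "k8s.io/utils/net"
)

// filterCIDRs keeps only the CIDR strings matching the requested family,
// mirroring the helper shown in the diff above.
func filterCIDRs(wantIPv6 bool, cidrs []string) []string {
    var filtered []string
    for _, cidr := range cidrs {
        if netutils.IsIPv6CIDRString(cidr) == wantIPv6 {
            filtered = append(filtered, cidr)
        }
    }
    return filtered
}

func main() {
    cidrs := []string{"10.0.0.0/8", "fd00::/64"}
    fmt.Println(filterCIDRs(false, cidrs)) // [10.0.0.0/8]
    fmt.Println(filterCIDRs(true, cidrs))  // [fd00::/64]
}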
@ -1077,7 +1077,7 @@ func (proxier *Proxier) syncProxyRules() {
} }
// Accumulate the set of local ports that we will be holding open once this update is complete // Accumulate the set of local ports that we will be holding open once this update is complete
replacementPortsMap := map[utilnet.LocalPort]utilnet.Closeable{} replacementPortsMap := map[netutils.LocalPort]netutils.Closeable{}
// activeIPVSServices represents IPVS service successfully created in this round of sync // activeIPVSServices represents IPVS service successfully created in this round of sync
activeIPVSServices := map[string]bool{} activeIPVSServices := map[string]bool{}
// currentIPVSServices represent IPVS services listed from the system // currentIPVSServices represent IPVS services listed from the system
@ -1115,7 +1115,7 @@ func (proxier *Proxier) syncProxyRules() {
} else { } else {
nodeAddresses = nodeAddrSet.List() nodeAddresses = nodeAddrSet.List()
for _, address := range nodeAddresses { for _, address := range nodeAddresses {
a := net.ParseIP(address) a := netutils.ParseIPSloppy(address)
if a.IsLoopback() { if a.IsLoopback() {
continue continue
} }
@ -1134,7 +1134,7 @@ func (proxier *Proxier) syncProxyRules() {
// filter node IPs by proxier ipfamily // filter node IPs by proxier ipfamily
idx := 0 idx := 0
for _, nodeIP := range nodeIPs { for _, nodeIP := range nodeIPs {
if (proxier.ipFamily == v1.IPv6Protocol) == utilnet.IsIPv6(nodeIP) { if (proxier.ipFamily == v1.IPv6Protocol) == netutils.IsIPv6(nodeIP) {
nodeIPs[idx] = nodeIP nodeIPs[idx] = nodeIP
idx++ idx++
} }
@ -1151,10 +1151,10 @@ func (proxier *Proxier) syncProxyRules() {
klog.ErrorS(nil, "Failed to cast serviceInfo", "svcName", svcName.String()) klog.ErrorS(nil, "Failed to cast serviceInfo", "svcName", svcName.String())
continue continue
} }
isIPv6 := utilnet.IsIPv6(svcInfo.ClusterIP()) isIPv6 := netutils.IsIPv6(svcInfo.ClusterIP())
localPortIPFamily := utilnet.IPv4 localPortIPFamily := netutils.IPv4
if isIPv6 { if isIPv6 {
localPortIPFamily = utilnet.IPv6 localPortIPFamily = netutils.IPv6
} }
protocol := strings.ToLower(string(svcInfo.Protocol())) protocol := strings.ToLower(string(svcInfo.Protocol()))
// Precompute svcNameString; with many services the many calls // Precompute svcNameString; with many services the many calls
@ -1240,14 +1240,14 @@ func (proxier *Proxier) syncProxyRules() {
// If the "external" IP happens to be an IP that is local to this // If the "external" IP happens to be an IP that is local to this
// machine, hold the local port open so no other process can open it // machine, hold the local port open so no other process can open it
// (because the socket might open but it would never work). // (because the socket might open but it would never work).
if (svcInfo.Protocol() != v1.ProtocolSCTP) && localAddrSet.Has(net.ParseIP(externalIP)) { if (svcInfo.Protocol() != v1.ProtocolSCTP) && localAddrSet.Has(netutils.ParseIPSloppy(externalIP)) {
// We do not start listening on SCTP ports, according to our agreement in the SCTP support KEP // We do not start listening on SCTP ports, according to our agreement in the SCTP support KEP
lp := utilnet.LocalPort{ lp := netutils.LocalPort{
Description: "externalIP for " + svcNameString, Description: "externalIP for " + svcNameString,
IP: externalIP, IP: externalIP,
IPFamily: localPortIPFamily, IPFamily: localPortIPFamily,
Port: svcInfo.Port(), Port: svcInfo.Port(),
Protocol: utilnet.Protocol(svcInfo.Protocol()), Protocol: netutils.Protocol(svcInfo.Protocol()),
} }
if proxier.portsMap[lp] != nil { if proxier.portsMap[lp] != nil {
klog.V(4).InfoS("Port was open before and is still needed", "port", lp.String()) klog.V(4).InfoS("Port was open before and is still needed", "port", lp.String())
@ -1297,7 +1297,7 @@ func (proxier *Proxier) syncProxyRules() {
// ipvs call // ipvs call
serv := &utilipvs.VirtualServer{ serv := &utilipvs.VirtualServer{
Address: net.ParseIP(externalIP), Address: netutils.ParseIPSloppy(externalIP),
Port: uint16(svcInfo.Port()), Port: uint16(svcInfo.Port()),
Protocol: string(svcInfo.Protocol()), Protocol: string(svcInfo.Protocol()),
Scheduler: proxier.ipvsScheduler, Scheduler: proxier.ipvsScheduler,
@ -1372,7 +1372,7 @@ func (proxier *Proxier) syncProxyRules() {
proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].activeEntries.Insert(entry.String()) proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].activeEntries.Insert(entry.String())
// ignore error because it has been validated // ignore error because it has been validated
_, cidr, _ := net.ParseCIDR(src) _, cidr, _ := netutils.ParseCIDRSloppy(src)
if cidr.Contains(proxier.nodeIP) { if cidr.Contains(proxier.nodeIP) {
allowFromNode = true allowFromNode = true
} }
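
For LoadBalancerSourceRanges the proxier also needs to know whether the node itself falls inside one of the allowed ranges; the range is parsed with the sloppy CIDR parser and checked with Contains so that health checks looping back from the node are not dropped. A small sketch of that check, with a hypothetical source range and node IP:

package main

import (
    "fmt"
    "net"

    netutils "k8s.io/utils/net"
)

// nodeAllowed reports whether nodeIP falls inside the source range, the same
// ParseCIDRSloppy + Contains check the proxier uses to decide whether an
// allow-from-node rule is needed. The values in main are made up.
func nodeAllowed(src string, nodeIP net.IP) bool {
    _, cidr, err := netutils.ParseCIDRSloppy(src)
    if err != nil {
        return false
    }
    return cidr.Contains(nodeIP)
}

func main() {
    nodeIP := netutils.ParseIPSloppy("203.0.113.7")
    fmt.Println(nodeAllowed("203.0.113.0/24", nodeIP))  // true
    fmt.Println(nodeAllowed("198.51.100.0/24", nodeIP)) // false
}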
@ -1399,7 +1399,7 @@ func (proxier *Proxier) syncProxyRules() {
// ipvs call // ipvs call
serv := &utilipvs.VirtualServer{ serv := &utilipvs.VirtualServer{
Address: net.ParseIP(ingress), Address: netutils.ParseIPSloppy(ingress),
Port: uint16(svcInfo.Port()), Port: uint16(svcInfo.Port()),
Protocol: string(svcInfo.Protocol()), Protocol: string(svcInfo.Protocol()),
Scheduler: proxier.ipvsScheduler, Scheduler: proxier.ipvsScheduler,
@ -1427,14 +1427,14 @@ func (proxier *Proxier) syncProxyRules() {
continue continue
} }
var lps []utilnet.LocalPort var lps []netutils.LocalPort
for _, address := range nodeAddresses { for _, address := range nodeAddresses {
lp := utilnet.LocalPort{ lp := netutils.LocalPort{
Description: "nodePort for " + svcNameString, Description: "nodePort for " + svcNameString,
IP: address, IP: address,
IPFamily: localPortIPFamily, IPFamily: localPortIPFamily,
Port: svcInfo.NodePort(), Port: svcInfo.NodePort(),
Protocol: utilnet.Protocol(svcInfo.Protocol()), Protocol: netutils.Protocol(svcInfo.Protocol()),
} }
if utilproxy.IsZeroCIDR(address) { if utilproxy.IsZeroCIDR(address) {
// Empty IP address means all // Empty IP address means all
@ -1470,7 +1470,7 @@ func (proxier *Proxier) syncProxyRules() {
} }
klog.V(2).InfoS("Opened local port", "port", lp.String()) klog.V(2).InfoS("Opened local port", "port", lp.String())
if lp.Protocol == utilnet.UDP { if lp.Protocol == netutils.UDP {
conntrack.ClearEntriesForPort(proxier.exec, lp.Port, isIPv6, v1.ProtocolUDP) conntrack.ClearEntriesForPort(proxier.exec, lp.Port, isIPv6, v1.ProtocolUDP)
} }
replacementPortsMap[lp] = socket replacementPortsMap[lp] = socket
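
Holding node ports and external-IP ports open now also goes through k8s.io/utils/net: the proxier keeps a map from netutils.LocalPort to netutils.Closeable and opens sockets through a netutils.PortOpener. A hedged sketch of opening and releasing one such port; the description, address and port number below are made up:

package main

import (
    "fmt"

    netutils "k8s.io/utils/net"
)

func main() {
    lp := netutils.LocalPort{
        Description: "nodePort for example/svc", // hypothetical description
        IP:          "127.0.0.1",
        IPFamily:    netutils.IPv4,
        Port:        30080,
        Protocol:    netutils.Protocol("TCP"),
    }
    // ListenPortOpener is the real PortOpener the proxier wires in; the tests
    // above substitute a fakePortOpener with the same OpenLocalPort method.
    socket, err := netutils.ListenPortOpener.OpenLocalPort(&lp)
    if err != nil {
        fmt.Println("could not hold port open:", err)
        return
    }
    defer socket.Close()
    fmt.Println("holding", lp.String())
}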
@ -2111,7 +2111,7 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
} }
newDest := &utilipvs.RealServer{ newDest := &utilipvs.RealServer{
Address: net.ParseIP(ip), Address: netutils.ParseIPSloppy(ip),
Port: uint16(portNum), Port: uint16(portNum),
Weight: 1, Weight: 1,
} }
@ -2154,7 +2154,7 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
} }
delDest := &utilipvs.RealServer{ delDest := &utilipvs.RealServer{
Address: net.ParseIP(ip), Address: netutils.ParseIPSloppy(ip),
Port: uint16(portNum), Port: uint16(portNum),
} }
@ -2169,13 +2169,13 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
} }
func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, currentServices map[string]*utilipvs.VirtualServer, legacyBindAddrs map[string]bool) { func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, currentServices map[string]*utilipvs.VirtualServer, legacyBindAddrs map[string]bool) {
isIPv6 := utilnet.IsIPv6(proxier.nodeIP) isIPv6 := netutils.IsIPv6(proxier.nodeIP)
for cs := range currentServices { for cs := range currentServices {
svc := currentServices[cs] svc := currentServices[cs]
if proxier.isIPInExcludeCIDRs(svc.Address) { if proxier.isIPInExcludeCIDRs(svc.Address) {
continue continue
} }
if utilnet.IsIPv6(svc.Address) != isIPv6 { if netutils.IsIPv6(svc.Address) != isIPv6 {
// Not our family // Not our family
continue continue
} }
@ -2210,9 +2210,9 @@ func (proxier *Proxier) isIPInExcludeCIDRs(ip net.IP) bool {
func (proxier *Proxier) getLegacyBindAddr(activeBindAddrs map[string]bool, currentBindAddrs []string) map[string]bool { func (proxier *Proxier) getLegacyBindAddr(activeBindAddrs map[string]bool, currentBindAddrs []string) map[string]bool {
legacyAddrs := make(map[string]bool) legacyAddrs := make(map[string]bool)
isIPv6 := utilnet.IsIPv6(proxier.nodeIP) isIPv6 := netutils.IsIPv6(proxier.nodeIP)
for _, addr := range currentBindAddrs { for _, addr := range currentBindAddrs {
addrIsIPv6 := utilnet.IsIPv6(net.ParseIP(addr)) addrIsIPv6 := netutils.IsIPv6(netutils.ParseIPSloppy(addr))
if addrIsIPv6 && !isIPv6 || !addrIsIPv6 && isIPv6 { if addrIsIPv6 && !isIPv6 || !addrIsIPv6 && isIPv6 {
continue continue
} }


@ -50,9 +50,8 @@ import (
ipvstest "k8s.io/kubernetes/pkg/util/ipvs/testing" ipvstest "k8s.io/kubernetes/pkg/util/ipvs/testing"
"k8s.io/utils/exec" "k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing" fakeexec "k8s.io/utils/exec/testing"
netutils "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer" utilpointer "k8s.io/utils/pointer"
utilnet "k8s.io/utils/net"
) )
const testHostname = "test-hostname" const testHostname = "test-hostname"
@ -72,12 +71,12 @@ func (f *fakeIPGetter) BindedIPs() (sets.String, error) {
// fakePortOpener implements portOpener. // fakePortOpener implements portOpener.
type fakePortOpener struct { type fakePortOpener struct {
openPorts []*utilnet.LocalPort openPorts []*netutils.LocalPort
} }
// OpenLocalPort fakes out the listen() and bind() used by syncProxyRules // OpenLocalPort fakes out the listen() and bind() used by syncProxyRules
// to lock a local port. // to lock a local port.
func (f *fakePortOpener) OpenLocalPort(lp *utilnet.LocalPort) (utilnet.Closeable, error) { func (f *fakePortOpener) OpenLocalPort(lp *netutils.LocalPort) (netutils.Closeable, error) {
f.openPorts = append(f.openPorts, lp) f.openPorts = append(f.openPorts, lp)
return nil, nil return nil, nil
} }
@ -113,7 +112,7 @@ func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset u
// filter node IPs by proxier ipfamily // filter node IPs by proxier ipfamily
idx := 0 idx := 0
for _, nodeIP := range nodeIPs { for _, nodeIP := range nodeIPs {
if (ipFamily == v1.IPv6Protocol) == utilnet.IsIPv6(nodeIP) { if (ipFamily == v1.IPv6Protocol) == netutils.IsIPv6(nodeIP) {
nodeIPs[idx] = nodeIP nodeIPs[idx] = nodeIP
idx++ idx++
} }
@ -153,8 +152,8 @@ func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset u
strictARP: false, strictARP: false,
localDetector: proxyutiliptables.NewNoOpLocalDetector(), localDetector: proxyutiliptables.NewNoOpLocalDetector(),
hostname: testHostname, hostname: testHostname,
portsMap: make(map[utilnet.LocalPort]utilnet.Closeable), portsMap: make(map[netutils.LocalPort]netutils.Closeable),
portMapper: &fakePortOpener{[]*utilnet.LocalPort{}}, portMapper: &fakePortOpener{[]*netutils.LocalPort{}},
serviceHealthServer: healthcheck.NewFakeServiceHealthServer(), serviceHealthServer: healthcheck.NewFakeServiceHealthServer(),
ipvsScheduler: DefaultScheduler, ipvsScheduler: DefaultScheduler,
ipGetter: &fakeIPGetter{nodeIPs: nodeIPs}, ipGetter: &fakeIPGetter{nodeIPs: nodeIPs},
@ -513,8 +512,8 @@ func TestNodePortIPv4(t *testing.T) {
}), }),
}, },
nodeIPs: []net.IP{ nodeIPs: []net.IP{
net.ParseIP("100.101.102.103"), netutils.ParseIPSloppy("100.101.102.103"),
net.ParseIP("2001:db8::1:1"), netutils.ParseIPSloppy("2001:db8::1:1"),
}, },
nodePortAddresses: []string{}, nodePortAddresses: []string{},
expectedIPVS: &ipvstest.FakeIPVS{ expectedIPVS: &ipvstest.FakeIPVS{
@ -524,7 +523,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 80, Port: 80,
Protocol: "TCP", Protocol: "TCP",
}: { }: {
Address: net.ParseIP("10.20.30.41"), Address: netutils.ParseIPSloppy("10.20.30.41"),
Protocol: "TCP", Protocol: "TCP",
Port: uint16(80), Port: uint16(80),
Scheduler: "rr", Scheduler: "rr",
@ -534,7 +533,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 3001, Port: 3001,
Protocol: "TCP", Protocol: "TCP",
}: { }: {
Address: net.ParseIP("100.101.102.103"), Address: netutils.ParseIPSloppy("100.101.102.103"),
Protocol: "TCP", Protocol: "TCP",
Port: uint16(3001), Port: uint16(3001),
Scheduler: "rr", Scheduler: "rr",
@ -547,7 +546,7 @@ func TestNodePortIPv4(t *testing.T) {
Protocol: "TCP", Protocol: "TCP",
}: { }: {
{ {
Address: net.ParseIP("10.180.0.1"), Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 1, Weight: 1,
}, },
@ -558,7 +557,7 @@ func TestNodePortIPv4(t *testing.T) {
Protocol: "TCP", Protocol: "TCP",
}: { }: {
{ {
Address: net.ParseIP("10.180.0.1"), Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 1, Weight: 1,
}, },
@ -594,7 +593,7 @@ func TestNodePortIPv4(t *testing.T) {
}), }),
}, },
nodeIPs: []net.IP{ nodeIPs: []net.IP{
net.ParseIP("100.101.102.103"), netutils.ParseIPSloppy("100.101.102.103"),
}, },
nodePortAddresses: []string{"0.0.0.0/0"}, nodePortAddresses: []string{"0.0.0.0/0"},
expectedIPVS: &ipvstest.FakeIPVS{ expectedIPVS: &ipvstest.FakeIPVS{
@ -604,7 +603,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 80, Port: 80,
Protocol: "UDP", Protocol: "UDP",
}: { }: {
Address: net.ParseIP("10.20.30.41"), Address: netutils.ParseIPSloppy("10.20.30.41"),
Protocol: "UDP", Protocol: "UDP",
Port: uint16(80), Port: uint16(80),
Scheduler: "rr", Scheduler: "rr",
@ -614,7 +613,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 3001, Port: 3001,
Protocol: "UDP", Protocol: "UDP",
}: { }: {
Address: net.ParseIP("100.101.102.103"), Address: netutils.ParseIPSloppy("100.101.102.103"),
Protocol: "UDP", Protocol: "UDP",
Port: uint16(3001), Port: uint16(3001),
Scheduler: "rr", Scheduler: "rr",
@ -627,7 +626,7 @@ func TestNodePortIPv4(t *testing.T) {
Protocol: "UDP", Protocol: "UDP",
}: { }: {
{ {
Address: net.ParseIP("10.180.0.1"), Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 1, Weight: 1,
}, },
@ -638,7 +637,7 @@ func TestNodePortIPv4(t *testing.T) {
Protocol: "UDP", Protocol: "UDP",
}: { }: {
{ {
Address: net.ParseIP("10.180.0.1"), Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 1, Weight: 1,
}, },
@ -677,7 +676,7 @@ func TestNodePortIPv4(t *testing.T) {
}, },
endpoints: []*discovery.EndpointSlice{}, endpoints: []*discovery.EndpointSlice{},
nodeIPs: []net.IP{ nodeIPs: []net.IP{
net.ParseIP("100.101.102.103"), netutils.ParseIPSloppy("100.101.102.103"),
}, },
nodePortAddresses: []string{}, nodePortAddresses: []string{},
expectedIPVS: &ipvstest.FakeIPVS{ expectedIPVS: &ipvstest.FakeIPVS{
@ -687,7 +686,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 80, Port: 80,
Protocol: "TCP", Protocol: "TCP",
}: { }: {
Address: net.ParseIP("10.20.30.41"), Address: netutils.ParseIPSloppy("10.20.30.41"),
Protocol: "TCP", Protocol: "TCP",
Port: uint16(80), Port: uint16(80),
Scheduler: "rr", Scheduler: "rr",
@ -697,7 +696,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 3001, Port: 3001,
Protocol: "TCP", Protocol: "TCP",
}: { }: {
Address: net.ParseIP("100.101.102.103"), Address: netutils.ParseIPSloppy("100.101.102.103"),
Protocol: "TCP", Protocol: "TCP",
Port: uint16(3001), Port: uint16(3001),
Scheduler: "rr", Scheduler: "rr",
@ -745,12 +744,12 @@ func TestNodePortIPv4(t *testing.T) {
}), }),
}, },
nodeIPs: []net.IP{ nodeIPs: []net.IP{
net.ParseIP("100.101.102.103"), netutils.ParseIPSloppy("100.101.102.103"),
net.ParseIP("100.101.102.104"), netutils.ParseIPSloppy("100.101.102.104"),
net.ParseIP("100.101.102.105"), netutils.ParseIPSloppy("100.101.102.105"),
net.ParseIP("2001:db8::1:1"), netutils.ParseIPSloppy("2001:db8::1:1"),
net.ParseIP("2001:db8::1:2"), netutils.ParseIPSloppy("2001:db8::1:2"),
net.ParseIP("2001:db8::1:3"), netutils.ParseIPSloppy("2001:db8::1:3"),
}, },
nodePortAddresses: []string{}, nodePortAddresses: []string{},
expectedIPVS: &ipvstest.FakeIPVS{ expectedIPVS: &ipvstest.FakeIPVS{
@ -760,7 +759,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 80, Port: 80,
Protocol: "SCTP", Protocol: "SCTP",
}: { }: {
Address: net.ParseIP("10.20.30.41"), Address: netutils.ParseIPSloppy("10.20.30.41"),
Protocol: "SCTP", Protocol: "SCTP",
Port: uint16(80), Port: uint16(80),
Scheduler: "rr", Scheduler: "rr",
@ -770,7 +769,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 3001, Port: 3001,
Protocol: "SCTP", Protocol: "SCTP",
}: { }: {
Address: net.ParseIP("100.101.102.103"), Address: netutils.ParseIPSloppy("100.101.102.103"),
Protocol: "SCTP", Protocol: "SCTP",
Port: uint16(3001), Port: uint16(3001),
Scheduler: "rr", Scheduler: "rr",
@ -780,7 +779,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 3001, Port: 3001,
Protocol: "SCTP", Protocol: "SCTP",
}: { }: {
Address: net.ParseIP("100.101.102.104"), Address: netutils.ParseIPSloppy("100.101.102.104"),
Protocol: "SCTP", Protocol: "SCTP",
Port: uint16(3001), Port: uint16(3001),
Scheduler: "rr", Scheduler: "rr",
@ -790,7 +789,7 @@ func TestNodePortIPv4(t *testing.T) {
Port: 3001, Port: 3001,
Protocol: "SCTP", Protocol: "SCTP",
}: { }: {
Address: net.ParseIP("100.101.102.105"), Address: netutils.ParseIPSloppy("100.101.102.105"),
Protocol: "SCTP", Protocol: "SCTP",
Port: uint16(3001), Port: uint16(3001),
Scheduler: "rr", Scheduler: "rr",
@ -803,7 +802,7 @@ func TestNodePortIPv4(t *testing.T) {
Protocol: "SCTP", Protocol: "SCTP",
}: { }: {
{ {
Address: net.ParseIP("10.180.0.1"), Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 1, Weight: 1,
}, },
@ -814,7 +813,7 @@ func TestNodePortIPv4(t *testing.T) {
Protocol: "SCTP", Protocol: "SCTP",
}: { }: {
{ {
Address: net.ParseIP("10.180.0.1"), Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 1, Weight: 1,
}, },
@ -825,7 +824,7 @@ func TestNodePortIPv4(t *testing.T) {
Protocol: "SCTP", Protocol: "SCTP",
}: { }: {
{ {
Address: net.ParseIP("10.180.0.1"), Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 1, Weight: 1,
}, },
@ -836,7 +835,7 @@ func TestNodePortIPv4(t *testing.T) {
Protocol: "SCTP", Protocol: "SCTP",
}: { }: {
{ {
Address: net.ParseIP("10.180.0.1"), Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 1, Weight: 1,
}, },
@ -951,8 +950,8 @@ func TestNodePortIPv6(t *testing.T) {
}), }),
}, },
nodeIPs: []net.IP{ nodeIPs: []net.IP{
net.ParseIP("100.101.102.103"), netutils.ParseIPSloppy("100.101.102.103"),
net.ParseIP("2001:db8::1:1"), netutils.ParseIPSloppy("2001:db8::1:1"),
}, },
nodePortAddresses: []string{}, nodePortAddresses: []string{},
expectedIPVS: &ipvstest.FakeIPVS{ expectedIPVS: &ipvstest.FakeIPVS{
@ -962,7 +961,7 @@ func TestNodePortIPv6(t *testing.T) {
Port: 3001, Port: 3001,
Protocol: "TCP", Protocol: "TCP",
}: { }: {
Address: net.ParseIP("2001:db8::1:1"), Address: netutils.ParseIPSloppy("2001:db8::1:1"),
Protocol: "TCP", Protocol: "TCP",
Port: uint16(3001), Port: uint16(3001),
Scheduler: "rr", Scheduler: "rr",
@ -972,7 +971,7 @@ func TestNodePortIPv6(t *testing.T) {
Port: 80, Port: 80,
Protocol: "TCP", Protocol: "TCP",
}: { }: {
Address: net.ParseIP("2020::1"), Address: netutils.ParseIPSloppy("2020::1"),
Protocol: "TCP", Protocol: "TCP",
Port: uint16(80), Port: uint16(80),
Scheduler: "rr", Scheduler: "rr",
@ -985,7 +984,7 @@ func TestNodePortIPv6(t *testing.T) {
Protocol: "TCP", Protocol: "TCP",
}: { }: {
{ {
Address: net.ParseIP("1002:ab8::2:10"), Address: netutils.ParseIPSloppy("1002:ab8::2:10"),
Port: uint16(80), Port: uint16(80),
Weight: 1, Weight: 1,
}, },
@ -997,7 +996,7 @@ func TestNodePortIPv6(t *testing.T) {
Protocol: "TCP", Protocol: "TCP",
}: { }: {
{ {
Address: net.ParseIP("1002:ab8::2:10"), Address: netutils.ParseIPSloppy("1002:ab8::2:10"),
Port: uint16(80), Port: uint16(80),
Weight: 1, Weight: 1,
}, },
@ -1034,7 +1033,7 @@ func TestNodePortIPv6(t *testing.T) {
}), }),
}, },
nodeIPs: []net.IP{ nodeIPs: []net.IP{
net.ParseIP("100.101.102.103"), netutils.ParseIPSloppy("100.101.102.103"),
}, },
nodePortAddresses: []string{"0.0.0.0/0"}, nodePortAddresses: []string{"0.0.0.0/0"},
/*since this is a node with only IPv4, proxier should not do anything */ /*since this is a node with only IPv4, proxier should not do anything */
@ -1062,8 +1061,8 @@ func TestNodePortIPv6(t *testing.T) {
}, },
endpoints: []*discovery.EndpointSlice{}, endpoints: []*discovery.EndpointSlice{},
nodeIPs: []net.IP{ nodeIPs: []net.IP{
net.ParseIP("100.101.102.103"), netutils.ParseIPSloppy("100.101.102.103"),
net.ParseIP("2001:db8::1:1"), netutils.ParseIPSloppy("2001:db8::1:1"),
}, },
nodePortAddresses: []string{}, nodePortAddresses: []string{},
expectedIPVS: &ipvstest.FakeIPVS{ expectedIPVS: &ipvstest.FakeIPVS{
@ -1073,7 +1072,7 @@ func TestNodePortIPv6(t *testing.T) {
Port: 3001, Port: 3001,
Protocol: "TCP", Protocol: "TCP",
}: { }: {
Address: net.ParseIP("2001:db8::1:1"), Address: netutils.ParseIPSloppy("2001:db8::1:1"),
Protocol: "TCP", Protocol: "TCP",
Port: uint16(3001), Port: uint16(3001),
Scheduler: "rr", Scheduler: "rr",
@ -1083,7 +1082,7 @@ func TestNodePortIPv6(t *testing.T) {
Port: 80, Port: 80,
Protocol: "TCP", Protocol: "TCP",
}: { }: {
Address: net.ParseIP("2020::1"), Address: netutils.ParseIPSloppy("2020::1"),
Protocol: "TCP", Protocol: "TCP",
Port: uint16(80), Port: uint16(80),
Scheduler: "rr", Scheduler: "rr",
@ -1132,8 +1131,8 @@ func TestNodePortIPv6(t *testing.T) {
}), }),
}, },
nodeIPs: []net.IP{ nodeIPs: []net.IP{
net.ParseIP("2001:db8::1:1"), netutils.ParseIPSloppy("2001:db8::1:1"),
net.ParseIP("2001:db8::1:2"), netutils.ParseIPSloppy("2001:db8::1:2"),
}, },
nodePortAddresses: []string{}, nodePortAddresses: []string{},
expectedIPVS: &ipvstest.FakeIPVS{ expectedIPVS: &ipvstest.FakeIPVS{
@ -1143,7 +1142,7 @@ func TestNodePortIPv6(t *testing.T) {
Port: 3001, Port: 3001,
Protocol: "SCTP", Protocol: "SCTP",
}: { }: {
Address: net.ParseIP("2001:db8::1:1"), Address: netutils.ParseIPSloppy("2001:db8::1:1"),
Protocol: "SCTP", Protocol: "SCTP",
Port: uint16(3001), Port: uint16(3001),
Scheduler: "rr", Scheduler: "rr",
@ -1153,7 +1152,7 @@ func TestNodePortIPv6(t *testing.T) {
Port: 3001, Port: 3001,
Protocol: "SCTP", Protocol: "SCTP",
}: { }: {
Address: net.ParseIP("2001:db8::1:2"), Address: netutils.ParseIPSloppy("2001:db8::1:2"),
Protocol: "SCTP", Protocol: "SCTP",
Port: uint16(3001), Port: uint16(3001),
Scheduler: "rr", Scheduler: "rr",
@ -1163,7 +1162,7 @@ func TestNodePortIPv6(t *testing.T) {
Port: 80, Port: 80,
Protocol: "SCTP", Protocol: "SCTP",
}: { }: {
Address: net.ParseIP("2020::1"), Address: netutils.ParseIPSloppy("2020::1"),
Protocol: "SCTP", Protocol: "SCTP",
Port: uint16(80), Port: uint16(80),
Scheduler: "rr", Scheduler: "rr",
@ -1176,7 +1175,7 @@ func TestNodePortIPv6(t *testing.T) {
Protocol: "SCTP", Protocol: "SCTP",
}: { }: {
{ {
Address: net.ParseIP("2001::1"), Address: netutils.ParseIPSloppy("2001::1"),
Port: uint16(80), Port: uint16(80),
Weight: 1, Weight: 1,
}, },
@ -1187,7 +1186,7 @@ func TestNodePortIPv6(t *testing.T) {
Protocol: "SCTP", Protocol: "SCTP",
}: { }: {
{ {
Address: net.ParseIP("2001::1"), Address: netutils.ParseIPSloppy("2001::1"),
Port: uint16(80), Port: uint16(80),
Weight: 1, Weight: 1,
}, },
@ -1198,7 +1197,7 @@ func TestNodePortIPv6(t *testing.T) {
Protocol: "SCTP", Protocol: "SCTP",
}: { }: {
{ {
Address: net.ParseIP("2001::1"), Address: netutils.ParseIPSloppy("2001::1"),
Port: uint16(80), Port: uint16(80),
Weight: 1, Weight: 1,
}, },
@ -1313,7 +1312,7 @@ func TestIPv4Proxier(t *testing.T) {
Port: 80, Port: 80,
Protocol: "TCP", Protocol: "TCP",
}: { }: {
Address: net.ParseIP("10.20.30.41"), Address: netutils.ParseIPSloppy("10.20.30.41"),
Protocol: "TCP", Protocol: "TCP",
Port: uint16(80), Port: uint16(80),
Scheduler: "rr", Scheduler: "rr",
@ -1326,7 +1325,7 @@ func TestIPv4Proxier(t *testing.T) {
Protocol: "TCP", Protocol: "TCP",
}: { }: {
{ {
Address: net.ParseIP("10.180.0.1"), Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(80), Port: uint16(80),
Weight: 1, Weight: 1,
}, },
@ -1354,7 +1353,7 @@ func TestIPv4Proxier(t *testing.T) {
Port: 80, Port: 80,
Protocol: "TCP", Protocol: "TCP",
}: { }: {
Address: net.ParseIP("10.20.30.41"), Address: netutils.ParseIPSloppy("10.20.30.41"),
Protocol: "TCP", Protocol: "TCP",
Port: uint16(80), Port: uint16(80),
Scheduler: "rr", Scheduler: "rr",
@ -1451,7 +1450,7 @@ func TestIPv6Proxier(t *testing.T) {
Port: 8080, Port: 8080,
Protocol: "TCP", Protocol: "TCP",
}: { }: {
Address: net.ParseIP("1002:ab8::2:1"), Address: netutils.ParseIPSloppy("1002:ab8::2:1"),
Protocol: "TCP", Protocol: "TCP",
Port: uint16(8080), Port: uint16(8080),
Scheduler: "rr", Scheduler: "rr",
@ -1464,7 +1463,7 @@ func TestIPv6Proxier(t *testing.T) {
Protocol: "TCP", Protocol: "TCP",
}: { }: {
{ {
Address: net.ParseIP("1009:ab8::5:6"), Address: netutils.ParseIPSloppy("1009:ab8::5:6"),
Port: uint16(8080), Port: uint16(8080),
Weight: 1, Weight: 1,
}, },
@ -1492,7 +1491,7 @@ func TestIPv6Proxier(t *testing.T) {
Port: 80, Port: 80,
Protocol: "TCP", Protocol: "TCP",
}: { }: {
Address: net.ParseIP("2001::1"), Address: netutils.ParseIPSloppy("2001::1"),
Protocol: "TCP", Protocol: "TCP",
Port: uint16(80), Port: uint16(80),
Scheduler: "rr", Scheduler: "rr",
@ -1832,7 +1831,7 @@ func TestLoadBalancer(t *testing.T) {
} }
func TestOnlyLocalNodePorts(t *testing.T) { func TestOnlyLocalNodePorts(t *testing.T) {
nodeIP := net.ParseIP("100.101.102.103") nodeIP := netutils.ParseIPSloppy("100.101.102.103")
ipt, fp := buildFakeProxier() ipt, fp := buildFakeProxier()
svcIP := "10.20.30.41" svcIP := "10.20.30.41"
@ -1882,9 +1881,9 @@ func TestOnlyLocalNodePorts(t *testing.T) {
) )
itf := net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0} itf := net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}
addrs := []net.Addr{&net.IPNet{IP: net.ParseIP("100.101.102.103"), Mask: net.CIDRMask(24, 32)}} addrs := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("100.101.102.103"), Mask: net.CIDRMask(24, 32)}}
itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0} itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0}
addrs1 := []net.Addr{&net.IPNet{IP: net.ParseIP("2001:db8::"), Mask: net.CIDRMask(64, 128)}} addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::"), Mask: net.CIDRMask(64, 128)}}
fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf, addrs) fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1) fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
fp.nodePortAddresses = []string{"100.101.102.0/24", "2001:db8::0/64"} fp.nodePortAddresses = []string{"100.101.102.0/24", "2001:db8::0/64"}
@ -1962,9 +1961,9 @@ func TestHealthCheckNodePort(t *testing.T) {
) )
itf := net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0} itf := net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}
addrs := []net.Addr{&net.IPNet{IP: net.ParseIP("100.101.102.103"), Mask: net.CIDRMask(24, 32)}} addrs := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("100.101.102.103"), Mask: net.CIDRMask(24, 32)}}
itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0} itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0}
addrs1 := []net.Addr{&net.IPNet{IP: net.ParseIP("2001:db8::"), Mask: net.CIDRMask(64, 128)}} addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::"), Mask: net.CIDRMask(64, 128)}}
fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf, addrs) fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1) fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
fp.nodePortAddresses = []string{"100.101.102.0/24", "2001:db8::0/64"} fp.nodePortAddresses = []string{"100.101.102.0/24", "2001:db8::0/64"}
@ -2528,7 +2527,7 @@ func TestSessionAffinity(t *testing.T) {
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
nodeIP := net.ParseIP("100.101.102.103") nodeIP := netutils.ParseIPSloppy("100.101.102.103")
fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}, nil, v1.IPv4Protocol) fp := NewFakeProxier(ipt, ipvs, ipset, []net.IP{nodeIP}, nil, v1.IPv4Protocol)
svcIP := "10.20.30.41" svcIP := "10.20.30.41"
svcPort := 80 svcPort := 80
@ -3432,7 +3431,7 @@ func Test_syncService(t *testing.T) {
{ {
// case 0, old virtual server is same as new virtual server // case 0, old virtual server is same as new virtual server
oldVirtualServer: &utilipvs.VirtualServer{ oldVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"), Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolTCP), Protocol: string(v1.ProtocolTCP),
Port: 80, Port: 80,
Scheduler: "rr", Scheduler: "rr",
@ -3440,7 +3439,7 @@ func Test_syncService(t *testing.T) {
}, },
svcName: "foo", svcName: "foo",
newVirtualServer: &utilipvs.VirtualServer{ newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"), Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolTCP), Protocol: string(v1.ProtocolTCP),
Port: 80, Port: 80,
Scheduler: "rr", Scheduler: "rr",
@ -3452,7 +3451,7 @@ func Test_syncService(t *testing.T) {
{ {
// case 1, old virtual server is different from new virtual server // case 1, old virtual server is different from new virtual server
oldVirtualServer: &utilipvs.VirtualServer{ oldVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"), Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolTCP), Protocol: string(v1.ProtocolTCP),
Port: 8080, Port: 8080,
Scheduler: "rr", Scheduler: "rr",
@ -3460,7 +3459,7 @@ func Test_syncService(t *testing.T) {
}, },
svcName: "bar", svcName: "bar",
newVirtualServer: &utilipvs.VirtualServer{ newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"), Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolTCP), Protocol: string(v1.ProtocolTCP),
Port: 8080, Port: 8080,
Scheduler: "rr", Scheduler: "rr",
@ -3472,7 +3471,7 @@ func Test_syncService(t *testing.T) {
{ {
// case 2, old virtual server is different from new virtual server // case 2, old virtual server is different from new virtual server
oldVirtualServer: &utilipvs.VirtualServer{ oldVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"), Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolTCP), Protocol: string(v1.ProtocolTCP),
Port: 8080, Port: 8080,
Scheduler: "rr", Scheduler: "rr",
@ -3480,7 +3479,7 @@ func Test_syncService(t *testing.T) {
}, },
svcName: "bar", svcName: "bar",
newVirtualServer: &utilipvs.VirtualServer{ newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"), Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolTCP), Protocol: string(v1.ProtocolTCP),
Port: 8080, Port: 8080,
Scheduler: "wlc", Scheduler: "wlc",
@ -3494,7 +3493,7 @@ func Test_syncService(t *testing.T) {
oldVirtualServer: nil, oldVirtualServer: nil,
svcName: "baz", svcName: "baz",
newVirtualServer: &utilipvs.VirtualServer{ newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"), Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 53, Port: 53,
Scheduler: "rr", Scheduler: "rr",
@ -3506,7 +3505,7 @@ func Test_syncService(t *testing.T) {
{ {
// case 4, SCTP, old virtual server is same as new virtual server // case 4, SCTP, old virtual server is same as new virtual server
oldVirtualServer: &utilipvs.VirtualServer{ oldVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"), Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP), Protocol: string(v1.ProtocolSCTP),
Port: 80, Port: 80,
Scheduler: "rr", Scheduler: "rr",
@ -3514,7 +3513,7 @@ func Test_syncService(t *testing.T) {
}, },
svcName: "foo", svcName: "foo",
newVirtualServer: &utilipvs.VirtualServer{ newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"), Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP), Protocol: string(v1.ProtocolSCTP),
Port: 80, Port: 80,
Scheduler: "rr", Scheduler: "rr",
@ -3526,7 +3525,7 @@ func Test_syncService(t *testing.T) {
{ {
// case 5, old virtual server is different from new virtual server // case 5, old virtual server is different from new virtual server
oldVirtualServer: &utilipvs.VirtualServer{ oldVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"), Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP), Protocol: string(v1.ProtocolSCTP),
Port: 8080, Port: 8080,
Scheduler: "rr", Scheduler: "rr",
@ -3534,7 +3533,7 @@ func Test_syncService(t *testing.T) {
}, },
svcName: "bar", svcName: "bar",
newVirtualServer: &utilipvs.VirtualServer{ newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"), Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP), Protocol: string(v1.ProtocolSCTP),
Port: 8080, Port: 8080,
Scheduler: "rr", Scheduler: "rr",
@ -3546,7 +3545,7 @@ func Test_syncService(t *testing.T) {
{ {
// case 6, old virtual server is different from new virtual server // case 6, old virtual server is different from new virtual server
oldVirtualServer: &utilipvs.VirtualServer{ oldVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"), Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP), Protocol: string(v1.ProtocolSCTP),
Port: 8080, Port: 8080,
Scheduler: "rr", Scheduler: "rr",
@ -3554,7 +3553,7 @@ func Test_syncService(t *testing.T) {
}, },
svcName: "bar", svcName: "bar",
newVirtualServer: &utilipvs.VirtualServer{ newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"), Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP), Protocol: string(v1.ProtocolSCTP),
Port: 8080, Port: 8080,
Scheduler: "wlc", Scheduler: "wlc",
@ -3568,7 +3567,7 @@ func Test_syncService(t *testing.T) {
oldVirtualServer: nil, oldVirtualServer: nil,
svcName: "baz", svcName: "baz",
newVirtualServer: &utilipvs.VirtualServer{ newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"), Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP), Protocol: string(v1.ProtocolSCTP),
Port: 53, Port: 53,
Scheduler: "rr", Scheduler: "rr",
@ -3580,7 +3579,7 @@ func Test_syncService(t *testing.T) {
{ {
// case 8, virtual server address already binded, skip sync // case 8, virtual server address already binded, skip sync
oldVirtualServer: &utilipvs.VirtualServer{ oldVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"), Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP), Protocol: string(v1.ProtocolSCTP),
Port: 53, Port: 53,
Scheduler: "rr", Scheduler: "rr",
@ -3588,7 +3587,7 @@ func Test_syncService(t *testing.T) {
}, },
svcName: "baz", svcName: "baz",
newVirtualServer: &utilipvs.VirtualServer{ newVirtualServer: &utilipvs.VirtualServer{
Address: net.ParseIP("1.2.3.4"), Address: netutils.ParseIPSloppy("1.2.3.4"),
Protocol: string(v1.ProtocolSCTP), Protocol: string(v1.ProtocolSCTP),
Port: 53, Port: 53,
Scheduler: "rr", Scheduler: "rr",
@ -3720,7 +3719,7 @@ func TestCleanLegacyService(t *testing.T) {
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
excludeCIDRs, _ := utilnet.ParseCIDRs([]string{"3.3.3.0/24", "4.4.4.0/24"}) excludeCIDRs, _ := netutils.ParseCIDRs([]string{"3.3.3.0/24", "4.4.4.0/24"})
fp := NewFakeProxier(ipt, ipvs, ipset, nil, excludeCIDRs, v1.IPv4Protocol) fp := NewFakeProxier(ipt, ipvs, ipset, nil, excludeCIDRs, v1.IPv4Protocol)
// All ipvs services that were processed in the latest sync loop. // All ipvs services that were processed in the latest sync loop.
@ -3729,7 +3728,7 @@ func TestCleanLegacyService(t *testing.T) {
currentServices := map[string]*utilipvs.VirtualServer{ currentServices := map[string]*utilipvs.VirtualServer{
// Created by kube-proxy. // Created by kube-proxy.
"ipvs0": { "ipvs0": {
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 53, Port: 53,
Scheduler: "rr", Scheduler: "rr",
@ -3737,7 +3736,7 @@ func TestCleanLegacyService(t *testing.T) {
}, },
// Created by kube-proxy. // Created by kube-proxy.
"ipvs1": { "ipvs1": {
Address: net.ParseIP("2.2.2.2"), Address: netutils.ParseIPSloppy("2.2.2.2"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 54, Port: 54,
Scheduler: "rr", Scheduler: "rr",
@ -3745,7 +3744,7 @@ func TestCleanLegacyService(t *testing.T) {
}, },
// Created by an external party. // Created by an external party.
"ipvs2": { "ipvs2": {
Address: net.ParseIP("3.3.3.3"), Address: netutils.ParseIPSloppy("3.3.3.3"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 55, Port: 55,
Scheduler: "rr", Scheduler: "rr",
@ -3753,7 +3752,7 @@ func TestCleanLegacyService(t *testing.T) {
}, },
// Created by an external party. // Created by an external party.
"ipvs3": { "ipvs3": {
Address: net.ParseIP("4.4.4.4"), Address: netutils.ParseIPSloppy("4.4.4.4"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 56, Port: 56,
Scheduler: "rr", Scheduler: "rr",
@ -3761,7 +3760,7 @@ func TestCleanLegacyService(t *testing.T) {
}, },
// Created by an external party. // Created by an external party.
"ipvs4": { "ipvs4": {
Address: net.ParseIP("5.5.5.5"), Address: netutils.ParseIPSloppy("5.5.5.5"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 57, Port: 57,
Scheduler: "rr", Scheduler: "rr",
@ -3769,7 +3768,7 @@ func TestCleanLegacyService(t *testing.T) {
}, },
// Created by kube-proxy, but now stale. // Created by kube-proxy, but now stale.
"ipvs5": { "ipvs5": {
Address: net.ParseIP("6.6.6.6"), Address: netutils.ParseIPSloppy("6.6.6.6"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 58, Port: 58,
Scheduler: "rr", Scheduler: "rr",
@ -3812,7 +3811,7 @@ func TestCleanLegacyService(t *testing.T) {
// check that address "1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4" are bound, ignore ipv6 addresses // check that address "1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4" are bound, ignore ipv6 addresses
remainingAddrsMap := make(map[string]bool) remainingAddrsMap := make(map[string]bool)
for _, a := range remainingAddrs { for _, a := range remainingAddrs {
if net.ParseIP(a).To4() == nil { if netutils.ParseIPSloppy(a).To4() == nil {
continue continue
} }
remainingAddrsMap[a] = true remainingAddrsMap[a] = true
@ -3834,21 +3833,21 @@ func TestCleanLegacyServiceWithRealServers(t *testing.T) {
// All ipvs services in the system. // All ipvs services in the system.
currentServices := map[string]*utilipvs.VirtualServer{ currentServices := map[string]*utilipvs.VirtualServer{
"ipvs0": { // deleted with real servers "ipvs0": { // deleted with real servers
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 53, Port: 53,
Scheduler: "rr", Scheduler: "rr",
Flags: utilipvs.FlagHashed, Flags: utilipvs.FlagHashed,
}, },
"ipvs1": { // deleted no real server "ipvs1": { // deleted no real server
Address: net.ParseIP("2.2.2.2"), Address: netutils.ParseIPSloppy("2.2.2.2"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 54, Port: 54,
Scheduler: "rr", Scheduler: "rr",
Flags: utilipvs.FlagHashed, Flags: utilipvs.FlagHashed,
}, },
"ipvs2": { // not deleted "ipvs2": { // not deleted
Address: net.ParseIP("3.3.3.3"), Address: netutils.ParseIPSloppy("3.3.3.3"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 54, Port: 54,
Scheduler: "rr", Scheduler: "rr",
@ -3859,13 +3858,13 @@ func TestCleanLegacyServiceWithRealServers(t *testing.T) {
// "ipvs0" has a real server, but it should still be deleted since the Service is deleted // "ipvs0" has a real server, but it should still be deleted since the Service is deleted
realServers := map[*utilipvs.VirtualServer]*utilipvs.RealServer{ realServers := map[*utilipvs.VirtualServer]*utilipvs.RealServer{
{ {
Address: net.ParseIP("1.1.1.1"), Address: netutils.ParseIPSloppy("1.1.1.1"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 53, Port: 53,
Scheduler: "rr", Scheduler: "rr",
Flags: utilipvs.FlagHashed, Flags: utilipvs.FlagHashed,
}: { }: {
Address: net.ParseIP("10.180.0.1"), Address: netutils.ParseIPSloppy("10.180.0.1"),
Port: uint16(53), Port: uint16(53),
Weight: 1, Weight: 1,
}, },
@ -3905,7 +3904,7 @@ func TestCleanLegacyServiceWithRealServers(t *testing.T) {
// check that address is "3.3.3.3" // check that address is "3.3.3.3"
remainingAddrsMap := make(map[string]bool) remainingAddrsMap := make(map[string]bool)
for _, a := range remainingAddrs { for _, a := range remainingAddrs {
if net.ParseIP(a).To4() == nil { if netutils.ParseIPSloppy(a).To4() == nil {
continue continue
} }
remainingAddrsMap[a] = true remainingAddrsMap[a] = true
@ -3921,12 +3920,12 @@ func TestCleanLegacyRealServersExcludeCIDRs(t *testing.T) {
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
gtm := NewGracefulTerminationManager(ipvs) gtm := NewGracefulTerminationManager(ipvs)
excludeCIDRs, _ := utilnet.ParseCIDRs([]string{"4.4.4.4/32"}) excludeCIDRs, _ := netutils.ParseCIDRs([]string{"4.4.4.4/32"})
fp := NewFakeProxier(ipt, ipvs, ipset, nil, excludeCIDRs, v1.IPv4Protocol) fp := NewFakeProxier(ipt, ipvs, ipset, nil, excludeCIDRs, v1.IPv4Protocol)
fp.gracefuldeleteManager = gtm fp.gracefuldeleteManager = gtm
vs := &utilipvs.VirtualServer{ vs := &utilipvs.VirtualServer{
Address: net.ParseIP("4.4.4.4"), Address: netutils.ParseIPSloppy("4.4.4.4"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 56, Port: 56,
Scheduler: "rr", Scheduler: "rr",
@ -3937,13 +3936,13 @@ func TestCleanLegacyRealServersExcludeCIDRs(t *testing.T) {
rss := []*utilipvs.RealServer{ rss := []*utilipvs.RealServer{
{ {
Address: net.ParseIP("10.10.10.10"), Address: netutils.ParseIPSloppy("10.10.10.10"),
Port: 56, Port: 56,
ActiveConn: 0, ActiveConn: 0,
InactiveConn: 0, InactiveConn: 0,
}, },
{ {
Address: net.ParseIP("11.11.11.11"), Address: netutils.ParseIPSloppy("11.11.11.11"),
Port: 56, Port: 56,
ActiveConn: 0, ActiveConn: 0,
InactiveConn: 0, InactiveConn: 0,
@ -3976,9 +3975,9 @@ func TestCleanLegacyService6(t *testing.T) {
ipt := iptablestest.NewFake() ipt := iptablestest.NewFake()
ipvs := ipvstest.NewFake() ipvs := ipvstest.NewFake()
ipset := ipsettest.NewFake(testIPSetVersion) ipset := ipsettest.NewFake(testIPSetVersion)
excludeCIDRs, _ := utilnet.ParseCIDRs([]string{"3000::/64", "4000::/64"}) excludeCIDRs, _ := netutils.ParseCIDRs([]string{"3000::/64", "4000::/64"})
fp := NewFakeProxier(ipt, ipvs, ipset, nil, excludeCIDRs, v1.IPv4Protocol) fp := NewFakeProxier(ipt, ipvs, ipset, nil, excludeCIDRs, v1.IPv4Protocol)
fp.nodeIP = net.ParseIP("::1") fp.nodeIP = netutils.ParseIPSloppy("::1")
// All ipvs services that were processed in the latest sync loop. // All ipvs services that were processed in the latest sync loop.
activeServices := map[string]bool{"ipvs0": true, "ipvs1": true} activeServices := map[string]bool{"ipvs0": true, "ipvs1": true}
@ -3986,7 +3985,7 @@ func TestCleanLegacyService6(t *testing.T) {
currentServices := map[string]*utilipvs.VirtualServer{ currentServices := map[string]*utilipvs.VirtualServer{
// Created by kube-proxy. // Created by kube-proxy.
"ipvs0": { "ipvs0": {
Address: net.ParseIP("1000::1"), Address: netutils.ParseIPSloppy("1000::1"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 53, Port: 53,
Scheduler: "rr", Scheduler: "rr",
@ -3994,7 +3993,7 @@ func TestCleanLegacyService6(t *testing.T) {
}, },
// Created by kube-proxy. // Created by kube-proxy.
"ipvs1": { "ipvs1": {
Address: net.ParseIP("1000::2"), Address: netutils.ParseIPSloppy("1000::2"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 54, Port: 54,
Scheduler: "rr", Scheduler: "rr",
@ -4002,7 +4001,7 @@ func TestCleanLegacyService6(t *testing.T) {
}, },
// Created by an external party. // Created by an external party.
"ipvs2": { "ipvs2": {
Address: net.ParseIP("3000::1"), Address: netutils.ParseIPSloppy("3000::1"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 55, Port: 55,
Scheduler: "rr", Scheduler: "rr",
@ -4010,7 +4009,7 @@ func TestCleanLegacyService6(t *testing.T) {
}, },
// Created by an external party. // Created by an external party.
"ipvs3": { "ipvs3": {
Address: net.ParseIP("4000::1"), Address: netutils.ParseIPSloppy("4000::1"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 56, Port: 56,
Scheduler: "rr", Scheduler: "rr",
@ -4018,7 +4017,7 @@ func TestCleanLegacyService6(t *testing.T) {
}, },
// Created by an external party. // Created by an external party.
"ipvs4": { "ipvs4": {
Address: net.ParseIP("5000::1"), Address: netutils.ParseIPSloppy("5000::1"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 57, Port: 57,
Scheduler: "rr", Scheduler: "rr",
@ -4026,7 +4025,7 @@ func TestCleanLegacyService6(t *testing.T) {
}, },
// Created by kube-proxy, but now stale. // Created by kube-proxy, but now stale.
"ipvs5": { "ipvs5": {
Address: net.ParseIP("1000::6"), Address: netutils.ParseIPSloppy("1000::6"),
Protocol: string(v1.ProtocolUDP), Protocol: string(v1.ProtocolUDP),
Port: 58, Port: 58,
Scheduler: "rr", Scheduler: "rr",
@ -4069,7 +4068,7 @@ func TestCleanLegacyService6(t *testing.T) {
// check that address "1000::1", "1000::2", "3000::1", "4000::1" are still bound, ignore ipv4 addresses // check that address "1000::1", "1000::2", "3000::1", "4000::1" are still bound, ignore ipv4 addresses
remainingAddrsMap := make(map[string]bool) remainingAddrsMap := make(map[string]bool)
for _, a := range remainingAddrs { for _, a := range remainingAddrs {
if net.ParseIP(a).To4() != nil { if netutils.ParseIPSloppy(a).To4() != nil {
continue continue
} }
remainingAddrsMap[a] = true remainingAddrsMap[a] = true
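
The substitution running through the ipvs proxier tests above is mechanical: every net.ParseIP becomes netutils.ParseIPSloppy and every utilnet.ParseCIDRs becomes netutils.ParseCIDRs. A minimal, self-contained sketch of why the sloppy parser is needed, using only the k8s.io/utils/net package these files already import: from Go 1.17 on, net.ParseIP rejects IPv4 addresses written with leading zeros, while ParseIPSloppy keeps the pre-1.17 permissive behavior.

package main

import (
	"fmt"
	"net"

	netutils "k8s.io/utils/net"
)

func main() {
	// With Go 1.17 or newer the standard parser rejects leading zeros.
	fmt.Println(net.ParseIP("010.010.010.010")) // <nil>

	// The sloppy parser keeps the old behavior and reads the octets as decimal.
	fmt.Println(netutils.ParseIPSloppy("010.010.010.010")) // 10.10.10.10

	// For already-canonical addresses the two parsers agree.
	fmt.Println(net.ParseIP("10.10.10.10").Equal(netutils.ParseIPSloppy("10.10.10.10"))) // true
}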

View File

@ -25,6 +25,7 @@ import (
"k8s.io/client-go/tools/events" "k8s.io/client-go/tools/events"
"k8s.io/klog/v2" "k8s.io/klog/v2"
netutils "k8s.io/utils/net"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
@ -155,7 +156,7 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic
clusterIP := utilproxy.GetClusterIPByFamily(sct.ipFamily, service) clusterIP := utilproxy.GetClusterIPByFamily(sct.ipFamily, service)
info := &BaseServiceInfo{ info := &BaseServiceInfo{
clusterIP: net.ParseIP(clusterIP), clusterIP: netutils.ParseIPSloppy(clusterIP),
port: int(port.Port), port: int(port.Port),
protocol: port.Protocol, protocol: port.Protocol,
nodePort: int(port.NodePort), nodePort: int(port.NodePort),

View File

@ -17,7 +17,6 @@ limitations under the License.
package proxy package proxy
import ( import (
"net"
"reflect" "reflect"
"testing" "testing"
"time" "time"
@ -29,13 +28,14 @@ import (
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
netutils "k8s.io/utils/net"
) )
const testHostname = "test-hostname" const testHostname = "test-hostname"
func makeTestServiceInfo(clusterIP string, port int, protocol string, healthcheckNodePort int, svcInfoFuncs ...func(*BaseServiceInfo)) *BaseServiceInfo { func makeTestServiceInfo(clusterIP string, port int, protocol string, healthcheckNodePort int, svcInfoFuncs ...func(*BaseServiceInfo)) *BaseServiceInfo {
info := &BaseServiceInfo{ info := &BaseServiceInfo{
clusterIP: net.ParseIP(clusterIP), clusterIP: netutils.ParseIPSloppy(clusterIP),
port: port, port: port,
protocol: v1.Protocol(protocol), protocol: v1.Protocol(protocol),
} }

View File

@ -527,7 +527,7 @@ func (proxier *Proxier) mergeService(service *v1.Service) sets.String {
continue continue
} }
serviceIP := net.ParseIP(service.Spec.ClusterIP) serviceIP := netutils.ParseIPSloppy(service.Spec.ClusterIP)
klog.V(1).InfoS("Adding new service", "serviceName", serviceName, "addr", net.JoinHostPort(serviceIP.String(), strconv.Itoa(int(servicePort.Port))), "protocol", servicePort.Protocol) klog.V(1).InfoS("Adding new service", "serviceName", serviceName, "addr", net.JoinHostPort(serviceIP.String(), strconv.Itoa(int(servicePort.Port))), "protocol", servicePort.Protocol)
info, err = proxier.addServiceOnPortInternal(serviceName, servicePort.Protocol, proxyPort, proxier.udpIdleTimeout) info, err = proxier.addServiceOnPortInternal(serviceName, servicePort.Protocol, proxyPort, proxier.udpIdleTimeout)
if err != nil { if err != nil {
@ -711,7 +711,7 @@ func sameConfig(info *ServiceInfo, service *v1.Service, port *v1.ServicePort) bo
if info.protocol != port.Protocol || info.portal.port != int(port.Port) || info.nodePort != int(port.NodePort) { if info.protocol != port.Protocol || info.portal.port != int(port.Port) || info.nodePort != int(port.NodePort) {
return false return false
} }
if !info.portal.ip.Equal(net.ParseIP(service.Spec.ClusterIP)) { if !info.portal.ip.Equal(netutils.ParseIPSloppy(service.Spec.ClusterIP)) {
return false return false
} }
if !ipsEqual(info.externalIPs, service.Spec.ExternalIPs) { if !ipsEqual(info.externalIPs, service.Spec.ExternalIPs) {
@ -744,14 +744,14 @@ func (proxier *Proxier) openPortal(service proxy.ServicePortName, info *ServiceI
return err return err
} }
for _, publicIP := range info.externalIPs { for _, publicIP := range info.externalIPs {
err = proxier.openOnePortal(portal{net.ParseIP(publicIP), info.portal.port, true}, info.protocol, proxier.listenIP, info.proxyPort, service) err = proxier.openOnePortal(portal{netutils.ParseIPSloppy(publicIP), info.portal.port, true}, info.protocol, proxier.listenIP, info.proxyPort, service)
if err != nil { if err != nil {
return err return err
} }
} }
for _, ingress := range info.loadBalancerStatus.Ingress { for _, ingress := range info.loadBalancerStatus.Ingress {
if ingress.IP != "" { if ingress.IP != "" {
err = proxier.openOnePortal(portal{net.ParseIP(ingress.IP), info.portal.port, false}, info.protocol, proxier.listenIP, info.proxyPort, service) err = proxier.openOnePortal(portal{netutils.ParseIPSloppy(ingress.IP), info.portal.port, false}, info.protocol, proxier.listenIP, info.proxyPort, service)
if err != nil { if err != nil {
return err return err
} }
@ -923,11 +923,11 @@ func (proxier *Proxier) closePortal(service proxy.ServicePortName, info *Service
// Collect errors and report them all at the end. // Collect errors and report them all at the end.
el := proxier.closeOnePortal(info.portal, info.protocol, proxier.listenIP, info.proxyPort, service) el := proxier.closeOnePortal(info.portal, info.protocol, proxier.listenIP, info.proxyPort, service)
for _, publicIP := range info.externalIPs { for _, publicIP := range info.externalIPs {
el = append(el, proxier.closeOnePortal(portal{net.ParseIP(publicIP), info.portal.port, true}, info.protocol, proxier.listenIP, info.proxyPort, service)...) el = append(el, proxier.closeOnePortal(portal{netutils.ParseIPSloppy(publicIP), info.portal.port, true}, info.protocol, proxier.listenIP, info.proxyPort, service)...)
} }
for _, ingress := range info.loadBalancerStatus.Ingress { for _, ingress := range info.loadBalancerStatus.Ingress {
if ingress.IP != "" { if ingress.IP != "" {
el = append(el, proxier.closeOnePortal(portal{net.ParseIP(ingress.IP), info.portal.port, false}, info.protocol, proxier.listenIP, info.proxyPort, service)...) el = append(el, proxier.closeOnePortal(portal{netutils.ParseIPSloppy(ingress.IP), info.portal.port, false}, info.protocol, proxier.listenIP, info.proxyPort, service)...)
} }
} }
if info.nodePort != 0 { if info.nodePort != 0 {
@ -1116,11 +1116,11 @@ func iptablesFlush(ipt iptables.Interface) error {
} }
// Used below. // Used below.
var zeroIPv4 = net.ParseIP("0.0.0.0") var zeroIPv4 = netutils.ParseIPSloppy("0.0.0.0")
var localhostIPv4 = net.ParseIP("127.0.0.1") var localhostIPv4 = netutils.ParseIPSloppy("127.0.0.1")
var zeroIPv6 = net.ParseIP("::") var zeroIPv6 = netutils.ParseIPSloppy("::")
var localhostIPv6 = net.ParseIP("::1") var localhostIPv6 = netutils.ParseIPSloppy("::1")
// Build a slice of iptables args that are common to from-container and from-host portal rules. // Build a slice of iptables args that are common to from-container and from-host portal rules.
func iptablesCommonPortalArgs(destIP net.IP, addPhysicalInterfaceMatch bool, addDstLocalMatch bool, destPort int, protocol v1.Protocol, service proxy.ServicePortName) []string { func iptablesCommonPortalArgs(destIP net.IP, addPhysicalInterfaceMatch bool, addDstLocalMatch bool, destPort int, protocol v1.Protocol, service proxy.ServicePortName) []string {

View File

@ -39,6 +39,7 @@ import (
ipttest "k8s.io/kubernetes/pkg/util/iptables/testing" ipttest "k8s.io/kubernetes/pkg/util/iptables/testing"
"k8s.io/utils/exec" "k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing" fakeexec "k8s.io/utils/exec/testing"
netutils "k8s.io/utils/net"
) )
const ( const (
@ -328,7 +329,7 @@ func TestTCPProxy(t *testing.T) {
fexec := makeFakeExec() fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -353,7 +354,7 @@ func TestUDPProxy(t *testing.T) {
fexec := makeFakeExec() fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -378,7 +379,7 @@ func TestUDPProxyTimeout(t *testing.T) {
fexec := makeFakeExec() fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -417,7 +418,7 @@ func TestMultiPortProxy(t *testing.T) {
fexec := makeFakeExec() fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -440,7 +441,7 @@ func TestMultiPortOnServiceAdd(t *testing.T) {
lb := NewLoadBalancerRR() lb := NewLoadBalancerRR()
fexec := makeFakeExec() fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -473,7 +474,7 @@ func TestTCPProxyStop(t *testing.T) {
fexec := makeFakeExec() fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -505,7 +506,7 @@ func TestUDPProxyStop(t *testing.T) {
fexec := makeFakeExec() fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -537,7 +538,7 @@ func TestTCPProxyUpdateDelete(t *testing.T) {
fexec := makeFakeExec() fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -569,7 +570,7 @@ func TestUDPProxyUpdateDelete(t *testing.T) {
fexec := makeFakeExec() fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -602,7 +603,7 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
fexec := makeFakeExec() fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -640,7 +641,7 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
fexec := makeFakeExec() fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -678,7 +679,7 @@ func TestTCPProxyUpdatePort(t *testing.T) {
fexec := makeFakeExec() fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -716,7 +717,7 @@ func TestUDPProxyUpdatePort(t *testing.T) {
fexec := makeFakeExec() fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -754,7 +755,7 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
fexec := makeFakeExec() fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -793,7 +794,7 @@ func TestProxyUpdatePortal(t *testing.T) {
fexec := makeFakeExec() fexec := makeFakeExec()
p, err := createProxier(lb, net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket) p, err := createProxier(lb, netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Second, udpIdleTimeoutForTest, newProxySocket)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -851,7 +852,7 @@ func TestOnServiceAddChangeMap(t *testing.T) {
fexec := makeFakeExec() fexec := makeFakeExec()
// Use long minSyncPeriod so we can test that immediate syncs work // Use long minSyncPeriod so we can test that immediate syncs work
p, err := createProxier(NewLoadBalancerRR(), net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Minute, udpIdleTimeoutForTest, newProxySocket) p, err := createProxier(NewLoadBalancerRR(), netutils.ParseIPSloppy("0.0.0.0"), ipttest.NewFake(), fexec, netutils.ParseIPSloppy("127.0.0.1"), nil, time.Minute, time.Minute, udpIdleTimeoutForTest, newProxySocket)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -22,13 +22,14 @@ import (
"strconv" "strconv"
"k8s.io/klog/v2" "k8s.io/klog/v2"
netutils "k8s.io/utils/net"
) )
// IPPart returns just the IP part of an IP or IP:port or endpoint string. If the IP // IPPart returns just the IP part of an IP or IP:port or endpoint string. If the IP
// part is an IPv6 address enclosed in brackets (e.g. "[fd00:1::5]:9999"), // part is an IPv6 address enclosed in brackets (e.g. "[fd00:1::5]:9999"),
// then the brackets are stripped as well. // then the brackets are stripped as well.
func IPPart(s string) string { func IPPart(s string) string {
if ip := net.ParseIP(s); ip != nil { if ip := netutils.ParseIPSloppy(s); ip != nil {
// IP address without port // IP address without port
return s return s
} }
@ -39,7 +40,7 @@ func IPPart(s string) string {
return "" return ""
} }
// Check if host string is a valid IP address // Check if host string is a valid IP address
ip := net.ParseIP(host) ip := netutils.ParseIPSloppy(host)
if ip == nil { if ip == nil {
klog.Errorf("invalid IP part '%s'", host) klog.Errorf("invalid IP part '%s'", host)
return "" return ""
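
IPPart above now routes every parse through the sloppy parser. A short sketch of the same logic, where ipPart is an assumed illustrative name rather than the package's exported function; bare IPs are validated directly, and "host:port" or "[v6]:port" strings are split first so the brackets around an IPv6 host are dropped.

package main

import (
	"fmt"
	"net"

	netutils "k8s.io/utils/net"
)

func ipPart(s string) string {
	// A bare IP (no port) parses directly and is returned unchanged.
	if ip := netutils.ParseIPSloppy(s); ip != nil {
		return s
	}
	// Otherwise split off the port; SplitHostPort also strips IPv6 brackets.
	host, _, err := net.SplitHostPort(s)
	if err != nil {
		return ""
	}
	if netutils.ParseIPSloppy(host) == nil {
		return ""
	}
	return host
}

func main() {
	fmt.Println(ipPart("10.0.0.1:8080"))    // 10.0.0.1
	fmt.Println(ipPart("[fd00:1::5]:9999")) // fd00:1::5
}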

View File

@ -17,8 +17,9 @@ limitations under the License.
package util package util
import ( import (
"net"
"testing" "testing"
netutils "k8s.io/utils/net"
) )
func TestIPPart(t *testing.T) { func TestIPPart(t *testing.T) {
@ -112,7 +113,7 @@ func TestToCIDR(t *testing.T) {
} }
for _, tc := range testCases { for _, tc := range testCases {
ip := net.ParseIP(tc.ip) ip := netutils.ParseIPSloppy(tc.ip)
addr := ToCIDR(ip) addr := ToCIDR(ip)
if addr != tc.expectedAddr { if addr != tc.expectedAddr {
t.Errorf("Unexpected host address for %s: Expected: %s, Got %s", tc.ip, tc.expectedAddr, addr) t.Errorf("Unexpected host address for %s: Expected: %s, Got %s", tc.ip, tc.expectedAddr, addr)

View File

@ -18,11 +18,10 @@ package iptables
import ( import (
"fmt" "fmt"
"net"
"k8s.io/klog/v2" "k8s.io/klog/v2"
utiliptables "k8s.io/kubernetes/pkg/util/iptables" utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilnet "k8s.io/utils/net" netutils "k8s.io/utils/net"
) )
// LocalTrafficDetector is an interface to take action (jump) based on whether traffic originated locally // LocalTrafficDetector is an interface to take action (jump) based on whether traffic originated locally
@ -66,10 +65,10 @@ type detectLocalByCIDR struct {
// NewDetectLocalByCIDR implements the LocalTrafficDetector interface using a CIDR. This can be used when a single CIDR // NewDetectLocalByCIDR implements the LocalTrafficDetector interface using a CIDR. This can be used when a single CIDR
// range can be used to capture the notion of local traffic. // range can be used to capture the notion of local traffic.
func NewDetectLocalByCIDR(cidr string, ipt utiliptables.Interface) (LocalTrafficDetector, error) { func NewDetectLocalByCIDR(cidr string, ipt utiliptables.Interface) (LocalTrafficDetector, error) {
if utilnet.IsIPv6CIDRString(cidr) != ipt.IsIPv6() { if netutils.IsIPv6CIDRString(cidr) != ipt.IsIPv6() {
return nil, fmt.Errorf("CIDR %s has incorrect IP version: expect isIPv6=%t", cidr, ipt.IsIPv6()) return nil, fmt.Errorf("CIDR %s has incorrect IP version: expect isIPv6=%t", cidr, ipt.IsIPv6())
} }
_, _, err := net.ParseCIDR(cidr) _, _, err := netutils.ParseCIDRSloppy(cidr)
if err != nil { if err != nil {
return nil, err return nil, err
} }
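
NewDetectLocalByCIDR above keeps its two checks and only swaps in the netutils equivalents. A compact sketch of that validation, with validateClusterCIDR as an assumed illustrative name, not anything exported by the package: the CIDR's family must match the backend, and the CIDR must parse with the sloppy parser so prefixes containing leading zeros keep working on Go 1.17+.

package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

func validateClusterCIDR(cidr string, wantIPv6 bool) error {
	// The CIDR's IP family has to match the family of the iptables backend.
	if netutils.IsIPv6CIDRString(cidr) != wantIPv6 {
		return fmt.Errorf("CIDR %s has incorrect IP version: expect isIPv6=%t", cidr, wantIPv6)
	}
	// The CIDR itself must be parseable.
	if _, _, err := netutils.ParseCIDRSloppy(cidr); err != nil {
		return err
	}
	return nil
}

func main() {
	fmt.Println(validateClusterCIDR("10.0.0.0/8", false)) // <nil>
	fmt.Println(validateClusterCIDR("fd00::/64", false))  // family mismatch error
}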

View File

@ -32,7 +32,7 @@ import (
"k8s.io/client-go/tools/events" "k8s.io/client-go/tools/events"
helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
utilnet "k8s.io/utils/net" netutils "k8s.io/utils/net"
"k8s.io/klog/v2" "k8s.io/klog/v2"
) )
@ -88,7 +88,7 @@ func IsZeroCIDR(cidr string) bool {
// IsProxyableIP checks if a given IP address is permitted to be proxied // IsProxyableIP checks if a given IP address is permitted to be proxied
func IsProxyableIP(ip string) error { func IsProxyableIP(ip string) error {
netIP := net.ParseIP(ip) netIP := netutils.ParseIPSloppy(ip)
if netIP == nil { if netIP == nil {
return ErrAddressNotAllowed return ErrAddressNotAllowed
} }
@ -146,7 +146,7 @@ func GetLocalAddrs() ([]net.IP, error) {
} }
for _, addr := range addrs { for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String()) ip, _, err := netutils.ParseCIDRSloppy(addr.String())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -159,7 +159,7 @@ func GetLocalAddrs() ([]net.IP, error) {
// GetLocalAddrSet return a local IPSet. // GetLocalAddrSet return a local IPSet.
// If failed to get local addr, will assume no local ips. // If failed to get local addr, will assume no local ips.
func GetLocalAddrSet() utilnet.IPSet { func GetLocalAddrSet() netutils.IPSet {
localAddrs, err := GetLocalAddrs() localAddrs, err := GetLocalAddrs()
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to get local addresses assuming no local IPs") klog.ErrorS(err, "Failed to get local addresses assuming no local IPs")
@ -167,7 +167,7 @@ func GetLocalAddrSet() utilnet.IPSet {
klog.InfoS("No local addresses were found") klog.InfoS("No local addresses were found")
} }
localAddrSet := utilnet.IPSet{} localAddrSet := netutils.IPSet{}
localAddrSet.Insert(localAddrs...) localAddrSet.Insert(localAddrs...)
return localAddrSet return localAddrSet
} }
@ -220,7 +220,7 @@ func GetNodeAddresses(cidrs []string, nw NetworkInterfacer) (sets.String, error)
continue continue
} }
_, ipNet, _ := net.ParseCIDR(cidr) _, ipNet, _ := netutils.ParseCIDRSloppy(cidr)
for _, addr := range addrs { for _, addr := range addrs {
var ip net.IP var ip net.IP
// nw.InterfaceAddrs may return net.IPAddr or net.IPNet on windows, and it will return net.IPNet on linux. // nw.InterfaceAddrs may return net.IPAddr or net.IPNet on windows, and it will return net.IPNet on linux.
@ -234,10 +234,10 @@ func GetNodeAddresses(cidrs []string, nw NetworkInterfacer) (sets.String, error)
} }
if ipNet.Contains(ip) { if ipNet.Contains(ip) {
if utilnet.IsIPv6(ip) && !uniqueAddressList.Has(IPv6ZeroCIDR) { if netutils.IsIPv6(ip) && !uniqueAddressList.Has(IPv6ZeroCIDR) {
uniqueAddressList.Insert(ip.String()) uniqueAddressList.Insert(ip.String())
} }
if !utilnet.IsIPv6(ip) && !uniqueAddressList.Has(IPv4ZeroCIDR) { if !netutils.IsIPv6(ip) && !uniqueAddressList.Has(IPv4ZeroCIDR) {
uniqueAddressList.Insert(ip.String()) uniqueAddressList.Insert(ip.String())
} }
} }
@ -295,23 +295,23 @@ func MapCIDRsByIPFamily(cidrStrings []string) map[v1.IPFamily][]string {
} }
func getIPFamilyFromIP(ipStr string) (v1.IPFamily, error) { func getIPFamilyFromIP(ipStr string) (v1.IPFamily, error) {
netIP := net.ParseIP(ipStr) netIP := netutils.ParseIPSloppy(ipStr)
if netIP == nil { if netIP == nil {
return "", ErrAddressNotAllowed return "", ErrAddressNotAllowed
} }
if utilnet.IsIPv6(netIP) { if netutils.IsIPv6(netIP) {
return v1.IPv6Protocol, nil return v1.IPv6Protocol, nil
} }
return v1.IPv4Protocol, nil return v1.IPv4Protocol, nil
} }
func getIPFamilyFromCIDR(cidrStr string) (v1.IPFamily, error) { func getIPFamilyFromCIDR(cidrStr string) (v1.IPFamily, error) {
_, netCIDR, err := net.ParseCIDR(cidrStr) _, netCIDR, err := netutils.ParseCIDRSloppy(cidrStr)
if err != nil { if err != nil {
return "", ErrAddressNotAllowed return "", ErrAddressNotAllowed
} }
if utilnet.IsIPv6CIDR(netCIDR) { if netutils.IsIPv6CIDR(netCIDR) {
return v1.IPv6Protocol, nil return v1.IPv6Protocol, nil
} }
return v1.IPv4Protocol, nil return v1.IPv4Protocol, nil
@ -335,7 +335,7 @@ func AppendPortIfNeeded(addr string, port int32) string {
} }
// Simply return for invalid case. This should be caught by validation instead. // Simply return for invalid case. This should be caught by validation instead.
ip := net.ParseIP(addr) ip := netutils.ParseIPSloppy(addr)
if ip == nil { if ip == nil {
return addr return addr
} }
@ -441,7 +441,7 @@ func GetClusterIPByFamily(ipFamily v1.IPFamily, service *v1.Service) string {
} }
IsIPv6Family := (ipFamily == v1.IPv6Protocol) IsIPv6Family := (ipFamily == v1.IPv6Protocol)
if IsIPv6Family == utilnet.IsIPv6String(service.Spec.ClusterIP) { if IsIPv6Family == netutils.IsIPv6String(service.Spec.ClusterIP) {
return service.Spec.ClusterIP return service.Spec.ClusterIP
} }
@ -492,7 +492,7 @@ func WriteBytesLine(buf *bytes.Buffer, bytes []byte) {
// RevertPorts is closing ports in replacementPortsMap but not in originalPortsMap. In other words, it only // RevertPorts is closing ports in replacementPortsMap but not in originalPortsMap. In other words, it only
// closes the ports opened in this sync. // closes the ports opened in this sync.
func RevertPorts(replacementPortsMap, originalPortsMap map[utilnet.LocalPort]utilnet.Closeable) { func RevertPorts(replacementPortsMap, originalPortsMap map[netutils.LocalPort]netutils.Closeable) {
for k, v := range replacementPortsMap { for k, v := range replacementPortsMap {
// Only close newly opened local ports - leave ones that were open before this update // Only close newly opened local ports - leave ones that were open before this update
if originalPortsMap[k] == nil { if originalPortsMap[k] == nil {
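
getIPFamilyFromIP and getIPFamilyFromCIDR above classify addresses after parsing them with the sloppy parsers. A self-contained sketch of the same idea, where ipFamilyOf is an assumed name and only the plain-IP case is shown.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	netutils "k8s.io/utils/net"
)

func ipFamilyOf(ipStr string) (v1.IPFamily, error) {
	// Parse permissively so addresses with leading zeros are still classified.
	ip := netutils.ParseIPSloppy(ipStr)
	if ip == nil {
		return "", fmt.Errorf("invalid IP %q", ipStr)
	}
	if netutils.IsIPv6(ip) {
		return v1.IPv6Protocol, nil
	}
	return v1.IPv4Protocol, nil
}

func main() {
	fmt.Println(ipFamilyOf("10.0.0.1"))    // IPv4 <nil>
	fmt.Println(ipFamilyOf("2001:db8::1")) // IPv6 <nil>
}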

View File

@ -30,7 +30,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
fake "k8s.io/kubernetes/pkg/proxy/util/testing" fake "k8s.io/kubernetes/pkg/proxy/util/testing"
utilnet "k8s.io/utils/net" netutils "k8s.io/utils/net"
) )
func TestValidateWorks(t *testing.T) { func TestValidateWorks(t *testing.T) {
@ -141,7 +141,7 @@ func (r *dummyResolver) LookupIPAddr(ctx context.Context, host string) ([]net.IP
} }
resp := []net.IPAddr{} resp := []net.IPAddr{}
for _, ipString := range r.ips { for _, ipString := range r.ips {
resp = append(resp, net.IPAddr{IP: net.ParseIP(ipString)}) resp = append(resp, net.IPAddr{IP: netutils.ParseIPSloppy(ipString)})
} }
return resp, nil return resp, nil
} }
@ -187,13 +187,13 @@ func TestIsAllowedHost(t *testing.T) {
for i := range testCases { for i := range testCases {
var denyList []*net.IPNet var denyList []*net.IPNet
for _, cidrStr := range testCases[i].denied { for _, cidrStr := range testCases[i].denied {
_, ipNet, err := net.ParseCIDR(cidrStr) _, ipNet, err := netutils.ParseCIDRSloppy(cidrStr)
if err != nil { if err != nil {
t.Fatalf("bad IP for test case: %v: %v", cidrStr, err) t.Fatalf("bad IP for test case: %v: %v", cidrStr, err)
} }
denyList = append(denyList, ipNet) denyList = append(denyList, ipNet)
} }
got := IsAllowedHost(net.ParseIP(testCases[i].ip), denyList) got := IsAllowedHost(netutils.ParseIPSloppy(testCases[i].ip), denyList)
if testCases[i].want != got { if testCases[i].want != got {
t.Errorf("case %d: expected %v, got %v", i, testCases[i].want, got) t.Errorf("case %d: expected %v, got %v", i, testCases[i].want, got)
} }
@ -281,7 +281,7 @@ func TestShouldSkipService(t *testing.T) {
func TestNewFilteredDialContext(t *testing.T) { func TestNewFilteredDialContext(t *testing.T) {
_, cidr, _ := net.ParseCIDR("1.1.1.1/28") _, cidr, _ := netutils.ParseCIDRSloppy("1.1.1.1/28")
testCases := []struct { testCases := []struct {
name string name string
@ -324,7 +324,7 @@ func TestNewFilteredDialContext(t *testing.T) {
opts: &FilteredDialOptions{AllowLocalLoopback: false}, opts: &FilteredDialOptions{AllowLocalLoopback: false},
dial: "127.0.0.1:8080", dial: "127.0.0.1:8080",
expectResolve: "127.0.0.1", expectResolve: "127.0.0.1",
resolveTo: []net.IPAddr{{IP: net.ParseIP("127.0.0.1")}}, resolveTo: []net.IPAddr{{IP: netutils.ParseIPSloppy("127.0.0.1")}},
expectWrappedDial: false, expectWrappedDial: false,
expectErr: "address not allowed", expectErr: "address not allowed",
}, },
@ -333,7 +333,7 @@ func TestNewFilteredDialContext(t *testing.T) {
opts: &FilteredDialOptions{AllowLocalLoopback: false, DialHostCIDRDenylist: []*net.IPNet{cidr}}, opts: &FilteredDialOptions{AllowLocalLoopback: false, DialHostCIDRDenylist: []*net.IPNet{cidr}},
dial: "foo.com:8080", dial: "foo.com:8080",
expectResolve: "foo.com", expectResolve: "foo.com",
resolveTo: []net.IPAddr{{IP: net.ParseIP("1.1.1.1")}}, resolveTo: []net.IPAddr{{IP: netutils.ParseIPSloppy("1.1.1.1")}},
expectWrappedDial: false, expectWrappedDial: false,
expectErr: "address not allowed", expectErr: "address not allowed",
}, },
@ -342,7 +342,7 @@ func TestNewFilteredDialContext(t *testing.T) {
opts: &FilteredDialOptions{AllowLocalLoopback: false, DialHostCIDRDenylist: []*net.IPNet{cidr}}, opts: &FilteredDialOptions{AllowLocalLoopback: false, DialHostCIDRDenylist: []*net.IPNet{cidr}},
dial: "foo.com:8080", dial: "foo.com:8080",
expectResolve: "foo.com", expectResolve: "foo.com",
resolveTo: []net.IPAddr{{IP: net.ParseIP("2.2.2.2")}}, resolveTo: []net.IPAddr{{IP: netutils.ParseIPSloppy("2.2.2.2")}},
expectWrappedDial: true, expectWrappedDial: true,
expectErr: "", expectErr: "",
}, },
@ -417,11 +417,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{ itfAddrsPairs: []InterfaceAddrsPair{
{ {
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("10.20.30.51"), Mask: net.CIDRMask(24, 32)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("10.20.30.51"), Mask: net.CIDRMask(24, 32)}},
}, },
{ {
itf: net.Interface{Index: 2, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 2, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("100.200.201.1"), Mask: net.CIDRMask(24, 32)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("100.200.201.1"), Mask: net.CIDRMask(24, 32)}},
}, },
}, },
expected: sets.NewString("10.20.30.51"), expected: sets.NewString("10.20.30.51"),
@ -432,11 +432,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{ itfAddrsPairs: []InterfaceAddrsPair{
{ {
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("10.20.30.51"), Mask: net.CIDRMask(24, 32)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("10.20.30.51"), Mask: net.CIDRMask(24, 32)}},
}, },
{ {
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("127.0.0.1"), Mask: net.CIDRMask(8, 32)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)}},
}, },
}, },
expected: sets.NewString("0.0.0.0/0"), expected: sets.NewString("0.0.0.0/0"),
@ -447,11 +447,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{ itfAddrsPairs: []InterfaceAddrsPair{
{ {
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("2001:db8::1"), Mask: net.CIDRMask(32, 128)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::1"), Mask: net.CIDRMask(32, 128)}},
}, },
{ {
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("::1"), Mask: net.CIDRMask(128, 128)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)}},
}, },
}, },
expected: sets.NewString("2001:db8::1", "::1"), expected: sets.NewString("2001:db8::1", "::1"),
@ -462,11 +462,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{ itfAddrsPairs: []InterfaceAddrsPair{
{ {
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("2001:db8::1"), Mask: net.CIDRMask(32, 128)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::1"), Mask: net.CIDRMask(32, 128)}},
}, },
{ {
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("::1"), Mask: net.CIDRMask(128, 128)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)}},
}, },
}, },
expected: sets.NewString("::/0"), expected: sets.NewString("::/0"),
@ -477,11 +477,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{ itfAddrsPairs: []InterfaceAddrsPair{
{ {
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("10.20.30.51"), Mask: net.CIDRMask(24, 32)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("10.20.30.51"), Mask: net.CIDRMask(24, 32)}},
}, },
{ {
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("127.0.0.1"), Mask: net.CIDRMask(8, 32)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)}},
}, },
}, },
expected: sets.NewString("127.0.0.1"), expected: sets.NewString("127.0.0.1"),
@ -492,7 +492,7 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{ itfAddrsPairs: []InterfaceAddrsPair{
{ {
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("127.0.1.1"), Mask: net.CIDRMask(8, 32)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("127.0.1.1"), Mask: net.CIDRMask(8, 32)}},
}, },
}, },
expected: sets.NewString("127.0.1.1"), expected: sets.NewString("127.0.1.1"),
@ -503,11 +503,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{ itfAddrsPairs: []InterfaceAddrsPair{
{ {
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("10.20.30.51"), Mask: net.CIDRMask(24, 32)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("10.20.30.51"), Mask: net.CIDRMask(24, 32)}},
}, },
{ {
itf: net.Interface{Index: 2, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 2, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("100.200.201.1"), Mask: net.CIDRMask(24, 32)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("100.200.201.1"), Mask: net.CIDRMask(24, 32)}},
}, },
}, },
expected: sets.NewString("10.20.30.51", "100.200.201.1"), expected: sets.NewString("10.20.30.51", "100.200.201.1"),
@ -518,11 +518,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{ itfAddrsPairs: []InterfaceAddrsPair{
{ {
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("192.168.1.2"), Mask: net.CIDRMask(24, 32)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("192.168.1.2"), Mask: net.CIDRMask(24, 32)}},
}, },
{ {
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("127.0.0.1"), Mask: net.CIDRMask(8, 32)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)}},
}, },
}, },
expected: nil, expected: nil,
@ -534,11 +534,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{ itfAddrsPairs: []InterfaceAddrsPair{
{ {
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("192.168.1.2"), Mask: net.CIDRMask(24, 32)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("192.168.1.2"), Mask: net.CIDRMask(24, 32)}},
}, },
{ {
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("127.0.0.1"), Mask: net.CIDRMask(8, 32)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)}},
}, },
}, },
expected: sets.NewString("0.0.0.0/0", "::/0"), expected: sets.NewString("0.0.0.0/0", "::/0"),
@ -549,11 +549,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{ itfAddrsPairs: []InterfaceAddrsPair{
{ {
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("2001:db8::1"), Mask: net.CIDRMask(32, 128)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::1"), Mask: net.CIDRMask(32, 128)}},
}, },
{ {
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("::1"), Mask: net.CIDRMask(128, 128)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)}},
}, },
}, },
expected: sets.NewString("0.0.0.0/0", "::/0"), expected: sets.NewString("0.0.0.0/0", "::/0"),
@ -564,7 +564,7 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{ itfAddrsPairs: []InterfaceAddrsPair{
{ {
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("1.2.3.4"), Mask: net.CIDRMask(30, 32)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("1.2.3.4"), Mask: net.CIDRMask(30, 32)}},
}, },
}, },
expected: sets.NewString("0.0.0.0/0"), expected: sets.NewString("0.0.0.0/0"),
@ -575,11 +575,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{ itfAddrsPairs: []InterfaceAddrsPair{
{ {
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("1.2.3.4"), Mask: net.CIDRMask(30, 32)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("1.2.3.4"), Mask: net.CIDRMask(30, 32)}},
}, },
{ {
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("::1"), Mask: net.CIDRMask(128, 128)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)}},
}, },
}, },
expected: sets.NewString("0.0.0.0/0", "::1"), expected: sets.NewString("0.0.0.0/0", "::1"),
@ -590,11 +590,11 @@ func TestGetNodeAddresses(t *testing.T) {
itfAddrsPairs: []InterfaceAddrsPair{ itfAddrsPairs: []InterfaceAddrsPair{
{ {
itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("1.2.3.4"), Mask: net.CIDRMask(30, 32)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("1.2.3.4"), Mask: net.CIDRMask(30, 32)}},
}, },
{ {
itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}, itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
addrs: []net.Addr{&net.IPNet{IP: net.ParseIP("::1"), Mask: net.CIDRMask(128, 128)}}, addrs: []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)}},
}, },
}, },
expected: sets.NewString("::/0", "1.2.3.4"), expected: sets.NewString("::/0", "1.2.3.4"),
@ -1063,22 +1063,22 @@ func (c *fakeClosable) Close() error {
func TestRevertPorts(t *testing.T) { func TestRevertPorts(t *testing.T) {
testCases := []struct { testCases := []struct {
replacementPorts []utilnet.LocalPort replacementPorts []netutils.LocalPort
existingPorts []utilnet.LocalPort existingPorts []netutils.LocalPort
expectToBeClose []bool expectToBeClose []bool
}{ }{
{ {
replacementPorts: []utilnet.LocalPort{ replacementPorts: []netutils.LocalPort{
{Port: 5001}, {Port: 5001},
{Port: 5002}, {Port: 5002},
{Port: 5003}, {Port: 5003},
}, },
existingPorts: []utilnet.LocalPort{}, existingPorts: []netutils.LocalPort{},
expectToBeClose: []bool{true, true, true}, expectToBeClose: []bool{true, true, true},
}, },
{ {
replacementPorts: []utilnet.LocalPort{}, replacementPorts: []netutils.LocalPort{},
existingPorts: []utilnet.LocalPort{ existingPorts: []netutils.LocalPort{
{Port: 5001}, {Port: 5001},
{Port: 5002}, {Port: 5002},
{Port: 5003}, {Port: 5003},
@ -1086,12 +1086,12 @@ func TestRevertPorts(t *testing.T) {
expectToBeClose: []bool{}, expectToBeClose: []bool{},
}, },
{ {
replacementPorts: []utilnet.LocalPort{ replacementPorts: []netutils.LocalPort{
{Port: 5001}, {Port: 5001},
{Port: 5002}, {Port: 5002},
{Port: 5003}, {Port: 5003},
}, },
existingPorts: []utilnet.LocalPort{ existingPorts: []netutils.LocalPort{
{Port: 5001}, {Port: 5001},
{Port: 5002}, {Port: 5002},
{Port: 5003}, {Port: 5003},
@ -1099,24 +1099,24 @@ func TestRevertPorts(t *testing.T) {
expectToBeClose: []bool{false, false, false}, expectToBeClose: []bool{false, false, false},
}, },
{ {
replacementPorts: []utilnet.LocalPort{ replacementPorts: []netutils.LocalPort{
{Port: 5001}, {Port: 5001},
{Port: 5002}, {Port: 5002},
{Port: 5003}, {Port: 5003},
}, },
existingPorts: []utilnet.LocalPort{ existingPorts: []netutils.LocalPort{
{Port: 5001}, {Port: 5001},
{Port: 5003}, {Port: 5003},
}, },
expectToBeClose: []bool{false, true, false}, expectToBeClose: []bool{false, true, false},
}, },
{ {
replacementPorts: []utilnet.LocalPort{ replacementPorts: []netutils.LocalPort{
{Port: 5001}, {Port: 5001},
{Port: 5002}, {Port: 5002},
{Port: 5003}, {Port: 5003},
}, },
existingPorts: []utilnet.LocalPort{ existingPorts: []netutils.LocalPort{
{Port: 5001}, {Port: 5001},
{Port: 5002}, {Port: 5002},
{Port: 5003}, {Port: 5003},
@ -1127,11 +1127,11 @@ func TestRevertPorts(t *testing.T) {
} }
for i, tc := range testCases { for i, tc := range testCases {
replacementPortsMap := make(map[utilnet.LocalPort]utilnet.Closeable) replacementPortsMap := make(map[netutils.LocalPort]netutils.Closeable)
for _, lp := range tc.replacementPorts { for _, lp := range tc.replacementPorts {
replacementPortsMap[lp] = &fakeClosable{} replacementPortsMap[lp] = &fakeClosable{}
} }
existingPortsMap := make(map[utilnet.LocalPort]utilnet.Closeable) existingPortsMap := make(map[netutils.LocalPort]netutils.Closeable)
for _, lp := range tc.existingPorts { for _, lp := range tc.existingPorts {
existingPortsMap[lp] = &fakeClosable{} existingPortsMap[lp] = &fakeClosable{}
} }
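
The IsAllowedHost cases above build their denylist by parsing each CIDR with ParseCIDRSloppy and then checking the resolved IP for membership. A hedged sketch of that pattern, with buildDenyList and isDenied as illustrative names that are not part of the tested package.

package main

import (
	"fmt"
	"net"

	netutils "k8s.io/utils/net"
)

func buildDenyList(cidrs []string) ([]*net.IPNet, error) {
	var denyList []*net.IPNet
	for _, c := range cidrs {
		_, ipNet, err := netutils.ParseCIDRSloppy(c)
		if err != nil {
			return nil, err
		}
		denyList = append(denyList, ipNet)
	}
	return denyList, nil
}

func isDenied(ip net.IP, denyList []*net.IPNet) bool {
	for _, n := range denyList {
		if n.Contains(ip) {
			return true
		}
	}
	return false
}

func main() {
	denyList, _ := buildDenyList([]string{"169.254.0.0/16", "127.0.0.0/8"})
	fmt.Println(isDenied(netutils.ParseIPSloppy("127.0.0.1"), denyList)) // true
	fmt.Println(isDenied(netutils.ParseIPSloppy("10.0.0.1"), denyList))  // false
}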

View File

@ -21,10 +21,11 @@ package winkernel
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"strings"
"github.com/Microsoft/hcsshim" "github.com/Microsoft/hcsshim"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"net" netutils "k8s.io/utils/net"
"strings"
) )
type HostNetworkService interface { type HostNetworkService interface {
@ -113,7 +114,7 @@ func (hns hnsV1) createEndpoint(ep *endpointsInfo, networkName string) (*endpoin
} }
hnsEndpoint := &hcsshim.HNSEndpoint{ hnsEndpoint := &hcsshim.HNSEndpoint{
MacAddress: ep.macAddress, MacAddress: ep.macAddress,
IPAddress: net.ParseIP(ep.ip), IPAddress: netutils.ParseIPSloppy(ep.ip),
} }
var createdEndpoint *hcsshim.HNSEndpoint var createdEndpoint *hcsshim.HNSEndpoint

View File

@ -52,7 +52,7 @@ import (
"k8s.io/kubernetes/pkg/proxy/metaproxier" "k8s.io/kubernetes/pkg/proxy/metaproxier"
"k8s.io/kubernetes/pkg/proxy/metrics" "k8s.io/kubernetes/pkg/proxy/metrics"
"k8s.io/kubernetes/pkg/util/async" "k8s.io/kubernetes/pkg/util/async"
utilnet "k8s.io/utils/net" netutils "k8s.io/utils/net"
) )
// KernelCompatTester tests whether the required kernel capabilities are // KernelCompatTester tests whether the required kernel capabilities are
@ -424,7 +424,7 @@ func (proxier *Proxier) newEndpointInfo(baseInfo *proxy.BaseEndpointInfo) proxy.
ip: baseInfo.IP(), ip: baseInfo.IP(),
port: uint16(portNumber), port: uint16(portNumber),
isLocal: baseInfo.GetIsLocal(), isLocal: baseInfo.GetIsLocal(),
macAddress: conjureMac("02-11", net.ParseIP(baseInfo.IP())), macAddress: conjureMac("02-11", netutils.ParseIPSloppy(baseInfo.IP())),
refCount: new(uint16), refCount: new(uint16),
hnsID: "", hnsID: "",
hns: proxier.hns, hns: proxier.hns,
@ -510,7 +510,7 @@ func (proxier *Proxier) newServiceInfo(port *v1.ServicePort, service *v1.Service
} }
for _, ingress := range service.Status.LoadBalancer.Ingress { for _, ingress := range service.Status.LoadBalancer.Ingress {
if net.ParseIP(ingress.IP) != nil { if netutils.ParseIPSloppy(ingress.IP) != nil {
info.loadBalancerIngressIPs = append(info.loadBalancerIngressIPs, &loadBalancerIngressInfo{ip: ingress.IP}) info.loadBalancerIngressIPs = append(info.loadBalancerIngressIPs, &loadBalancerIngressInfo{ip: ingress.IP})
} }
} }
@ -520,11 +520,11 @@ func (proxier *Proxier) newServiceInfo(port *v1.ServicePort, service *v1.Service
func (network hnsNetworkInfo) findRemoteSubnetProviderAddress(ip string) string { func (network hnsNetworkInfo) findRemoteSubnetProviderAddress(ip string) string {
var providerAddress string var providerAddress string
for _, rs := range network.remoteSubnets { for _, rs := range network.remoteSubnets {
_, ipNet, err := net.ParseCIDR(rs.destinationPrefix) _, ipNet, err := netutils.ParseCIDRSloppy(rs.destinationPrefix)
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to parse CIDR") klog.ErrorS(err, "Failed to parse CIDR")
} }
if ipNet.Contains(net.ParseIP(ip)) { if ipNet.Contains(netutils.ParseIPSloppy(ip)) {
providerAddress = rs.providerAddress providerAddress = rs.providerAddress
} }
if ip == rs.providerAddress { if ip == rs.providerAddress {
@ -634,7 +634,7 @@ func NewProxier(
if nodeIP == nil { if nodeIP == nil {
klog.InfoS("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP") klog.InfoS("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP")
nodeIP = net.ParseIP("127.0.0.1") nodeIP = netutils.ParseIPSloppy("127.0.0.1")
} }
if len(clusterCIDR) == 0 { if len(clusterCIDR) == 0 {
@ -705,7 +705,7 @@ func NewProxier(
for _, inter := range interfaces { for _, inter := range interfaces {
addresses, _ := inter.Addrs() addresses, _ := inter.Addrs()
for _, addr := range addresses { for _, addr := range addresses {
addrIP, _, _ := net.ParseCIDR(addr.String()) addrIP, _, _ := netutils.ParseCIDRSloppy(addr.String())
if addrIP.String() == nodeIP.String() { if addrIP.String() == nodeIP.String() {
klog.V(2).InfoS("record Host MAC address", "addr", inter.HardwareAddr.String()) klog.V(2).InfoS("record Host MAC address", "addr", inter.HardwareAddr.String())
hostMac = inter.HardwareAddr.String() hostMac = inter.HardwareAddr.String()
@ -717,7 +717,7 @@ func NewProxier(
} }
} }
isIPv6 := utilnet.IsIPv6(nodeIP) isIPv6 := netutils.IsIPv6(nodeIP)
proxier := &Proxier{ proxier := &Proxier{
endPointsRefCount: make(endPointsReferenceCountMap), endPointsRefCount: make(endPointsReferenceCountMap),
serviceMap: make(proxy.ServiceMap), serviceMap: make(proxy.ServiceMap),
@ -1179,7 +1179,7 @@ func (proxier *Proxier) syncProxyRules() {
hnsEndpoint := &endpointsInfo{ hnsEndpoint := &endpointsInfo{
ip: ep.ip, ip: ep.ip,
isLocal: false, isLocal: false,
macAddress: conjureMac("02-11", net.ParseIP(ep.ip)), macAddress: conjureMac("02-11", netutils.ParseIPSloppy(ep.ip)),
providerAddress: providerAddress, providerAddress: providerAddress,
} }
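The findRemoteSubnetProviderAddress hunk keeps its lookup logic and only changes how the destination prefix and the endpoint IP are parsed. A hedged, self-contained sketch of that containment check follows; providerAddressFor, its map argument, and the sample values are illustrative and not part of the proxier.

package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

// providerAddressFor mirrors the pattern above: parse each remote-subnet
// prefix leniently and return the provider address whose subnet contains
// the endpoint IP.
func providerAddressFor(ip string, subnets map[string]string) string {
	for prefix, provider := range subnets {
		_, ipNet, err := netutils.ParseCIDRSloppy(prefix)
		if err != nil {
			continue // skip prefixes that do not parse
		}
		if ipNet.Contains(netutils.ParseIPSloppy(ip)) {
			return provider
		}
	}
	return ""
}

func main() {
	subnets := map[string]string{"192.168.2.0/24": "10.0.0.2"}
	fmt.Println(providerAddressFor("192.168.2.30", subnets)) // 10.0.0.2
}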

View File

@ -33,6 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/proxy/healthcheck" "k8s.io/kubernetes/pkg/proxy/healthcheck"
netutils "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer" utilpointer "k8s.io/utils/pointer"
) )
@ -73,9 +74,9 @@ func (hns fakeHNS) getEndpointByID(id string) (*endpointsInfo, error) {
} }
func (hns fakeHNS) getEndpointByIpAddress(ip string, networkName string) (*endpointsInfo, error) { func (hns fakeHNS) getEndpointByIpAddress(ip string, networkName string) (*endpointsInfo, error) {
_, ipNet, _ := net.ParseCIDR(destinationPrefix) _, ipNet, _ := netutils.ParseCIDRSloppy(destinationPrefix)
if ipNet.Contains(net.ParseIP(ip)) { if ipNet.Contains(netutils.ParseIPSloppy(ip)) {
return &endpointsInfo{ return &endpointsInfo{
ip: ip, ip: ip,
isLocal: false, isLocal: false,
@ -144,7 +145,7 @@ func NewFakeProxier(syncPeriod time.Duration, minSyncPeriod time.Duration, clust
func TestCreateServiceVip(t *testing.T) { func TestCreateServiceVip(t *testing.T) {
syncPeriod := 30 * time.Second syncPeriod := 30 * time.Second
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), NETWORK_TYPE_OVERLAY) proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), NETWORK_TYPE_OVERLAY)
if proxier == nil { if proxier == nil {
t.Error() t.Error()
} }
@ -199,7 +200,7 @@ func TestCreateServiceVip(t *testing.T) {
func TestCreateRemoteEndpointOverlay(t *testing.T) { func TestCreateRemoteEndpointOverlay(t *testing.T) {
syncPeriod := 30 * time.Second syncPeriod := 30 * time.Second
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), NETWORK_TYPE_OVERLAY) proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), NETWORK_TYPE_OVERLAY)
if proxier == nil { if proxier == nil {
t.Error() t.Error()
} }
@ -264,7 +265,7 @@ func TestCreateRemoteEndpointOverlay(t *testing.T) {
func TestCreateRemoteEndpointL2Bridge(t *testing.T) { func TestCreateRemoteEndpointL2Bridge(t *testing.T) {
syncPeriod := 30 * time.Second syncPeriod := 30 * time.Second
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), "L2Bridge") proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), "L2Bridge")
if proxier == nil { if proxier == nil {
t.Error() t.Error()
} }
@ -328,7 +329,7 @@ func TestCreateRemoteEndpointL2Bridge(t *testing.T) {
func TestSharedRemoteEndpointDelete(t *testing.T) { func TestSharedRemoteEndpointDelete(t *testing.T) {
syncPeriod := 30 * time.Second syncPeriod := 30 * time.Second
tcpProtocol := v1.ProtocolTCP tcpProtocol := v1.ProtocolTCP
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), "L2Bridge") proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), "L2Bridge")
if proxier == nil { if proxier == nil {
t.Error() t.Error()
} }
@ -470,7 +471,7 @@ func TestSharedRemoteEndpointDelete(t *testing.T) {
} }
func TestSharedRemoteEndpointUpdate(t *testing.T) { func TestSharedRemoteEndpointUpdate(t *testing.T) {
syncPeriod := 30 * time.Second syncPeriod := 30 * time.Second
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), "L2Bridge") proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), "L2Bridge")
if proxier == nil { if proxier == nil {
t.Error() t.Error()
} }
@ -645,7 +646,7 @@ func TestSharedRemoteEndpointUpdate(t *testing.T) {
func TestCreateLoadBalancer(t *testing.T) { func TestCreateLoadBalancer(t *testing.T) {
syncPeriod := 30 * time.Second syncPeriod := 30 * time.Second
tcpProtocol := v1.ProtocolTCP tcpProtocol := v1.ProtocolTCP
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), NETWORK_TYPE_OVERLAY) proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), NETWORK_TYPE_OVERLAY)
if proxier == nil { if proxier == nil {
t.Error() t.Error()
} }
@ -703,7 +704,7 @@ func TestCreateLoadBalancer(t *testing.T) {
func TestCreateDsrLoadBalancer(t *testing.T) { func TestCreateDsrLoadBalancer(t *testing.T) {
syncPeriod := 30 * time.Second syncPeriod := 30 * time.Second
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), NETWORK_TYPE_OVERLAY) proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), NETWORK_TYPE_OVERLAY)
if proxier == nil { if proxier == nil {
t.Error() t.Error()
} }
@ -765,7 +766,7 @@ func TestCreateDsrLoadBalancer(t *testing.T) {
func TestEndpointSlice(t *testing.T) { func TestEndpointSlice(t *testing.T) {
syncPeriod := 30 * time.Second syncPeriod := 30 * time.Second
proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", net.ParseIP("10.0.0.1"), NETWORK_TYPE_OVERLAY) proxier := NewFakeProxier(syncPeriod, syncPeriod, clusterCIDR, "testhost", netutils.ParseIPSloppy("10.0.0.1"), NETWORK_TYPE_OVERLAY)
if proxier == nil { if proxier == nil {
t.Error() t.Error()
} }

View File

@ -26,6 +26,7 @@ import (
"time" "time"
"k8s.io/klog/v2" "k8s.io/klog/v2"
netutils "k8s.io/utils/net"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
@ -106,8 +107,8 @@ var (
) )
// Used below. // Used below.
var localhostIPv4 = net.ParseIP("127.0.0.1") var localhostIPv4 = netutils.ParseIPSloppy("127.0.0.1")
var localhostIPv6 = net.ParseIP("::1") var localhostIPv6 = netutils.ParseIPSloppy("::1")
// NewProxier returns a new Proxier given a LoadBalancer and an address on // NewProxier returns a new Proxier given a LoadBalancer and an address on
// which to listen. It is assumed that there is only a single Proxier active // which to listen. It is assumed that there is only a single Proxier active
@ -208,7 +209,7 @@ func (proxier *Proxier) setServiceInfo(service ServicePortPortalName, info *serv
func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPortalName, protocol v1.Protocol, listenIP string, port int, timeout time.Duration) (*serviceInfo, error) { func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPortalName, protocol v1.Protocol, listenIP string, port int, timeout time.Duration) (*serviceInfo, error) {
var serviceIP net.IP var serviceIP net.IP
if listenIP != allAvailableInterfaces { if listenIP != allAvailableInterfaces {
if serviceIP = net.ParseIP(listenIP); serviceIP == nil { if serviceIP = netutils.ParseIPSloppy(listenIP); serviceIP == nil {
return nil, fmt.Errorf("could not parse ip '%q'", listenIP) return nil, fmt.Errorf("could not parse ip '%q'", listenIP)
} }
// add the IP address. Node port binds to all interfaces. // add the IP address. Node port binds to all interfaces.
@ -259,7 +260,7 @@ func (proxier *Proxier) closeServicePortPortal(servicePortPortalName ServicePort
// close the PortalProxy by deleting the service IP address // close the PortalProxy by deleting the service IP address
if info.portal.ip != allAvailableInterfaces { if info.portal.ip != allAvailableInterfaces {
serviceIP := net.ParseIP(info.portal.ip) serviceIP := netutils.ParseIPSloppy(info.portal.ip)
args := proxier.netshIPv4AddressDeleteArgs(serviceIP) args := proxier.netshIPv4AddressDeleteArgs(serviceIP)
if err := proxier.netsh.DeleteIPAddress(args); err != nil { if err := proxier.netsh.DeleteIPAddress(args); err != nil {
return err return err

View File

@ -36,6 +36,7 @@ import (
"k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/proxy"
netshtest "k8s.io/kubernetes/pkg/util/netsh/testing" netshtest "k8s.io/kubernetes/pkg/util/netsh/testing"
netutils "k8s.io/utils/net"
) )
const ( const (
@ -251,7 +252,7 @@ func TestTCPProxy(t *testing.T) {
}) })
listenIP := "0.0.0.0" listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -278,7 +279,7 @@ func TestUDPProxy(t *testing.T) {
}) })
listenIP := "0.0.0.0" listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -305,7 +306,7 @@ func TestUDPProxyTimeout(t *testing.T) {
}) })
listenIP := "0.0.0.0" listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -344,7 +345,7 @@ func TestMultiPortProxy(t *testing.T) {
}) })
listenIP := "0.0.0.0" listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -374,7 +375,7 @@ func TestMultiPortOnServiceAdd(t *testing.T) {
serviceX := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "x"} serviceX := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "x"}
listenIP := "0.0.0.0" listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -440,7 +441,7 @@ func TestTCPProxyStop(t *testing.T) {
}) })
listenIP := "0.0.0.0" listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -484,7 +485,7 @@ func TestUDPProxyStop(t *testing.T) {
}) })
listenIP := "0.0.0.0" listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -522,7 +523,7 @@ func TestTCPProxyUpdateDelete(t *testing.T) {
}) })
listenIP := "0.0.0.0" listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -567,7 +568,7 @@ func TestUDPProxyUpdateDelete(t *testing.T) {
}) })
listenIP := "0.0.0.0" listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -612,7 +613,7 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
lb.OnEndpointsAdd(endpoint) lb.OnEndpointsAdd(endpoint)
listenIP := "0.0.0.0" listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -674,7 +675,7 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
lb.OnEndpointsAdd(endpoint) lb.OnEndpointsAdd(endpoint)
listenIP := "0.0.0.0" listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -735,7 +736,7 @@ func TestTCPProxyUpdatePort(t *testing.T) {
}) })
listenIP := "0.0.0.0" listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -783,7 +784,7 @@ func TestUDPProxyUpdatePort(t *testing.T) {
}) })
listenIP := "0.0.0.0" listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -828,7 +829,7 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
}) })
listenIP := "0.0.0.0" listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -881,7 +882,7 @@ func TestProxyUpdatePortal(t *testing.T) {
lb.OnEndpointsAdd(endpoint) lb.OnEndpointsAdd(endpoint)
listenIP := "0.0.0.0" listenIP := "0.0.0.0"
p, err := createProxier(lb, net.ParseIP(listenIP), netshtest.NewFake(), net.ParseIP("127.0.0.1"), time.Minute, udpIdleTimeoutForTest) p, err := createProxier(lb, netutils.ParseIPSloppy(listenIP), netshtest.NewFake(), netutils.ParseIPSloppy("127.0.0.1"), time.Minute, udpIdleTimeoutForTest)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -24,7 +24,7 @@ import (
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/registry/core/service/allocator" "k8s.io/kubernetes/pkg/registry/core/service/allocator"
utilnet "k8s.io/utils/net" netutils "k8s.io/utils/net"
) )
// Interface manages the allocation of IP addresses out of a range. Interface // Interface manages the allocation of IP addresses out of a range. Interface
@ -85,12 +85,12 @@ type Range struct {
func New(cidr *net.IPNet, allocatorFactory allocator.AllocatorFactory) (*Range, error) { func New(cidr *net.IPNet, allocatorFactory allocator.AllocatorFactory) (*Range, error) {
registerMetrics() registerMetrics()
max := utilnet.RangeSize(cidr) max := netutils.RangeSize(cidr)
base := utilnet.BigForIP(cidr.IP) base := netutils.BigForIP(cidr.IP)
rangeSpec := cidr.String() rangeSpec := cidr.String()
var family api.IPFamily var family api.IPFamily
if utilnet.IsIPv6CIDR(cidr) { if netutils.IsIPv6CIDR(cidr) {
family = api.IPv6Protocol family = api.IPv6Protocol
// Limit the max size, since the allocator keeps a bitmap of that size. // Limit the max size, since the allocator keeps a bitmap of that size.
if max > 65536 { if max > 65536 {
@ -126,7 +126,7 @@ func NewInMemory(cidr *net.IPNet) (*Range, error) {
// NewFromSnapshot allocates a Range and initializes it from a snapshot. // NewFromSnapshot allocates a Range and initializes it from a snapshot.
func NewFromSnapshot(snap *api.RangeAllocation) (*Range, error) { func NewFromSnapshot(snap *api.RangeAllocation) (*Range, error) {
_, ipnet, err := net.ParseCIDR(snap.Range) _, ipnet, err := netutils.ParseCIDRSloppy(snap.Range)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -219,7 +219,7 @@ func (r *Range) AllocateNext() (net.IP, error) {
clusterIPAllocated.WithLabelValues(label.String()).Set(float64(r.Used())) clusterIPAllocated.WithLabelValues(label.String()).Set(float64(r.Used()))
clusterIPAvailable.WithLabelValues(label.String()).Set(float64(r.Free())) clusterIPAvailable.WithLabelValues(label.String()).Set(float64(r.Free()))
return utilnet.AddIPOffset(r.base, offset), nil return netutils.AddIPOffset(r.base, offset), nil
} }
// Release releases the IP back to the pool. Releasing an // Release releases the IP back to the pool. Releasing an
@ -244,7 +244,7 @@ func (r *Range) Release(ip net.IP) error {
// ForEach calls the provided function for each allocated IP. // ForEach calls the provided function for each allocated IP.
func (r *Range) ForEach(fn func(net.IP)) { func (r *Range) ForEach(fn func(net.IP)) {
r.alloc.ForEach(func(offset int) { r.alloc.ForEach(func(offset int) {
ip, _ := utilnet.GetIndexedIP(r.net, offset+1) // +1 because Range doesn't store IP 0 ip, _ := netutils.GetIndexedIP(r.net, offset+1) // +1 because Range doesn't store IP 0
fn(ip) fn(ip)
}) })
} }
@ -310,5 +310,5 @@ func (r *Range) contains(ip net.IP) (bool, int) {
// calculateIPOffset calculates the integer offset of ip from base such that // calculateIPOffset calculates the integer offset of ip from base such that
// base + offset = ip. It requires ip >= base. // base + offset = ip. It requires ip >= base.
func calculateIPOffset(base *big.Int, ip net.IP) int { func calculateIPOffset(base *big.Int, ip net.IP) int {
return int(big.NewInt(0).Sub(utilnet.BigForIP(ip), base).Int64()) return int(big.NewInt(0).Sub(netutils.BigForIP(ip), base).Int64())
} }
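The Range allocator above converts between IPs and bitmap offsets with the BigForIP and AddIPOffset helpers it now imports as netutils; calculateIPOffset is simply the inverse of AddIPOffset. A small worked sketch of that arithmetic, using an illustrative /24:

package main

import (
	"fmt"
	"math/big"

	netutils "k8s.io/utils/net"
)

func main() {
	// The allocator keeps the CIDR base as a big.Int and maps each
	// allocatable IP to an integer offset from that base.
	_, cidr, _ := netutils.ParseCIDRSloppy("192.168.1.0/24")
	base := netutils.BigForIP(cidr.IP)

	// Offset to IP, as in AllocateNext.
	ip := netutils.AddIPOffset(base, 10)
	fmt.Println(ip) // 192.168.1.10

	// IP back to offset, as in calculateIPOffset.
	offset := big.NewInt(0).Sub(netutils.BigForIP(ip), base).Int64()
	fmt.Println(offset) // 10
}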

View File

@ -23,6 +23,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-base/metrics/testutil" "k8s.io/component-base/metrics/testutil"
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
netutils "k8s.io/utils/net"
) )
func TestAllocate(t *testing.T) { func TestAllocate(t *testing.T) {
@ -65,7 +66,7 @@ func TestAllocate(t *testing.T) {
}, },
} }
for _, tc := range testCases { for _, tc := range testCases {
_, cidr, err := net.ParseCIDR(tc.cidr) _, cidr, err := netutils.ParseCIDRSloppy(tc.cidr)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -110,7 +111,7 @@ func TestAllocate(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
released := net.ParseIP(tc.released) released := netutils.ParseIPSloppy(tc.released)
if err := r.Release(released); err != nil { if err := r.Release(released); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -132,12 +133,12 @@ func TestAllocate(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
for _, outOfRange := range tc.outOfRange { for _, outOfRange := range tc.outOfRange {
err = r.Allocate(net.ParseIP(outOfRange)) err = r.Allocate(netutils.ParseIPSloppy(outOfRange))
if _, ok := err.(*ErrNotInRange); !ok { if _, ok := err.(*ErrNotInRange); !ok {
t.Fatal(err) t.Fatal(err)
} }
} }
if err := r.Allocate(net.ParseIP(tc.alreadyAllocated)); err != ErrAllocated { if err := r.Allocate(netutils.ParseIPSloppy(tc.alreadyAllocated)); err != ErrAllocated {
t.Fatal(err) t.Fatal(err)
} }
if f := r.Free(); f != 1 { if f := r.Free(); f != 1 {
@ -159,7 +160,7 @@ func TestAllocate(t *testing.T) {
} }
func TestAllocateTiny(t *testing.T) { func TestAllocateTiny(t *testing.T) {
_, cidr, err := net.ParseCIDR("192.168.1.0/32") _, cidr, err := netutils.ParseCIDRSloppy("192.168.1.0/32")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -176,7 +177,7 @@ func TestAllocateTiny(t *testing.T) {
} }
func TestAllocateSmall(t *testing.T) { func TestAllocateSmall(t *testing.T) {
_, cidr, err := net.ParseCIDR("192.168.1.240/30") _, cidr, err := netutils.ParseCIDRSloppy("192.168.1.240/30")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -199,10 +200,10 @@ func TestAllocateSmall(t *testing.T) {
found.Insert(ip.String()) found.Insert(ip.String())
} }
for s := range found { for s := range found {
if !r.Has(net.ParseIP(s)) { if !r.Has(netutils.ParseIPSloppy(s)) {
t.Fatalf("missing: %s", s) t.Fatalf("missing: %s", s)
} }
if err := r.Allocate(net.ParseIP(s)); err != ErrAllocated { if err := r.Allocate(netutils.ParseIPSloppy(s)); err != ErrAllocated {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -220,7 +221,7 @@ func TestAllocateSmall(t *testing.T) {
} }
func TestForEach(t *testing.T) { func TestForEach(t *testing.T) {
_, cidr, err := net.ParseCIDR("192.168.1.0/24") _, cidr, err := netutils.ParseCIDRSloppy("192.168.1.0/24")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -238,7 +239,7 @@ func TestForEach(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
for ips := range tc { for ips := range tc {
ip := net.ParseIP(ips) ip := netutils.ParseIPSloppy(ips)
if err := r.Allocate(ip); err != nil { if err := r.Allocate(ip); err != nil {
t.Errorf("[%d] error allocating IP %v: %v", i, ip, err) t.Errorf("[%d] error allocating IP %v: %v", i, ip, err)
} }
@ -260,7 +261,7 @@ func TestForEach(t *testing.T) {
} }
func TestSnapshot(t *testing.T) { func TestSnapshot(t *testing.T) {
_, cidr, err := net.ParseCIDR("192.168.1.0/24") _, cidr, err := netutils.ParseCIDRSloppy("192.168.1.0/24")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -283,7 +284,7 @@ func TestSnapshot(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
_, network, err := net.ParseCIDR(dst.Range) _, network, err := netutils.ParseCIDRSloppy(dst.Range)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -292,7 +293,7 @@ func TestSnapshot(t *testing.T) {
t.Fatalf("mismatched networks: %s : %s", network, cidr) t.Fatalf("mismatched networks: %s : %s", network, cidr)
} }
_, otherCidr, err := net.ParseCIDR("192.168.2.0/24") _, otherCidr, err := netutils.ParseCIDRSloppy("192.168.2.0/24")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -322,7 +323,7 @@ func TestSnapshot(t *testing.T) {
} }
func TestNewFromSnapshot(t *testing.T) { func TestNewFromSnapshot(t *testing.T) {
_, cidr, err := net.ParseCIDR("192.168.0.0/24") _, cidr, err := netutils.ParseCIDRSloppy("192.168.0.0/24")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -366,7 +367,7 @@ func TestNewFromSnapshot(t *testing.T) {
func TestClusterIPMetrics(t *testing.T) { func TestClusterIPMetrics(t *testing.T) {
// create IPv4 allocator // create IPv4 allocator
cidrIPv4 := "10.0.0.0/24" cidrIPv4 := "10.0.0.0/24"
_, clusterCIDRv4, _ := net.ParseCIDR(cidrIPv4) _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy(cidrIPv4)
a, err := NewInMemory(clusterCIDRv4) a, err := NewInMemory(clusterCIDRv4)
if err != nil { if err != nil {
t.Fatalf("unexpected error creating CidrSet: %v", err) t.Fatalf("unexpected error creating CidrSet: %v", err)
@ -374,7 +375,7 @@ func TestClusterIPMetrics(t *testing.T) {
clearMetrics(map[string]string{"cidr": cidrIPv4}) clearMetrics(map[string]string{"cidr": cidrIPv4})
// create IPv6 allocator // create IPv6 allocator
cidrIPv6 := "2001:db8::/112" cidrIPv6 := "2001:db8::/112"
_, clusterCIDRv6, _ := net.ParseCIDR(cidrIPv6) _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy(cidrIPv6)
b, err := NewInMemory(clusterCIDRv6) b, err := NewInMemory(clusterCIDRv6)
if err != nil { if err != nil {
t.Fatalf("unexpected error creating CidrSet: %v", err) t.Fatalf("unexpected error creating CidrSet: %v", err)
@ -420,10 +421,10 @@ func TestClusterIPMetrics(t *testing.T) {
// try to allocate the same IP addresses // try to allocate the same IP addresses
for s := range found { for s := range found {
if !a.Has(net.ParseIP(s)) { if !a.Has(netutils.ParseIPSloppy(s)) {
t.Fatalf("missing: %s", s) t.Fatalf("missing: %s", s)
} }
if err := a.Allocate(net.ParseIP(s)); err != ErrAllocated { if err := a.Allocate(netutils.ParseIPSloppy(s)); err != ErrAllocated {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -437,10 +438,10 @@ func TestClusterIPMetrics(t *testing.T) {
// release the addresses allocated // release the addresses allocated
for s := range found { for s := range found {
if !a.Has(net.ParseIP(s)) { if !a.Has(netutils.ParseIPSloppy(s)) {
t.Fatalf("missing: %s", s) t.Fatalf("missing: %s", s)
} }
if err := a.Release(net.ParseIP(s)); err != nil { if err := a.Release(netutils.ParseIPSloppy(s)); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }

View File

@ -35,7 +35,7 @@ import (
"k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/registry/core/rangeallocation" "k8s.io/kubernetes/pkg/registry/core/rangeallocation"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
netutil "k8s.io/utils/net" netutils "k8s.io/utils/net"
) )
// Repair is a controller loop that periodically examines all service ClusterIP allocations // Repair is a controller loop that periodically examines all service ClusterIP allocations
@ -82,7 +82,7 @@ func NewRepair(interval time.Duration, serviceClient corev1client.ServicesGetter
primary := v1.IPv4Protocol primary := v1.IPv4Protocol
secondary := v1.IPv6Protocol secondary := v1.IPv6Protocol
if netutil.IsIPv6(network.IP) { if netutils.IsIPv6(network.IP) {
primary = v1.IPv6Protocol primary = v1.IPv6Protocol
} }
@ -196,7 +196,7 @@ func (c *Repair) runOnce() error {
} }
getFamilyByIP := func(ip net.IP) v1.IPFamily { getFamilyByIP := func(ip net.IP) v1.IPFamily {
if netutil.IsIPv6(ip) { if netutils.IsIPv6(ip) {
return v1.IPv6Protocol return v1.IPv6Protocol
} }
return v1.IPv4Protocol return v1.IPv4Protocol
@ -210,7 +210,7 @@ func (c *Repair) runOnce() error {
} }
for _, ip := range svc.Spec.ClusterIPs { for _, ip := range svc.Spec.ClusterIPs {
ip := net.ParseIP(ip) ip := netutils.ParseIPSloppy(ip)
if ip == nil { if ip == nil {
// cluster IP is corrupt // cluster IP is corrupt
c.recorder.Eventf(&svc, v1.EventTypeWarning, "ClusterIPNotValid", "Cluster IP %s is not a valid IP; please recreate service", ip) c.recorder.Eventf(&svc, v1.EventTypeWarning, "ClusterIPNotValid", "Cluster IP %s is not a valid IP; please recreate service", ip)
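The repair loop above classifies each ClusterIP into a family before checking it against the matching allocator; getFamilyByIP is just IsIPv6 over an already-parsed address. A minimal sketch of that classification, where familyOf is an illustrative wrapper rather than controller code (the controller handles a nil parse result separately, so the sketch assumes well-formed input):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	netutils "k8s.io/utils/net"
)

// familyOf classifies a cluster IP string into an IP family, treating
// anything that is not IPv6 as IPv4, mirroring getFamilyByIP above.
func familyOf(ip string) v1.IPFamily {
	if netutils.IsIPv6(netutils.ParseIPSloppy(ip)) {
		return v1.IPv6Protocol
	}
	return v1.IPv4Protocol
}

func main() {
	fmt.Println(familyOf("10.0.0.1"))    // IPv4
	fmt.Println(familyOf("2001:db8::1")) // IPv6
}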

View File

@ -28,6 +28,7 @@ import (
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
netutils "k8s.io/utils/net"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing" featuregatetesting "k8s.io/component-base/featuregate/testing"
@ -60,7 +61,7 @@ func TestRepair(t *testing.T) {
ipregistry := &mockRangeRegistry{ ipregistry := &mockRangeRegistry{
item: &api.RangeAllocation{Range: "192.168.1.0/24"}, item: &api.RangeAllocation{Range: "192.168.1.0/24"},
} }
_, cidr, _ := net.ParseCIDR(ipregistry.item.Range) _, cidr, _ := netutils.ParseCIDRSloppy(ipregistry.item.Range)
r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), cidr, ipregistry, nil, nil) r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), cidr, ipregistry, nil, nil)
if err := r.RunOnce(); err != nil { if err := r.RunOnce(); err != nil {
@ -81,12 +82,12 @@ func TestRepair(t *testing.T) {
} }
func TestRepairLeak(t *testing.T) { func TestRepairLeak(t *testing.T) {
_, cidr, _ := net.ParseCIDR("192.168.1.0/24") _, cidr, _ := netutils.ParseCIDRSloppy("192.168.1.0/24")
previous, err := ipallocator.NewInMemory(cidr) previous, err := ipallocator.NewInMemory(cidr)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
previous.Allocate(net.ParseIP("192.168.1.10")) previous.Allocate(netutils.ParseIPSloppy("192.168.1.10"))
var dst api.RangeAllocation var dst api.RangeAllocation
err = previous.Snapshot(&dst) err = previous.Snapshot(&dst)
@ -115,7 +116,7 @@ func TestRepairLeak(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if !after.Has(net.ParseIP("192.168.1.10")) { if !after.Has(netutils.ParseIPSloppy("192.168.1.10")) {
t.Errorf("expected ipallocator to still have leaked IP") t.Errorf("expected ipallocator to still have leaked IP")
} }
} }
@ -127,13 +128,13 @@ func TestRepairLeak(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if after.Has(net.ParseIP("192.168.1.10")) { if after.Has(netutils.ParseIPSloppy("192.168.1.10")) {
t.Errorf("expected ipallocator to not have leaked IP") t.Errorf("expected ipallocator to not have leaked IP")
} }
} }
func TestRepairWithExisting(t *testing.T) { func TestRepairWithExisting(t *testing.T) {
_, cidr, _ := net.ParseCIDR("192.168.1.0/24") _, cidr, _ := netutils.ParseCIDRSloppy("192.168.1.0/24")
previous, err := ipallocator.NewInMemory(cidr) previous, err := ipallocator.NewInMemory(cidr)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -211,7 +212,7 @@ func TestRepairWithExisting(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if !after.Has(net.ParseIP("192.168.1.1")) || !after.Has(net.ParseIP("192.168.1.100")) { if !after.Has(netutils.ParseIPSloppy("192.168.1.1")) || !after.Has(netutils.ParseIPSloppy("192.168.1.100")) {
t.Errorf("unexpected ipallocator state: %#v", after) t.Errorf("unexpected ipallocator state: %#v", after)
} }
if free := after.Free(); free != 252 { if free := after.Free(); free != 252 {
@ -220,7 +221,7 @@ func TestRepairWithExisting(t *testing.T) {
} }
func makeRangeRegistry(t *testing.T, cidrRange string) *mockRangeRegistry { func makeRangeRegistry(t *testing.T, cidrRange string) *mockRangeRegistry {
_, cidr, _ := net.ParseCIDR(cidrRange) _, cidr, _ := netutils.ParseCIDRSloppy(cidrRange)
previous, err := ipallocator.NewInMemory(cidr) previous, err := ipallocator.NewInMemory(cidr)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -247,7 +248,7 @@ func makeFakeClientSet() *fake.Clientset {
return fake.NewSimpleClientset() return fake.NewSimpleClientset()
} }
func makeIPNet(cidr string) *net.IPNet { func makeIPNet(cidr string) *net.IPNet {
_, net, _ := net.ParseCIDR(cidr) _, net, _ := netutils.ParseCIDRSloppy(cidr)
return net return net
} }
func TestShouldWorkOnSecondary(t *testing.T) { func TestShouldWorkOnSecondary(t *testing.T) {
@ -337,8 +338,8 @@ func TestRepairDualStack(t *testing.T) {
item: &api.RangeAllocation{Range: "2000::/108"}, item: &api.RangeAllocation{Range: "2000::/108"},
} }
_, cidr, _ := net.ParseCIDR(ipregistry.item.Range) _, cidr, _ := netutils.ParseCIDRSloppy(ipregistry.item.Range)
_, secondaryCIDR, _ := net.ParseCIDR(secondaryIPRegistry.item.Range) _, secondaryCIDR, _ := netutils.ParseCIDRSloppy(secondaryIPRegistry.item.Range)
r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), cidr, ipregistry, secondaryCIDR, secondaryIPRegistry) r := NewRepair(0, fakeClient.CoreV1(), fakeClient.CoreV1(), cidr, ipregistry, secondaryCIDR, secondaryIPRegistry)
if err := r.RunOnce(); err != nil { if err := r.RunOnce(); err != nil {
@ -369,20 +370,20 @@ func TestRepairDualStack(t *testing.T) {
func TestRepairLeakDualStack(t *testing.T) { func TestRepairLeakDualStack(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.IPv6DualStack, true)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.IPv6DualStack, true)()
_, cidr, _ := net.ParseCIDR("192.168.1.0/24") _, cidr, _ := netutils.ParseCIDRSloppy("192.168.1.0/24")
previous, err := ipallocator.NewInMemory(cidr) previous, err := ipallocator.NewInMemory(cidr)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
previous.Allocate(net.ParseIP("192.168.1.10")) previous.Allocate(netutils.ParseIPSloppy("192.168.1.10"))
_, secondaryCIDR, _ := net.ParseCIDR("2000::/108") _, secondaryCIDR, _ := netutils.ParseCIDRSloppy("2000::/108")
secondaryPrevious, err := ipallocator.NewInMemory(secondaryCIDR) secondaryPrevious, err := ipallocator.NewInMemory(secondaryCIDR)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
secondaryPrevious.Allocate(net.ParseIP("2000::1")) secondaryPrevious.Allocate(netutils.ParseIPSloppy("2000::1"))
var dst api.RangeAllocation var dst api.RangeAllocation
err = previous.Snapshot(&dst) err = previous.Snapshot(&dst)
@ -427,14 +428,14 @@ func TestRepairLeakDualStack(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if !after.Has(net.ParseIP("192.168.1.10")) { if !after.Has(netutils.ParseIPSloppy("192.168.1.10")) {
t.Errorf("expected ipallocator to still have leaked IP") t.Errorf("expected ipallocator to still have leaked IP")
} }
secondaryAfter, err := ipallocator.NewFromSnapshot(secondaryIPRegistry.updated) secondaryAfter, err := ipallocator.NewFromSnapshot(secondaryIPRegistry.updated)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if !secondaryAfter.Has(net.ParseIP("2000::1")) { if !secondaryAfter.Has(netutils.ParseIPSloppy("2000::1")) {
t.Errorf("expected ipallocator to still have leaked IP") t.Errorf("expected ipallocator to still have leaked IP")
} }
} }
@ -447,14 +448,14 @@ func TestRepairLeakDualStack(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if after.Has(net.ParseIP("192.168.1.10")) { if after.Has(netutils.ParseIPSloppy("192.168.1.10")) {
t.Errorf("expected ipallocator to not have leaked IP") t.Errorf("expected ipallocator to not have leaked IP")
} }
secondaryAfter, err := ipallocator.NewFromSnapshot(secondaryIPRegistry.updated) secondaryAfter, err := ipallocator.NewFromSnapshot(secondaryIPRegistry.updated)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if secondaryAfter.Has(net.ParseIP("2000::1")) { if secondaryAfter.Has(netutils.ParseIPSloppy("2000::1")) {
t.Errorf("expected ipallocator to not have leaked IP") t.Errorf("expected ipallocator to not have leaked IP")
} }
} }
@ -466,13 +467,13 @@ func TestRepairWithExistingDualStack(t *testing.T) {
// this will work every where except alloc & validation // this will work every where except alloc & validation
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.IPv6DualStack, true)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.IPv6DualStack, true)()
_, cidr, _ := net.ParseCIDR("192.168.1.0/24") _, cidr, _ := netutils.ParseCIDRSloppy("192.168.1.0/24")
previous, err := ipallocator.NewInMemory(cidr) previous, err := ipallocator.NewInMemory(cidr)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
_, secondaryCIDR, _ := net.ParseCIDR("2000::/108") _, secondaryCIDR, _ := netutils.ParseCIDRSloppy("2000::/108")
secondaryPrevious, err := ipallocator.NewInMemory(secondaryCIDR) secondaryPrevious, err := ipallocator.NewInMemory(secondaryCIDR)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -613,7 +614,7 @@ func TestRepairWithExistingDualStack(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if !after.Has(net.ParseIP("192.168.1.1")) || !after.Has(net.ParseIP("192.168.1.100")) { if !after.Has(netutils.ParseIPSloppy("192.168.1.1")) || !after.Has(netutils.ParseIPSloppy("192.168.1.100")) {
t.Errorf("unexpected ipallocator state: %#v", after) t.Errorf("unexpected ipallocator state: %#v", after)
} }
if free := after.Free(); free != 251 { if free := after.Free(); free != 251 {
@ -624,7 +625,7 @@ func TestRepairWithExistingDualStack(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if !secondaryAfter.Has(net.ParseIP("2000::1")) || !secondaryAfter.Has(net.ParseIP("2000::2")) { if !secondaryAfter.Has(netutils.ParseIPSloppy("2000::1")) || !secondaryAfter.Has(netutils.ParseIPSloppy("2000::2")) {
t.Errorf("unexpected ipallocator state: %#v", secondaryAfter) t.Errorf("unexpected ipallocator state: %#v", secondaryAfter)
} }
if free := secondaryAfter.Free(); free != 65533 { if free := secondaryAfter.Free(); free != 65533 {

View File

@ -18,7 +18,6 @@ package storage
import ( import (
"context" "context"
"net"
"strings" "strings"
"testing" "testing"
@ -32,11 +31,12 @@ import (
allocatorstore "k8s.io/kubernetes/pkg/registry/core/service/allocator/storage" allocatorstore "k8s.io/kubernetes/pkg/registry/core/service/allocator/storage"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
"k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/registry/registrytest"
netutils "k8s.io/utils/net"
) )
func newStorage(t *testing.T) (*etcd3testing.EtcdTestServer, ipallocator.Interface, allocator.Interface, storage.Interface, factory.DestroyFunc) { func newStorage(t *testing.T) (*etcd3testing.EtcdTestServer, ipallocator.Interface, allocator.Interface, storage.Interface, factory.DestroyFunc) {
etcdStorage, server := registrytest.NewEtcdStorage(t, "") etcdStorage, server := registrytest.NewEtcdStorage(t, "")
_, cidr, err := net.ParseCIDR("192.168.1.0/24") _, cidr, err := netutils.ParseCIDRSloppy("192.168.1.0/24")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -66,7 +66,7 @@ func newStorage(t *testing.T) (*etcd3testing.EtcdTestServer, ipallocator.Interfa
} }
func validNewRangeAllocation() *api.RangeAllocation { func validNewRangeAllocation() *api.RangeAllocation {
_, cidr, _ := net.ParseCIDR("192.168.1.0/24") _, cidr, _ := netutils.ParseCIDRSloppy("192.168.1.0/24")
return &api.RangeAllocation{ return &api.RangeAllocation{
Range: cidr.String(), Range: cidr.String(),
} }
@ -79,7 +79,7 @@ func key() string {
func TestEmpty(t *testing.T) { func TestEmpty(t *testing.T) {
_, storage, _, _, destroyFunc := newStorage(t) _, storage, _, _, destroyFunc := newStorage(t)
defer destroyFunc() defer destroyFunc()
if err := storage.Allocate(net.ParseIP("192.168.1.2")); !strings.Contains(err.Error(), "cannot allocate resources of type serviceipallocations at this time") { if err := storage.Allocate(netutils.ParseIPSloppy("192.168.1.2")); !strings.Contains(err.Error(), "cannot allocate resources of type serviceipallocations at this time") {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -87,7 +87,7 @@ func TestEmpty(t *testing.T) {
func TestErrors(t *testing.T) { func TestErrors(t *testing.T) {
_, storage, _, _, destroyFunc := newStorage(t) _, storage, _, _, destroyFunc := newStorage(t)
defer destroyFunc() defer destroyFunc()
err := storage.Allocate(net.ParseIP("192.168.0.0")) err := storage.Allocate(netutils.ParseIPSloppy("192.168.0.0"))
if _, ok := err.(*ipallocator.ErrNotInRange); !ok { if _, ok := err.(*ipallocator.ErrNotInRange); !ok {
t.Fatal(err) t.Fatal(err)
} }
@ -100,7 +100,7 @@ func TestStore(t *testing.T) {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
if err := storage.Allocate(net.ParseIP("192.168.1.2")); err != nil { if err := storage.Allocate(netutils.ParseIPSloppy("192.168.1.2")); err != nil {
t.Fatal(err) t.Fatal(err)
} }
ok, err := backing.Allocate(1) ok, err := backing.Allocate(1)
@ -110,7 +110,7 @@ func TestStore(t *testing.T) {
if ok { if ok {
t.Fatal("Expected allocation to fail") t.Fatal("Expected allocation to fail")
} }
if err := storage.Allocate(net.ParseIP("192.168.1.2")); err != ipallocator.ErrAllocated { if err := storage.Allocate(netutils.ParseIPSloppy("192.168.1.2")); err != ipallocator.ErrAllocated {
t.Fatal(err) t.Fatal(err)
} }
} }

View File

@ -45,7 +45,7 @@ import (
registry "k8s.io/kubernetes/pkg/registry/core/service" registry "k8s.io/kubernetes/pkg/registry/core/service"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
"k8s.io/kubernetes/pkg/registry/core/service/portallocator" "k8s.io/kubernetes/pkg/registry/core/service/portallocator"
netutil "k8s.io/utils/net" netutils "k8s.io/utils/net"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath" "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
) )
@ -108,7 +108,7 @@ func NewREST(
// detect this cluster default Service IPFamily (ipfamily of --service-cluster-ip-range[0]) // detect this cluster default Service IPFamily (ipfamily of --service-cluster-ip-range[0])
serviceIPFamily := api.IPv4Protocol serviceIPFamily := api.IPv4Protocol
cidr := serviceIPs.CIDR() cidr := serviceIPs.CIDR()
if netutil.IsIPv6CIDR(&cidr) { if netutils.IsIPv6CIDR(&cidr) {
serviceIPFamily = api.IPv6Protocol serviceIPFamily = api.IPv6Protocol
} }
@ -612,7 +612,7 @@ func (rs *REST) allocClusterIPs(service *api.Service, toAlloc map[api.IPFamily]s
} }
allocated[family] = allocatedIP.String() allocated[family] = allocatedIP.String()
} else { } else {
parsedIP := net.ParseIP(ip) parsedIP := netutils.ParseIPSloppy(ip)
if err := allocator.Allocate(parsedIP); err != nil { if err := allocator.Allocate(parsedIP); err != nil {
el := field.ErrorList{field.Invalid(field.NewPath("spec", "clusterIPs"), service.Spec.ClusterIPs, fmt.Sprintf("failed to allocate IP %v: %v", ip, err))} el := field.ErrorList{field.Invalid(field.NewPath("spec", "clusterIPs"), service.Spec.ClusterIPs, fmt.Sprintf("failed to allocate IP %v: %v", ip, err))}
return allocated, errors.NewInvalid(api.Kind("Service"), service.Name, el) return allocated, errors.NewInvalid(api.Kind("Service"), service.Name, el)
@ -638,7 +638,7 @@ func (rs *REST) releaseClusterIPs(toRelease map[api.IPFamily]string) (map[api.IP
continue continue
} }
parsedIP := net.ParseIP(ip) parsedIP := netutils.ParseIPSloppy(ip)
if err := allocator.Release(parsedIP); err != nil { if err := allocator.Release(parsedIP); err != nil {
return released, err return released, err
} }
@ -825,7 +825,7 @@ func (rs *REST) releaseServiceClusterIP(service *api.Service) (released map[api.
// we need to do that to handle cases where allocator is no longer configured on // we need to do that to handle cases where allocator is no longer configured on
// cluster // cluster
if netutil.IsIPv6String(service.Spec.ClusterIP) { if netutils.IsIPv6String(service.Spec.ClusterIP) {
toRelease[api.IPv6Protocol] = service.Spec.ClusterIP toRelease[api.IPv6Protocol] = service.Spec.ClusterIP
} else { } else {
toRelease[api.IPv4Protocol] = service.Spec.ClusterIP toRelease[api.IPv4Protocol] = service.Spec.ClusterIP
@ -852,7 +852,7 @@ func (rs *REST) releaseServiceClusterIPs(service *api.Service) (released map[api
toRelease := make(map[api.IPFamily]string) toRelease := make(map[api.IPFamily]string)
for _, ip := range service.Spec.ClusterIPs { for _, ip := range service.Spec.ClusterIPs {
if netutil.IsIPv6String(ip) { if netutils.IsIPv6String(ip) {
toRelease[api.IPv6Protocol] = ip toRelease[api.IPv6Protocol] = ip
} else { } else {
toRelease[api.IPv4Protocol] = ip toRelease[api.IPv4Protocol] = ip
@ -974,7 +974,7 @@ func (rs *REST) tryDefaultValidateServiceClusterIPFields(oldService, service *ap
// we have previously validated for ip correctness and if family exist it will match ip family // we have previously validated for ip correctness and if family exist it will match ip family
// so the following is safe to do // so the following is safe to do
isIPv6 := netutil.IsIPv6String(ip) isIPv6 := netutils.IsIPv6String(ip)
// Family is not specified yet. // Family is not specified yet.
if i >= len(service.Spec.IPFamilies) { if i >= len(service.Spec.IPFamilies) {

View File

@ -49,7 +49,7 @@ import (
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
"k8s.io/kubernetes/pkg/registry/core/service/portallocator" "k8s.io/kubernetes/pkg/registry/core/service/portallocator"
"k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/registry/registrytest"
netutil "k8s.io/utils/net" netutils "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer" utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/structured-merge-diff/v4/fieldpath" "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
) )
@ -245,14 +245,14 @@ func NewTestRESTWithPods(t *testing.T, endpoints []*api.Endpoints, pods []api.Po
} }
func makeIPNet(t *testing.T) *net.IPNet { func makeIPNet(t *testing.T) *net.IPNet {
_, net, err := net.ParseCIDR("1.2.3.0/24") _, net, err := netutils.ParseCIDRSloppy("1.2.3.0/24")
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
return net return net
} }
func makeIPNet6(t *testing.T) *net.IPNet { func makeIPNet6(t *testing.T) *net.IPNet {
_, net, err := net.ParseCIDR("2000::/108") _, net, err := netutils.ParseCIDRSloppy("2000::/108")
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
@ -261,7 +261,7 @@ func makeIPNet6(t *testing.T) *net.IPNet {
func ipIsAllocated(t *testing.T, alloc ipallocator.Interface, ipstr string) bool { func ipIsAllocated(t *testing.T, alloc ipallocator.Interface, ipstr string) bool {
t.Helper() t.Helper()
ip := net.ParseIP(ipstr) ip := netutils.ParseIPSloppy(ipstr)
if ip == nil { if ip == nil {
t.Errorf("error parsing IP %q", ipstr) t.Errorf("error parsing IP %q", ipstr)
return false return false
@ -334,7 +334,7 @@ func TestServiceRegistryCreate(t *testing.T) {
allocator := storage.serviceIPAllocatorsByFamily[family] allocator := storage.serviceIPAllocatorsByFamily[family]
c := allocator.CIDR() c := allocator.CIDR()
cidr := &c cidr := &c
if !cidr.Contains(net.ParseIP(createdService.Spec.ClusterIPs[i])) { if !cidr.Contains(netutils.ParseIPSloppy(createdService.Spec.ClusterIPs[i])) {
t.Errorf("Unexpected ClusterIP: %s", createdService.Spec.ClusterIPs[i]) t.Errorf("Unexpected ClusterIP: %s", createdService.Spec.ClusterIPs[i])
} }
} }
@ -1309,7 +1309,7 @@ func TestServiceRegistryIPAllocation(t *testing.T) {
if createdSvc1.Name != "foo" { if createdSvc1.Name != "foo" {
t.Errorf("Expected foo, but got %v", createdSvc1.Name) t.Errorf("Expected foo, but got %v", createdSvc1.Name)
} }
if !makeIPNet(t).Contains(net.ParseIP(createdSvc1.Spec.ClusterIPs[0])) { if !makeIPNet(t).Contains(netutils.ParseIPSloppy(createdSvc1.Spec.ClusterIPs[0])) {
t.Errorf("Unexpected ClusterIP: %s", createdSvc1.Spec.ClusterIPs[0]) t.Errorf("Unexpected ClusterIP: %s", createdSvc1.Spec.ClusterIPs[0])
} }
@ -1323,7 +1323,7 @@ func TestServiceRegistryIPAllocation(t *testing.T) {
if createdSvc2.Name != "bar" { if createdSvc2.Name != "bar" {
t.Errorf("Expected bar, but got %v", createdSvc2.Name) t.Errorf("Expected bar, but got %v", createdSvc2.Name)
} }
if !makeIPNet(t).Contains(net.ParseIP(createdSvc2.Spec.ClusterIPs[0])) { if !makeIPNet(t).Contains(netutils.ParseIPSloppy(createdSvc2.Spec.ClusterIPs[0])) {
t.Errorf("Unexpected ClusterIP: %s", createdSvc2.Spec.ClusterIPs[0]) t.Errorf("Unexpected ClusterIP: %s", createdSvc2.Spec.ClusterIPs[0])
} }
@ -1362,7 +1362,7 @@ func TestServiceRegistryIPReallocation(t *testing.T) {
if createdSvc1.Name != "foo" { if createdSvc1.Name != "foo" {
t.Errorf("Expected foo, but got %v", createdSvc1.Name) t.Errorf("Expected foo, but got %v", createdSvc1.Name)
} }
if !makeIPNet(t).Contains(net.ParseIP(createdSvc1.Spec.ClusterIPs[0])) { if !makeIPNet(t).Contains(netutils.ParseIPSloppy(createdSvc1.Spec.ClusterIPs[0])) {
t.Errorf("Unexpected ClusterIP: %s", createdSvc1.Spec.ClusterIPs[0]) t.Errorf("Unexpected ClusterIP: %s", createdSvc1.Spec.ClusterIPs[0])
} }
@ -1381,7 +1381,7 @@ func TestServiceRegistryIPReallocation(t *testing.T) {
if createdSvc2.Name != "bar" { if createdSvc2.Name != "bar" {
t.Errorf("Expected bar, but got %v", createdSvc2.Name) t.Errorf("Expected bar, but got %v", createdSvc2.Name)
} }
if !makeIPNet(t).Contains(net.ParseIP(createdSvc2.Spec.ClusterIPs[0])) { if !makeIPNet(t).Contains(netutils.ParseIPSloppy(createdSvc2.Spec.ClusterIPs[0])) {
t.Errorf("Unexpected ClusterIP: %s", createdSvc2.Spec.ClusterIPs[0]) t.Errorf("Unexpected ClusterIP: %s", createdSvc2.Spec.ClusterIPs[0])
} }
} }
@ -1400,7 +1400,7 @@ func TestServiceRegistryIPUpdate(t *testing.T) {
if createdService.Spec.Ports[0].Port != svc.Spec.Ports[0].Port { if createdService.Spec.Ports[0].Port != svc.Spec.Ports[0].Port {
t.Errorf("Expected port %d, but got %v", svc.Spec.Ports[0].Port, createdService.Spec.Ports[0].Port) t.Errorf("Expected port %d, but got %v", svc.Spec.Ports[0].Port, createdService.Spec.Ports[0].Port)
} }
if !makeIPNet(t).Contains(net.ParseIP(createdService.Spec.ClusterIPs[0])) { if !makeIPNet(t).Contains(netutils.ParseIPSloppy(createdService.Spec.ClusterIPs[0])) {
t.Errorf("Unexpected ClusterIP: %s", createdService.Spec.ClusterIPs[0]) t.Errorf("Unexpected ClusterIP: %s", createdService.Spec.ClusterIPs[0])
} }
@ -1451,7 +1451,7 @@ func TestServiceRegistryIPLoadBalancer(t *testing.T) {
if createdService.Spec.Ports[0].Port != svc.Spec.Ports[0].Port { if createdService.Spec.Ports[0].Port != svc.Spec.Ports[0].Port {
t.Errorf("Expected port %d, but got %v", svc.Spec.Ports[0].Port, createdService.Spec.Ports[0].Port) t.Errorf("Expected port %d, but got %v", svc.Spec.Ports[0].Port, createdService.Spec.Ports[0].Port)
} }
if !makeIPNet(t).Contains(net.ParseIP(createdService.Spec.ClusterIPs[0])) { if !makeIPNet(t).Contains(netutils.ParseIPSloppy(createdService.Spec.ClusterIPs[0])) {
t.Errorf("Unexpected ClusterIP: %s", createdService.Spec.ClusterIPs[0]) t.Errorf("Unexpected ClusterIP: %s", createdService.Spec.ClusterIPs[0])
} }
@ -1797,7 +1797,7 @@ func TestInitClusterIP(t *testing.T) {
if !ok { if !ok {
t.Fatalf("test is incorrect, allocator does not exist on rest") t.Fatalf("test is incorrect, allocator does not exist on rest")
} }
if err := allocator.Allocate(net.ParseIP(ip)); err != nil { if err := allocator.Allocate(netutils.ParseIPSloppy(ip)); err != nil {
t.Fatalf("test is incorrect, allocator failed to pre allocate IP with error:%v", err) t.Fatalf("test is incorrect, allocator failed to pre allocate IP with error:%v", err)
} }
} }
@ -1821,7 +1821,7 @@ func TestInitClusterIP(t *testing.T) {
if newSvc.Spec.ClusterIPs[0] != api.ClusterIPNone { if newSvc.Spec.ClusterIPs[0] != api.ClusterIPNone {
for _, ip := range newSvc.Spec.ClusterIPs { for _, ip := range newSvc.Spec.ClusterIPs {
family := api.IPv4Protocol family := api.IPv4Protocol
if netutil.IsIPv6String(ip) { if netutils.IsIPv6String(ip) {
family = api.IPv6Protocol family = api.IPv6Protocol
} }
allocator := storage.serviceIPAllocatorsByFamily[family] allocator := storage.serviceIPAllocatorsByFamily[family]
@ -2225,7 +2225,7 @@ func TestServiceUpgrade(t *testing.T) {
// allocated IP // allocated IP
for family, ip := range testCase.allocateIPsBeforeUpdate { for family, ip := range testCase.allocateIPsBeforeUpdate {
alloc := storage.serviceIPAllocatorsByFamily[family] alloc := storage.serviceIPAllocatorsByFamily[family]
if err := alloc.Allocate(net.ParseIP(ip)); err != nil { if err := alloc.Allocate(netutils.ParseIPSloppy(ip)); err != nil {
t.Fatalf("test is incorrect, unable to preallocate ip:%v", ip) t.Fatalf("test is incorrect, unable to preallocate ip:%v", ip)
} }
} }
@ -3653,7 +3653,7 @@ func isValidClusterIPFields(t *testing.T, storage *REST, pre *api.Service, post
} }
// ips must match families // ips must match families
for i, ip := range post.Spec.ClusterIPs { for i, ip := range post.Spec.ClusterIPs {
isIPv6 := netutil.IsIPv6String(ip) isIPv6 := netutils.IsIPv6String(ip)
if isIPv6 && post.Spec.IPFamilies[i] != api.IPv6Protocol { if isIPv6 && post.Spec.IPFamilies[i] != api.IPv6Protocol {
t.Fatalf("ips does not match assigned families %+v %+v", post.Spec.ClusterIPs, post.Spec.IPFamilies) t.Fatalf("ips does not match assigned families %+v %+v", post.Spec.ClusterIPs, post.Spec.IPFamilies)
} }

View File

@ -17,7 +17,6 @@ limitations under the License.
package storage package storage
import ( import (
"net"
"reflect" "reflect"
"testing" "testing"
@ -31,6 +30,7 @@ import (
etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing" etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing"
api "k8s.io/kubernetes/pkg/apis/core" api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/registry/registrytest"
netutils "k8s.io/utils/net"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing" featuregatetesting "k8s.io/component-base/featuregate/testing"
@ -421,7 +421,7 @@ func TestServiceDefaultOnRead(t *testing.T) {
ResourcePrefix: "services", ResourcePrefix: "services",
} }
_, cidr, err := net.ParseCIDR("10.0.0.0/24") _, cidr, err := netutils.ParseCIDRSloppy("10.0.0.0/24")
if err != nil { if err != nil {
t.Fatalf("failed to parse CIDR") t.Fatalf("failed to parse CIDR")
} }
@ -479,7 +479,7 @@ func TestServiceDefaulting(t *testing.T) {
ResourcePrefix: "services", ResourcePrefix: "services",
} }
_, cidr, err := net.ParseCIDR(primaryCIDR) _, cidr, err := netutils.ParseCIDRSloppy(primaryCIDR)
if err != nil { if err != nil {
t.Fatalf("failed to parse CIDR %s", primaryCIDR) t.Fatalf("failed to parse CIDR %s", primaryCIDR)
} }

View File

@ -17,7 +17,6 @@ limitations under the License.
package service package service
import ( import (
"net"
"reflect" "reflect"
"testing" "testing"
@ -34,11 +33,12 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing" featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
netutils "k8s.io/utils/net"
utilpointer "k8s.io/utils/pointer" utilpointer "k8s.io/utils/pointer"
) )
func newStrategy(cidr string, hasSecondary bool) (testStrategy Strategy, testStatusStrategy Strategy) { func newStrategy(cidr string, hasSecondary bool) (testStrategy Strategy, testStatusStrategy Strategy) {
_, testCIDR, err := net.ParseCIDR(cidr) _, testCIDR, err := netutils.ParseCIDRSloppy(cidr)
if err != nil { if err != nil {
panic("invalid CIDR") panic("invalid CIDR")
} }

View File

@ -28,6 +28,7 @@ import (
"k8s.io/kube-scheduler/config/v1beta1" "k8s.io/kube-scheduler/config/v1beta1"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/apis/config"
netutils "k8s.io/utils/net"
"k8s.io/utils/pointer" "k8s.io/utils/pointer"
) )
@ -143,7 +144,7 @@ func SetDefaults_KubeSchedulerConfiguration(obj *v1beta1.KubeSchedulerConfigurat
} else { } else {
// Something went wrong splitting the host/port, could just be a missing port so check if the // Something went wrong splitting the host/port, could just be a missing port so check if the
// existing value is a valid IP address. If so, use that with the default scheduler port // existing value is a valid IP address. If so, use that with the default scheduler port
if host := net.ParseIP(*obj.HealthzBindAddress); host != nil { if host := netutils.ParseIPSloppy(*obj.HealthzBindAddress); host != nil {
hostPort := net.JoinHostPort(*obj.HealthzBindAddress, strconv.Itoa(config.DefaultInsecureSchedulerPort)) hostPort := net.JoinHostPort(*obj.HealthzBindAddress, strconv.Itoa(config.DefaultInsecureSchedulerPort))
obj.HealthzBindAddress = &hostPort obj.HealthzBindAddress = &hostPort
} else { } else {
@ -165,7 +166,7 @@ func SetDefaults_KubeSchedulerConfiguration(obj *v1beta1.KubeSchedulerConfigurat
} else { } else {
// Something went wrong splitting the host/port, could just be a missing port so check if the // Something went wrong splitting the host/port, could just be a missing port so check if the
// existing value is a valid IP address. If so, use that with the default scheduler port // existing value is a valid IP address. If so, use that with the default scheduler port
if host := net.ParseIP(*obj.MetricsBindAddress); host != nil { if host := netutils.ParseIPSloppy(*obj.MetricsBindAddress); host != nil {
hostPort := net.JoinHostPort(*obj.MetricsBindAddress, strconv.Itoa(config.DefaultInsecureSchedulerPort)) hostPort := net.JoinHostPort(*obj.MetricsBindAddress, strconv.Itoa(config.DefaultInsecureSchedulerPort))
obj.MetricsBindAddress = &hostPort obj.MetricsBindAddress = &hostPort
} else { } else {
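These last hunks default the scheduler's HealthzBindAddress and MetricsBindAddress: when the configured value is not a host:port pair but does parse as a bare IP, the default insecure scheduler port is appended. A hedged sketch of that fallback; defaultBindAddress and the port value are illustrative stand-ins, not the actual defaulting code.

package main

import (
	"fmt"
	"net"
	"strconv"

	netutils "k8s.io/utils/net"
)

// defaultBindAddress mirrors the fallback above: keep a value that already
// carries a port, append a default port to a bare IP, and reject anything else.
func defaultBindAddress(addr string, defaultPort int) (string, bool) {
	if _, _, err := net.SplitHostPort(addr); err == nil {
		return addr, true // already host:port
	}
	if host := netutils.ParseIPSloppy(addr); host != nil {
		return net.JoinHostPort(addr, strconv.Itoa(defaultPort)), true
	}
	return "", false // neither host:port nor a bare IP
}

func main() {
	fmt.Println(defaultBindAddress("0.0.0.0", 10251))         // 0.0.0.0:10251 true
	fmt.Println(defaultBindAddress("127.0.0.1:10259", 10251)) // 127.0.0.1:10259 true
}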

Some files were not shown because too many files have changed in this diff